Dataset schema (⌀ marks columns that may be null):

| column | dtype | min | max |
|---|---|---|---|
| hexsha | string | length 40 | length 40 |
| size | int64 | 4 | 1.02M |
| ext | string (8 classes) | | |
| lang | string (1 class) | | |
| max_stars_repo_path | string | length 4 | length 209 |
| max_stars_repo_name | string | length 5 | length 121 |
| max_stars_repo_head_hexsha | string | length 40 | length 40 |
| max_stars_repo_licenses | list | length 1 | length 10 |
| max_stars_count ⌀ | int64 | 1 | 191k |
| max_stars_repo_stars_event_min_datetime ⌀ | string | length 24 | length 24 |
| max_stars_repo_stars_event_max_datetime ⌀ | string | length 24 | length 24 |
| max_issues_repo_path | string | length 4 | length 209 |
| max_issues_repo_name | string | length 5 | length 121 |
| max_issues_repo_head_hexsha | string | length 40 | length 40 |
| max_issues_repo_licenses | list | length 1 | length 10 |
| max_issues_count ⌀ | int64 | 1 | 67k |
| max_issues_repo_issues_event_min_datetime ⌀ | string | length 24 | length 24 |
| max_issues_repo_issues_event_max_datetime ⌀ | string | length 24 | length 24 |
| max_forks_repo_path | string | length 4 | length 209 |
| max_forks_repo_name | string | length 5 | length 121 |
| max_forks_repo_head_hexsha | string | length 40 | length 40 |
| max_forks_repo_licenses | list | length 1 | length 10 |
| max_forks_count ⌀ | int64 | 1 | 105k |
| max_forks_repo_forks_event_min_datetime ⌀ | string | length 24 | length 24 |
| max_forks_repo_forks_event_max_datetime ⌀ | string | length 24 | length 24 |
| content | string | length 4 | length 1.02M |
| avg_line_length | float64 | 1.07 | 66.1k |
| max_line_length | int64 | 4 | 266k |
| alphanum_fraction | float64 | 0.01 | 1 |
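The schema above is a Hugging Face dataset-viewer column summary (The Stack–style export). A hypothetical loading sketch with the `datasets` library; the data file name is a placeholder, not taken from this dump:

```python
# Hypothetical loading sketch: replace data_files with the real export path.
from datasets import load_dataset

ds = load_dataset("parquet", data_files="train-00000-of-*.parquet", split="train")
row = ds[0]
print(row["max_stars_repo_name"], row["max_stars_count"])
print(row["content"][:200])  # first 200 chars of the stored source file
```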
Row 1
hexsha: d3e8418d06138c6c7d035a29acbb7346c90cffce | size: 414 | ext: py | lang: Python
max_stars: aliexpress/api/rest/CainiaoGlobalSolutionInquiry.py @ bayborodin/aliexpress-sdk (head 89935adf46412d8d054fa80a19153971279c4106), licenses ["MIT"], count 3, events 2021-03-10T16:46:43.000Z → 2022-03-29T15:28:50.000Z
max_issues: aliexpress/api/rest/CainiaoGlobalSolutionInquiry.py @ bayborodin/aliexpress-sdk (head 89935adf46412d8d054fa80a19153971279c4106), licenses ["MIT"], count null, events null → null
max_forks: aliexpress/api/rest/CainiaoGlobalSolutionInquiry.py @ bayborodin/aliexpress-sdk (head 89935adf46412d8d054fa80a19153971279c4106), licenses ["MIT"], count 2, events 2021-10-30T17:09:34.000Z → 2021-11-25T11:50:52.000Z
content:
from aliexpress.api.base import RestApi
class CainiaoGlobalSolutionInquiryRequest(RestApi):
def __init__(self, domain="gw.api.taobao.com", port=80):
RestApi.__init__(self, domain, port)
self.locale = None
self.package_params = None
self.seller_info_param = None
self.trade_order_param = None
def getapiname(self):
return "cainiao.global.solution.inquiry"
avg_line_length: 29.571429 | max_line_length: 60 | alphanum_fraction: 0.695652
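The file above follows the taobao TOP SDK request pattern: one class per API, plain attributes for request parameters, and `getapiname()` naming the endpoint. A minimal usage sketch; the commented `getResponse()` call and the parameter payload are assumptions about the usual `RestApi` base class, not confirmed by this dump:

```python
# Hypothetical usage sketch; the payload values are made up and getResponse()
# is an assumption about the RestApi base class.
from aliexpress.api.rest.CainiaoGlobalSolutionInquiry import (
    CainiaoGlobalSolutionInquiryRequest,
)

req = CainiaoGlobalSolutionInquiryRequest()
req.locale = "en_US"
req.package_params = '{"length": 10, "width": 10, "height": 5, "weight": 500}'
print(req.getapiname())  # cainiao.global.solution.inquiry
# resp = req.getResponse()  # would perform the HTTP call with app credentials set
```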
Row 2
hexsha: 336ad1fb2a90f41a22cffabd9726a6f360131682 | size: 6,012 | ext: py | lang: Python
max_stars: admin/admin_daemon.py @ hku-systems/hams (head 3a5720657252c650c9a6c5d9b674f7ea6153e557), licenses ["Apache-2.0"], count 6, events 2020-08-19T11:46:23.000Z → 2021-12-24T07:34:15.000Z
max_issues: admin/admin_daemon.py @ hku-systems/hams (head 3a5720657252c650c9a6c5d9b674f7ea6153e557), licenses ["Apache-2.0"], count 1, events 2021-03-25T23:40:15.000Z → 2021-03-25T23:40:15.000Z
max_forks: admin/admin_daemon.py @ hku-systems/hams (head 3a5720657252c650c9a6c5d9b674f7ea6153e557), licenses ["Apache-2.0"], count 2, events 2020-10-31T16:48:39.000Z → 2021-03-07T09:14:25.000Z
content:
import base64
import os
import time
from concurrent import futures
from threading import Thread, Lock
from google.protobuf.timestamp_pb2 import Timestamp
import grpc
from hams_admin import (HamsConnection, DockerContainerManager,
graph_parser, redis_client)
from hams_admin.deployers import python as python_deployer
#from hams_admin.grpcclient import grpc_client
from hams_admin.rpc import (management_pb2, management_pb2_grpc, model_pb2,
model_pb2_grpc, prediction_pb2,
prediction_pb2_grpc)
class ManagementServerServicer(management_pb2_grpc.ManagementServerServicer):
def __init__(self, redis_name, redis_port):
self.admin = HamsConnection(DockerContainerManager())
self.mutex = Lock()
#self.redis_name=redis_name
#self.redis_port=redis_port
self.redis_client = redis_client.RedisClient(self.admin.logger, redis_name, redis_port)
def AddRuntimeDAG(self, request, context):
self.redis_client.add_runtime_dag(request)
return management_pb2.Response(status="AddRuntimeDAGSuccessful")
def GetRuntimeDAG(self, request, context):
old_runtime_dag = self.redis_client.get_runtime_dag(request)
return management_pb2.RuntimeDAGInfo(file=old_runtime_dag)
def ReportContainerFailure(self, request, context):
        print("=========== ReportContainerFailure ============")
fail_host_ip = request.hostip
fail_model_id = request.modelid
fail_app_id = request.appid
fail_container_name = request.modelname
#####################################################
## Step 1 : Get Current runtime DAG
#####################################################
self.admin.logger.info("[Recovery] Get existing runtime DAG")
self.mutex.acquire(timeout=60)
fail_dag_name = fail_app_id.split('-')[0]
fail_dag_version = fail_app_id.split('-')[1]
fail_dag_id = '1'
old_runtime_dag = self.redis_client.get_runtime_dag(management_pb2.RuntimeDAGInfo(name=fail_dag_name, version=fail_dag_version, id=fail_dag_id))
#docker_client = self.admin.cm.get_docker_client(fail_host_ip)
print(old_runtime_dag)
runtime_infos = graph_parser.get_model_from_dag(old_runtime_dag, int(fail_model_id))
if not graph_parser.is_running(runtime_infos, fail_container_name):
re = management_pb2.FailureResponse(status="Error")
self.mutex.release()
return re
#####################################################
## Step 2: Boot up a new container
#####################################################
model_name,model_version,model_image = graph_parser.get_name_version(runtime_infos)
isstateful = graph_parser.is_stateful(runtime_infos)
isprimary = False
if isstateful:
self.admin.logger.info("[Recovery] Stateful Failure")
isprimary = graph_parser.is_primary(runtime_infos, fail_container_name)
if isprimary:
self.admin.logger.info("[Recovery] Primary Failure")
else:
self.admin.logger.info("[Recovery] Backup Failure")
self.admin.logger.info("[Recovery] Booting up new container instances")
        ## here we safely init the model container with a known proxy
container_name, container_id, scheduled_host = self.admin.cm.add_replica(model_name, model_version, "22222", model_image)
container_ip = self.admin.cm.get_container_ip(scheduled_host, container_id)
proxy_name, proxy_id = self.admin.cm.set_proxy("ai-proxy:latest",container_name, container_ip, scheduled_host, recovery=True)
proxy_ip = self.admin.cm.get_container_ip(scheduled_host, proxy_id)
time.sleep(1)
self.admin.cm.grpc_client("zsxhku/grpcclient", "--setproxy %s %s %s %s"%(container_ip, "22222", proxy_name, "22223"))
self.admin.logger.info('[Recovery] Set proxy in Model Container ')
#####################################################
## Step 3: Update runtime DAG
#####################################################
self.admin.logger.info('[Recovery] Updating old runtime dag')
new_runtime_dag = graph_parser.gen_new_runtime_dag(old_runtime_dag, model_name, model_version, isstateful, isprimary, container_name, container_id, container_ip, proxy_name, proxy_id, proxy_ip )
self.redis_client.update_runtime_dag(management_pb2.RuntimeDAGInfo(name=fail_dag_name, version=fail_dag_version, id=fail_dag_id, file=new_runtime_dag, format="old"))
#grpc_client.UpdateRuntimeDAG(fail_app_id, )
self.mutex.release()
#####################################################
## Step 4: Return the old/updated runtime DAG
#####################################################
self.admin.logger.info("[Recovery] Returning new runtime dag")
status = ""
re = management_pb2.FailureResponse(newruntimedag=new_runtime_dag, isstateful=isstateful, isprimary=isprimary, status=status, modelid = request.modelid)
return re
def serve():
#model_name = os.environ["MODEL_NAME"]
#model_port = os.environ["MODEL_PORT"]
redis_name = os.environ["REDIS_IP"]
redis_port = os.environ["REDIS_PORT"]
server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
service = ManagementServerServicer(redis_name, redis_port)
management_pb2_grpc.add_ManagementServerServicer_to_server(service,server)
server.add_insecure_port('[::]:55555')
# server.add_insecure_port('[::]:{port}'.format(port=model_port))
server.start()
print("Management Daemon Started")
try:
while True:
time.sleep(60*60*24)
except KeyboardInterrupt:
server.stop(0)
if __name__ == '__main__':
serve()
avg_line_length: 37.111111 | max_line_length: 202 | alphanum_fraction: 0.637558
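A hypothetical client-side sketch for the daemon above. The stub name follows the grpc codegen convention (`<Service>Stub`), and `RuntimeDAGInfo` with `name`/`version`/`id`/`file` fields is the message the server itself constructs; the concrete DAG values are placeholders:

```python
# Hypothetical client sketch; DAG name/version/id values are placeholders.
import grpc

from hams_admin.rpc import management_pb2, management_pb2_grpc

# serve() binds the daemon to port 55555 on all interfaces
channel = grpc.insecure_channel("localhost:55555")
stub = management_pb2_grpc.ManagementServerStub(channel)

# GetRuntimeDAG returns a RuntimeDAGInfo whose `file` field carries the DAG
info = stub.GetRuntimeDAG(
    management_pb2.RuntimeDAGInfo(name="mydag", version="1", id="1")
)
print(info.file)
```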
Row 3
hexsha: 0ff1bff8f2ab650e4c4f69dfeeac0acc118d2c65 | size: 6,014 | ext: py | lang: Python
max_stars: inference.py @ kyungwon-dev/model-optimization (head 65a2dc24f2ad4c0af031562f89bf9743234fc544), licenses ["MIT"], count null, events null → null
max_issues: inference.py @ kyungwon-dev/model-optimization (head 65a2dc24f2ad4c0af031562f89bf9743234fc544), licenses ["MIT"], count null, events null → null
max_forks: inference.py @ kyungwon-dev/model-optimization (head 65a2dc24f2ad4c0af031562f89bf9743234fc544), licenses ["MIT"], count null, events null → null
content:
"""Example code for submit.
- Author: Junghoon Kim, Jongkuk Lim
- Contact: placidus36@gmail.com, lim.jeikei@gmail.com
"""
import argparse
import json
import os
import time
from datetime import datetime
import torch
from torch.utils.data import DataLoader
from torchvision.datasets import ImageFolder
from torchvision.transforms import Resize
from tqdm import tqdm
from src.augmentation.policies import simple_augment_test
from src.model import Model
from src.utils.common import read_yaml
if torch.__version__ >= "1.8.1":
from torch import profiler
else:
from torch.autograd import profiler
CLASSES = [
"Metal",
"Paper",
"Paperpack",
"Plastic",
"Plasticbag",
"Styrofoam",
]
class CustomImageFolder(ImageFolder):
"""ImageFolder with filename."""
def __getitem__(self, index):
img_gt = super(CustomImageFolder, self).__getitem__(index)
fdir = self.imgs[index][0]
fname = fdir.rsplit(os.path.sep, 1)[-1]
return img_gt + (fname,)
def get_dataloader(img_root: str, data_config: str) -> DataLoader:
"""Get dataloader.
Note:
Don't forget to set normalization.
"""
# Load yaml
data_config = read_yaml(data_config)
transform_test_args = (
data_config["AUG_TEST_PARAMS"] if data_config.get("AUG_TEST_PARAMS") else None
)
# Transformation for test
transform_test = getattr(
__import__("src.augmentation.policies", fromlist=[""]),
data_config["AUG_TEST"],
)(dataset=data_config["DATASET"], img_size=data_config["IMG_SIZE"])
dataset = CustomImageFolder(root=img_root, transform=transform_test)
dataloader = DataLoader(dataset=dataset, batch_size=1, num_workers=8)
return dataloader
@torch.no_grad()
def inference(model, dataloader, dst_path: str, t0: float) -> None:
"""Run inference with given model and dataloader.
Args:
model: PyTorch model.
dataloader: PyTorch dataset loader.
dst_path: destination path for inference result to be written.
t0: initial time prior to creating model and dataset
by time.monotonic().
"""
model = model.to(device)
model.eval()
    profile_input = torch.rand(1, 3, 512, 512).to(device)  # fallback if no Resize transform is found
for transform in dataloader.dataset.transform.transforms:
if isinstance(transform, Resize):
profile_input = torch.rand(1, 3, *transform.size).to(device)
break
n_profile = 100
print(f"Profile input shape: {profile_input.shape}")
with profiler.profile(use_cuda=True, profile_memory=False) as prof:
        for _ in tqdm(range(n_profile), "Running profile ..."):
x = model(profile_input)
avg_time = prof.total_average()
if hasattr(avg_time, "self_cuda_time_total"):
cuda_time = avg_time.self_cuda_time_total / 1e6 / n_profile
else:
cuda_time = avg_time.cuda_time_total / 1e6 / n_profile
cpu_time = avg_time.self_cpu_time_total / 1e6 / n_profile
print(prof.key_averages())
print(f"Average CUDA time: {cuda_time}, CPU time: {cpu_time}")
result = {
"inference": {},
"time": {
"profile": {"cuda": float("inf"), "cpu": float("inf")},
"runtime": {"all": 0, "inference_only": 0},
"inference": {},
},
"macs": float("inf"),
}
time_measure_inference = 0
for img, _, fname in tqdm(dataloader, "Running inference ..."):
t_start = torch.cuda.Event(enable_timing=True)
t_end = torch.cuda.Event(enable_timing=True)
t_start.record()
img = img.to(device)
pred = model(img)
pred = torch.argmax(pred)
t_end.record()
torch.cuda.synchronize()
t_inference = t_start.elapsed_time(t_end) / 1000
time_measure_inference += t_inference
result["inference"][fname[0]] = CLASSES[int(pred.detach())]
result["time"]["inference"][fname[0]] = t_inference
result["time"]["profile"]["cuda"] = cuda_time
result["time"]["profile"]["cpu"] = cpu_time
result["time"]["runtime"]["all"] = time.monotonic() - t0
result["time"]["runtime"]["inference_only"] = time_measure_inference
    j = json.dumps(result, indent=4)
    save_path = os.path.join(dst_path, "output.csv")
    with open(save_path, "w") as outfile:
        outfile.write(j)  # persist the pretty-printed JSON built above
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Submit.")
parser.add_argument(
"--dst", type=str, help="destination path for submit",
default=os.environ.get('SM_OUTPUT_DATA_DIR')
)
parser.add_argument("--model_dir", type=str, help="Saved model root directory which includes 'best.pt', 'data.yml', and, 'model.yml'", default='/opt/ml/code/exp/latest')
parser.add_argument("--weight_name", type=str, help="Model weight file name. (best.pt, best.ts, ...)", default="best.pt")
parser.add_argument(
"--img_root",
type=str,
help="image folder root. e.g) 'data/test'",
default='/opt/ml/data/test'
)
args = parser.parse_args()
assert args.model_dir != '' and args.img_root != '', "'--model_dir' and '--img_root' must be provided."
args.weight = os.path.join(args.model_dir, args.weight_name)
args.model_config = os.path.join(args.model_dir, "model.yml")
args.data_config = os.path.join(args.model_dir, "data.yml")
t0 = time.monotonic()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # prepare dataloader
dataloader = get_dataloader(img_root=args.img_root, data_config=args.data_config)
# prepare model
if args.weight.endswith("ts"):
model = torch.jit.load(args.weight)
else:
model_instance = Model(args.model_config, verbose=True)
print(args.weight)
model_instance.model.load_state_dict(
torch.load(args.weight, map_location=torch.device("cpu"))
)
model = model_instance.model
# inference
inference(model, dataloader, args.dst, t0)
avg_line_length: 32.333333 | max_line_length: 173 | alphanum_fraction: 0.654307
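The per-image timing in `inference()` above uses CUDA events rather than wall-clock time, so queued GPU work is measured correctly. The pattern in isolation, as a minimal sketch (a CUDA device is assumed available):

```python
# Minimal sketch of the CUDA-event timing idiom; assumes a CUDA device.
import torch

device = torch.device("cuda")
a = torch.rand(1024, 1024, device=device)
b = torch.rand(1024, 1024, device=device)

start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
start.record()              # marker enqueued on the current CUDA stream
c = a @ b                   # the GPU work being timed
end.record()
torch.cuda.synchronize()    # events resolve only once the stream drains
print(start.elapsed_time(end) / 1000, "s")  # elapsed_time() returns milliseconds
```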
Row 4
hexsha: 2f4a2822d4805087816e500f77467fc588016a2b | size: 1,736 | ext: py | lang: Python
max_stars: objects_DOMINE/PFAM.py @ diogo1790team/inphinity_DM (head b20d75ee0485e1f406a25efcf5f2855631166c38), licenses ["MIT"], count 1, events 2019-03-11T12:59:37.000Z → 2019-03-11T12:59:37.000Z
max_issues: objects_DOMINE/PFAM.py @ diogo1790team/inphinity_DM (head b20d75ee0485e1f406a25efcf5f2855631166c38), licenses ["MIT"], count 21, events 2018-10-17T14:52:30.000Z → 2019-06-03T12:43:58.000Z
max_forks: objects_DOMINE/PFAM.py @ diogo1790team/inphinity_DM (head b20d75ee0485e1f406a25efcf5f2855631166c38), licenses ["MIT"], count 6, events 2019-02-28T07:40:14.000Z → 2019-09-23T13:31:54.000Z
content:
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 10 14:49:43 2018
@author: Diogo
"""
from SQL_obj_DOMINE.PFAM_SQL import _PFAM_sql
class PFAM_ddi(object):
"""
This class treat the PFAM object has it exists in PFAM table database DOMINE
By default, all FK are in the lasts positions in the parameters declaration
"""
def __init__(self, domain = "", database_name = "domine_db_out"):
"""
Constructor of the Domain object. All the parameters have a default value
:param domain: name of the domain (PFXXXXX)
:param database_name: name of the database. See Factory_databases_access
:type domain: text - required
:type database_name: text - required
"""
self.domain = domain
self.database_name = database_name
@property
def domain(self):
return self._domain
@domain.setter
def domain(self, dom):
"""
Validation of domain format (remove the version if exists)
"""
if len(dom.split(".")) == 2:
self._domain = dom.split(".")[0]
else:
self._domain = dom
    @staticmethod
    def get_all_domains():
        """
        Return an array with all the domains in the DOMINE database
        :return: array of domains
        :rtype: array(PFAM_ddi)
        """
listOfPfamDomains = []
sqlObj = _PFAM_sql(db_name = 'domine_db_out')
results = sqlObj.get_all_domains()
for element in results:
listOfPfamDomains.append(PFAM_ddi(element[0]))
return listOfPfamDomains
def __str__(self):
"""
Overwrite of the str method
"""
message_str = "Domain id: {0}".format(self.domain)
return message_str
avg_line_length: 27.125 | max_line_length: 81 | alphanum_fraction: 0.604839
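A minimal usage sketch for the class above. Only the in-memory behavior (the version-stripping `domain` setter and `__str__`) is exercised, since `get_all_domains()` needs a reachable `domine_db_out` database:

```python
# Minimal sketch; get_all_domains() is left commented out because it requires
# a live domine_db_out database connection.
from objects_DOMINE.PFAM import PFAM_ddi

domain = PFAM_ddi("PF00069.26")
print(domain)         # Domain id: PF00069  (the ".26" version suffix is stripped)
print(domain.domain)  # PF00069

# With database access configured:
# all_domains = PFAM_ddi.get_all_domains()
```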
Row 5
hexsha: 8be8833651d3cd2df08fa0edaaf5397924ac9ef7 | size: 387 | ext: py | lang: Python
max_stars: Python version/languages/pddl/pddl_input_program.py @ DomenicoIngrati/EmbASP (head 05000e0e65e7c84609f45e76dd32f7abaa0c5fbe), licenses ["MIT"], count null, events null → null
max_issues: Python version/languages/pddl/pddl_input_program.py @ DomenicoIngrati/EmbASP (head 05000e0e65e7c84609f45e76dd32f7abaa0c5fbe), licenses ["MIT"], count null, events null → null
max_forks: Python version/languages/pddl/pddl_input_program.py @ DomenicoIngrati/EmbASP (head 05000e0e65e7c84609f45e76dd32f7abaa0c5fbe), licenses ["MIT"], count null, events null → null
content:
from base.input_program import InputProgram
class PDDLInputProgram(InputProgram):
"""A generic PDDL program."""
def __init__(self, progType):
super(PDDLInputProgram, self).__init__()
        self.__programs_type = progType  # where the type of the program is stored
def get_programs_type(self):
"""Return type of programs."""
return self.__programs_type
avg_line_length: 27.642857 | max_line_length: 75 | alphanum_fraction: 0.697674
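A short usage sketch for the class above. The concrete `progType` value is a placeholder, since EmbASP's program-type constants are not shown in this dump, and the import assumes the repo's "Python version" directory is on `sys.path`:

```python
# Hypothetical sketch; "domain" stands in for whatever program-type constant
# the surrounding EmbASP code actually passes as progType.
from languages.pddl.pddl_input_program import PDDLInputProgram

program = PDDLInputProgram("domain")
print(program.get_programs_type())  # domain
```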
Row 6
hexsha: a302cffbd1d31dc31a4a58196d43ed6932bf60d6 | size: 18,308 | ext: py | lang: Python
max_stars: nipyapi/nifi/models/controller_status_dto.py @ Jimvin/nipyapi (head 826beac376d4321bd2d69491f09086474c7e7bfb), licenses ["Apache-2.0"], count 199, events 2017-08-24T12:19:41.000Z → 2022-03-20T14:50:17.000Z
max_issues: nipyapi/nifi/models/controller_status_dto.py @ Jimvin/nipyapi (head 826beac376d4321bd2d69491f09086474c7e7bfb), licenses ["Apache-2.0"], count 275, events 2017-08-28T21:21:49.000Z → 2022-03-29T17:57:26.000Z
max_forks: nipyapi/nifi/models/controller_status_dto.py @ Jimvin/nipyapi (head 826beac376d4321bd2d69491f09086474c7e7bfb), licenses ["Apache-2.0"], count 73, events 2017-09-07T10:13:56.000Z → 2022-02-28T10:37:21.000Z
content:
# coding: utf-8
"""
NiFi Rest API
The Rest API provides programmatic access to command and control a NiFi instance in real time. Start and stop processors, monitor queues, query provenance data, and more. Each endpoint below includes a description, definitions of the expected input and output, potential response codes, and the authorizations required to invoke each service.
OpenAPI spec version: 1.15.0
Contact: dev@nifi.apache.org
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class ControllerStatusDTO(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'active_thread_count': 'int',
'terminated_thread_count': 'int',
'queued': 'str',
'flow_files_queued': 'int',
'bytes_queued': 'int',
'running_count': 'int',
'stopped_count': 'int',
'invalid_count': 'int',
'disabled_count': 'int',
'active_remote_port_count': 'int',
'inactive_remote_port_count': 'int',
'up_to_date_count': 'int',
'locally_modified_count': 'int',
'stale_count': 'int',
'locally_modified_and_stale_count': 'int',
'sync_failure_count': 'int'
}
attribute_map = {
'active_thread_count': 'activeThreadCount',
'terminated_thread_count': 'terminatedThreadCount',
'queued': 'queued',
'flow_files_queued': 'flowFilesQueued',
'bytes_queued': 'bytesQueued',
'running_count': 'runningCount',
'stopped_count': 'stoppedCount',
'invalid_count': 'invalidCount',
'disabled_count': 'disabledCount',
'active_remote_port_count': 'activeRemotePortCount',
'inactive_remote_port_count': 'inactiveRemotePortCount',
'up_to_date_count': 'upToDateCount',
'locally_modified_count': 'locallyModifiedCount',
'stale_count': 'staleCount',
'locally_modified_and_stale_count': 'locallyModifiedAndStaleCount',
'sync_failure_count': 'syncFailureCount'
}
def __init__(self, active_thread_count=None, terminated_thread_count=None, queued=None, flow_files_queued=None, bytes_queued=None, running_count=None, stopped_count=None, invalid_count=None, disabled_count=None, active_remote_port_count=None, inactive_remote_port_count=None, up_to_date_count=None, locally_modified_count=None, stale_count=None, locally_modified_and_stale_count=None, sync_failure_count=None):
"""
ControllerStatusDTO - a model defined in Swagger
"""
self._active_thread_count = None
self._terminated_thread_count = None
self._queued = None
self._flow_files_queued = None
self._bytes_queued = None
self._running_count = None
self._stopped_count = None
self._invalid_count = None
self._disabled_count = None
self._active_remote_port_count = None
self._inactive_remote_port_count = None
self._up_to_date_count = None
self._locally_modified_count = None
self._stale_count = None
self._locally_modified_and_stale_count = None
self._sync_failure_count = None
if active_thread_count is not None:
self.active_thread_count = active_thread_count
if terminated_thread_count is not None:
self.terminated_thread_count = terminated_thread_count
if queued is not None:
self.queued = queued
if flow_files_queued is not None:
self.flow_files_queued = flow_files_queued
if bytes_queued is not None:
self.bytes_queued = bytes_queued
if running_count is not None:
self.running_count = running_count
if stopped_count is not None:
self.stopped_count = stopped_count
if invalid_count is not None:
self.invalid_count = invalid_count
if disabled_count is not None:
self.disabled_count = disabled_count
if active_remote_port_count is not None:
self.active_remote_port_count = active_remote_port_count
if inactive_remote_port_count is not None:
self.inactive_remote_port_count = inactive_remote_port_count
if up_to_date_count is not None:
self.up_to_date_count = up_to_date_count
if locally_modified_count is not None:
self.locally_modified_count = locally_modified_count
if stale_count is not None:
self.stale_count = stale_count
if locally_modified_and_stale_count is not None:
self.locally_modified_and_stale_count = locally_modified_and_stale_count
if sync_failure_count is not None:
self.sync_failure_count = sync_failure_count
@property
def active_thread_count(self):
"""
Gets the active_thread_count of this ControllerStatusDTO.
The number of active threads in the NiFi.
:return: The active_thread_count of this ControllerStatusDTO.
:rtype: int
"""
return self._active_thread_count
@active_thread_count.setter
def active_thread_count(self, active_thread_count):
"""
Sets the active_thread_count of this ControllerStatusDTO.
The number of active threads in the NiFi.
:param active_thread_count: The active_thread_count of this ControllerStatusDTO.
:type: int
"""
self._active_thread_count = active_thread_count
@property
def terminated_thread_count(self):
"""
Gets the terminated_thread_count of this ControllerStatusDTO.
The number of terminated threads in the NiFi.
:return: The terminated_thread_count of this ControllerStatusDTO.
:rtype: int
"""
return self._terminated_thread_count
@terminated_thread_count.setter
def terminated_thread_count(self, terminated_thread_count):
"""
Sets the terminated_thread_count of this ControllerStatusDTO.
The number of terminated threads in the NiFi.
:param terminated_thread_count: The terminated_thread_count of this ControllerStatusDTO.
:type: int
"""
self._terminated_thread_count = terminated_thread_count
@property
def queued(self):
"""
Gets the queued of this ControllerStatusDTO.
The number of flowfiles queued in the NiFi.
:return: The queued of this ControllerStatusDTO.
:rtype: str
"""
return self._queued
@queued.setter
def queued(self, queued):
"""
Sets the queued of this ControllerStatusDTO.
The number of flowfiles queued in the NiFi.
:param queued: The queued of this ControllerStatusDTO.
:type: str
"""
self._queued = queued
@property
def flow_files_queued(self):
"""
Gets the flow_files_queued of this ControllerStatusDTO.
The number of FlowFiles queued across the entire flow
:return: The flow_files_queued of this ControllerStatusDTO.
:rtype: int
"""
return self._flow_files_queued
@flow_files_queued.setter
def flow_files_queued(self, flow_files_queued):
"""
Sets the flow_files_queued of this ControllerStatusDTO.
The number of FlowFiles queued across the entire flow
:param flow_files_queued: The flow_files_queued of this ControllerStatusDTO.
:type: int
"""
self._flow_files_queued = flow_files_queued
@property
def bytes_queued(self):
"""
Gets the bytes_queued of this ControllerStatusDTO.
The size of the FlowFiles queued across the entire flow
:return: The bytes_queued of this ControllerStatusDTO.
:rtype: int
"""
return self._bytes_queued
@bytes_queued.setter
def bytes_queued(self, bytes_queued):
"""
Sets the bytes_queued of this ControllerStatusDTO.
The size of the FlowFiles queued across the entire flow
:param bytes_queued: The bytes_queued of this ControllerStatusDTO.
:type: int
"""
self._bytes_queued = bytes_queued
@property
def running_count(self):
"""
Gets the running_count of this ControllerStatusDTO.
The number of running components in the NiFi.
:return: The running_count of this ControllerStatusDTO.
:rtype: int
"""
return self._running_count
@running_count.setter
def running_count(self, running_count):
"""
Sets the running_count of this ControllerStatusDTO.
The number of running components in the NiFi.
:param running_count: The running_count of this ControllerStatusDTO.
:type: int
"""
self._running_count = running_count
@property
def stopped_count(self):
"""
Gets the stopped_count of this ControllerStatusDTO.
The number of stopped components in the NiFi.
:return: The stopped_count of this ControllerStatusDTO.
:rtype: int
"""
return self._stopped_count
@stopped_count.setter
def stopped_count(self, stopped_count):
"""
Sets the stopped_count of this ControllerStatusDTO.
The number of stopped components in the NiFi.
:param stopped_count: The stopped_count of this ControllerStatusDTO.
:type: int
"""
self._stopped_count = stopped_count
@property
def invalid_count(self):
"""
Gets the invalid_count of this ControllerStatusDTO.
The number of invalid components in the NiFi.
:return: The invalid_count of this ControllerStatusDTO.
:rtype: int
"""
return self._invalid_count
@invalid_count.setter
def invalid_count(self, invalid_count):
"""
Sets the invalid_count of this ControllerStatusDTO.
The number of invalid components in the NiFi.
:param invalid_count: The invalid_count of this ControllerStatusDTO.
:type: int
"""
self._invalid_count = invalid_count
@property
def disabled_count(self):
"""
Gets the disabled_count of this ControllerStatusDTO.
The number of disabled components in the NiFi.
:return: The disabled_count of this ControllerStatusDTO.
:rtype: int
"""
return self._disabled_count
@disabled_count.setter
def disabled_count(self, disabled_count):
"""
Sets the disabled_count of this ControllerStatusDTO.
The number of disabled components in the NiFi.
:param disabled_count: The disabled_count of this ControllerStatusDTO.
:type: int
"""
self._disabled_count = disabled_count
@property
def active_remote_port_count(self):
"""
Gets the active_remote_port_count of this ControllerStatusDTO.
The number of active remote ports in the NiFi.
:return: The active_remote_port_count of this ControllerStatusDTO.
:rtype: int
"""
return self._active_remote_port_count
@active_remote_port_count.setter
def active_remote_port_count(self, active_remote_port_count):
"""
Sets the active_remote_port_count of this ControllerStatusDTO.
The number of active remote ports in the NiFi.
:param active_remote_port_count: The active_remote_port_count of this ControllerStatusDTO.
:type: int
"""
self._active_remote_port_count = active_remote_port_count
@property
def inactive_remote_port_count(self):
"""
Gets the inactive_remote_port_count of this ControllerStatusDTO.
The number of inactive remote ports in the NiFi.
:return: The inactive_remote_port_count of this ControllerStatusDTO.
:rtype: int
"""
return self._inactive_remote_port_count
@inactive_remote_port_count.setter
def inactive_remote_port_count(self, inactive_remote_port_count):
"""
Sets the inactive_remote_port_count of this ControllerStatusDTO.
The number of inactive remote ports in the NiFi.
:param inactive_remote_port_count: The inactive_remote_port_count of this ControllerStatusDTO.
:type: int
"""
self._inactive_remote_port_count = inactive_remote_port_count
@property
def up_to_date_count(self):
"""
Gets the up_to_date_count of this ControllerStatusDTO.
The number of up to date versioned process groups in the NiFi.
:return: The up_to_date_count of this ControllerStatusDTO.
:rtype: int
"""
return self._up_to_date_count
@up_to_date_count.setter
def up_to_date_count(self, up_to_date_count):
"""
Sets the up_to_date_count of this ControllerStatusDTO.
The number of up to date versioned process groups in the NiFi.
:param up_to_date_count: The up_to_date_count of this ControllerStatusDTO.
:type: int
"""
self._up_to_date_count = up_to_date_count
@property
def locally_modified_count(self):
"""
Gets the locally_modified_count of this ControllerStatusDTO.
The number of locally modified versioned process groups in the NiFi.
:return: The locally_modified_count of this ControllerStatusDTO.
:rtype: int
"""
return self._locally_modified_count
@locally_modified_count.setter
def locally_modified_count(self, locally_modified_count):
"""
Sets the locally_modified_count of this ControllerStatusDTO.
The number of locally modified versioned process groups in the NiFi.
:param locally_modified_count: The locally_modified_count of this ControllerStatusDTO.
:type: int
"""
self._locally_modified_count = locally_modified_count
@property
def stale_count(self):
"""
Gets the stale_count of this ControllerStatusDTO.
The number of stale versioned process groups in the NiFi.
:return: The stale_count of this ControllerStatusDTO.
:rtype: int
"""
return self._stale_count
@stale_count.setter
def stale_count(self, stale_count):
"""
Sets the stale_count of this ControllerStatusDTO.
The number of stale versioned process groups in the NiFi.
:param stale_count: The stale_count of this ControllerStatusDTO.
:type: int
"""
self._stale_count = stale_count
@property
def locally_modified_and_stale_count(self):
"""
Gets the locally_modified_and_stale_count of this ControllerStatusDTO.
The number of locally modified and stale versioned process groups in the NiFi.
:return: The locally_modified_and_stale_count of this ControllerStatusDTO.
:rtype: int
"""
return self._locally_modified_and_stale_count
@locally_modified_and_stale_count.setter
def locally_modified_and_stale_count(self, locally_modified_and_stale_count):
"""
Sets the locally_modified_and_stale_count of this ControllerStatusDTO.
The number of locally modified and stale versioned process groups in the NiFi.
:param locally_modified_and_stale_count: The locally_modified_and_stale_count of this ControllerStatusDTO.
:type: int
"""
self._locally_modified_and_stale_count = locally_modified_and_stale_count
@property
def sync_failure_count(self):
"""
Gets the sync_failure_count of this ControllerStatusDTO.
The number of versioned process groups in the NiFi that are unable to sync to a registry.
:return: The sync_failure_count of this ControllerStatusDTO.
:rtype: int
"""
return self._sync_failure_count
@sync_failure_count.setter
def sync_failure_count(self, sync_failure_count):
"""
Sets the sync_failure_count of this ControllerStatusDTO.
The number of versioned process groups in the NiFi that are unable to sync to a registry.
:param sync_failure_count: The sync_failure_count of this ControllerStatusDTO.
:type: int
"""
self._sync_failure_count = sync_failure_count
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, ControllerStatusDTO):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
avg_line_length: 33.531136 | max_line_length: 478 | alphanum_fraction: 0.655615
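A short sketch of the generated model above in use; the field values are illustrative only:

```python
# Illustrative values; any subset of the keyword arguments may be set.
from nipyapi.nifi.models.controller_status_dto import ControllerStatusDTO

status = ControllerStatusDTO(
    active_thread_count=4,
    queued="12 / 1.5 MB",
    running_count=7,
)
d = status.to_dict()
print(d["active_thread_count"], d["queued"])  # 4 12 / 1.5 MB
# __eq__ compares every attribute, so a partially-populated DTO differs:
print(status == ControllerStatusDTO(active_thread_count=4))  # False
```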
Row 7
hexsha: 9866bdd039da49d9aecf7bcc5eea2f3c77aad3e9 | size: 26,676 | ext: py | lang: Python
max_stars: quarkchain/cluster/tests/test_cluster.py @ tim-yoshi/pyquarkchain (head 1847542c166a180b5ffc3c6e917751be85fa15a6), licenses ["MIT"], count 3, events 2019-03-14T17:08:07.000Z → 2019-10-02T11:13:53.000Z
max_issues: quarkchain/cluster/tests/test_cluster.py @ tim-yoshi/pyquarkchain (head 1847542c166a180b5ffc3c6e917751be85fa15a6), licenses ["MIT"], count null, events null → null
max_forks: quarkchain/cluster/tests/test_cluster.py @ tim-yoshi/pyquarkchain (head 1847542c166a180b5ffc3c6e917751be85fa15a6), licenses ["MIT"], count 1, events 2019-05-04T22:57:29.000Z → 2019-05-04T22:57:29.000Z
content:
import unittest
from quarkchain.genesis import GenesisManager
from quarkchain.cluster.tests.test_utils import (
create_transfer_transaction,
ClusterContext,
)
from quarkchain.core import Address, Branch, Identity, TokenBalanceMap
from quarkchain.evm import opcodes
from quarkchain.utils import call_async, assert_true_with_timeout
class TestCluster(unittest.TestCase):
def test_single_cluster(self):
id1 = Identity.create_random_identity()
acc1 = Address.create_from_identity(id1, full_shard_key=0)
with ClusterContext(1, acc1) as clusters:
self.assertEqual(len(clusters), 1)
def test_three_clusters(self):
with ClusterContext(3) as clusters:
self.assertEqual(len(clusters), 3)
def test_create_shard_at_different_height(self):
acc1 = Address.create_random_account()
id1 = 0 << 16 | 1 | 0
id2 = 1 << 16 | 1 | 0
genesis_root_heights = {id1: 1, id2: 2}
with ClusterContext(
1,
acc1,
chain_size=2,
shard_size=1,
genesis_root_heights=genesis_root_heights,
) as clusters:
master = clusters[0].master
self.assertIsNone(clusters[0].get_shard(id1))
self.assertIsNone(clusters[0].get_shard(id2))
root = call_async(master.get_next_block_to_mine(acc1, branch_value=None))
self.assertEqual(len(root.minor_block_header_list), 0)
call_async(master.add_root_block(root))
# shard 0 created at root height 1
self.assertIsNotNone(clusters[0].get_shard(id1))
self.assertIsNone(clusters[0].get_shard(id2))
root = call_async(master.get_next_block_to_mine(acc1, branch_value=None))
self.assertEqual(len(root.minor_block_header_list), 1)
call_async(master.add_root_block(root))
self.assertIsNotNone(clusters[0].get_shard(id1))
# shard 1 created at root height 2
self.assertIsNotNone(clusters[0].get_shard(id2))
block = call_async(master.get_next_block_to_mine(acc1, branch_value=None))
self.assertEqual(len(root.minor_block_header_list), 1)
call_async(master.add_root_block(root))
def test_get_primary_account_data(self):
id1 = Identity.create_random_identity()
acc1 = Address.create_from_identity(id1, full_shard_key=0)
acc2 = Address.create_random_account(full_shard_key=1)
with ClusterContext(1, acc1) as clusters:
master = clusters[0].master
slaves = clusters[0].slave_list
self.assertEqual(
call_async(master.get_primary_account_data(acc1)).transaction_count, 0
)
tx = create_transfer_transaction(
shard_state=clusters[0].get_shard_state(0b10),
key=id1.get_key(),
from_address=acc1,
to_address=acc1,
value=12345,
)
self.assertTrue(slaves[0].add_tx(tx))
root = call_async(
master.get_next_block_to_mine(address=acc1, branch_value=None)
)
call_async(master.add_root_block(root))
block1 = call_async(
master.get_next_block_to_mine(address=acc1, branch_value=0b10)
)
self.assertTrue(
call_async(
master.add_raw_minor_block(block1.header.branch, block1.serialize())
)
)
self.assertEqual(
call_async(master.get_primary_account_data(acc1)).transaction_count, 1
)
self.assertEqual(
call_async(master.get_primary_account_data(acc2)).transaction_count, 0
)
def test_add_transaction(self):
id1 = Identity.create_random_identity()
acc1 = Address.create_from_identity(id1, full_shard_key=0)
acc2 = Address.create_from_identity(id1, full_shard_key=1)
with ClusterContext(2, acc1) as clusters:
master = clusters[0].master
root = call_async(master.get_next_block_to_mine(acc1, branch_value=None))
call_async(master.add_root_block(root))
tx1 = create_transfer_transaction(
shard_state=clusters[0].get_shard_state(0b10),
key=id1.get_key(),
from_address=acc1,
to_address=acc1,
value=12345,
)
self.assertTrue(call_async(master.add_transaction(tx1)))
self.assertEqual(len(clusters[0].get_shard_state(0b10).tx_queue), 1)
branch1 = Branch(2 | 1)
tx2 = create_transfer_transaction(
shard_state=clusters[0].get_shard_state(0b11),
key=id1.get_key(),
from_address=acc2,
to_address=acc1,
value=12345,
gas=30000,
)
self.assertTrue(call_async(master.add_transaction(tx2)))
self.assertEqual(len(clusters[0].get_shard_state(0b11).tx_queue), 1)
# check the tx is received by the other cluster
tx_queue = clusters[1].get_shard_state(0b10).tx_queue
assert_true_with_timeout(lambda: len(tx_queue) == 1)
self.assertEqual(tx_queue.pop_transaction(), tx1.tx.to_evm_tx())
tx_queue = clusters[1].get_shard_state(0b11).tx_queue
assert_true_with_timeout(lambda: len(tx_queue) == 1)
self.assertEqual(tx_queue.pop_transaction(), tx2.tx.to_evm_tx())
def test_add_minor_block_request_list(self):
id1 = Identity.create_random_identity()
acc1 = Address.create_from_identity(id1, full_shard_key=0)
with ClusterContext(2, acc1) as clusters:
shard_state = clusters[0].get_shard_state(0b10)
coinbase_amount = (
shard_state.env.quark_chain_config.shards[
shard_state.full_shard_id
].COINBASE_AMOUNT
// 2
)
b1 = shard_state.get_tip().create_block_to_append()
evm_state = shard_state.run_block(b1)
coinbase_amount_map = TokenBalanceMap(evm_state.block_fee_tokens)
coinbase_amount_map.add(
{shard_state.env.quark_chain_config.genesis_token: coinbase_amount}
)
b1.finalize(evm_state=evm_state, coinbase_amount_map=coinbase_amount_map)
add_result = call_async(
clusters[0].master.add_raw_minor_block(b1.header.branch, b1.serialize())
)
self.assertTrue(add_result)
# Make sure the xshard list is not broadcasted to the other shard
self.assertFalse(
clusters[0]
.get_shard_state(0b11)
.contain_remote_minor_block_hash(b1.header.get_hash())
)
self.assertTrue(
clusters[0].master.root_state.is_minor_block_validated(
b1.header.get_hash()
)
)
# Make sure another cluster received the new block
assert_true_with_timeout(
lambda: clusters[0]
.get_shard_state(0b10)
.contain_block_by_hash(b1.header.get_hash())
)
assert_true_with_timeout(
lambda: clusters[1].master.root_state.is_minor_block_validated(
b1.header.get_hash()
)
)
def test_add_root_block_request_list(self):
id1 = Identity.create_random_identity()
acc1 = Address.create_from_identity(id1, full_shard_key=0)
with ClusterContext(2, acc1) as clusters:
# shutdown cluster connection
clusters[1].peer.close()
# add blocks in cluster 0
block_header_list = [clusters[0].get_shard_state(2 | 0).header_tip]
shard_state0 = clusters[0].get_shard_state(0b10)
coinbase_amount = (
shard_state0.env.quark_chain_config.shards[
shard_state0.full_shard_id
].COINBASE_AMOUNT
// 2
)
for i in range(7):
b1 = shard_state0.get_tip().create_block_to_append()
evm_state = shard_state0.run_block(b1)
coinbase_amount_map = TokenBalanceMap(evm_state.block_fee_tokens)
coinbase_amount_map.add(
{shard_state0.env.quark_chain_config.genesis_token: coinbase_amount}
)
b1.finalize(
evm_state=evm_state, coinbase_amount_map=coinbase_amount_map
)
add_result = call_async(
clusters[0].master.add_raw_minor_block(
b1.header.branch, b1.serialize()
)
)
self.assertTrue(add_result)
block_header_list.append(b1.header)
block_header_list.append(clusters[0].get_shard_state(2 | 1).header_tip)
shard_state0 = clusters[0].get_shard_state(0b11)
coinbase_amount = (
shard_state0.env.quark_chain_config.shards[
shard_state0.full_shard_id
].COINBASE_AMOUNT
// 2
)
b2 = shard_state0.get_tip().create_block_to_append()
evm_state = shard_state0.run_block(b2)
coinbase_amount_map = TokenBalanceMap(evm_state.block_fee_tokens)
coinbase_amount_map.add(
{shard_state0.env.quark_chain_config.genesis_token: coinbase_amount}
)
b2.finalize(evm_state=evm_state, coinbase_amount_map=coinbase_amount_map)
add_result = call_async(
clusters[0].master.add_raw_minor_block(b2.header.branch, b2.serialize())
)
self.assertTrue(add_result)
block_header_list.append(b2.header)
# add 1 block in cluster 1
shard_state1 = clusters[1].get_shard_state(0b11)
coinbase_amount = (
shard_state1.env.quark_chain_config.shards[
shard_state1.full_shard_id
].COINBASE_AMOUNT
// 2
)
b3 = shard_state1.get_tip().create_block_to_append()
evm_state = shard_state1.run_block(b3)
coinbase_amount_map = TokenBalanceMap(evm_state.block_fee_tokens)
coinbase_amount_map.add(
{shard_state1.env.quark_chain_config.genesis_token: coinbase_amount}
)
b3.finalize(evm_state=evm_state, coinbase_amount_map=coinbase_amount_map)
add_result = call_async(
clusters[1].master.add_raw_minor_block(b3.header.branch, b3.serialize())
)
self.assertTrue(add_result)
self.assertEqual(clusters[1].get_shard_state(0b11).header_tip, b3.header)
# reestablish cluster connection
call_async(
clusters[1].network.connect(
"127.0.0.1",
clusters[0].master.env.cluster_config.SIMPLE_NETWORK.BOOTSTRAP_PORT,
)
)
root_block1 = clusters[0].master.root_state.create_block_to_mine(
block_header_list, acc1
)
call_async(clusters[0].master.add_root_block(root_block1))
# Make sure the root block tip of local cluster is changed
self.assertEqual(clusters[0].master.root_state.tip, root_block1.header)
# Make sure the root block tip of cluster 1 is changed
assert_true_with_timeout(
lambda: clusters[1].master.root_state.tip == root_block1.header, 2
)
# Minor block is downloaded
self.assertEqual(b1.header.height, 7)
assert_true_with_timeout(
lambda: clusters[1].get_shard_state(0b10).header_tip == b1.header
)
# The tip is overwritten due to root chain first consensus
assert_true_with_timeout(
lambda: clusters[1].get_shard_state(0b11).header_tip == b2.header
)
def test_shard_synchronizer_with_fork(self):
id1 = Identity.create_random_identity()
acc1 = Address.create_from_identity(id1, full_shard_key=0)
with ClusterContext(2, acc1) as clusters:
# shutdown cluster connection
clusters[1].peer.close()
block_list = []
# cluster 0 has 13 blocks added
shard_state0 = clusters[0].get_shard_state(0b10)
coinbase_amount = (
shard_state0.env.quark_chain_config.shards[
shard_state0.full_shard_id
].COINBASE_AMOUNT
// 2
)
for i in range(13):
block = shard_state0.get_tip().create_block_to_append()
evm_state = shard_state0.run_block(block)
coinbase_amount_map = TokenBalanceMap(evm_state.block_fee_tokens)
coinbase_amount_map.add(
{shard_state0.env.quark_chain_config.genesis_token: coinbase_amount}
)
block.finalize(
evm_state=evm_state, coinbase_amount_map=coinbase_amount_map
)
add_result = call_async(
clusters[0].master.add_raw_minor_block(
block.header.branch, block.serialize()
)
)
self.assertTrue(add_result)
block_list.append(block)
self.assertEqual(clusters[0].get_shard_state(0b10).header_tip.height, 13)
# cluster 1 has 12 blocks added
shard_state0 = clusters[1].get_shard_state(0b10)
coinbase_amount = (
shard_state0.env.quark_chain_config.shards[
shard_state0.full_shard_id
].COINBASE_AMOUNT
// 2
)
for i in range(12):
block = shard_state0.get_tip().create_block_to_append()
evm_state = shard_state0.run_block(block)
coinbase_amount_map = TokenBalanceMap(evm_state.block_fee_tokens)
coinbase_amount_map.add(
{shard_state0.env.quark_chain_config.genesis_token: coinbase_amount}
)
block.finalize(
evm_state=evm_state, coinbase_amount_map=coinbase_amount_map
)
add_result = call_async(
clusters[1].master.add_raw_minor_block(
block.header.branch, block.serialize()
)
)
self.assertTrue(add_result)
self.assertEqual(clusters[1].get_shard_state(0b10).header_tip.height, 12)
# reestablish cluster connection
call_async(
clusters[1].network.connect(
"127.0.0.1",
clusters[0].master.env.cluster_config.SIMPLE_NETWORK.BOOTSTRAP_PORT,
)
)
# a new block from cluster 0 will trigger sync in cluster 1
shard_state0 = clusters[0].get_shard_state(0b10)
coinbase_amount = (
shard_state0.env.quark_chain_config.shards[
shard_state0.full_shard_id
].COINBASE_AMOUNT
// 2
)
block = shard_state0.get_tip().create_block_to_append()
evm_state = shard_state0.run_block(block)
coinbase_amount_map = TokenBalanceMap(evm_state.block_fee_tokens)
coinbase_amount_map.add(
{shard_state0.env.quark_chain_config.genesis_token: coinbase_amount}
)
block.finalize(evm_state=evm_state, coinbase_amount_map=coinbase_amount_map)
add_result = call_async(
clusters[0].master.add_raw_minor_block(
block.header.branch, block.serialize()
)
)
self.assertTrue(add_result)
block_list.append(block)
            # expect cluster 1 to have all the blocks from cluster 0 and
            # the same tip as cluster 0
for block in block_list:
assert_true_with_timeout(
lambda: clusters[1]
.slave_list[0]
.shards[Branch(0b10)]
.state.contain_block_by_hash(block.header.get_hash())
)
assert_true_with_timeout(
lambda: clusters[1].master.root_state.is_minor_block_validated(
block.header.get_hash()
)
)
self.assertEqual(
clusters[1].get_shard_state(0b10).header_tip,
clusters[0].get_shard_state(0b10).header_tip,
)
def test_shard_genesis_fork_fork(self):
""" Test shard forks at genesis blocks due to root chain fork at GENESIS.ROOT_HEIGHT"""
acc1 = Address.create_random_account(0)
acc2 = Address.create_random_account(1)
genesis_root_heights = {2: 0, 3: 1}
with ClusterContext(
2,
acc1,
chain_size=1,
shard_size=2,
genesis_root_heights=genesis_root_heights,
) as clusters:
# shutdown cluster connection
clusters[1].peer.close()
master0 = clusters[0].master
root0 = call_async(master0.get_next_block_to_mine(acc1, branch_value=None))
call_async(master0.add_root_block(root0))
genesis0 = (
clusters[0].get_shard_state(2 | 1).db.get_minor_block_by_height(0)
)
self.assertEqual(
genesis0.header.hash_prev_root_block, root0.header.get_hash()
)
master1 = clusters[1].master
root1 = call_async(master1.get_next_block_to_mine(acc2, branch_value=None))
self.assertNotEqual(root0.header.get_hash(), root1.header.get_hash())
call_async(master1.add_root_block(root1))
genesis1 = (
clusters[1].get_shard_state(2 | 1).db.get_minor_block_by_height(0)
)
self.assertEqual(
genesis1.header.hash_prev_root_block, root1.header.get_hash()
)
self.assertNotEqual(genesis0.header.get_hash(), genesis1.header.get_hash())
# let's make cluster1's root chain longer than cluster0's
root2 = call_async(master1.get_next_block_to_mine(acc2, branch_value=None))
call_async(master1.add_root_block(root2))
self.assertEqual(master1.root_state.tip.height, 2)
# reestablish cluster connection
call_async(
clusters[1].network.connect(
"127.0.0.1",
clusters[0].master.env.cluster_config.SIMPLE_NETWORK.BOOTSTRAP_PORT,
)
)
# Expect cluster0's genesis change to genesis1
assert_true_with_timeout(
lambda: clusters[0]
.get_shard_state(2 | 1)
.db.get_minor_block_by_height(0)
.header.get_hash()
== genesis1.header.get_hash()
)
self.assertTrue(clusters[0].get_shard_state(2 | 1).root_tip == root2.header)
def test_broadcast_cross_shard_transactions(self):
""" Test the cross shard transactions are broadcasted to the destination shards """
id1 = Identity.create_random_identity()
acc1 = Address.create_from_identity(id1, full_shard_key=0)
acc3 = Address.create_random_account(full_shard_key=1)
with ClusterContext(1, acc1) as clusters:
master = clusters[0].master
slaves = clusters[0].slave_list
genesis_token = (
clusters[0].get_shard_state(2 | 0).env.quark_chain_config.genesis_token
)
# Add a root block first so that later minor blocks referring to this root
# can be broadcasted to other shards
root_block = call_async(
master.get_next_block_to_mine(
Address.create_empty_account(), branch_value=None
)
)
call_async(master.add_root_block(root_block))
tx1 = create_transfer_transaction(
shard_state=clusters[0].get_shard_state(2 | 0),
key=id1.get_key(),
from_address=acc1,
to_address=acc3,
value=54321,
gas=opcodes.GTXXSHARDCOST + opcodes.GTXCOST,
)
self.assertTrue(slaves[0].add_tx(tx1))
b1 = clusters[0].get_shard_state(2 | 0).create_block_to_mine(address=acc1)
b2 = clusters[0].get_shard_state(2 | 0).create_block_to_mine(address=acc1)
b2.header.create_time += 1
self.assertNotEqual(b1.header.get_hash(), b2.header.get_hash())
call_async(clusters[0].get_shard(2 | 0).add_block(b1))
# expect shard 1 got the CrossShardTransactionList of b1
xshard_tx_list = (
clusters[0]
.get_shard_state(2 | 1)
.db.get_minor_block_xshard_tx_list(b1.header.get_hash())
)
self.assertEqual(len(xshard_tx_list.tx_list), 1)
self.assertEqual(xshard_tx_list.tx_list[0].tx_hash, tx1.get_hash())
self.assertEqual(xshard_tx_list.tx_list[0].from_address, acc1)
self.assertEqual(xshard_tx_list.tx_list[0].to_address, acc3)
self.assertEqual(xshard_tx_list.tx_list[0].value, 54321)
call_async(clusters[0].get_shard(2 | 0).add_block(b2))
# b2 doesn't update tip
self.assertEqual(clusters[0].get_shard_state(2 | 0).header_tip, b1.header)
# expect shard 1 got the CrossShardTransactionList of b2
xshard_tx_list = (
clusters[0]
.get_shard_state(2 | 1)
.db.get_minor_block_xshard_tx_list(b2.header.get_hash())
)
self.assertEqual(len(xshard_tx_list.tx_list), 1)
self.assertEqual(xshard_tx_list.tx_list[0].tx_hash, tx1.get_hash())
self.assertEqual(xshard_tx_list.tx_list[0].from_address, acc1)
self.assertEqual(xshard_tx_list.tx_list[0].to_address, acc3)
self.assertEqual(xshard_tx_list.tx_list[0].value, 54321)
b3 = (
clusters[0]
.get_shard_state(2 | 1)
.create_block_to_mine(address=acc1.address_in_shard(1))
)
call_async(master.add_raw_minor_block(b3.header.branch, b3.serialize()))
root_block = call_async(
master.get_next_block_to_mine(address=acc1, branch_value=None)
)
call_async(master.add_root_block(root_block))
# b4 should include the withdraw of tx1
b4 = (
clusters[0]
.get_shard_state(2 | 1)
.create_block_to_mine(address=acc1.address_in_shard(1))
)
# adding b1, b2, b3 again shouldn't affect b4 to be added later
self.assertTrue(
call_async(master.add_raw_minor_block(b1.header.branch, b1.serialize()))
)
self.assertTrue(
call_async(master.add_raw_minor_block(b2.header.branch, b2.serialize()))
)
self.assertTrue(
call_async(master.add_raw_minor_block(b3.header.branch, b3.serialize()))
)
self.assertTrue(
call_async(master.add_raw_minor_block(b4.header.branch, b4.serialize()))
)
self.assertEqual(
call_async(
master.get_primary_account_data(acc3)
).token_balances.balance_map,
{genesis_token: 54321},
)
def test_broadcast_cross_shard_transactions_to_neighbor_only(self):
""" Test the broadcast is only done to the neighbors """
id1 = Identity.create_random_identity()
acc1 = Address.create_from_identity(id1, full_shard_key=0)
# create 64 shards so that the neighbor rule can kick in
# explicitly set num_slaves to 4 so that it does not spin up 64 slaves
with ClusterContext(1, acc1, shard_size=64, num_slaves=4) as clusters:
master = clusters[0].master
# Add a root block first so that later minor blocks referring to this root
# can be broadcasted to other shards
root_block = call_async(
master.get_next_block_to_mine(
Address.create_empty_account(), branch_value=None
)
)
call_async(master.add_root_block(root_block))
b1 = clusters[0].get_shard_state(64).create_block_to_mine(address=acc1)
self.assertTrue(
call_async(master.add_raw_minor_block(b1.header.branch, b1.serialize()))
)
neighbor_shards = [2 ** i for i in range(6)]
for shard_id in range(64):
xshard_tx_list = (
clusters[0]
.get_shard_state(64 | shard_id)
.db.get_minor_block_xshard_tx_list(b1.header.get_hash())
)
# Only neighbor should have it
if shard_id in neighbor_shards:
self.assertIsNotNone(xshard_tx_list)
else:
self.assertIsNone(xshard_tx_list)
def test_get_work_from_slave(self):
genesis = Address.create_empty_account(full_shard_key=0)
with ClusterContext(1, genesis, remote_mining=True) as clusters:
slaves = clusters[0].slave_list
# no posw
state = clusters[0].get_shard_state(2 | 0)
branch = state.create_block_to_mine().header.branch
work = call_async(slaves[0].get_work(branch))
self.assertEqual(work.difficulty, 10)
# enable posw, with total stakes cover all the window
state.shard_config.POSW_CONFIG.ENABLED = True
state.shard_config.POSW_CONFIG.TOTAL_STAKE_PER_BLOCK = 500000
work = call_async(slaves[0].get_work(branch))
self.assertEqual(work.difficulty, 0)
avg_line_length: 41.877551 | max_line_length: 95 | alphanum_fraction: 0.587494
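The tests above lean on `assert_true_with_timeout` from `quarkchain.utils` to wait for cross-cluster propagation instead of asserting immediately. A hypothetical re-implementation of that polling idiom, to make the pattern concrete (the real helper's signature and error reporting may differ):

```python
# Hypothetical re-implementation of the polling idiom used above; the real
# quarkchain.utils helper may differ in signature and error reporting.
import time


def assert_true_with_timeout(predicate, timeout=5.0, interval=0.1):
    """Poll predicate() until it returns True or the timeout expires."""
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        if predicate():
            return
        time.sleep(interval)
    raise AssertionError("condition not met within %.1fs" % timeout)
```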
Row 8
hexsha: 038319c71ac137b03d1c090207d37505d3559e2d | size: 5,611 | ext: py | lang: Python
max_stars: components/dash-table/tests/selenium/test_markdown.py @ emilhe/dash (head c313d1b1c5aba054073b4925122accef5642ce5d), licenses ["MIT"], count 17,143, events 2015-07-14T17:19:05.000Z → 2022-03-31T10:03:39.000Z
max_issues: components/dash-table/tests/selenium/test_markdown.py @ sthagen/dash (head b3918ff798a51462687ff36e9e56c079c9f463cb), licenses ["MIT"], count 1,630, events 2015-11-17T22:15:41.000Z → 2022-03-31T09:15:07.000Z
max_forks: components/dash-table/tests/selenium/test_markdown.py @ sthagen/dash (head b3918ff798a51462687ff36e9e56c079c9f463cb), licenses ["MIT"], count 1,970, events 2015-07-12T07:05:14.000Z → 2022-03-30T19:58:09.000Z
content:
import dash
from dash.testing import wait
from utils import get_props, generate_markdown_mock_data
from dash.dash_table import DataTable
import pytest
def get_app(props=dict(), data_fn=generate_markdown_mock_data, assets_folder=None):
app = (
dash.Dash(__name__)
if assets_folder is None
else dash.Dash(__name__, assets_folder=assets_folder)
)
baseProps = get_props(data_fn=data_fn)
baseProps.update(dict(filter_action="native", sort_action="native"))
baseProps.update(props)
app.layout = DataTable(**baseProps)
return app
def test_mark001_header(test):
test.start_server(get_app())
target = test.table("table")
target.column(0).sort(1)
assert (
target.cell(0, "markdown-headers")
.get()
.find_element_by_css_selector(".dash-cell-value > p")
.get_attribute("innerHTML")
== "row 0"
)
target.column(0).sort(1)
assert (
target.cell(0, "markdown-headers")
.get()
.find_element_by_css_selector(".dash-cell-value > h5")
.get_attribute("innerHTML")
== "row 95"
)
def test_mark002_emphasized_text(test):
test.start_server(get_app())
target = test.table("table")
target.column(1).sort(1)
assert (
target.cell(0, "markdown-italics")
.get()
.find_element_by_css_selector(".dash-cell-value > p > em")
.get_attribute("innerHTML")
== "1"
)
target.column(1).sort(1)
assert (
target.cell(0, "markdown-italics")
.get()
.find_element_by_css_selector(".dash-cell-value > p > em")
.get_attribute("innerHTML")
== "98"
)
def test_mark003_link(test):
test.start_server(get_app())
target = test.table("table")
target.column(2).sort(1)
assert (
target.cell(0, "markdown-links")
.get()
.find_element_by_css_selector(".dash-cell-value > p > a")
.get_attribute("innerHTML")
== "Learn about 0"
)
target.column(2).sort(1)
assert (
target.cell(0, "markdown-links")
.get()
.find_element_by_css_selector(".dash-cell-value > p > a")
.get_attribute("innerHTML")
== "Learn about 9"
)
def test_mark004_image(test):
test.start_server(get_app())
target = test.table("table")
target.column(8).sort(1)
assert (
target.cell(0, "markdown-images")
.get()
.find_element_by_css_selector(".dash-cell-value > p > img")
.get_attribute("alt")
== "image 0 alt text"
)
target.column(8).sort(1)
assert (
target.cell(0, "markdown-images")
.get()
.find_element_by_css_selector(".dash-cell-value > p > img")
.get_attribute("alt")
== "image 99 alt text"
)
def test_mark005_table(test):
test.start_server(get_app())
target = test.table("table")
target.column(4).sort(1)
assert (
target.cell(0, "markdown-tables")
.get()
.find_element_by_css_selector(".dash-cell-value > table > tbody > tr > td")
.get_attribute("innerHTML")
== "0"
)
target.column(4).sort(1)
assert (
target.cell(0, "markdown-tables")
.get()
.find_element_by_css_selector(".dash-cell-value > table > tbody > tr > td")
.get_attribute("innerHTML")
== "99"
)
@pytest.mark.parametrize(
"filter",
["Learn about 97", "/wiki/97"],
)
def test_mark006_filter_link_text(test, filter):
test.start_server(get_app())
target = test.table("table")
target.column("markdown-links").filter_value(filter)
assert (
target.cell(0, "markdown-links")
.get()
.find_element_by_css_selector(".dash-cell-value > p > a")
.get_attribute("href")
== "http://en.wikipedia.org/wiki/97"
)
assert not target.cell(1, "markdown-links").exists()
def test_mark007_filter_image_alt_text(test):
test.start_server(get_app())
target = test.table("table")
target.column("markdown-images").filter_value("97")
assert (
target.cell(0, "markdown-images")
.get()
.find_element_by_css_selector(".dash-cell-value > p > img")
.get_attribute("alt")
== "image 97 alt text"
)
assert not target.cell(1, "markdown-images").exists()
def test_mark008_loads_highlightjs(test):
test.start_server(get_app())
target = test.table("table")
wait.until(
lambda: len(
target.cell(0, "markdown-code-blocks")
.get()
.find_elements_by_css_selector("code.language-python")
)
== 1,
3,
)
# table loads a private instance of hljs that isn't shared globally
wait.until(lambda: not test.driver.execute_script("return !!window.hljs"), 3)
assert test.get_log_errors() == []
def test_mark009_loads_custom_highlightjs(test):
test.start_server(get_app(assets_folder="./test_markdown_assets"))
target = test.table("table")
wait.until(
lambda: len(
target.cell(0, "markdown-code-blocks")
.get()
.find_elements_by_css_selector("code.language-python")
)
== 1,
3,
)
wait.until(
lambda: target.cell(0, "markdown-code-blocks")
.get()
.find_element_by_css_selector("code.language-python")
.get_attribute("innerHTML")
== "hljs override",
3,
)
wait.until(lambda: test.driver.execute_script("return !!window.hljs"), 3)
assert test.get_log_errors() == []
avg_line_length: 24.718062 | max_line_length: 83 | alphanum_fraction: 0.602032
hexsha: 18e861d37e21e819bc9fae1c260ab097bb77e00d | size: 32602 | ext: py | lang: Python
path: tests/test_cli_main.py | repo: cgomesu/streamlink @ dd63e63c81f0fe3b7acf79ab094ec393b516b981 | licenses: ["BSD-2-Clause"]
stars: 4 (2017-04-30T11:08:08.000Z → 2022-01-31T04:26:05.000Z) | issues: null | forks: 3 (2021-10-10T12:30:16.000Z → 2022-03-03T12:51:38.000Z)
import datetime
import os
import sys
import unittest
from pathlib import Path, PosixPath, WindowsPath
from textwrap import dedent
from unittest.mock import Mock, call, patch
import freezegun
import streamlink_cli.main
import tests.resources
from streamlink.session import Streamlink
from streamlink.stream.stream import Stream
from streamlink_cli.compat import DeprecatedPath, is_win32, stdout
from streamlink_cli.main import (
Formatter,
NoPluginError,
check_file_output,
create_output,
format_valid_streams,
handle_stream,
handle_url,
resolve_stream_name,
setup_config_args
)
from streamlink_cli.output import FileOutput, PlayerOutput
from tests.plugin.testplugin import TestPlugin as _TestPlugin
class FakePlugin(_TestPlugin):
module = "fake"
arguments = []
_streams = {}
def streams(self, *args, **kwargs):
return self._streams
def _get_streams(self): # pragma: no cover
pass
class TestCLIMain(unittest.TestCase):
def test_resolve_stream_name(self):
a = Mock()
b = Mock()
c = Mock()
d = Mock()
e = Mock()
streams = {
"160p": a,
"360p": b,
"480p": c,
"720p": d,
"1080p": e,
"worst": b,
"best": d,
"worst-unfiltered": a,
"best-unfiltered": e
}
self.assertEqual(resolve_stream_name(streams, "unknown"), "unknown")
self.assertEqual(resolve_stream_name(streams, "160p"), "160p")
self.assertEqual(resolve_stream_name(streams, "360p"), "360p")
self.assertEqual(resolve_stream_name(streams, "480p"), "480p")
self.assertEqual(resolve_stream_name(streams, "720p"), "720p")
self.assertEqual(resolve_stream_name(streams, "1080p"), "1080p")
self.assertEqual(resolve_stream_name(streams, "worst"), "360p")
self.assertEqual(resolve_stream_name(streams, "best"), "720p")
self.assertEqual(resolve_stream_name(streams, "worst-unfiltered"), "160p")
self.assertEqual(resolve_stream_name(streams, "best-unfiltered"), "1080p")
def test_format_valid_streams(self):
a = Mock()
b = Mock()
c = Mock()
streams = {
"audio": a,
"720p": b,
"1080p": c,
"worst": b,
"best": c
}
self.assertEqual(
format_valid_streams(_TestPlugin, streams),
", ".join([
"audio",
"720p (worst)",
"1080p (best)"
])
)
streams = {
"audio": a,
"720p": b,
"1080p": c,
"worst-unfiltered": b,
"best-unfiltered": c
}
self.assertEqual(
format_valid_streams(_TestPlugin, streams),
", ".join([
"audio",
"720p (worst-unfiltered)",
"1080p (best-unfiltered)"
])
)
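# Naming convention exercised above (inferred from the assertions): "worst" and
# "best" label the lowest/highest stream that survived filtering, while the
# "-unfiltered" variants span the full stream list.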
class TestCLIMainJsonAndStreamUrl(unittest.TestCase):
@patch("streamlink_cli.main.args", json=True, stream_url=True, subprocess_cmdline=False)
@patch("streamlink_cli.main.console")
def test_handle_stream_with_json_and_stream_url(self, console, args):
stream = Mock()
streams = dict(best=stream)
plugin = FakePlugin("")
plugin._streams = streams
handle_stream(plugin, streams, "best")
self.assertEqual(console.msg.mock_calls, [])
self.assertEqual(console.msg_json.mock_calls, [call(
stream,
metadata=dict(
id="test-id-1234-5678",
author="Tѥst Āuƭhǿr",
category=None,
title="Test Title"
)
)])
self.assertEqual(console.error.mock_calls, [])
console.msg_json.mock_calls.clear()
args.json = False
handle_stream(plugin, streams, "best")
self.assertEqual(console.msg.mock_calls, [call(stream.to_url())])
self.assertEqual(console.msg_json.mock_calls, [])
self.assertEqual(console.error.mock_calls, [])
console.msg.mock_calls.clear()
stream.to_url.side_effect = TypeError()
handle_stream(plugin, streams, "best")
self.assertEqual(console.msg.mock_calls, [])
self.assertEqual(console.msg_json.mock_calls, [])
self.assertEqual(console.exit.mock_calls, [call("The stream specified cannot be translated to a URL")])
@patch("streamlink_cli.main.args", json=True, stream_url=True, stream=[], default_stream=[], retry_max=0, retry_streams=0)
@patch("streamlink_cli.main.console")
def test_handle_url_with_json_and_stream_url(self, console, args):
stream = Mock()
streams = dict(worst=Mock(), best=stream)
class _FakePlugin(FakePlugin):
_streams = streams
with patch("streamlink_cli.main.streamlink", resolve_url=Mock(return_value=(_FakePlugin, ""))):
handle_url()
self.assertEqual(console.msg.mock_calls, [])
self.assertEqual(console.msg_json.mock_calls, [call(
plugin="fake",
metadata=dict(
id="test-id-1234-5678",
author="Tѥst Āuƭhǿr",
category=None,
title="Test Title"
),
streams=streams
)])
self.assertEqual(console.error.mock_calls, [])
console.msg_json.mock_calls.clear()
args.json = False
handle_url()
self.assertEqual(console.msg.mock_calls, [call(stream.to_manifest_url())])
self.assertEqual(console.msg_json.mock_calls, [])
self.assertEqual(console.error.mock_calls, [])
console.msg.mock_calls.clear()
stream.to_manifest_url.side_effect = TypeError()
handle_url()
self.assertEqual(console.msg.mock_calls, [])
self.assertEqual(console.msg_json.mock_calls, [])
self.assertEqual(console.exit.mock_calls, [call("The stream specified cannot be translated to a URL")])
console.exit.mock_calls.clear()
class TestCLIMainCheckFileOutput(unittest.TestCase):
@staticmethod
def mock_path(path, is_file=True):
return Mock(
spec=Path(path),
is_file=Mock(return_value=is_file),
__str__=Mock(return_value=path)
)
def test_check_file_output(self):
path = self.mock_path("foo", is_file=False)
output = check_file_output(path, False)
self.assertIsInstance(output, FileOutput)
self.assertIs(output.filename, path)
def test_check_file_output_exists_force(self):
path = self.mock_path("foo", is_file=True)
output = check_file_output(path, True)
self.assertIsInstance(output, FileOutput)
self.assertIs(output.filename, path)
@patch("streamlink_cli.main.console")
@patch("streamlink_cli.main.sys")
def test_check_file_output_exists_ask_yes(self, mock_sys: Mock, mock_console: Mock):
mock_sys.stdin.isatty.return_value = True
mock_console.ask = Mock(return_value="y")
path = self.mock_path("foo", is_file=True)
output = check_file_output(path, False)
self.assertEqual(mock_console.ask.call_args_list, [call("File foo already exists! Overwrite it? [y/N] ")])
self.assertIsInstance(output, FileOutput)
self.assertIs(output.filename, path)
@patch("streamlink_cli.main.console")
@patch("streamlink_cli.main.sys")
def test_check_file_output_exists_ask_no(self, mock_sys: Mock, mock_console: Mock):
mock_sys.stdin.isatty.return_value = True
mock_sys.exit.side_effect = SystemExit
mock_console.ask = Mock(return_value="N")
path = self.mock_path("foo", is_file=True)
with self.assertRaises(SystemExit):
check_file_output(path, False)
self.assertEqual(mock_console.ask.call_args_list, [call("File foo already exists! Overwrite it? [y/N] ")])
@patch("streamlink_cli.main.console")
@patch("streamlink_cli.main.sys")
def test_check_file_output_exists_notty(self, mock_sys: Mock, mock_console: Mock):
mock_sys.stdin.isatty.return_value = False
mock_sys.exit.side_effect = SystemExit
path = self.mock_path("foo", is_file=True)
with self.assertRaises(SystemExit):
check_file_output(path, False)
self.assertEqual(mock_console.ask.call_args_list, [])
class TestCLIMainCreateOutput(unittest.TestCase):
@patch("streamlink_cli.main.args")
@patch("streamlink_cli.main.console", Mock())
@patch("streamlink_cli.main.DEFAULT_STREAM_METADATA", {"title": "bar"})
def test_create_output_no_file_output_options(self, args: Mock):
formatter = Formatter({
"author": lambda: "foo"
})
args.output = None
args.stdout = None
args.record = None
args.record_and_pipe = None
args.title = None
args.url = "URL"
args.player = "mpv"
args.player_args = ""
output = create_output(formatter)
self.assertIsInstance(output, PlayerOutput)
self.assertEqual(output.title, "URL")
args.title = "{author} - {title}"
output = create_output(formatter)
self.assertIsInstance(output, PlayerOutput)
self.assertEqual(output.title, "foo - bar")
@patch("streamlink_cli.main.args")
@patch("streamlink_cli.main.check_file_output")
def test_create_output_file_output(self, mock_check_file_output: Mock, args: Mock):
formatter = Formatter({})
mock_check_file_output.side_effect = lambda path, force: FileOutput(path)
args.output = "foo"
args.stdout = None
args.record = None
args.record_and_pipe = None
args.force = False
args.fs_safe_rules = None
output = create_output(formatter)
self.assertEqual(mock_check_file_output.call_args_list, [call(Path("foo"), False)])
self.assertIsInstance(output, FileOutput)
self.assertEqual(output.filename, Path("foo"))
self.assertIsNone(output.fd)
self.assertIsNone(output.record)
@patch("streamlink_cli.main.args")
def test_create_output_stdout(self, args: Mock):
formatter = Formatter({})
args.output = None
args.stdout = True
args.record = None
args.record_and_pipe = None
output = create_output(formatter)
self.assertIsInstance(output, FileOutput)
self.assertIsNone(output.filename)
self.assertIs(output.fd, stdout)
self.assertIsNone(output.record)
args.output = "-"
args.stdout = False
output = create_output(formatter)
self.assertIsInstance(output, FileOutput)
self.assertIsNone(output.filename)
self.assertIs(output.fd, stdout)
self.assertIsNone(output.record)
@patch("streamlink_cli.main.args")
@patch("streamlink_cli.main.check_file_output")
def test_create_output_record_and_pipe(self, mock_check_file_output: Mock, args: Mock):
formatter = Formatter({})
mock_check_file_output.side_effect = lambda path, force: FileOutput(path)
args.output = None
args.stdout = None
args.record_and_pipe = "foo"
args.force = False
args.fs_safe_rules = None
output = create_output(formatter)
self.assertEqual(mock_check_file_output.call_args_list, [call(Path("foo"), False)])
self.assertIsInstance(output, FileOutput)
self.assertIsNone(output.filename)
self.assertIs(output.fd, stdout)
self.assertIsInstance(output.record, FileOutput)
self.assertEqual(output.record.filename, Path("foo"))
self.assertIsNone(output.record.fd)
self.assertIsNone(output.record.record)
@patch("streamlink_cli.main.args")
@patch("streamlink_cli.main.check_file_output")
@patch("streamlink_cli.main.DEFAULT_STREAM_METADATA", {"title": "bar"})
def test_create_output_record(self, mock_check_file_output: Mock, args: Mock):
formatter = Formatter({
"author": lambda: "foo"
})
mock_check_file_output.side_effect = lambda path, force: FileOutput(path)
args.output = None
args.stdout = None
args.record = "foo"
args.record_and_pipe = None
args.force = False
args.fs_safe_rules = None
args.title = None
args.url = "URL"
args.player = "mpv"
args.player_args = ""
args.player_fifo = None
args.player_http = None
output = create_output(formatter)
self.assertIsInstance(output, PlayerOutput)
self.assertEqual(output.title, "URL")
self.assertIsInstance(output.record, FileOutput)
self.assertEqual(output.record.filename, Path("foo"))
self.assertIsNone(output.record.fd)
self.assertIsNone(output.record.record)
args.title = "{author} - {title}"
output = create_output(formatter)
self.assertIsInstance(output, PlayerOutput)
self.assertEqual(output.title, "foo - bar")
self.assertIsInstance(output.record, FileOutput)
self.assertEqual(output.record.filename, Path("foo"))
self.assertIsNone(output.record.fd)
self.assertIsNone(output.record.record)
@patch("streamlink_cli.main.args")
@patch("streamlink_cli.main.console")
def test_create_output_record_and_other_file_output(self, console: Mock, args: Mock):
formatter = Formatter({})
args.output = None
args.stdout = True
args.record_and_pipe = True
create_output(formatter)
console.exit.assert_called_with("Cannot use record options with other file output options.")
class TestCLIMainHandleStream(unittest.TestCase):
@patch("streamlink_cli.main.output_stream")
@patch("streamlink_cli.main.args")
def test_handle_stream_output_stream(self, args: Mock, mock_output_stream: Mock):
"""
Test that the formatter does define the correct variables
"""
args.json = False
args.subprocess_cmdline = False
args.stream_url = False
args.output = False
args.stdout = False
args.url = "URL"
args.player_passthrough = []
args.player_external_http = False
args.player_continuous_http = False
mock_output_stream.return_value = True
plugin = _TestPlugin("")
plugin.author = "AUTHOR"
plugin.category = "CATEGORY"
plugin.title = "TITLE"
stream = Stream(session=Mock())
streams = {"best": stream}
handle_stream(plugin, streams, "best")
self.assertEqual(mock_output_stream.call_count, 1)
paramStream, paramFormatter = mock_output_stream.call_args[0]
self.assertIs(paramStream, stream)
self.assertIsInstance(paramFormatter, Formatter)
self.assertEqual(
paramFormatter.title("{url} - {author} - {category}/{game} - {title}"),
"URL - AUTHOR - CATEGORY/CATEGORY - TITLE"
)
@patch("streamlink_cli.main.log")
class TestCLIMainSetupConfigArgs(unittest.TestCase):
configdir = Path(tests.resources.__path__[0], "cli", "config")
parser = Mock()
@classmethod
def subject(cls, config_files, **args):
def resolve_url(name):
if name == "noplugin":
raise NoPluginError()
return Mock(module="testplugin"), name
session = Mock()
session.resolve_url.side_effect = resolve_url
args.setdefault("url", "testplugin")
with patch("streamlink_cli.main.setup_args") as mock_setup_args, \
patch("streamlink_cli.main.args", **args), \
patch("streamlink_cli.main.streamlink", session), \
patch("streamlink_cli.main.CONFIG_FILES", config_files):
setup_config_args(cls.parser)
return mock_setup_args
def test_no_plugin(self, mock_log):
mock_setup_args = self.subject(
[self.configdir / "primary", DeprecatedPath(self.configdir / "secondary")],
config=None,
url="noplugin"
)
expected = [self.configdir / "primary"]
mock_setup_args.assert_called_once_with(self.parser, expected, ignore_unknown=False)
self.assertEqual(mock_log.info.mock_calls, [])
def test_default_primary(self, mock_log):
mock_setup_args = self.subject(
[self.configdir / "primary", DeprecatedPath(self.configdir / "secondary")],
config=None
)
expected = [self.configdir / "primary", self.configdir / "primary.testplugin"]
mock_setup_args.assert_called_once_with(self.parser, expected, ignore_unknown=False)
self.assertEqual(mock_log.info.mock_calls, [])
def test_default_secondary_deprecated(self, mock_log):
mock_setup_args = self.subject(
[self.configdir / "non-existent", DeprecatedPath(self.configdir / "secondary")],
config=None
)
expected = [self.configdir / "secondary", self.configdir / "secondary.testplugin"]
mock_setup_args.assert_called_once_with(self.parser, expected, ignore_unknown=False)
self.assertEqual(mock_log.info.mock_calls, [
call(f"Loaded config from deprecated path, see CLI docs for how to migrate: {expected[0]}"),
call(f"Loaded plugin config from deprecated path, see CLI docs for how to migrate: {expected[1]}")
])
def test_custom_with_primary_plugin(self, mock_log):
mock_setup_args = self.subject(
[self.configdir / "primary", DeprecatedPath(self.configdir / "secondary")],
config=[str(self.configdir / "custom")]
)
expected = [self.configdir / "custom", self.configdir / "primary.testplugin"]
mock_setup_args.assert_called_once_with(self.parser, expected, ignore_unknown=False)
self.assertEqual(mock_log.info.mock_calls, [])
def test_custom_with_deprecated_plugin(self, mock_log):
mock_setup_args = self.subject(
[self.configdir / "non-existent", DeprecatedPath(self.configdir / "secondary")],
config=[str(self.configdir / "custom")]
)
expected = [self.configdir / "custom", DeprecatedPath(self.configdir / "secondary.testplugin")]
mock_setup_args.assert_called_once_with(self.parser, expected, ignore_unknown=False)
self.assertEqual(mock_log.info.mock_calls, [
call(f"Loaded plugin config from deprecated path, see CLI docs for how to migrate: {expected[1]}")
])
def test_custom_multiple(self, mock_log):
mock_setup_args = self.subject(
[self.configdir / "primary", DeprecatedPath(self.configdir / "secondary")],
config=[str(self.configdir / "non-existent"), str(self.configdir / "primary"), str(self.configdir / "secondary")]
)
expected = [self.configdir / "secondary", self.configdir / "primary", self.configdir / "primary.testplugin"]
mock_setup_args.assert_called_once_with(self.parser, expected, ignore_unknown=False)
self.assertEqual(mock_log.info.mock_calls, [])
class _TestCLIMainLogging(unittest.TestCase):
@classmethod
def subject(cls, argv):
session = Streamlink()
session.load_plugins(os.path.join(os.path.dirname(__file__), "plugin"))
# stop test execution at the setup_signals() call, as we're not interested in what comes afterwards
class StopTest(Exception):
pass
with patch("streamlink_cli.main.streamlink", session), \
patch("streamlink_cli.main.setup_signals", side_effect=StopTest), \
patch("streamlink_cli.main.CONFIG_FILES", []), \
patch("streamlink_cli.main.setup_streamlink"), \
patch("streamlink_cli.main.setup_plugins"), \
patch("streamlink_cli.main.setup_http_session"), \
patch("streamlink.session.Streamlink.load_builtin_plugins"), \
patch("sys.argv") as mock_argv:
mock_argv.__getitem__.side_effect = lambda x: argv[x]
try:
streamlink_cli.main.main()
except StopTest:
pass
def tearDown(self):
streamlink_cli.main.logger.root.handlers.clear()
# python >=3.7.2: https://bugs.python.org/issue35046
_write_calls = (
([call("[cli][info] foo\n")]
if sys.version_info >= (3, 7, 2)
else [call("[cli][info] foo"), call("\n")])
+ [call("bar\n")]
)
def write_file_and_assert(self, mock_mkdir: Mock, mock_write: Mock, mock_stdout: Mock):
streamlink_cli.main.log.info("foo")
streamlink_cli.main.console.msg("bar")
self.assertEqual(mock_mkdir.mock_calls, [call(parents=True, exist_ok=True)])
self.assertEqual(mock_write.mock_calls, self._write_calls)
self.assertFalse(mock_stdout.write.called)
class TestCLIMainLogging(_TestCLIMainLogging):
@unittest.skipIf(is_win32, "test only applicable on a POSIX OS")
@patch("streamlink_cli.main.log")
@patch("streamlink_cli.main.os.geteuid", Mock(return_value=0))
def test_log_root_warning(self, mock_log):
self.subject(["streamlink"])
self.assertEqual(mock_log.info.mock_calls, [call("streamlink is running as root! Be careful!")])
@patch("streamlink_cli.main.log")
@patch("streamlink_cli.main.streamlink_version", "streamlink")
@patch("streamlink_cli.main.requests.__version__", "requests")
@patch("streamlink_cli.main.socks_version", "socks")
@patch("streamlink_cli.main.websocket_version", "websocket")
@patch("platform.python_version", Mock(return_value="python"))
def test_log_current_versions(self, mock_log):
self.subject(["streamlink", "--loglevel", "info"])
self.assertEqual(mock_log.debug.mock_calls, [], "Doesn't log anything if not debug logging")
with patch("sys.platform", "linux"), \
patch("platform.platform", Mock(return_value="linux")):
self.subject(["streamlink", "--loglevel", "debug"])
self.assertEqual(
mock_log.debug.mock_calls[:4],
[
call("OS: linux"),
call("Python: python"),
call("Streamlink: streamlink"),
call("Requests(requests), Socks(socks), Websocket(websocket)")
]
)
mock_log.debug.reset_mock()
with patch("sys.platform", "darwin"), \
patch("platform.mac_ver", Mock(return_value=["0.0.0"])):
self.subject(["streamlink", "--loglevel", "debug"])
self.assertEqual(
mock_log.debug.mock_calls[:4],
[
call("OS: macOS 0.0.0"),
call("Python: python"),
call("Streamlink: streamlink"),
call("Requests(requests), Socks(socks), Websocket(websocket)")
]
)
mock_log.debug.reset_mock()
with patch("sys.platform", "win32"), \
patch("platform.system", Mock(return_value="Windows")), \
patch("platform.release", Mock(return_value="0.0.0")):
self.subject(["streamlink", "--loglevel", "debug"])
self.assertEqual(
mock_log.debug.mock_calls[:4],
[
call("OS: Windows 0.0.0"),
call("Python: python"),
call("Streamlink: streamlink"),
call("Requests(requests), Socks(socks), Websocket(websocket)")
]
)
mock_log.debug.reset_mock()
@patch("streamlink_cli.main.log")
def test_log_current_arguments(self, mock_log):
self.subject([
"streamlink",
"--loglevel", "info"
])
self.assertEqual(mock_log.debug.mock_calls, [], "Doesn't log anything if not debug logging")
self.subject([
"streamlink",
"--loglevel", "debug",
"-p", "custom",
"--testplugin-bool",
"--testplugin-password=secret",
"test.se/channel",
"best,worst"
])
self.assertEqual(
mock_log.debug.mock_calls[-7:],
[
call("Arguments:"),
call(" url=test.se/channel"),
call(" stream=['best', 'worst']"),
call(" --loglevel=debug"),
call(" --player=custom"),
call(" --testplugin-bool=True"),
call(" --testplugin-password=********")
]
)
class TestCLIMainLoggingLogfile(_TestCLIMainLogging):
@patch("sys.stdout")
@patch("builtins.open")
def test_logfile_no_logfile(self, mock_open, mock_stdout):
self.subject(["streamlink"])
streamlink_cli.main.log.info("foo")
streamlink_cli.main.console.msg("bar")
self.assertEqual(streamlink_cli.main.console.output, sys.stdout)
self.assertFalse(mock_open.called)
self.assertEqual(mock_stdout.write.mock_calls, self._write_calls)
@patch("sys.stdout")
@patch("builtins.open")
def test_logfile_loglevel_none(self, mock_open, mock_stdout):
self.subject(["streamlink", "--loglevel", "none", "--logfile", "foo"])
streamlink_cli.main.log.info("foo")
streamlink_cli.main.console.msg("bar")
self.assertEqual(streamlink_cli.main.console.output, sys.stdout)
self.assertFalse(mock_open.called)
self.assertEqual(mock_stdout.write.mock_calls, [call("bar\n")])
@patch("sys.stdout")
@patch("builtins.open")
@patch("pathlib.Path.mkdir", Mock())
def test_logfile_path_relative(self, mock_open, mock_stdout):
path = Path("foo").resolve()
self.subject(["streamlink", "--logfile", "foo"])
self.write_file_and_assert(
mock_mkdir=path.mkdir,
mock_write=mock_open(str(path), "a").write,
mock_stdout=mock_stdout
)
@unittest.skipIf(is_win32, "test only applicable on a POSIX OS")
class TestCLIMainLoggingLogfilePosix(_TestCLIMainLogging):
@patch("sys.stdout")
@patch("builtins.open")
@patch("pathlib.Path.mkdir", Mock())
def test_logfile_path_absolute(self, mock_open, mock_stdout):
self.subject(["streamlink", "--logfile", "/foo/bar"])
self.write_file_and_assert(
mock_mkdir=PosixPath("/foo").mkdir,
mock_write=mock_open("/foo/bar", "a").write,
mock_stdout=mock_stdout
)
@patch("sys.stdout")
@patch("builtins.open")
@patch("pathlib.Path.mkdir", Mock())
def test_logfile_path_expanduser(self, mock_open, mock_stdout):
with patch.dict(os.environ, {"HOME": "/foo"}):
self.subject(["streamlink", "--logfile", "~/bar"])
self.write_file_and_assert(
mock_mkdir=PosixPath("/foo").mkdir,
mock_write=mock_open("/foo/bar", "a").write,
mock_stdout=mock_stdout
)
@patch("sys.stdout")
@patch("builtins.open")
@patch("pathlib.Path.mkdir", Mock())
@freezegun.freeze_time(datetime.datetime(2000, 1, 2, 3, 4, 5))
def test_logfile_path_auto(self, mock_open, mock_stdout):
with patch("streamlink_cli.constants.LOG_DIR", PosixPath("/foo")):
self.subject(["streamlink", "--logfile", "-"])
self.write_file_and_assert(
mock_mkdir=PosixPath("/foo").mkdir,
mock_write=mock_open("/foo/2000-01-02_03-04-05.log", "a").write,
mock_stdout=mock_stdout
)
@unittest.skipIf(not is_win32, "test only applicable on Windows")
class TestCLIMainLoggingLogfileWindows(_TestCLIMainLogging):
@patch("sys.stdout")
@patch("builtins.open")
@patch("pathlib.Path.mkdir", Mock())
def test_logfile_path_absolute(self, mock_open, mock_stdout):
self.subject(["streamlink", "--logfile", "C:\\foo\\bar"])
self.write_file_and_assert(
mock_mkdir=WindowsPath("C:\\foo").mkdir,
mock_write=mock_open("C:\\foo\\bar", "a").write,
mock_stdout=mock_stdout
)
@patch("sys.stdout")
@patch("builtins.open")
@patch("pathlib.Path.mkdir", Mock())
def test_logfile_path_expanduser(self, mock_open, mock_stdout):
with patch.dict(os.environ, {"USERPROFILE": "C:\\foo"}):
self.subject(["streamlink", "--logfile", "~\\bar"])
self.write_file_and_assert(
mock_mkdir=WindowsPath("C:\\foo").mkdir,
mock_write=mock_open("C:\\foo\\bar", "a").write,
mock_stdout=mock_stdout
)
@patch("sys.stdout")
@patch("builtins.open")
@patch("pathlib.Path.mkdir", Mock())
@freezegun.freeze_time(datetime.datetime(2000, 1, 2, 3, 4, 5))
def test_logfile_path_auto(self, mock_open, mock_stdout):
with patch("streamlink_cli.constants.LOG_DIR", WindowsPath("C:\\foo")):
self.subject(["streamlink", "--logfile", "-"])
self.write_file_and_assert(
mock_mkdir=WindowsPath("C:\\foo").mkdir,
mock_write=mock_open("C:\\foo\\2000-01-02_03-04-05.log", "a").write,
mock_stdout=mock_stdout
)
class TestCLIMainPrint(unittest.TestCase):
def subject(self):
with patch.object(Streamlink, "load_builtin_plugins"), \
patch.object(Streamlink, "resolve_url") as mock_resolve_url, \
patch.object(Streamlink, "resolve_url_no_redirect") as mock_resolve_url_no_redirect:
session = Streamlink()
session.load_plugins(os.path.join(os.path.dirname(__file__), "plugin"))
with patch("streamlink_cli.main.streamlink", session), \
patch("streamlink_cli.main.CONFIG_FILES", []), \
patch("streamlink_cli.main.setup_streamlink"), \
patch("streamlink_cli.main.setup_plugins"), \
patch("streamlink_cli.main.setup_http_session"), \
patch("streamlink_cli.main.setup_signals"), \
patch("streamlink_cli.main.setup_options") as mock_setup_options:
with self.assertRaises(SystemExit) as cm:
streamlink_cli.main.main()
self.assertEqual(cm.exception.code, 0)
mock_resolve_url.assert_not_called()
mock_resolve_url_no_redirect.assert_not_called()
mock_setup_options.assert_not_called()
@staticmethod
def get_stdout(mock_stdout):
return "".join([call_arg[0][0] for call_arg in mock_stdout.write.call_args_list])
@patch("sys.stdout")
@patch("sys.argv", ["streamlink"])
def test_print_usage(self, mock_stdout):
self.subject()
self.assertEqual(
self.get_stdout(mock_stdout),
"usage: streamlink [OPTIONS] <URL> [STREAM]\n\n"
+ "Use -h/--help to see the available options or read the manual at https://streamlink.github.io\n"
)
@patch("sys.stdout")
@patch("sys.argv", ["streamlink", "--help"])
def test_print_help(self, mock_stdout):
self.subject()
output = self.get_stdout(mock_stdout)
self.assertIn(
"usage: streamlink [OPTIONS] <URL> [STREAM]",
output
)
self.assertIn(
dedent("""
Streamlink is a command-line utility that extracts streams from various
services and pipes them into a video player of choice.
"""),
output
)
self.assertIn(
dedent("""
For more in-depth documentation see:
https://streamlink.github.io
Please report broken plugins or bugs to the issue tracker on Github:
https://github.com/streamlink/streamlink/issues
"""),
output
)
@patch("sys.stdout")
@patch("sys.argv", ["streamlink", "--plugins"])
def test_print_plugins(self, mock_stdout):
self.subject()
self.assertEqual(self.get_stdout(mock_stdout), "Loaded plugins: testplugin\n")
@patch("sys.stdout")
@patch("sys.argv", ["streamlink", "--plugins", "--json"])
def test_print_plugins_json(self, mock_stdout):
self.subject()
self.assertEqual(self.get_stdout(mock_stdout), """[\n "testplugin"\n]\n""")
avg_line_length: 39.807082 | max_line_length: 126 | alphanum_fraction: 0.620913
hexsha: 53d680752f6a5cb84d5a1f1a7365d8dea81c4947 | size: 2212 | ext: py | lang: Python
path: pyEX/tests/test_alternative.py | repo: briangu/pyEX @ 2eacc322932f4b686817b3d162cb1e4f399fd696 | licenses: ["Apache-2.0"]
stars: null | issues: null | forks: null
# *****************************************************************************
#
# Copyright (c) 2020, the pyEX authors.
#
# This file is part of the pyEX library, distributed under the terms of
# the Apache License 2.0. The full license can be found in the LICENSE file.
#
# for Coverage
import time
from mock import MagicMock, patch
SYMBOL = "aapl"
class TestAlternative:
def teardown(self):
time.sleep(0.1) # prevent being blocked
def test_crypto(self):
from pyEX import crypto
with patch("requests.get") as mock, patch("pickle.dump"):
mock.return_value = MagicMock()
mock.return_value.status_code = 200
crypto("test")
def test_cryptoDF(self):
from pyEX import cryptoDF
with patch("requests.get") as mock, patch("pickle.dump"):
mock.return_value = MagicMock()
mock.return_value.status_code = 200
mock.return_value.json = MagicMock(return_value=[])
cryptoDF("test")
def test_sentiment(self):
from pyEX import sentiment
with patch("requests.get") as mock, patch("pickle.dump"):
mock.return_value = MagicMock()
mock.return_value.status_code = 200
sentiment("test")
def test_sentimentDF(self):
from pyEX import sentimentDF
with patch("requests.get") as mock, patch("pickle.dump"):
mock.return_value = MagicMock()
mock.return_value.status_code = 200
mock.return_value.json = MagicMock(return_value=[])
sentimentDF("test")
def test_ceoComp(self):
from pyEX import ceoCompensation
with patch("requests.get") as mock, patch("pickle.dump"):
mock.return_value = MagicMock()
mock.return_value.status_code = 200
ceoCompensation("test")
def test_ceoCompDF(self):
from pyEX import ceoCompensationDF
with patch("requests.get") as mock, patch("pickle.dump"):
mock.return_value = MagicMock()
mock.return_value.status_code = 200
mock.return_value.json = MagicMock(return_value=[])
ceoCompensationDF("test")
avg_line_length: 31.15493 | max_line_length: 87 | alphanum_fraction: 0.607143
hexsha: 0503261b0a9299bf73042392cf49dce0d2c9b5aa | size: 2865 | ext: py | lang: Python
path: tests/test_double_headers.py | repo: papapumpnz/slowapi-1 @ 8cf5292c8d32e835bcde3998e8f3b23763709974 | licenses: ["MIT"]
stars: null | issues: null | forks: null
import hiro # type: ignore
import pytest # type: ignore
from starlette.requests import Request
from starlette.responses import PlainTextResponse, Response
from starlette.testclient import TestClient
from slowapi.util import get_ipaddr
from tests import TestSlowapi
def get_user_rate_limit() -> str:
return "1/10 seconds"
def default_identifier(request: Request) -> str:  # SNM
    """
    Return the known API key contained in the request, falling back to the
    client IP (or localhost) when no authenticated user is attached.

    :param request: [required] FastAPI request object
    :return str: the client_id from ``request.state.user``, else the client host
    """
    try:
        return request.state.user['client_id']
    except (AttributeError, KeyError, TypeError):
        return request.client.host or "127.0.0.1"
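# Resolution sketch: a request carrying request.state.user = {"client_id": "abc"}
# is keyed as "abc"; anything else falls back to the client host or 127.0.0.1.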
class TestHeaders(TestSlowapi):
def test_double_header(self): #SNM
app, limiter = self.build_fastapi_app(key_func=default_identifier,
headers_enabled=True,
in_memory_fallback_enabled=True,
swallow_errors=True)
@app.get("/t1")
@limiter.limit(get_user_rate_limit)
async def t1(request: Request,
response: Response,
):
return PlainTextResponse("test")
client = TestClient(app)
response = client.get("/t1")
assert response.status_code == 200
        # assert the x-ratelimit-limit header holds a single value (not doubled)
        x_ratelimit_limit = response.headers['x-ratelimit-limit']
        x_ratelimit_limit_arr = x_ratelimit_limit.split(',')
        assert len(x_ratelimit_limit_arr) <= 1
assert response.headers['x-ratelimit-limit']
assert response.headers['x-ratelimit-remaining']
assert response.headers['x-ratelimit-reset']
def test_single_header(self): #SNM
app, limiter = self.build_fastapi_app(key_func=default_identifier,
headers_enabled=True,
in_memory_fallback_enabled=True,
swallow_errors=True)
@app.get("/t2")
@limiter.limit("1/10 seconds")
async def t2(request: Request,
response: Response,
):
return PlainTextResponse("test")
client = TestClient(app)
response = client.get("/t2")
assert response.status_code == 200
# assert x_ratelimit_limit header is a single int
x_ratelimit_limit = response.headers['x-ratelimit-limit']
x_ratelimit_limit_arr = x_ratelimit_limit.split(',')
        assert len(x_ratelimit_limit_arr) <= 1
# assert rate limit headers
assert response.headers['x-ratelimit-limit']
assert response.headers['x-ratelimit-remaining']
assert response.headers['x-ratelimit-reset']
avg_line_length: 32.191011 | max_line_length: 95 | alphanum_fraction: 0.644677
hexsha: ce06e89d4886a3547dd3ba54eace2fc3d02e8598 | size: 8387 | ext: py | lang: Python
path: example/ssd/symbol/symbol_vgg16_reduced.py | stars repo: Abusnina/mxnet @ 7f8d94a24bf64fe0f24712a7952a09725c2df9bd | issues/forks repo: dmmiller612/mxnet @ 3f410c23cb02df64625d7c8f9f299b580236f6a5 | licenses: ["Apache-2.0"]
stars: 6 (2017-06-09T02:32:10.000Z → 2020-03-18T03:17:00.000Z) | issues: null | forks: 6 (2017-06-27T06:52:40.000Z → 2019-11-04T14:34:25.000Z)
import mxnet as mx
from common import conv_act_layer
from common import multibox_layer
def get_symbol_train(num_classes=20):
"""
Single-shot multi-box detection with VGG 16 layers ConvNet
This is a modified version, with fc6/fc7 layers replaced by conv layers
And the network is slightly smaller than original VGG 16 network
This is a training network with losses
Parameters:
----------
num_classes: int
number of object classes not including background
Returns:
----------
mx.Symbol
"""
data = mx.symbol.Variable(name="data")
label = mx.symbol.Variable(name="label")
# group 1
conv1_1 = mx.symbol.Convolution(
data=data, kernel=(3, 3), pad=(1, 1), num_filter=64, name="conv1_1")
relu1_1 = mx.symbol.Activation(data=conv1_1, act_type="relu", name="relu1_1")
conv1_2 = mx.symbol.Convolution(
data=relu1_1, kernel=(3, 3), pad=(1, 1), num_filter=64, name="conv1_2")
relu1_2 = mx.symbol.Activation(data=conv1_2, act_type="relu", name="relu1_2")
pool1 = mx.symbol.Pooling(
data=relu1_2, pool_type="max", kernel=(2, 2), stride=(2, 2), name="pool1")
# group 2
conv2_1 = mx.symbol.Convolution(
data=pool1, kernel=(3, 3), pad=(1, 1), num_filter=128, name="conv2_1")
relu2_1 = mx.symbol.Activation(data=conv2_1, act_type="relu", name="relu2_1")
conv2_2 = mx.symbol.Convolution(
data=relu2_1, kernel=(3, 3), pad=(1, 1), num_filter=128, name="conv2_2")
relu2_2 = mx.symbol.Activation(data=conv2_2, act_type="relu", name="relu2_2")
pool2 = mx.symbol.Pooling(
data=relu2_2, pool_type="max", kernel=(2, 2), stride=(2, 2), name="pool2")
# group 3
conv3_1 = mx.symbol.Convolution(
data=pool2, kernel=(3, 3), pad=(1, 1), num_filter=256, name="conv3_1")
relu3_1 = mx.symbol.Activation(data=conv3_1, act_type="relu", name="relu3_1")
conv3_2 = mx.symbol.Convolution(
data=relu3_1, kernel=(3, 3), pad=(1, 1), num_filter=256, name="conv3_2")
relu3_2 = mx.symbol.Activation(data=conv3_2, act_type="relu", name="relu3_2")
conv3_3 = mx.symbol.Convolution(
data=relu3_2, kernel=(3, 3), pad=(1, 1), num_filter=256, name="conv3_3")
relu3_3 = mx.symbol.Activation(data=conv3_3, act_type="relu", name="relu3_3")
pool3 = mx.symbol.Pooling(
data=relu3_3, pool_type="max", kernel=(2, 2), stride=(2, 2), \
pooling_convention="full", name="pool3")
# group 4
conv4_1 = mx.symbol.Convolution(
data=pool3, kernel=(3, 3), pad=(1, 1), num_filter=512, name="conv4_1")
relu4_1 = mx.symbol.Activation(data=conv4_1, act_type="relu", name="relu4_1")
conv4_2 = mx.symbol.Convolution(
data=relu4_1, kernel=(3, 3), pad=(1, 1), num_filter=512, name="conv4_2")
relu4_2 = mx.symbol.Activation(data=conv4_2, act_type="relu", name="relu4_2")
conv4_3 = mx.symbol.Convolution(
data=relu4_2, kernel=(3, 3), pad=(1, 1), num_filter=512, name="conv4_3")
relu4_3 = mx.symbol.Activation(data=conv4_3, act_type="relu", name="relu4_3")
pool4 = mx.symbol.Pooling(
data=relu4_3, pool_type="max", kernel=(2, 2), stride=(2, 2), name="pool4")
# group 5
conv5_1 = mx.symbol.Convolution(
data=pool4, kernel=(3, 3), pad=(1, 1), num_filter=512, name="conv5_1")
relu5_1 = mx.symbol.Activation(data=conv5_1, act_type="relu", name="relu5_1")
conv5_2 = mx.symbol.Convolution(
data=relu5_1, kernel=(3, 3), pad=(1, 1), num_filter=512, name="conv5_2")
relu5_2 = mx.symbol.Activation(data=conv5_2, act_type="relu", name="relu5_2")
conv5_3 = mx.symbol.Convolution(
data=relu5_2, kernel=(3, 3), pad=(1, 1), num_filter=512, name="conv5_3")
relu5_3 = mx.symbol.Activation(data=conv5_3, act_type="relu", name="relu5_3")
pool5 = mx.symbol.Pooling(
data=relu5_3, pool_type="max", kernel=(3, 3), stride=(1, 1),
pad=(1,1), name="pool5")
# group 6
conv6 = mx.symbol.Convolution(
data=pool5, kernel=(3, 3), pad=(6, 6), dilate=(6, 6),
num_filter=1024, name="conv6")
relu6 = mx.symbol.Activation(data=conv6, act_type="relu", name="relu6")
# drop6 = mx.symbol.Dropout(data=relu6, p=0.5, name="drop6")
# group 7
conv7 = mx.symbol.Convolution(
data=relu6, kernel=(1, 1), pad=(0, 0), num_filter=1024, name="conv7")
relu7 = mx.symbol.Activation(data=conv7, act_type="relu", name="relu7")
# drop7 = mx.symbol.Dropout(data=relu7, p=0.5, name="drop7")
### ssd extra layers ###
conv8_1, relu8_1 = conv_act_layer(relu7, "8_1", 256, kernel=(1,1), pad=(0,0), \
stride=(1,1), act_type="relu", use_batchnorm=False)
conv8_2, relu8_2 = conv_act_layer(relu8_1, "8_2", 512, kernel=(3,3), pad=(1,1), \
stride=(2,2), act_type="relu", use_batchnorm=False)
conv9_1, relu9_1 = conv_act_layer(relu8_2, "9_1", 128, kernel=(1,1), pad=(0,0), \
stride=(1,1), act_type="relu", use_batchnorm=False)
conv9_2, relu9_2 = conv_act_layer(relu9_1, "9_2", 256, kernel=(3,3), pad=(1,1), \
stride=(2,2), act_type="relu", use_batchnorm=False)
conv10_1, relu10_1 = conv_act_layer(relu9_2, "10_1", 128, kernel=(1,1), pad=(0,0), \
stride=(1,1), act_type="relu", use_batchnorm=False)
conv10_2, relu10_2 = conv_act_layer(relu10_1, "10_2", 256, kernel=(3,3), pad=(1,1), \
stride=(2,2), act_type="relu", use_batchnorm=False)
# global Pooling
pool10 = mx.symbol.Pooling(data=relu10_2, pool_type="avg",
global_pool=True, kernel=(1,1), name='pool10')
# specific parameters for VGG16 network
from_layers = [relu4_3, relu7, relu8_2, relu9_2, relu10_2, pool10]
sizes = [[.1], [.2,.276], [.38, .461], [.56, .644], [.74, .825], [.92, 1.01]]
ratios = [[1,2,.5], [1,2,.5,3,1./3], [1,2,.5,3,1./3], [1,2,.5,3,1./3], \
[1,2,.5,3,1./3], [1,2,.5,3,1./3]]
normalizations = [20, -1, -1, -1, -1, -1]
num_channels = [512]
loc_preds, cls_preds, anchor_boxes = multibox_layer(from_layers, \
num_classes, sizes=sizes, ratios=ratios, normalization=normalizations, \
num_channels=num_channels, clip=True, interm_layer=0)
tmp = mx.symbol.MultiBoxTarget(
*[anchor_boxes, label, cls_preds], overlap_threshold=.5, \
ignore_label=-1, negative_mining_ratio=3, minimum_negative_samples=0, \
negative_mining_thresh=.5, variances=(0.1, 0.1, 0.2, 0.2),
name="multibox_target")
loc_target = tmp[0]
loc_target_mask = tmp[1]
cls_target = tmp[2]
cls_prob = mx.symbol.SoftmaxOutput(data=cls_preds, label=cls_target, \
ignore_label=-1, use_ignore=True, grad_scale=3., multi_output=True, \
normalization='valid', name="cls_prob")
loc_loss_ = mx.symbol.smooth_l1(name="loc_loss_", \
data=loc_target_mask * (loc_preds - loc_target), scalar=1.0)
loc_loss = mx.symbol.MakeLoss(loc_loss_, grad_scale=1., \
normalization='valid', name="loc_loss")
# monitoring training status
cls_label = mx.symbol.MakeLoss(data=cls_target, grad_scale=0, name="cls_label")
# group output
out = mx.symbol.Group([cls_prob, loc_loss, cls_label])
return out
def get_symbol(num_classes=20, nms_thresh=0.5, force_suppress=True):
"""
Single-shot multi-box detection with VGG 16 layers ConvNet
This is a modified version, with fc6/fc7 layers replaced by conv layers
And the network is slightly smaller than original VGG 16 network
This is the detection network
Parameters:
----------
num_classes: int
number of object classes not including background
nms_thresh : float
threshold of overlap for non-maximum suppression
Returns:
----------
mx.Symbol
"""
net = get_symbol_train(num_classes)
# print net.get_internals().list_outputs()
cls_preds = net.get_internals()["multibox_cls_pred_output"]
loc_preds = net.get_internals()["multibox_loc_pred_output"]
anchor_boxes = net.get_internals()["multibox_anchors_output"]
cls_prob = mx.symbol.SoftmaxActivation(data=cls_preds, mode='channel', \
name='cls_prob')
# group output
# out = mx.symbol.Group([loc_preds, cls_preds, anchor_boxes])
out = mx.symbol.MultiBoxDetection(*[cls_prob, loc_preds, anchor_boxes], \
name="detection", nms_threshold=nms_thresh, force_suppress=force_suppress,
variances=(0.1, 0.1, 0.2, 0.2))
return out
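# Usage sketch (illustrative; both entry points are defined above):
#   train_sym = get_symbol_train(num_classes=20)              # network with losses
#   detect_sym = get_symbol(num_classes=20, nms_thresh=0.5)   # adds MultiBoxDetection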
avg_line_length: 47.384181 | max_line_length: 89 | alphanum_fraction: 0.650888
hexsha: c6bda0d2036532c262bb3f4057718ae41c059d26 | size: 2121 | ext: py | lang: Python
path: cosmosis/datablock/generate_sections.py | repo: ktanidis2/Modified_CosmoSIS_for_galaxy_number_count_angular_power_spectra @ 07e5d308c6a8641a369a3e0b8d13c4104988cd2b | licenses: ["BSD-2-Clause"]
stars: 1 (2021-09-15T10:10:26.000Z) | issues: null | forks: 1 (2021-06-11T15:29:43.000Z)
from __future__ import print_function
import sys
fortran_template = """
! This module is auto-generated from the file cosmosis/datablock/section_names.txt.
! Edit that and then re-make to add your own pre-defined section names.
module cosmosis_section_names
implicit none
{0}
end module
"""
c_template = """
// This header file is auto-generated from the file cosmosis/datablock/section_names.txt.
// Edit that and then re-make to add your own pre-defined section names.
{0}
"""
python_template = """
# This module is auto-generated from the file cosmosis/datablock/section_names.txt.
# Edit that and then re-make to add your own pre-defined section names.
{0}
"""
def generate_python(section_names, filename):
    sections = "\n".join('{0} = "{0}"'.format(name) for name in section_names)
    with open(filename, 'w') as output:
        output.write(python_template.format(sections))
def generate_fortran(section_names, filename):
    sections = "\n".join('    character(*), parameter :: {0}_section = "{0}"'.format(name) for name in section_names)
    with open(filename, 'w') as output:
        output.write(fortran_template.format(sections))
def generate_c(section_names, filename):
    sections = "\n".join('#define {0}_SECTION "{1}"'.format(name.upper(), name) for name in section_names)
    with open(filename, 'w') as output:
        output.write(c_template.format(sections))
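# Example outputs (sketch, for a hypothetical section name "cosmological_parameters"):
#   python : cosmological_parameters = "cosmological_parameters"
#   fortran:     character(*), parameter :: cosmological_parameters_section = "cosmological_parameters"
#   c      : #define COSMOLOGICAL_PARAMETERS_SECTION "cosmological_parameters"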
def generate(section_list_filename, language, output_filename):
section_names = []
for line in open(section_list_filename):
line=line.strip()
if line.startswith('#') or not line:
continue
line=line.split('#')[0].strip()
section_names.append(line)
if language == "c":
generate_c(section_names, output_filename)
elif language == "python":
generate_python(section_names, output_filename)
elif language == "fortran":
generate_fortran(section_names, output_filename)
else:
print("Unknown language specified: %s" % language)
sys.exit(2)
if __name__ == '__main__':
# Required syntax is:
# $ python generate_sections.py <template-file> <target-language> <output-file>
#
if len(sys.argv) != 4:
print("%s requires 4 arguments" % sys.argv[0])
sys.exit(1)
generate(sys.argv[1], sys.argv[2], sys.argv[3])
avg_line_length: 29.054795 | max_line_length: 114 | alphanum_fraction: 0.72843
hexsha: 77e5e28957587a8bf66ee5405bac96d7321ab040 | size: 4576 | ext: py | lang: Python
path: Lib/test/test_dictcomps.py | repo: mainsail-org/RustPython @ 5d2d87c24f1ff7201fcc8d4fcffadb0ec12dc127 | licenses: ["CC-BY-4.0", "MIT"]
stars: 11058 (2018-05-29T07:40:06.000Z → 2022-03-31T11:38:42.000Z) | issues: 2105 (2018-06-01T10:07:16.000Z → 2022-03-31T14:56:42.000Z) | forks: 914 (2018-07-27T09:36:14.000Z → 2022-03-31T19:56:34.000Z)
import unittest
# For scope testing.
g = "Global variable"
class DictComprehensionTest(unittest.TestCase):
def test_basics(self):
expected = {0: 10, 1: 11, 2: 12, 3: 13, 4: 14, 5: 15, 6: 16, 7: 17,
8: 18, 9: 19}
actual = {k: k + 10 for k in range(10)}
self.assertEqual(actual, expected)
expected = {0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, 9: 9}
actual = {k: v for k in range(10) for v in range(10) if k == v}
self.assertEqual(actual, expected)
def test_scope_isolation(self):
k = "Local Variable"
expected = {0: None, 1: None, 2: None, 3: None, 4: None, 5: None,
6: None, 7: None, 8: None, 9: None}
actual = {k: None for k in range(10)}
self.assertEqual(actual, expected)
self.assertEqual(k, "Local Variable")
expected = {9: 1, 18: 2, 19: 2, 27: 3, 28: 3, 29: 3, 36: 4, 37: 4,
38: 4, 39: 4, 45: 5, 46: 5, 47: 5, 48: 5, 49: 5, 54: 6,
55: 6, 56: 6, 57: 6, 58: 6, 59: 6, 63: 7, 64: 7, 65: 7,
66: 7, 67: 7, 68: 7, 69: 7, 72: 8, 73: 8, 74: 8, 75: 8,
76: 8, 77: 8, 78: 8, 79: 8, 81: 9, 82: 9, 83: 9, 84: 9,
85: 9, 86: 9, 87: 9, 88: 9, 89: 9}
actual = {k: v for v in range(10) for k in range(v * 9, v * 10)}
self.assertEqual(k, "Local Variable")
self.assertEqual(actual, expected)
def test_scope_isolation_from_global(self):
expected = {0: None, 1: None, 2: None, 3: None, 4: None, 5: None,
6: None, 7: None, 8: None, 9: None}
actual = {g: None for g in range(10)}
self.assertEqual(actual, expected)
self.assertEqual(g, "Global variable")
expected = {9: 1, 18: 2, 19: 2, 27: 3, 28: 3, 29: 3, 36: 4, 37: 4,
38: 4, 39: 4, 45: 5, 46: 5, 47: 5, 48: 5, 49: 5, 54: 6,
55: 6, 56: 6, 57: 6, 58: 6, 59: 6, 63: 7, 64: 7, 65: 7,
66: 7, 67: 7, 68: 7, 69: 7, 72: 8, 73: 8, 74: 8, 75: 8,
76: 8, 77: 8, 78: 8, 79: 8, 81: 9, 82: 9, 83: 9, 84: 9,
85: 9, 86: 9, 87: 9, 88: 9, 89: 9}
actual = {g: v for v in range(10) for g in range(v * 9, v * 10)}
self.assertEqual(g, "Global variable")
self.assertEqual(actual, expected)
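    # Note: in Python 3 each comprehension runs in its own implicit scope, so
    # the loop variable can never rebind the local or global names checked
    # above and below.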
def test_global_visibility(self):
expected = {0: 'Global variable', 1: 'Global variable',
2: 'Global variable', 3: 'Global variable',
4: 'Global variable', 5: 'Global variable',
6: 'Global variable', 7: 'Global variable',
8: 'Global variable', 9: 'Global variable'}
actual = {k: g for k in range(10)}
self.assertEqual(actual, expected)
def test_local_visibility(self):
v = "Local variable"
expected = {0: 'Local variable', 1: 'Local variable',
2: 'Local variable', 3: 'Local variable',
4: 'Local variable', 5: 'Local variable',
6: 'Local variable', 7: 'Local variable',
8: 'Local variable', 9: 'Local variable'}
actual = {k: v for k in range(10)}
self.assertEqual(actual, expected)
self.assertEqual(v, "Local variable")
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_illegal_assignment(self):
with self.assertRaisesRegex(SyntaxError, "cannot assign"):
compile("{x: y for y, x in ((1, 2), (3, 4))} = 5", "<test>",
"exec")
with self.assertRaisesRegex(SyntaxError, "cannot assign"):
compile("{x: y for y, x in ((1, 2), (3, 4))} += 5", "<test>",
"exec")
def test_evaluation_order(self):
expected = {
'H': 'W',
'e': 'o',
'l': 'l',
'o': 'd',
}
expected_calls = [
('key', 'H'), ('value', 'W'),
('key', 'e'), ('value', 'o'),
('key', 'l'), ('value', 'r'),
('key', 'l'), ('value', 'l'),
('key', 'o'), ('value', 'd'),
]
actual_calls = []
def add_call(pos, value):
actual_calls.append((pos, value))
return value
actual = {
add_call('key', k): add_call('value', v)
for k, v in zip('Hello', 'World')
}
self.assertEqual(actual, expected)
self.assertEqual(actual_calls, expected_calls)
if __name__ == "__main__":
unittest.main()
avg_line_length: 38.779661 | max_line_length: 79 | alphanum_fraction: 0.476399
hexsha: aa587db0b8353d794b61c2fa0cdf9cffbf74bfdb | size: 752 | ext: py | lang: Python
path: wesay/sikuli/examples/test_and_log/yattag_templates/thing_yattag.py | repo: rrmhearts/sikuli @ 63151fe306a0a86d24b5f4c1845ccbeee5c53939 | licenses: ["MIT"]
stars: null | issues: null | forks: null
from __future__ import with_statement
from sikuli import *
from yattag import Doc
import type0, type1, type2
def template(failed_test_data):
doc, tag, text = Doc().tagtext()
doc.asis('<!DOCTYPE html>')
with tag("html", lang="en_us"):
with tag("head"):
with tag("title"):
text("Test page")
with tag("body"):
with tag("ul"):
for fail in failed_test_data:
if fail.type == 0:
doc.asis(type0.template(fail))
elif fail.type == 1:
doc.asis(type1.template(fail))
else:
doc.asis(type2.template(fail))
return doc
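# Usage sketch (assumes each item in failed_test_data exposes a ``type``
# attribute, as dispatched above):
#   doc = template(failed_tests)
#   html = doc.getvalue()  # yattag Doc -> HTML string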
avg_line_length: 28.923077 | max_line_length: 55 | alphanum_fraction: 0.484043
hexsha: 9df43967d48e51ba913c7247f9c1fc084de44efb | size: 38866 | ext: py | lang: Python
path: src/sage/combinat/ribbon_tableau.py | repo: hsm207/sage @ 020bd59ec28717bfab9af44d2231c53da1ff99f1 | licenses: ["BSL-1.0"]
stars: 1 (2021-10-18T01:24:04.000Z) | issues: null | forks: null
r"""
Ribbon Tableaux
"""
# ****************************************************************************
# Copyright (C) 2007 Mike Hansen <mhansen@gmail.com>,
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# The full text of the GPL is available at:
#
# https://www.gnu.org/licenses/
# ****************************************************************************
from __future__ import division, print_function, absolute_import
from sage.structure.parent import Parent
from sage.structure.element import parent
from sage.structure.unique_representation import UniqueRepresentation
from sage.categories.finite_enumerated_sets import FiniteEnumeratedSets
from sage.categories.sets_cat import Sets
from sage.rings.all import QQ, ZZ
from sage.rings.integer import Integer
from sage.combinat.combinat import CombinatorialElement
from sage.combinat.skew_partition import SkewPartition, SkewPartitions
from sage.combinat.skew_tableau import SkewTableau, SkewTableaux, SemistandardSkewTableaux
from sage.combinat.tableau import Tableaux
from sage.combinat.partition import Partition, _Partitions
from . import permutation
import functools
from sage.combinat.permutation import to_standard
class RibbonTableau(SkewTableau):
r"""
A ribbon tableau.
A ribbon is a connected skew shape which does not contain any
`2 \times 2` boxes. A ribbon tableau is a skew tableau
whose shape is partitioned into ribbons, each of which is filled
with identical entries.
EXAMPLES::
sage: rt = RibbonTableau([[None, 1],[2,3]]); rt
[[None, 1], [2, 3]]
sage: rt.inner_shape()
[1]
sage: rt.outer_shape()
[2, 2]
sage: rt = RibbonTableau([[None, None, 0, 0, 0], [None, 0, 0, 2], [1, 0, 1]]); rt.pp()
. . 0 0 0
. 0 0 2
1 0 1
In the previous example, each ribbon is uniquely determined by a
non-zero entry. The 0 entries are used to fill in the rest of the
skew shape.
.. NOTE::
Sanity checks are not performed; lists can contain any object.
::
sage: RibbonTableau(expr=[[1,1],[[5],[3,4],[1,2]]])
[[None, 1, 2], [None, 3, 4], [5]]
TESTS::
sage: RibbonTableau([[0, 0, 3, 0], [1, 1, 0], [2, 0, 4]]).evaluation()
[2, 1, 1, 1]
"""
    # The following method is private and will only get called
    # when calling RibbonTableau() directly, and not via element_class.
@staticmethod
def __classcall_private__(cls, rt=None, expr=None):
"""
Return a ribbon tableau object.
EXAMPLES::
sage: rt = RibbonTableau([[None, 1],[2,3]]); rt
[[None, 1], [2, 3]]
sage: TestSuite(rt).run()
"""
if expr is not None:
return RibbonTableaux().from_expr(expr)
try:
rt = [tuple(row) for row in rt]
except TypeError:
raise TypeError("each element of the ribbon tableau must be an iterable")
if not all(row for row in rt):
raise TypeError("a ribbon tableau cannot have empty rows")
        # calls the inherited __init__ method (of SkewTableau)
return RibbonTableaux()(rt)
def length(self):
"""
Return the length of the ribbons into a ribbon tableau.
EXAMPLES::
sage: RibbonTableau([[None, 1],[2,3]]).length()
1
sage: RibbonTableau([[1,0],[2,0]]).length()
2
"""
if self.to_expr() == [[], []]:
return 0
tableau = self.to_expr()[1]
l = 0
t = 0
for k in range(len(tableau)):
t += len([x for x in tableau[k] if x is not None and x > -1])
l += len([x for x in tableau[k] if x is not None and x > 0])
if l == 0:
return t
else:
return t // l
def to_word(self):
"""
Return a word obtained from a row reading of ``self``.
.. WARNING::
Unlike the ``to_word`` method on skew tableaux (which are a
superclass of this), this method does not filter out
``None`` entries.
EXAMPLES::
sage: R = RibbonTableau([[0, 0, 3, 0], [1, 1, 0], [2, 0, 4]])
sage: R.to_word()
word: 2041100030
"""
from sage.combinat.words.word import Word
return Word([letter for row in reversed(self) for letter in row])
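# Note on the ``expr`` format accepted above (see ``from_expr`` below): it is
# ``[[inner partition], [rows bottom-to-top]]``; e.g.
# ``RibbonTableau(expr=[[1, 1], [[5], [3, 4], [1, 2]]])`` has inner shape
# [1, 1] and rows [5], [3, 4], [1, 2] read from the bottom row upward.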
#####################
# Ribbon Tableaux #
#####################
class RibbonTableaux(UniqueRepresentation, Parent):
r"""
Ribbon tableaux.
A ribbon tableau is a skew tableau whose skew shape ``shape`` is
tiled by ribbons of length ``length``. The weight ``weight`` is
calculated from the labels on the ribbons.
.. NOTE::
Here we impose the condition that the ribbon tableaux are semistandard.
    INPUT (all optional):
- ``shape`` -- skew shape as a list of lists or an object of type
SkewPartition
- ``length`` -- integer, ``shape`` is partitioned into ribbons of
length ``length``
- ``weight`` -- list of integers, computed from the values of
non-zero entries labeling the ribbons
EXAMPLES::
sage: RibbonTableaux([[2,1],[]], [1,1,1], 1)
Ribbon tableaux of shape [2, 1] / [] and weight [1, 1, 1] with 1-ribbons
sage: R = RibbonTableaux([[5,4,3],[2,1]], [2,1], 3)
sage: for i in R: i.pp(); print("\n")
. . 0 0 0
. 0 0 2
1 0 1
<BLANKLINE>
. . 1 0 0
. 0 0 0
1 0 2
<BLANKLINE>
. . 0 0 0
. 1 0 1
2 0 0
<BLANKLINE>
REFERENCES:
.. [vanLeeuwen91] Marc. A. A. van Leeuwen, *Edge sequences, ribbon tableaux,
and an action of affine permutations*. Europe J. Combinatorics. **20**
(1999). http://wwwmathlabo.univ-poitiers.fr/~maavl/pdf/edgeseqs.pdf
"""
@staticmethod
def __classcall_private__(cls, shape=None, weight=None, length=None):
"""
Return the correct parent object.
EXAMPLES::
sage: R = RibbonTableaux([[2,1],[]],[1,1,1],1)
sage: R2 = RibbonTableaux(SkewPartition([[2,1],[]]),(1,1,1),1)
sage: R is R2
True
"""
if shape is None and weight is None and length is None:
return super(RibbonTableaux, cls).__classcall__(cls)
return RibbonTableaux_shape_weight_length(shape, weight, length)
def __init__(self):
"""
EXAMPLES::
sage: R = RibbonTableaux()
sage: TestSuite(R).run()
"""
Parent.__init__(self, category=Sets())
def _repr_(self):
"""
Return a string representation of ``self``.
EXAMPLES::
sage: RibbonTableaux()
Ribbon tableaux
"""
return "Ribbon tableaux"
def _element_constructor_(self, rt):
"""
Construct an element of ``self`` from ``rt``.
EXAMPLES::
sage: R = RibbonTableaux()
sage: elt = R([[0, 0, 3, 0], [1, 1, 0], [2, 0, 4]]); elt
[[0, 0, 3, 0], [1, 1, 0], [2, 0, 4]]
sage: elt.parent() is R
True
"""
return self.element_class(self, rt)
def from_expr(self, l):
"""
Return a :class:`RibbonTableau` from a MuPAD-Combinat expr for a skew
tableau. The first list in ``expr`` is the inner shape of the skew
tableau. The second list are the entries in the rows of the skew
tableau from bottom to top.
Provided primarily for compatibility with MuPAD-Combinat.
EXAMPLES::
sage: RibbonTableaux().from_expr([[1,1],[[5],[3,4],[1,2]]])
[[None, 1, 2], [None, 3, 4], [5]]
"""
return self.element_class(self, SkewTableaux().from_expr(l))
Element = RibbonTableau
options = Tableaux.options
class RibbonTableaux_shape_weight_length(RibbonTableaux):
"""
Ribbon tableaux of a given shape, weight, and length.
"""
@staticmethod
def __classcall_private__(cls, shape, weight, length):
"""
Normalize input to ensure a unique representation.
EXAMPLES::
sage: R = RibbonTableaux([[2,1],[]],[1,1,1],1)
sage: R2 = RibbonTableaux(SkewPartition([[2,1],[]]),(1,1,1),1)
sage: R is R2
True
"""
if shape in _Partitions:
shape = _Partitions(shape)
shape = SkewPartition([shape, shape.core(length)])
else:
shape = SkewPartition(shape)
if shape.size() != length*sum(weight):
raise ValueError("Incompatible shape and weight")
return super(RibbonTableaux, cls).__classcall__(cls, shape, tuple(weight), length)
def __init__(self, shape, weight, length):
"""
EXAMPLES::
sage: R = RibbonTableaux([[2,1],[]],[1,1,1],1)
sage: TestSuite(R).run()
"""
self._shape = shape
self._weight = weight
self._length = length
Parent.__init__(self, category=FiniteEnumeratedSets())
def __iter__(self):
"""
EXAMPLES::
sage: RibbonTableaux([[2,1],[]],[1,1,1],1).list()
[[[1, 3], [2]], [[1, 2], [3]]]
sage: RibbonTableaux([[2,2],[]],[1,1],2).list()
[[[0, 0], [1, 2]], [[1, 0], [2, 0]]]
"""
for x in graph_implementation_rec(self._shape, self._weight, self._length, list_rec):
yield self.from_expr(x)
def _repr_(self):
"""
Return a string representation of ``self``.
EXAMPLES::
sage: RibbonTableaux([[2,1],[]], [1,1,1], 1)
Ribbon tableaux of shape [2, 1] / [] and weight [1, 1, 1] with 1-ribbons
"""
return "Ribbon tableaux of shape %s and weight %s with %s-ribbons" % (repr(self._shape), list(self._weight), self._length)
def __contains__(self, x):
"""
Note that this just checks to see if ``x`` appears in ``self``.
This should be improved to provide actual checking.
EXAMPLES::
sage: r = RibbonTableaux([[2,2],[]],[1,1],2)
sage: [[0, 0], [1, 2]] in r
True
sage: [[1, 0], [2, 0]] in r
True
sage: [[0, 1], [2, 0]] in r
False
"""
try:
x = RibbonTableau(x)
except (ValueError, TypeError):
return False
return x in self.list()
#return x.is_ribbon() and x.shape() == self._shape \
#and tuple(x.weight()) == self._weight and x in list(self)
def cardinality(self):
"""
Return the cardinality of ``self``.
EXAMPLES::
sage: RibbonTableaux([[2,1],[]],[1,1,1],1).cardinality()
2
sage: RibbonTableaux([[2,2],[]],[1,1],2).cardinality()
2
sage: RibbonTableaux([[4,3,3],[]],[2,1,1,1],2).cardinality()
5
TESTS::
sage: RibbonTableaux([6,6,6], [4,2], 3).cardinality()
6
sage: RibbonTableaux([3,3,3,2,1], [3,1], 3).cardinality()
1
sage: RibbonTableaux([3,3,3,2,1], [2,2], 3).cardinality()
2
sage: RibbonTableaux([3,3,3,2,1], [2,1,1], 3).cardinality()
5
sage: RibbonTableaux([3,3,3,2,1], [1,1,1,1], 3).cardinality()
12
sage: RibbonTableaux([5,4,3,2,1], [2,2,1], 3).cardinality()
10
::
sage: RibbonTableaux([8,7,6,5,1,1], [3,2,2,1], 3).cardinality()
85
sage: RibbonTableaux([5,4,3,2,1,1,1], [2,2,1], 3).cardinality()
10
::
sage: RibbonTableaux([7,7,7,2,1,1], [3,2,0,1,1], 3).cardinality()
25
Weights with some zeros in the middle and end::
sage: RibbonTableaux([3,3,3], [0,1,0,2,0], 3).cardinality()
3
sage: RibbonTableaux([3,3,3], [1,0,1,0,1,0,0,0], 3).cardinality()
6
"""
# Strip zeros for graph_implementation_rec
wt = [i for i in self._weight if i != 0]
return Integer(graph_implementation_rec(self._shape, wt, self._length, count_rec)[0])
def insertion_tableau(skp, perm, evaluation, tableau, length):
"""
INPUT:
- ``skp`` -- skew partitions
- ``perm, evaluation`` -- non-negative integers
- ``tableau`` -- skew tableau
- ``length`` -- integer
TESTS::
sage: from sage.combinat.ribbon_tableau import insertion_tableau
sage: insertion_tableau([[1], []], [1], 1, [[], []], 1)
[[], [[1]]]
sage: insertion_tableau([[2, 1], []], [1, 1], 2, [[], [[1]]], 1)
[[], [[2], [1, 2]]]
sage: insertion_tableau([[2, 1], []], [0, 0], 3, [[], [[2], [1, 2]]], 1)
[[], [[2], [1, 2]]]
sage: insertion_tableau([[1, 1], []], [1], 2, [[], [[1]]], 1)
[[], [[2], [1]]]
sage: insertion_tableau([[2], []], [0, 1], 2, [[], [[1]]], 1)
[[], [[1, 2]]]
sage: insertion_tableau([[2, 1], []], [0, 1], 3, [[], [[2], [1]]], 1)
[[], [[2], [1, 3]]]
sage: insertion_tableau([[1, 1], []], [2], 1, [[], []], 2)
[[], [[1], [0]]]
sage: insertion_tableau([[2], []], [2, 0], 1, [[], []], 2)
[[], [[1, 0]]]
sage: insertion_tableau([[2, 2], []], [0, 2], 2, [[], [[1], [0]]], 2)
[[], [[1, 2], [0, 0]]]
sage: insertion_tableau([[2, 2], []], [2, 0], 2, [[], [[1, 0]]], 2)
[[], [[2, 0], [1, 0]]]
sage: insertion_tableau([[2, 2], [1]], [3, 0], 1, [[], []], 3)
[[1], [[1, 0], [0]]]
"""
psave = Partition(skp[1])
partc = skp[1] + [0]*(len(skp[0])-len(skp[1]))
tableau = SkewTableau(expr=tableau).to_expr()[1]
for k in range(len(tableau)):
tableau[-(k+1)] += [0] * ( skp[0][k] - partc[k] - len(tableau[-(k+1)]))
## We construct a tableau from the southwest corner to the northeast one
tableau = [[0] * (skp[0][k] - partc[k])
for k in reversed(range(len(tableau), len(skp[0])))] + tableau
tableau = SkewTableaux().from_expr([skp[1], tableau]).conjugate()
tableau = tableau.to_expr()[1]
skp = SkewPartition(skp).conjugate().to_list()
skp[1].extend( [0]*(len(skp[0])-len(skp[1])) )
if len(perm) > len(skp[0]):
return None
for k in range(len(perm)):
if perm[ -(k+1) ] !=0:
tableau[len(tableau)-len(perm)+k][ skp[0][len(perm)-(k+1)] - skp[1][ len(perm)-(k+1) ] - 1 ] = evaluation
return SkewTableau(expr=[psave.conjugate(),tableau]).conjugate().to_expr()
def count_rec(nexts, current, part, weight, length):
"""
INPUT:
- ``nexts, current, part`` -- skew partitions
- ``weight`` -- non-negative integer list
- ``length`` -- integer
TESTS::
sage: from sage.combinat.ribbon_tableau import count_rec
sage: count_rec([], [], [[2, 1, 1], []], [2], 2)
[0]
sage: count_rec([[0], [1]], [[[2, 1, 1], [0, 0, 2, 0]], [[4], [2, 0, 0, 0]]], [[4, 1, 1], []], [2, 1], 2)
[1]
sage: count_rec([], [[[], [2, 2]]], [[2, 2], []], [2], 2)
[1]
sage: count_rec([], [[[], [2, 0, 2, 0]]], [[4], []], [2], 2)
[1]
sage: count_rec([[1], [1]], [[[2, 2], [0, 0, 2, 0]], [[4], [2, 0, 0, 0]]], [[4, 2], []], [2, 1], 2)
[2]
sage: count_rec([[1], [1], [2]], [[[2, 2, 2], [0, 0, 2, 0]], [[4, 1, 1], [0, 2, 0, 0]], [[4, 2], [2, 0, 0, 0]]], [[4, 2, 2], []], [2, 1, 1], 2)
[4]
sage: count_rec([[4], [1]], [[[4, 2, 2], [0, 0, 2, 0]], [[4, 3, 1], [0, 2, 0, 0]]], [[4, 3, 3], []], [2, 1, 1, 1], 2)
[5]
"""
if not current:
return [0]
if nexts:
return [sum(j for i in nexts for j in i)]
else:
return [len(current)]
def list_rec(nexts, current, part, weight, length):
"""
INPUT:
- ``nexts, current, part`` -- skew partitions
- ``weight`` -- non-negative integer list
- ``length`` -- integer
TESTS::
sage: from sage.combinat.ribbon_tableau import list_rec
sage: list_rec([], [[[], [1]]], [[1], []], [1], 1)
[[[], [[1]]]]
sage: list_rec([[[[], [[1]]]]], [[[1], [1, 1]]], [[2, 1], []], [1, 2], 1)
[[[], [[2], [1, 2]]]]
sage: list_rec([], [[[1], [3, 0]]], [[2, 2], [1]], [1], 3)
[[[1], [[1, 0], [0]]]]
sage: list_rec([[[[], [[2]]]]], [[[1], [1, 1]]], [[2, 1], []], [0, 1, 2], 1)
[[[], [[3], [2, 3]]]]
sage: list_rec([], [[[], [2]]], [[1, 1], []], [1], 2)
[[[], [[1], [0]]]]
sage: list_rec([], [[[], [2, 0]]], [[2], []], [1], 2)
[[[], [[1, 0]]]]
sage: list_rec([[[[], [[1], [0]]]], [[[], [[1, 0]]]]], [[[1, 1], [0, 2]], [[2], [2, 0]]], [[2, 2], []], [1, 1], 2)
[[[], [[1, 2], [0, 0]]], [[], [[2, 0], [1, 0]]]]
sage: list_rec([], [[[], [2, 2]]], [[2, 2], []], [2], 2)
[[[], [[1, 1], [0, 0]]]]
sage: list_rec([], [[[], [1, 1]]], [[2], []], [2], 1)
[[[], [[1, 1]]]]
sage: list_rec([[[[], [[1, 1]]]]], [[[2], [1, 1]]], [[2, 2], []], [2, 2], 1)
[[[], [[2, 2], [1, 1]]]]
"""
if current == [] and nexts == [] and weight == []:
return [[part[1],[]]]
## Test if the current nodes is not an empty node
if not current:
return []
## Test if the current nodes drive us to new solutions
if nexts:
res = []
for i in range(len(current)):
for j in range(len(nexts[i])):
res.append( insertion_tableau(part, current[i][1], len(weight), nexts[i][j], length) )
return res
else:
## The current nodes are at the bottom of the tree
res = []
for i in range(len(current)):
res.append( insertion_tableau(part, current[i][1], len(weight), [[],[]], length) )
return res
#############################
#Spin and Cospin Polynomials#
#############################
def spin_rec(t, nexts, current, part, weight, length):
"""
Routine used for constructing the spin polynomial.
INPUT:
- ``weight`` -- list of non-negative integers
- ``length`` -- the length of the ribbons we're tiling with
- ``t`` -- the variable
EXAMPLES::
sage: from sage.combinat.ribbon_tableau import spin_rec
sage: sp = SkewPartition
sage: t = ZZ['t'].gen()
sage: spin_rec(t, [], [[[], [3, 3]]], sp([[2, 2, 2], []]), [2], 3)
[t^4]
sage: spin_rec(t, [[0], [t^4]], [[[2, 1, 1, 1, 1], [0, 3]], [[2, 2, 2], [3, 0]]], sp([[2, 2, 2, 2, 1], []]), [2, 1], 3)
[t^5]
sage: spin_rec(t, [], [[[], [3, 3, 0]]], sp([[3, 3], []]), [2], 3)
[t^2]
sage: spin_rec(t, [[t^4], [t^3], [t^2]], [[[2, 2, 2], [0, 0, 3]], [[3, 2, 1], [0, 3, 0]], [[3, 3], [3, 0, 0]]], sp([[3, 3, 3], []]), [2, 1], 3)
[t^6 + t^4 + t^2]
sage: spin_rec(t, [[t^5], [t^4], [t^6 + t^4 + t^2]], [[[2, 2, 2, 2, 1], [0, 0, 3]], [[3, 3, 1, 1, 1], [0, 3, 0]], [[3, 3, 3], [3, 0, 0]]], sp([[3, 3, 3, 2, 1], []]), [2, 1, 1], 3)
[2*t^7 + 2*t^5 + t^3]
"""
if not current:
return [parent(t).zero()]
tmp = []
partp = part[0].conjugate()
ell = len(partp)
#compute the contribution of the ribbons added at
#the current node
for val in current:
perms = val[1]
perm = [partp[i] + ell - (i + 1) - perms[i] for i in reversed(range(ell))]
perm = to_standard(perm)
tmp.append( weight[-1]*(length-1) - perm.number_of_inversions() )
if nexts:
return [ sum(sum(t**tval * nval for nval in nexts[i])
for i, tval in enumerate(tmp)) ]
else:
return [ sum(t**val for val in tmp) ]
def spin_polynomial_square(part, weight, length):
r"""
Returns the spin polynomial associated with ``part``, ``weight``, and
``length``, with the substitution `t \to t^2` made.
EXAMPLES::
sage: from sage.combinat.ribbon_tableau import spin_polynomial_square
sage: spin_polynomial_square([6,6,6],[4,2],3)
t^12 + t^10 + 2*t^8 + t^6 + t^4
sage: spin_polynomial_square([6,6,6],[4,1,1],3)
t^12 + 2*t^10 + 3*t^8 + 2*t^6 + t^4
sage: spin_polynomial_square([3,3,3,2,1], [2,2], 3)
t^7 + t^5
sage: spin_polynomial_square([3,3,3,2,1], [2,1,1], 3)
2*t^7 + 2*t^5 + t^3
sage: spin_polynomial_square([3,3,3,2,1], [1,1,1,1], 3)
3*t^7 + 5*t^5 + 3*t^3 + t
sage: spin_polynomial_square([5,4,3,2,1,1,1], [2,2,1], 3)
2*t^9 + 6*t^7 + 2*t^5
sage: spin_polynomial_square([[6]*6, [3,3]], [4,4,2], 3)
3*t^18 + 5*t^16 + 9*t^14 + 6*t^12 + 3*t^10
"""
R = ZZ['t']
if part in _Partitions:
part = SkewPartition([part,_Partitions([])])
elif part in SkewPartitions():
part = SkewPartition(part)
if part == [[],[]] and weight == []:
return R.one()
t = R.gen()
return R(graph_implementation_rec(part, weight, length, functools.partial(spin_rec,t))[0])
def spin_polynomial(part, weight, length):
"""
Returns the spin polynomial associated to ``part``, ``weight``, and
``length``.
EXAMPLES::
sage: from sage.combinat.ribbon_tableau import spin_polynomial
sage: spin_polynomial([6,6,6],[4,2],3)
t^6 + t^5 + 2*t^4 + t^3 + t^2
sage: spin_polynomial([6,6,6],[4,1,1],3)
t^6 + 2*t^5 + 3*t^4 + 2*t^3 + t^2
sage: spin_polynomial([3,3,3,2,1], [2,2], 3)
t^(7/2) + t^(5/2)
sage: spin_polynomial([3,3,3,2,1], [2,1,1], 3)
2*t^(7/2) + 2*t^(5/2) + t^(3/2)
sage: spin_polynomial([3,3,3,2,1], [1,1,1,1], 3)
3*t^(7/2) + 5*t^(5/2) + 3*t^(3/2) + sqrt(t)
sage: spin_polynomial([5,4,3,2,1,1,1], [2,2,1], 3)
2*t^(9/2) + 6*t^(7/2) + 2*t^(5/2)
sage: spin_polynomial([[6]*6, [3,3]], [4,4,2], 3)
3*t^9 + 5*t^8 + 9*t^7 + 6*t^6 + 3*t^5
"""
from sage.symbolic.ring import SR
sp = spin_polynomial_square(part, weight, length)
t = SR.var('t')
coeffs = sp.list()
return sum(c * t**(QQ(i)/2) for i,c in enumerate(coeffs))
def cospin_polynomial(part, weight, length):
"""
Return the cospin polynomial associated to ``part``, ``weight``, and
``length``.
EXAMPLES::
sage: from sage.combinat.ribbon_tableau import cospin_polynomial
sage: cospin_polynomial([6,6,6],[4,2],3)
t^4 + t^3 + 2*t^2 + t + 1
sage: cospin_polynomial([3,3,3,2,1], [3,1], 3)
1
sage: cospin_polynomial([3,3,3,2,1], [2,2], 3)
t + 1
sage: cospin_polynomial([3,3,3,2,1], [2,1,1], 3)
t^2 + 2*t + 2
sage: cospin_polynomial([3,3,3,2,1], [1,1,1,1], 3)
t^3 + 3*t^2 + 5*t + 3
sage: cospin_polynomial([5,4,3,2,1,1,1], [2,2,1], 3)
2*t^2 + 6*t + 2
sage: cospin_polynomial([[6]*6, [3,3]], [4,4,2], 3)
3*t^4 + 6*t^3 + 9*t^2 + 5*t + 3
"""
R = ZZ['t']
    # The powers in the spin polynomial are all half-integers
    # or all integers, so manipulating the expression requires
    # separating the two cases.
sp = spin_polynomial_square(part, weight, length)
if sp == 0:
return R.zero()
coeffs = [c for c in sp.list() if c != 0]
d = len(coeffs) - 1
t = R.gen()
return R( sum(c * t**(d-i) for i,c in enumerate(coeffs)) )
## //////////////////////////////////////////////////////////////////////////////////////////
## // Generic function for driving into the graph of partitions coding all ribbons
## // tableaux of a given shape and weight
## //////////////////////////////////////////////////////////////////////////////////////////
## // This function constructs the graph of the set of k-ribbon tableaux
## // of a given skew shape and a given weight.
## // The first argument is always a skew partition.
## // When the inner partition is empty, there are no branches without solutions;
## // otherwise, on average, many branches carry no solutions.
## /////////////////////////////////////////////////////////////////////////////////////////
def graph_implementation_rec(skp, weight, length, function):
"""
TESTS::
sage: from sage.combinat.ribbon_tableau import graph_implementation_rec, list_rec
sage: graph_implementation_rec(SkewPartition([[1], []]), [1], 1, list_rec)
[[[], [[1]]]]
sage: graph_implementation_rec(SkewPartition([[2, 1], []]), [1, 2], 1, list_rec)
[[[], [[2], [1, 2]]]]
sage: graph_implementation_rec(SkewPartition([[], []]), [0], 1, list_rec)
[[[], []]]
"""
if sum(weight) == 0:
weight = []
partp = skp[0].conjugate()
ell = len(partp)
outer = skp[1]
outer_len = len(outer)
# Some tests in order to know if the shape and the weight are compatible.
if weight and weight[-1] <= len(partp):
perms = permutation.Permutations([0]*(len(partp)-weight[-1]) + [length]*(weight[-1])).list()
else:
return function([], [], skp, weight, length)
selection = []
for j in range(len(perms)):
retire = [(val + ell - (i+1) - perms[j][i]) for i,val in enumerate(partp)]
retire.sort(reverse=True)
retire = [val - ell + (i+1) for i,val in enumerate(retire)]
if retire[-1] >= 0 and retire == sorted(retire, reverse=True):
retire = Partition(retire).conjugate()
            # Prune branches when the retired partition has a row strictly contained in the inner one
if len(retire) >= outer_len:
append = True
for k in range(outer_len):
if retire[k] - outer[k] < 0:
append = False
break
if append:
selection.append([retire, perms[j]])
#selection contains the list of current nodes
if len(weight) == 1:
return function([], selection, skp, weight, length)
else:
        #The recursive calls build the list of the children
        #of all current nodes in selection
a = [graph_implementation_rec([p[0], outer], weight[:-1], length, function)
for p in selection]
return function(a, selection, skp, weight, length)
##############################################################
class MultiSkewTableau(CombinatorialElement):
"""
A multi skew tableau which is a tuple of skew tableaux.
EXAMPLES::
sage: s = MultiSkewTableau([ [[None,1],[2,3]], [[1,2],[2]] ])
sage: s.size()
6
sage: s.weight()
[2, 3, 1]
sage: s.shape()
[[2, 2] / [1], [2, 1] / []]
TESTS::
sage: mst = MultiSkewTableau([ [[None,1],[2,3]], [[1,2],[2]] ])
sage: TestSuite(mst).run()
"""
@staticmethod
def __classcall_private__(cls, x):
"""
Construct a multi skew tableau.
EXAMPLES::
sage: s = MultiSkewTableau([ [[None,1],[2,3]], [[1,2],[2]] ])
"""
if isinstance(x, MultiSkewTableau):
return x
return MultiSkewTableaux()([SkewTableau(i) for i in x] )
def size(self):
"""
Return the size of ``self``, which is the sum of the sizes of the skew
tableaux in ``self``.
EXAMPLES::
sage: s = SemistandardSkewTableaux([[2,2],[1]]).list()
sage: a = MultiSkewTableau([s[0],s[1],s[2]])
sage: a.size()
9
"""
return sum(x.size() for x in self)
def weight(self):
"""
Return the weight of ``self``.
EXAMPLES::
sage: s = SemistandardSkewTableaux([[2,2],[1]]).list()
sage: a = MultiSkewTableau([s[0],s[1],s[2]])
sage: a.weight()
[5, 3, 1]
"""
weights = [x.weight() for x in self]
m = max([len(x) for x in weights])
weight = [0]*m
for w in weights:
for i in range(len(w)):
weight[i] += w[i]
return weight
def shape(self):
"""
Return the shape of ``self``.
EXAMPLES::
sage: s = SemistandardSkewTableaux([[2,2],[1]]).list()
sage: a = MultiSkewTableau([s[0],s[1],s[2]])
sage: a.shape()
[[2, 2] / [1], [2, 2] / [1], [2, 2] / [1]]
"""
return [x.shape() for x in self]
def inversion_pairs(self):
"""
Return a list of the inversion pairs of ``self``.
EXAMPLES::
sage: s = MultiSkewTableau([ [[2,3],[5,5]], [[1,1],[3,3]], [[2],[6]] ])
sage: s.inversion_pairs()
[((0, (0, 0)), (1, (0, 0))),
((0, (1, 0)), (1, (0, 1))),
((0, (1, 1)), (1, (0, 0))),
((0, (1, 1)), (1, (1, 1))),
((0, (1, 1)), (2, (0, 0))),
((1, (0, 1)), (2, (0, 0))),
((1, (1, 1)), (2, (0, 0)))]
"""
inv = []
for k in range(len(self)):
for b in self[k].cells():
inv += self._inversion_pairs_from_position(k,b)
return inv
def inversions(self):
"""
Return the number of inversion pairs of ``self``.
EXAMPLES::
sage: t1 = SkewTableau([[1]])
sage: t2 = SkewTableau([[2]])
sage: MultiSkewTableau([t1,t1]).inversions()
0
sage: MultiSkewTableau([t1,t2]).inversions()
0
sage: MultiSkewTableau([t2,t2]).inversions()
0
sage: MultiSkewTableau([t2,t1]).inversions()
1
sage: s = MultiSkewTableau([ [[2,3],[5,5]], [[1,1],[3,3]], [[2],[6]] ])
sage: s.inversions()
7
"""
return len(self.inversion_pairs())
def _inversion_pairs_from_position(self, k, ij):
"""
Return the number of inversions at the cell position `(i,j)` in the
``k``-th tableaux in ``self``.
EXAMPLES::
sage: s = MultiSkewTableau([ [[2,3],[5,5]], [[1,1],[3,3]], [[2],[6]] ])
sage: s._inversion_pairs_from_position(0, (1,1))
[((0, (1, 1)), (1, (0, 0))),
((0, (1, 1)), (1, (1, 1))),
((0, (1, 1)), (2, (0, 0)))]
sage: s._inversion_pairs_from_position(1, (0,1))
[((1, (0, 1)), (2, (0, 0)))]
"""
pk = k
pi,pj = ij
c = pi - pj
value = self[pk][pi][pj]
pk_cells = self[pk].cells_by_content(c)
same_diagonal = [ t.cells_by_content(c) for t in self[pk+1:] ]
above_diagonal = [ t.cells_by_content(c+1) for t in self[pk+1:] ]
res = []
for i,j in pk_cells:
if pi < i and value > self[pk][i][j]:
res.append( ((pk,(pi,pj)), (pk,(i,j))) )
for k in range(len(same_diagonal)):
for i,j in same_diagonal[k]:
if value > self[pk+k+1][i][j]:
res.append( ((pk,(pi,pj)), (pk+k+1,(i,j))) )
for k in range(len(above_diagonal)):
for i,j in above_diagonal[k]:
if value < self[pk+k+1][i][j]:
res.append( ((pk,(pi,pj)), (pk+k+1,(i,j))) )
return res
class MultiSkewTableaux(UniqueRepresentation, Parent):
r"""
Multiskew tableaux.
"""
def __init__(self, category=None):
"""
EXAMPLES::
sage: R = MultiSkewTableaux()
sage: TestSuite(R).run()
"""
if category is None:
category = Sets()
Parent.__init__(self, category=category)
def _repr_(self):
"""
Return a string representation of ``self``.
EXAMPLES::
sage: MultiSkewTableaux()
            Multi skew tableaux
        """
        return "Multi skew tableaux"
def _element_constructor_(self, rt):
"""
Construct an element of ``self`` from ``rt``.
EXAMPLES::
sage: R = MultiSkewTableaux()
sage: R([[[1, 1], [2]], [[None, 2], [3, 3]]])
[[[1, 1], [2]], [[None, 2], [3, 3]]]
"""
return self.element_class(self, rt)
Element = MultiSkewTableau
class SemistandardMultiSkewTableaux(MultiSkewTableaux):
"""
Semistandard multi skew tableaux.
A multi skew tableau is a `k`-tuple of skew tableaux of
given shape with a specified total weight.
EXAMPLES::
sage: S = SemistandardMultiSkewTableaux([ [[2,1],[]], [[2,2],[1]] ], [2,2,2]); S
Semistandard multi skew tableaux of shape [[2, 1] / [], [2, 2] / [1]] and weight [2, 2, 2]
sage: S.list()
[[[[1, 1], [2]], [[None, 2], [3, 3]]],
[[[1, 2], [2]], [[None, 1], [3, 3]]],
[[[1, 3], [2]], [[None, 2], [1, 3]]],
[[[1, 3], [2]], [[None, 1], [2, 3]]],
[[[1, 1], [3]], [[None, 2], [2, 3]]],
[[[1, 2], [3]], [[None, 2], [1, 3]]],
[[[1, 2], [3]], [[None, 1], [2, 3]]],
[[[2, 2], [3]], [[None, 1], [1, 3]]],
[[[1, 3], [3]], [[None, 1], [2, 2]]],
[[[2, 3], [3]], [[None, 1], [1, 2]]]]
"""
@staticmethod
def __classcall_private__(cls, shape, weight):
"""
Normalize input to ensure a unique representation.
EXAMPLES::
sage: S1 = SemistandardMultiSkewTableaux([ [[2,1],[]], [[2,2],[1]] ], [2,2,2])
sage: shape_alt = ( SkewPartition([[2,1],[]]), SkewPartition([[2,2],[1]]) )
sage: S2 = SemistandardMultiSkewTableaux(shape_alt, (2,2,2))
sage: S1 is S2
True
"""
shape = tuple(SkewPartition(x) for x in shape)
weight = Partition(weight)
if sum(weight) != sum(s.size() for s in shape):
raise ValueError("the sum of weight must be the sum of the sizes of shape")
return super(SemistandardMultiSkewTableaux, cls).__classcall__(cls, shape, weight)
def __init__(self, shape, weight):
"""
TESTS::
sage: S = SemistandardMultiSkewTableaux([ [[2,1],[]], [[2,2],[1]] ], [2,2,2])
sage: TestSuite(S).run()
"""
self._shape = shape
self._weight = weight
MultiSkewTableaux.__init__(self, category=FiniteEnumeratedSets())
def _repr_(self):
"""
Return a string representation of ``self``.
EXAMPLES::
sage: SemistandardMultiSkewTableaux([ [[2,1],[]], [[2,2],[1]] ], [2,2,2])
Semistandard multi skew tableaux of shape [[2, 1] / [], [2, 2] / [1]] and weight [2, 2, 2]
"""
return "Semistandard multi skew tableaux of shape %s and weight %s"%(list(self._shape), self._weight)
def __contains__(self, x):
"""
TESTS::
sage: s = SemistandardMultiSkewTableaux([ [[2,1],[]], [[2,2],[1]] ], [2,2,2])
sage: all(i in s for i in s)
True
"""
try:
x = MultiSkewTableau(x)
except TypeError:
return False
if x.weight() != list(self._weight):
return False
if x.shape() != list(self._shape):
return False
if not all( x[i].is_semistandard() for i in range(len(x)) ):
return False
return True
def __iter__(self):
"""
EXAMPLES::
sage: sp = SkewPartitions(3).list()
sage: SemistandardMultiSkewTableaux([SkewPartition([[1, 1, 1], []]), SkewPartition([[3], []])],[2,2,2]).list()
[[[[1], [2], [3]], [[1, 2, 3]]]]
::
sage: a = SkewPartition([[8,7,6,5,1,1],[2,1,1]])
sage: weight = [3,3,2]
sage: k = 3
sage: s = SemistandardMultiSkewTableaux(a.quotient(k),weight)
sage: len(s.list())
34
sage: RibbonTableaux(a,weight,k).cardinality()
34
"""
parts = self._shape
mu = self._weight
#Splitting the partition
s = [ p.size() for p in parts ]
parts = [p.to_list() for p in parts]
#Gluing the partitions
parttmp = parts[0]
        for i in range(1,len(parts)):
trans = parttmp[0][0]
current_part = parts[i]
current_part[1] += [0]*(len(current_part[0])-len(current_part[1]))
inner_current = [ trans + j for j in current_part[1] ]
outer_current = [ trans + j for j in current_part[0] ]
parttmp = [ outer_current + parttmp[0], inner_current + parttmp[1] ]
#List the corresponding skew tableaux
l = [ st.to_word() for st in SemistandardSkewTableaux(parttmp, mu) ]
S = SkewTableaux()
for k in range(len(l)):
pos = 0 #Double check this
restmp = [ S.from_shape_and_word(parts[0], [l[k][j] for j in range(s[0])]) ]
for i in range(1, len(parts)):
w = [l[k][j] for j in range(pos+s[i-1], pos+s[i-1]+s[i])]
restmp.append( S.from_shape_and_word(parts[i], w) )
yield self.element_class(self, restmp)
class RibbonTableau_class(RibbonTableau):
"""
This exists solely for unpickling ``RibbonTableau_class`` objects.
"""
def __setstate__(self, state):
r"""
Unpickle old ``RibbonTableau_class`` objects.
TESTS::
sage: loads(b'x\x9c5\xcc\xbd\x0e\xc2 \x14@\xe1\xb4Z\x7f\xd0\x07\xc1\x85D}\x8f\x0e\x8d\x1d\t\xb9\x90\x1bJ\xa44\x17\xe8h\xa2\x83\xef-\xda\xb8\x9do9\xcf\xda$\xb0(\xcc4j\x17 \x8b\xe8\xb4\x9e\x82\xca\xa0=\xc2\xcc\xba\x1fo\x8b\x94\xf1\x90\x12\xa3\xea\xf4\xa2\xfaA+\xde7j\x804\xd0\xba-\xe5]\xca\xd4H\xdapI[\xde.\xdf\xe8\x82M\xc2\x85\x8c\x16#\x1b\xe1\x8e\xea\x0f\xda\xf5\xd5\xf9\xdd\xd1\x1e%1>\x14]\x8a\x0e\xdf\xb8\x968"\xceZ|\x00x\xef5\x11')
[[None, 1], [2, 3]]
sage: loads(dumps( RibbonTableau([[None, 1],[2,3]]) ))
[[None, 1], [2, 3]]
"""
self.__class__ = RibbonTableau
self.__init__(RibbonTableaux(), state['_list'])
from sage.misc.persist import register_unpickle_override
register_unpickle_override('sage.combinat.ribbon_tableau', 'RibbonTableau_class', RibbonTableau_class)
register_unpickle_override('sage.combinat.ribbon_tableau', 'RibbonTableaux_shapeweightlength', RibbonTableaux)
register_unpickle_override('sage.combinat.ribbon_tableau', 'SemistandardMultiSkewTtableaux_shapeweight', SemistandardMultiSkewTableaux)
| 33.390034
| 446
| 0.501672
|
27b983f76e1294454190dabf6b6b56f3ee82a7fa
| 4,529
|
py
|
Python
|
services/other/crud.py
|
Counter0021/Anti-Freelancer-microservices-back-end
|
e55481e0a4353107036cd5ba664fee57e29c7597
|
[
"MIT"
] | null | null | null |
services/other/crud.py
|
Counter0021/Anti-Freelancer-microservices-back-end
|
e55481e0a4353107036cd5ba664fee57e29c7597
|
[
"MIT"
] | null | null | null |
services/other/crud.py
|
Counter0021/Anti-Freelancer-microservices-back-end
|
e55481e0a4353107036cd5ba664fee57e29c7597
|
[
"MIT"
] | null | null | null |
import typing
import sqlalchemy
from pydantic import BaseModel
from sqlalchemy.ext.asyncio import AsyncSession
from db import Base
ModelType = typing.TypeVar('ModelType', bound=Base)
CreateSchemaType = typing.TypeVar('CreateSchemaType', bound=BaseModel)
UpdateSchemaType = typing.TypeVar('UpdateSchemaType', bound=BaseModel)
class CRUD(typing.Generic[ModelType, CreateSchemaType, UpdateSchemaType]):
""" CRUD """
def __init__(self, model: typing.Type[ModelType]) -> None:
self.__model = model
async def exist(self, db: AsyncSession, **kwargs) -> bool:
"""
Exist
:param db: DB
:type db: AsyncSession
:param kwargs: kwargs
:return: Exist instance?
:rtype: bool
"""
query = await db.execute(sqlalchemy.exists(sqlalchemy.select(self.__model.id).filter_by(**kwargs)).select())
return query.scalar()
async def get(self, db: AsyncSession, **kwargs) -> typing.Optional[ModelType]:
"""
Get
:param db: DB
:type db: AsyncSession
:param kwargs: kwargs
:return: Instance
"""
query = await db.execute(sqlalchemy.select(self.__model).filter_by(**kwargs))
return query.scalars().first()
async def create(self, db: AsyncSession, **kwargs) -> ModelType:
"""
Create instance
:param db: DB
:type db: AsyncSession
:param kwargs: kwargs
:return: New instance
"""
instance = self.__model(**kwargs)
db.add(instance)
await db.flush()
await db.commit()
return instance
async def update(self, db: AsyncSession, filter_by: dict, **kwargs) -> ModelType:
"""
Update instance
:param db: DB
:type db: AsyncSession
:param filter_by: Filter by
:type filter_by: dict
:param kwargs: kwargs
:return: Instance
"""
query = sqlalchemy.update(self.__model).filter_by(**filter_by).values(**kwargs)
        # execution_options() is generative: it returns a new statement,
        # so the result must be reassigned rather than discarded
        query = query.execution_options(synchronize_session="fetch")
await db.execute(query)
await db.commit()
return await self.get(db, **filter_by)
async def remove(self, db: AsyncSession, **kwargs) -> None:
"""
Remove instance
:param db: DB
:type db: AsyncSession
:param kwargs: kwargs
:return: None
"""
await db.execute(sqlalchemy.delete(self.__model).filter_by(**kwargs))
await db.commit()
async def all(self, db: AsyncSession, skip: int = 0, limit: int = 100) -> list[ModelType]:
"""
All
:param db: DB
:type db: AsyncSession
:param skip: Skip
:type skip: int
:param limit: Limit
:type limit: int
:return: Instances
:rtype: list
"""
query = await db.execute(
sqlalchemy.select(self.__model).order_by(self.__model.id.desc()).offset(skip).limit(limit)
)
return query.scalars().all()
async def filter(self, db: AsyncSession, skip: int = 0, limit: int = 100, **kwargs) -> list[ModelType]:
"""
Filter
:param db: DB
:type db: AsyncSession
:param skip: Skip
:type skip: int
:param limit: Limit
:type limit: int
:param kwargs: Filter params
:return: Instances
:rtype: list
"""
query = await db.execute(
sqlalchemy.select(self.__model).filter_by(
**kwargs
).order_by(self.__model.id.desc()).offset(skip).limit(limit)
)
return query.scalars().all()
async def exist_page(self, db: AsyncSession, skip: int = 0, limit: int = 100, **kwargs) -> bool:
"""
Exist page?
:param db: DB
:type db: AsyncSession
:param skip: Skip
:type skip: int
:param limit: Limit
:type limit: int
:return: Exist page?
:rtype: bool
"""
query = await db.execute(
sqlalchemy.exists(
sqlalchemy.select(self.__model.id).filter_by(**kwargs).order_by(
self.__model.id.desc()
).offset(skip).limit(limit)
).select()
)
return query.scalar()
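# Illustrative usage sketch (commented out; ``Message`` and its fields are
# hypothetical placeholders, not part of this service):
#
#     class Message(Base):
#         __tablename__ = 'messages'
#         id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
#         text = sqlalchemy.Column(sqlalchemy.String)
#
#     message_crud = CRUD(Message)
#
#     async def demo(db: AsyncSession) -> None:
#         msg = await message_crud.create(db, text='hello')
#         assert await message_crud.exist(db, id=msg.id)
#         msg = await message_crud.update(db, {'id': msg.id}, text='bye')
#         await message_crud.remove(db, id=msg.id)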
| 32.120567
| 116
| 0.544712
|
b59dae82e9731ed6b3fc1e91e4e82b343ee97f80
| 6,454
|
py
|
Python
|
Snow-Cooling/Libraries/HT_internal_convection.py
|
CarlGriffinsteed/UVM-ME144-Heat-Transfer
|
9c477449d6ba5d6a9ee7c57f1c0ed4aab0ce4cca
|
[
"CC-BY-3.0"
] | 7
|
2017-06-02T20:31:22.000Z
|
2021-04-05T13:52:33.000Z
|
Snow-Cooling/Libraries/HT_internal_convection.py
|
CarlGriffinsteed/UVM-ME144-Heat-Transfer
|
9c477449d6ba5d6a9ee7c57f1c0ed4aab0ce4cca
|
[
"CC-BY-3.0"
] | null | null | null |
Snow-Cooling/Libraries/HT_internal_convection.py
|
CarlGriffinsteed/UVM-ME144-Heat-Transfer
|
9c477449d6ba5d6a9ee7c57f1c0ed4aab0ce4cca
|
[
"CC-BY-3.0"
] | 9
|
2019-01-24T17:43:41.000Z
|
2021-07-25T18:08:34.000Z
|
"""
Object name: PipeFlow
"""
import numpy as np
import scipy
import scipy.optimize
class PipeFlow(object):
""" Determination of Nu, pressure drop, mean temperature for internal convection
    import HT_internal_convection as intconv
pipe =intconv.PipeFlow(D, Re=0.0, Um = 0.0, mdot = 0.0, nu = 0.0, rho = 0.0)
where
    D is the only required input, together with one of the following combinations: (Re, nu), (Um, nu), or (mdot, rho, nu)
Hence the minimum calls for PipeFlow are
pipe =intconv.PipeFlow(D, Re= Re_m, nu = nu_m) outputs pipe.Um
pipe =intconv.PipeFlow(D, Re= Re_m, nu = nu_m, rho = rho_m) outputs pipe.Um (bulk velocity)
and pipe.mdot (mass flow)
pipe =intconv.PipeFlow(D, Um = 0.0, nu = 0.0) outputs pipe.Re
pipe =intconv.PipeFlow(D, Um = Um, nu = nu_m, rho = rho_m) outputs pipe.Re, pipe.mdot
pipe =intconv.PipeFlow(D, mdot = 0.0, nu = 0.0, rho = 0.0) outputs pipe.Re, pipe.Um
pipe.f_laminar(Re) outputs the friction factor for laminar flow pipe.f
    pipe.f_turbulent(Re = 0.0, eps = 0.0) outputs the friction factor for turbulent flow pipe.f
The following correlations output pipe.Nu
pipe.laminar_isothermal for isothermal wall boundary condition
pipe.laminar_isoflux for isoflux wall boundary condition
pipe.Dittus_Boelter(mode, Pr, Re = 0.) for turbulent flow where mode is either "heating" or "cooling"
The Re is optional if omitted, the Reynolds number calculated in the object PipeFlow will be used
pipe.Sieder_Tate(Pr,mu,mu_s, Re = 0.0) mu and mu_s are the mean and wall dynamics viscosities
The Re is optional if omitted, the Reynolds number calculated in the object PipeFlow will be used
pipe.Gnielinski( Pr, f,Re = 0.0): where f is the friction factor
The Re is optional if omitted, the Reynolds number calculated in the object PipeFlow will be used
"""
def __init__(self,D, Re=0.0, Um = 0.0 , mdot = 0.0, nu = 0.0, rho = 0.0, L = 1.0 ):
self.D = D
self.L = L
if Re == 0.0:
if Um != 0.0 and nu != 0.0:
Re = Um*D/nu
elif mdot != 0 and rho != 0.0 and nu != 0.0:
Um = mdot/(rho*np.pi*D**2/4.)
Re = Um*D/nu
else:
print("Warning if Re == 0, Um, D and nu or mdot, rho and nu must be specified")
self.Re = Re
if Um == 0.:
if Re != 0. and nu != 0.:
Um = Re*nu/D
if mdot == 0.0 and rho != 0.0:
mdot = rho*Um*np.pi*D**2/4.
elif mdot !=0.0 and rho != 0.0:
Um = mdot/(rho*np.pi*D**2/4.)
self.Um = Um
        if mdot == 0.0:
            if rho != 0.0:
                mdot = rho*Um*np.pi*D**2/4.
            else:
                # no density given: assume rho = 1 so that mdot is still defined
                self.rho = 1.0
                mdot = self.rho*Um*np.pi*D**2/4.
        self.mdot = mdot
self.nu = nu
if Re == 0. and nu != 0.:
Re = Um*D/nu
self.Re = Re
if rho == 0.0:
self.rho = 1.0
else:
self.rho = rho
def f_laminar(self, Re = 0.0):
if Re == 0. and self.Re !=0:
Re = self.Re
elif Re == 0 and self.Re == 0.0:
print("Warning Reynolds number is not defined")
self.f = 64./Re
self.dPdx = self.f*(self.L/self.D)*(self.rho*self.Um**2)/2.
def f_turbulent(self,Re = 0.0, eps = 0.0):
if Re == 0. and self.Re !=0.0:
Re = self.Re
elif Re == 0 and self.Re == 0.0:
print("Warning Reynolds number is not defined")
if eps == 0.0:
print("Pipe wall is assumed to be hydrodynamically smooth")
e = eps
f_0 = (0.790*np.log(Re)- 1.64)**(-2.)
if (e > 0.):
f_1 = 1./(-2.0*np.log10(e/3.71))**2
else:
f_1 = f_0
f_guess = np.max([f_0,f_1])
#f_guess = 0.04
def f_tmp(x):
y = (-2*np.log10((2.51/(Re*np.sqrt(x))) + (e/(3.71))) - 1.0/np.sqrt(x))
return y
y = scipy.optimize.fsolve(f_tmp, f_guess)
self.f = y[0]
self.dPdx = self.f*(self.L/self.D)*(self.rho*self.Um**2)/2.
def laminar_isothermal(self):
self.Nu = 3.66
def laminar_isoflux(self):
self.Nu = 4.36
def Dittus_Boelter(self,mode,Pr,Re = 0.0):
        if Re == 0. and self.Re != 0:
            Re = self.Re
        elif Re == 0. and self.Re == 0.:
            print("Warning: Reynolds number is not defined")
        if (mode == 'heating'):
            n = 0.4
        elif (mode == 'cooling'):
            n = 0.3
        else:
            print("Warning: mode must be 'heating' or 'cooling'")
self.Nu = 0.023*Re**(4./5.)*Pr**n
def Sieder_Tate(self,Pr,mu,mu_s, Re = 0.0):
        if Re == 0. and self.Re != 0:
            Re = self.Re
        elif Re == 0. and self.Re == 0.:
            print("Warning: Reynolds number is not defined")
self.Nu = 0.027*Re**(4/5)*Pr**(1/3)*(mu/mu_s)**0.14
def Gnielinski(self, Pr, f,Re = 0.0):
        if Re == 0. and self.Re != 0:
            Re = self.Re
        elif Re == 0. and self.Re == 0.:
            print("Warning: Reynolds number is not defined")
self.Nu = (f/8.)*(Re-1000.)*Pr/(1+12.7*(f/8.)**0.5*(Pr**(2./3.)-1.))
def Skupinski(self,Pr, Re = 0.0):
        if Re == 0. and self.Re != 0:
            Re = self.Re
        elif Re == 0. and self.Re == 0.:
            print("Warning: Reynolds number is not defined")
self.Nu = 4.82+0.0185*(Re*Pr)**0.827
def Seban(self,Pr, Re = 0.0):
        if Re == 0. and self.Re != 0:
            Re = self.Re
        elif Re == 0. and self.Re == 0.:
            print("Warning: Reynolds number is not defined")
self.Nu = 5.0+0.025*(Re*Pr)**0.8
def log_mean_temperature(T_s,T_o,T_i):
    if (T_s < min(T_o,T_i)):
        DT_o = T_o-T_s
        DT_i = T_i-T_s
    elif (T_s > max(T_o,T_i)):
        DT_o = T_s-T_o
        DT_i = T_s-T_i
    else:
        # guard against an UnboundLocalError when T_s lies between T_i and T_o
        raise ValueError("T_s must lie outside the range spanned by T_i and T_o")
    return (DT_o-DT_i)/np.log(DT_o/DT_i)
def T_mx_Ts_constant(T_s,T_mi,P,mdot,Cp,hbar,x):
return T_s-(T_s-T_mi)*np.exp(-P*x*hbar/(mdot*Cp))
def T_mo_T_infty(T_infty,T_mi,mdot,Cp,R_tot):
    return T_infty-(T_infty-T_mi)*np.exp(-1/(mdot*Cp*R_tot))
def L_given_other_params(T_infty,T_mo,T_mi,mdot,Cp,Rptot):
return -mdot*Cp*Rptot*np.log((T_infty -T_mo)/(T_infty - T_mi))
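# Illustrative usage sketch (hypothetical numbers, SI units assumed):
# a water-like fluid in a D = 0.05 m pipe at Um = 2 m/s over L = 10 m.
#
#     pipe = PipeFlow(0.05, Um=2.0, nu=1.0e-6, rho=998.0, L=10.0)
#     print(pipe.Re)             # 1e5 -> turbulent regime
#     pipe.f_turbulent(eps=0.0)  # smooth wall: sets pipe.f and pipe.dPdx
#     pipe.Gnielinski(Pr=7.0, f=pipe.f)
#     print(pipe.Nu)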
| 36.055866
| 113
| 0.520762
|
906d802a8f0c062e57dbcd65e88b4fa7e0087bf0
| 1,913
|
py
|
Python
|
lib/test/BindingsTest/test_common_types.py
|
alrikai/deitytd
|
0e7b504e6ce46df3de9a920650c5bd44324308cf
|
[
"MIT"
] | 1
|
2019-04-12T18:37:26.000Z
|
2019-04-12T18:37:26.000Z
|
lib/test/BindingsTest/test_common_types.py
|
alrikai/deitytd
|
0e7b504e6ce46df3de9a920650c5bd44324308cf
|
[
"MIT"
] | null | null | null |
lib/test/BindingsTest/test_common_types.py
|
alrikai/deitytd
|
0e7b504e6ce46df3de9a920650c5bd44324308cf
|
[
"MIT"
] | null | null | null |
import pyDeityTD as deitytd
import pytest
def test_range_init():
default_range = deitytd.range()
assert default_range.low == 0
assert default_range.high == 0
custom_range = deitytd.range(-1, 10)
assert custom_range.low == -1
assert custom_range.high == 10
custom_range = deitytd.range(0.314, 159265)
assert custom_range.low == pytest.approx(0.314)
assert custom_range.high == 159265
copy_range = deitytd.range(custom_range)
assert copy_range.low == pytest.approx(0.314)
assert copy_range.high == 159265
def test_range_ops():
range_lhs = deitytd.range(10, 100)
range_rhs = deitytd.range(1, 10)
new_range = range_lhs + range_rhs
assert new_range.low == 11
assert new_range.high == 110
new_range = range_lhs * range_rhs
assert new_range.low == 10
assert new_range.high == 1000
new_range = range_lhs + 15
assert new_range.low == 25
assert new_range.high == 115
new_range = 25 + range_rhs
assert new_range.low == 26
assert new_range.high == 35
new_range = range_lhs * 15
assert new_range.low == 150
assert new_range.high == 1500
new_range = 25 * range_rhs
assert new_range.low == 25
assert new_range.high == 250
def test_inplace_range_ops():
range_lhs = deitytd.range(10, 100)
range_rhs = deitytd.range(1, 10)
test_range = deitytd.range(range_lhs)
test_range += range_rhs
assert test_range.low == 11
assert test_range.high == 110
test_range = deitytd.range(range_lhs)
test_range *= range_rhs
assert test_range.low == 10
assert test_range.high == 1000
test_range = deitytd.range(range_lhs)
test_range += 15
assert test_range.low == 25
assert test_range.high == 115
test_range = deitytd.range(range_lhs)
test_range *= 25
assert test_range.low == 250
assert test_range.high == 2500
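# These binding tests are run with pytest, e.g. from the repository root:
#     pytest lib/test/BindingsTest/test_common_types.py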
| 25.171053
| 51
| 0.673288
|
f3962c0bd056d34fe1500c2aacb30dbd8664d5a7
| 6,271
|
py
|
Python
|
k2/python/tests/intersect_dense_pruned_test.py
|
open-speech/sequeender
|
7a64e1a7d8a4b05b0b82e17c542f9f7f943a41e0
|
[
"MIT"
] | 5
|
2020-11-19T15:49:55.000Z
|
2021-06-10T23:51:52.000Z
|
k2/python/tests/intersect_dense_pruned_test.py
|
open-speech/sequeender
|
7a64e1a7d8a4b05b0b82e17c542f9f7f943a41e0
|
[
"MIT"
] | null | null | null |
k2/python/tests/intersect_dense_pruned_test.py
|
open-speech/sequeender
|
7a64e1a7d8a4b05b0b82e17c542f9f7f943a41e0
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
#
# Copyright (c) 2020 Mobvoi Inc. (authors: Fangjun Kuang)
#
# See ../../../LICENSE for clarification regarding multiple authors
# To run this single test, use
#
# ctest --verbose -R intersect_dense_pruned_test_py
import unittest
import k2
import torch
class TestIntersectDensePruned(unittest.TestCase):
def test_simple(self):
s = '''
0 1 1 1.0
1 1 1 50.0
1 2 2 2.0
2 3 -1 3.0
3
'''
fsa = k2.Fsa.from_str(s)
fsa.requires_grad_(True)
fsa_vec = k2.create_fsa_vec([fsa])
log_prob = torch.tensor([[[0.1, 0.2, 0.3], [0.04, 0.05, 0.06]]],
dtype=torch.float32,
requires_grad=True)
supervision_segments = torch.tensor([[0, 0, 2]], dtype=torch.int32)
dense_fsa_vec = k2.DenseFsaVec(log_prob, supervision_segments)
out_fsa = k2.intersect_dense_pruned(fsa_vec,
dense_fsa_vec,
beam=100000,
max_active_states=10000,
min_active_states=0)
scores = k2.get_tot_scores(out_fsa,
log_semiring=False,
use_float_scores=True)
scores.sum().backward()
# `expected` results are computed using gtn.
# See https://bit.ly/3oYObeb
expected_scores_out_fsa = torch.tensor([1.2, 2.06, 3.0])
expected_grad_fsa = torch.tensor([1.0, 0.0, 1.0, 1.0])
expected_grad_log_prob = torch.tensor([0.0, 1.0, 0.0, 0.0, 0.0,
1.0]).reshape_as(log_prob)
assert torch.allclose(out_fsa.scores, expected_scores_out_fsa)
assert torch.allclose(expected_grad_fsa, fsa.scores.grad)
assert torch.allclose(expected_grad_log_prob, log_prob.grad)
def test_two_dense(self):
s = '''
0 1 1 1.0
1 1 1 50.0
1 2 2 2.0
2 3 -1 3.0
3
'''
fsa = k2.Fsa.from_str(s)
fsa.requires_grad_(True)
fsa_vec = k2.create_fsa_vec([fsa])
log_prob = torch.tensor(
[[[0.1, 0.2, 0.3], [0.04, 0.05, 0.06], [0.0, 0.0, 0.0]],
[[0.1, 0.2, 0.3], [0.4, 0.5, 0.6], [0.0, 0.0, 0.0]]],
dtype=torch.float32,
requires_grad=True)
supervision_segments = torch.tensor([[0, 0, 2], [1, 0, 3]],
dtype=torch.int32)
dense_fsa_vec = k2.DenseFsaVec(log_prob, supervision_segments)
out_fsa = k2.intersect_dense_pruned(fsa_vec,
dense_fsa_vec,
beam=100000,
max_active_states=10000,
min_active_states=0)
assert out_fsa.shape == (2, None, None), 'There should be two FSAs!'
scores = k2.get_tot_scores(out_fsa,
log_semiring=False,
use_float_scores=True)
scores.sum().backward()
# `expected` results are computed using gtn.
# See https://bit.ly/3oYObeb
expected_scores_out_fsa = torch.tensor(
[1.2, 2.06, 3.0, 1.2, 50.5, 2.0, 3.0])
expected_grad_fsa = torch.tensor([2.0, 1.0, 2.0, 2.0])
expected_grad_log_prob = torch.tensor([
0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0, 0, 0, 0.0, 1.0, 0.0, 0.0, 1.0,
0.0, 0.0, 0.0, 1.0
]).reshape_as(log_prob)
assert torch.allclose(out_fsa.scores, expected_scores_out_fsa)
assert torch.allclose(expected_grad_fsa, fsa.scores.grad)
assert torch.allclose(expected_grad_log_prob, log_prob.grad)
def test_two_fsas(self):
s1 = '''
0 1 1 1.0
1 2 2 2.0
2 3 -1 3.0
3
'''
s2 = '''
0 1 1 1.0
1 1 1 50.0
1 2 2 2.0
2 3 -1 3.0
3
'''
fsa1 = k2.Fsa.from_str(s1)
fsa2 = k2.Fsa.from_str(s2)
fsa1.requires_grad_(True)
fsa2.requires_grad_(True)
fsa_vec = k2.create_fsa_vec([fsa1, fsa2])
log_prob = torch.tensor(
[[[0.1, 0.2, 0.3], [0.04, 0.05, 0.06], [0.0, 0.0, 0.0]],
[[0.1, 0.2, 0.3], [0.4, 0.5, 0.6], [0.0, 0.0, 0.0]]],
dtype=torch.float32,
requires_grad=True)
supervision_segments = torch.tensor([[0, 0, 2], [1, 0, 3]],
dtype=torch.int32)
dense_fsa_vec = k2.DenseFsaVec(log_prob, supervision_segments)
out_fsa = k2.intersect_dense_pruned(fsa_vec,
dense_fsa_vec,
beam=100000,
max_active_states=10000,
min_active_states=0)
assert out_fsa.shape == (2, None, None), 'There should be two FSAs!'
scores = k2.get_tot_scores(out_fsa,
log_semiring=False,
use_float_scores=True)
scores.sum().backward()
# `expected` results are computed using gtn.
# See https://bit.ly/3oYObeb
expected_scores_out_fsa = torch.tensor(
[1.2, 2.06, 3.0, 1.2, 50.5, 2.0, 3.0])
expected_grad_fsa1 = torch.tensor([1.0, 1.0, 1.0])
expected_grad_fsa2 = torch.tensor([1.0, 1.0, 1.0, 1.0])
print("fsa2 is ", fsa2.__str__())
expected_grad_log_prob = torch.tensor([
0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0, 0, 0, 0.0, 1.0, 0.0, 0.0, 1.0,
0.0, 0.0, 0.0, 1.0
]).reshape_as(log_prob)
assert torch.allclose(out_fsa.scores, expected_scores_out_fsa)
assert torch.allclose(expected_grad_fsa1, fsa1.scores.grad)
assert torch.allclose(expected_grad_fsa2, fsa2.scores.grad)
assert torch.allclose(expected_grad_log_prob, log_prob.grad)
if __name__ == '__main__':
unittest.main()
| 36.888235
| 76
| 0.495296
|
4fd0a41802241b2d3a65a3fccdec88f8f792cfae
| 4,799
|
py
|
Python
|
blocks/bool.py
|
antonhoess/crazy_matrix
|
fbd2a7a59c21b4ab43dc6a5ded38da19ee1280cd
|
[
"BSD-3-Clause"
] | 1
|
2021-02-04T20:39:25.000Z
|
2021-02-04T20:39:25.000Z
|
blocks/bool.py
|
antonhoess/crazy_matrix
|
fbd2a7a59c21b4ab43dc6a5ded38da19ee1280cd
|
[
"BSD-3-Clause"
] | null | null | null |
blocks/bool.py
|
antonhoess/crazy_matrix
|
fbd2a7a59c21b4ab43dc6a5ded38da19ee1280cd
|
[
"BSD-3-Clause"
] | null | null | null |
from typing import Optional, Sequence
from base.block import BlockFixed, Block, IBlock
__author__ = "Anton Höß"
__copyright__ = "Copyright 2021"
class AndN(Block):
def __init__(self, prev_blocks: Sequence[IBlock] = None, name: Optional[str] = None):
Block.__init__(self, None, 1, name=name)
if prev_blocks is not None:
for b in prev_blocks:
if b is not None:
self.conn_to_prev_block(b)
# end if
# end for
# end if
# end def
def _calc_values(self):
value = True
value_calculated = False
for conn_in in self._conn_in:
if conn_in.value is None:
self._pin_value[0] = None
return
else:
value = value and conn_in.value
value_calculated = True
# end if
# end for
self._pin_value[0] = int(value) if value_calculated else None
# end def
# end class
class OrN(Block):
def __init__(self, prev_blocks: Sequence[IBlock] = None, name: Optional[str] = None):
Block.__init__(self, None, 1, name=name)
if prev_blocks is not None:
for b in prev_blocks:
if b is not None:
self.conn_to_prev_block(b)
# end if
# end for
# end if
# end def
def _calc_values(self):
value = False
value_calculated = False
for conn_in in self._conn_in:
if conn_in.value is None:
self._pin_value[0] = None
return
else:
value = value or conn_in.value
value_calculated = True
# end if
# end for
self._pin_value[0] = int(value) if value_calculated else None
# end def
# end class
class Not(BlockFixed):
def __init__(self, prev_block: Optional[IBlock] = None, name: Optional[str] = None):
BlockFixed.__init__(self, 1, 1, name=name)
if prev_block is not None:
self.conn_to_prev_block(prev_block)
# end if
# end def
def _calc_values(self):
if self._conn_in[0].value is not None:
self._pin_value[0] = int(not self._conn_in[0].value)
else:
self._pin_value[0] = None
# end if
# end def
# end class
class Gt(BlockFixed):
def __init__(self, prev_block: Optional[IBlock] = None, prev_block2: Optional[IBlock] = None, name: Optional[str] = None):
BlockFixed.__init__(self, 2, 1, name=name)
if prev_block is not None:
self.conn_to_prev_block(prev_block)
# end if
if prev_block2 is not None:
self.conn_to_prev_block(prev_block2)
# end if
# end def
def _calc_values(self):
        # both inputs must be available before comparing, otherwise propagate None
        if self._conn_in[0].value is not None and self._conn_in[1].value is not None:
            self._pin_value[0] = int(self._conn_in[0].value > self._conn_in[1].value)
else:
self._pin_value[0] = None
# end if
# end def
# end class
class Lt(BlockFixed):
def __init__(self, prev_block: Optional[IBlock] = None, prev_block2: Optional[IBlock] = None, name: Optional[str] = None):
BlockFixed.__init__(self, 2, 1, name=name)
if prev_block is not None:
self.conn_to_prev_block(prev_block)
# end if
if prev_block2 is not None:
self.conn_to_prev_block(prev_block2)
# end if
# end def
def _calc_values(self):
        # both inputs must be available before comparing, otherwise propagate None
        if self._conn_in[0].value is not None and self._conn_in[1].value is not None:
            self._pin_value[0] = int(self._conn_in[0].value < self._conn_in[1].value)
else:
self._pin_value[0] = None
# end if
# end def
# end class
class EqN(Block):
def __init__(self, prev_blocks: Sequence[IBlock] = None, name: Optional[str] = None):
Block.__init__(self, None, 1, name=name)
if prev_blocks is not None:
for b in prev_blocks:
if b is not None:
self.conn_to_prev_block(b)
# end if
# end for
# end if
# end def
def _calc_values(self):
eq = True
value = None
value_calculated = False
for conn_in in self._conn_in:
if conn_in.value is None:
self._pin_value[0] = None
return
else:
if not value_calculated:
value = conn_in.value
value_calculated = True
else:
if conn_in.value != value:
eq = False
# end if
# end if
# end if
# end for
self._pin_value[0] = int(eq) if value_calculated else None
# end def
# end class
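# Informal semantics sketch: with inputs a, b wired via conn_to_prev_block,
#   AndN -> int(a and b), OrN -> int(a or b), Not -> int(not a),
#   Gt -> int(a > b), Lt -> int(a < b), EqN -> int(all inputs equal);
# whenever a required input is still None, the single output pin is None.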
| 27.267045
| 126
| 0.546572
|
86cbadd36c6c83daa7b6064505b5d2e1763fecde
| 1,701
|
py
|
Python
|
generator/contact.py
|
Aks1389/python_training
|
8c1561e4f0da62cb3fca3b5307c4bfecc8e4dda2
|
[
"Apache-2.0"
] | null | null | null |
generator/contact.py
|
Aks1389/python_training
|
8c1561e4f0da62cb3fca3b5307c4bfecc8e4dda2
|
[
"Apache-2.0"
] | null | null | null |
generator/contact.py
|
Aks1389/python_training
|
8c1561e4f0da62cb3fca3b5307c4bfecc8e4dda2
|
[
"Apache-2.0"
] | null | null | null |
from model.contact import Contact
import os.path
import jsonpickle
import getopt
import sys
import random
import string
import datetime
import re
try:
opts, args = getopt.getopt(sys.argv[1:], "n:f:", ["number of contacts", "file"])
except getopt.GetoptError as err:
    # the getopt module has no usage() helper; report the parse error instead
    print(err)
sys.exit(2)
n = 3
f = "data/contacts.json"
for o, a in opts:
if o == "-n":
n = int(a)
elif o == "-f":
f = a
def random_string(prefix, maxlen):
symbols = string.ascii_letters + string.digits + " " * 10
return prefix + "".join([random.choice(symbols) for i in range(random.randrange(maxlen))])
def random_phone():
return "+"+random.choice(string.digits)+"".join([random.choice(string.digits + " "*7) for i in range(11)])
def random_date():
date = (datetime.date.today() - datetime.timedelta(days=random.randint(0,29999))).strftime("%d-%B-%Y")
return re.sub("^0","", date)
def random_email():
symbols = string.ascii_letters + string.digits
return "".join([random.choice(symbols) for i in range(random.randrange(10))])+ "@" + \
"".join([random.choice(symbols) for i in range(random.randrange(4))]) + ".com"
testdata = [Contact(first_name="", last_name="")] + [
Contact(first_name=random_string("name", 7), last_name=random_string("", 12), birthday=random_date(),
address=random_string("street ", 15), mobile_phone=random_phone(), email=random_email(),
company=random_string("comp.", 10))
for i in range(n)
]
file = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", f)
with open(file, "w") as out:
jsonpickle.set_encoder_options("json", indent=2)
out.write(jsonpickle.encode(testdata))
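# Example invocation (illustrative; paths are relative to the project root):
#     python generator/contact.py -n 5 -f data/contacts.json
# writes one empty contact plus 5 randomized contacts as JSON to the file.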
| 30.927273
| 110
| 0.656085
|
7716c24e58a30826594968bda0f72a20046cb117
| 5,250
|
py
|
Python
|
utils.py
|
Sooraj-s-98/Simulated-Self-Driving-Car
|
d102372a6cc0ef375a6ff9fffb9c5f7e8e6a5b90
|
[
"MIT"
] | 7
|
2017-09-22T01:53:51.000Z
|
2019-01-20T04:45:05.000Z
|
utils.py
|
Sooraj-s-98/Simulated-Self-Driving-Car
|
d102372a6cc0ef375a6ff9fffb9c5f7e8e6a5b90
|
[
"MIT"
] | 1
|
2018-04-10T05:49:53.000Z
|
2018-04-14T05:02:41.000Z
|
utils.py
|
Sooraj-s-98/Simulated-Self-Driving-Car
|
d102372a6cc0ef375a6ff9fffb9c5f7e8e6a5b90
|
[
"MIT"
] | 6
|
2019-06-28T05:10:39.000Z
|
2021-11-24T11:04:32.000Z
|
import cv2, os
import numpy as np
import matplotlib.image as mpimg
IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_CHANNELS = 66, 200, 3
INPUT_SHAPE = (IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_CHANNELS)
def load_image(data_dir, image_file):
"""
Load RGB images from a file
"""
return mpimg.imread(os.path.join(data_dir, image_file.strip()))
def crop(image):
"""
Crop the image (removing the sky at the top and the car front at the bottom)
"""
return image[60:-25, :, :] # remove the sky and the car front
def resize(image):
"""
Resize the image to the input shape used by the network model
"""
    # the third positional argument of cv2.resize is dst, so the
    # interpolation flag must be passed by keyword
    return cv2.resize(image, (IMAGE_WIDTH, IMAGE_HEIGHT), interpolation=cv2.INTER_AREA)
def rgb2yuv(image):
"""
Convert the image from RGB to YUV (This is what the NVIDIA model does)
"""
return cv2.cvtColor(image, cv2.COLOR_RGB2YUV)
def preprocess(image):
"""
Combine all preprocess functions into one
"""
image = crop(image)
image = resize(image)
image = rgb2yuv(image)
return image
def choose_image(data_dir, center, left, right, steering_angle):
"""
Randomly choose an image from the center, left or right, and adjust
the steering angle.
"""
choice = np.random.choice(3)
if choice == 0:
return load_image(data_dir, left), steering_angle + 0.2
elif choice == 1:
return load_image(data_dir, right), steering_angle - 0.2
return load_image(data_dir, center), steering_angle
def random_flip(image, steering_angle):
"""
    Randomly flip the image left <-> right, and adjust the steering angle.
"""
if np.random.rand() < 0.5:
image = cv2.flip(image, 1)
steering_angle = -steering_angle
return image, steering_angle
def random_translate(image, steering_angle, range_x, range_y):
"""
    Randomly shift the image vertically and horizontally (translation).
"""
trans_x = range_x * (np.random.rand() - 0.5)
trans_y = range_y * (np.random.rand() - 0.5)
steering_angle += trans_x * 0.002
trans_m = np.float32([[1, 0, trans_x], [0, 1, trans_y]])
height, width = image.shape[:2]
image = cv2.warpAffine(image, trans_m, (width, height))
return image, steering_angle
def random_shadow(image):
"""
Generates and adds random shadow
"""
# (x1, y1) and (x2, y2) forms a line
# xm, ym gives all the locations of the image
x1, y1 = IMAGE_WIDTH * np.random.rand(), 0
x2, y2 = IMAGE_WIDTH * np.random.rand(), IMAGE_HEIGHT
xm, ym = np.mgrid[0:IMAGE_HEIGHT, 0:IMAGE_WIDTH]
# mathematically speaking, we want to set 1 below the line and zero otherwise
# Our coordinate is up side down. So, the above the line:
# (ym-y1)/(xm-x1) > (y2-y1)/(x2-x1)
# as x2 == x1 causes zero-division problem, we'll write it in the below form:
# (ym-y1)*(x2-x1) - (y2-y1)*(xm-x1) > 0
mask = np.zeros_like(image[:, :, 1])
mask[(ym - y1) * (x2 - x1) - (y2 - y1) * (xm - x1) > 0] = 1
# choose which side should have shadow and adjust saturation
cond = mask == np.random.randint(2)
s_ratio = np.random.uniform(low=0.2, high=0.5)
# adjust Saturation in HLS(Hue, Light, Saturation)
hls = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
hls[:, :, 1][cond] = hls[:, :, 1][cond] * s_ratio
return cv2.cvtColor(hls, cv2.COLOR_HLS2RGB)
def random_brightness(image):
"""
Randomly adjust brightness of the image.
"""
# HSV (Hue, Saturation, Value) is also called HSB ('B' for Brightness).
hsv = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
ratio = 1.0 + 0.4 * (np.random.rand() - 0.5)
hsv[:,:,2] = hsv[:,:,2] * ratio
return cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)
def augument(data_dir, center, left, right, steering_angle, range_x=100, range_y=10):
"""
    Generate an augmented image and adjust the steering angle.
(The steering angle is associated with the center image)
"""
image, steering_angle = choose_image(data_dir, center, left, right, steering_angle)
image, steering_angle = random_flip(image, steering_angle)
image, steering_angle = random_translate(image, steering_angle, range_x, range_y)
image = random_shadow(image)
image = random_brightness(image)
return image, steering_angle
def batch_generator(data_dir, image_paths, steering_angles, batch_size, is_training):
"""
Generate training image give image paths and associated steering angles
"""
images = np.empty([batch_size, IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_CHANNELS])
steers = np.empty(batch_size)
while True:
i = 0
for index in np.random.permutation(image_paths.shape[0]):
center, left, right = image_paths[index]
steering_angle = steering_angles[index]
            # augmentation
if is_training and np.random.rand() < 0.6:
image, steering_angle = augument(data_dir, center, left, right, steering_angle)
else:
image = load_image(data_dir, center)
# add the image and steering angle to the batch
images[i] = preprocess(image)
steers[i] = steering_angle
i += 1
if i == batch_size:
break
yield images, steers
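# Illustrative usage sketch (hypothetical CSV layout of a Udacity-style
# driving log; the file path and column names below are assumptions):
#
#     import pandas as pd
#     df = pd.read_csv('data/driving_log.csv',
#                      names=['center', 'left', 'right',
#                             'steering', 'throttle', 'brake', 'speed'])
#     X = df[['center', 'left', 'right']].values
#     y = df['steering'].values
#     gen = batch_generator('data', X, y, batch_size=40, is_training=True)
#     images, steers = next(gen)  # images: (40, 66, 200, 3), steers: (40,)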
| 33.018868
| 95
| 0.647429
|
d9929ced832f73ee33cb4cf65707a8e64b060fef
| 2,812
|
py
|
Python
|
src/year2021/day15a.py
|
lancelote/advent_of_code
|
06dda6ca034bc1e86addee7798bb9b2a34ff565b
|
[
"Unlicense"
] | 10
|
2017-12-11T17:54:52.000Z
|
2021-12-09T20:16:30.000Z
|
src/year2021/day15a.py
|
lancelote/advent_of_code
|
06dda6ca034bc1e86addee7798bb9b2a34ff565b
|
[
"Unlicense"
] | 260
|
2015-12-09T11:03:03.000Z
|
2021-12-12T14:32:23.000Z
|
src/year2021/day15a.py
|
lancelote/advent_of_code
|
06dda6ca034bc1e86addee7798bb9b2a34ff565b
|
[
"Unlicense"
] | null | null | null |
"""2021 - Day 15 Part 1: Chiton."""
from __future__ import annotations
from typing import Iterator
from typing import NamedTuple
SHIFTS = {
(0, -1),
(+1, 0),
(0, +1),
(-1, 0),
}
class RiskMap:
def __init__(self, data: list[list[int]]):
self.data = data
@classmethod
def from_task_a(cls, task: str) -> RiskMap:
data = [[int(x) for x in line] for line in task.splitlines()]
return cls(data)
@classmethod
def from_task_b(cls, task: str) -> RiskMap:
chunk = [[int(x) for x in line] for line in task.splitlines()]
chunk_h = len(chunk[0])
chunk_v = len(chunk)
data_h = chunk_h * 5
data_v = chunk_v * 5
data = [[0 for _ in range(data_h)] for _ in range(data_v)]
for row_i in range(5):
for col_i in range(5):
inc = row_i + col_i
for y, chunk_row in enumerate(chunk):
for x, item in enumerate(chunk_row):
new_x = x + chunk_h * col_i
new_y = y + chunk_v * row_i
data[new_y][new_x] = (item - 1 + inc) % 9 + 1
return cls(data)
@property
def max_x(self) -> int:
assert len(self.data)
assert len(self.data[0])
return len(self.data[0]) - 1
@property
def max_y(self) -> int:
assert len(self.data)
return len(self.data) - 1
def __getitem__(self, item: Point) -> int:
return self.data[item.y][item.x]
def adjacent(self, point: Point) -> Iterator[Point]:
for dx, dy in SHIFTS:
new_x = point.x + dx
new_y = point.y + dy
valid_new_x = 0 <= new_x < len(self.data[0])
valid_new_y = 0 <= new_y < len(self.data)
if valid_new_x and valid_new_y:
yield Point(new_x, new_y)
class Point(NamedTuple):
x: int
y: int
def walk(start: Point, target: Point, risk_map: RiskMap) -> int:
cumulative_risk = {start: 0}
to_visit: set[Point] = set()
to_visit.add(start)
while to_visit:
current = to_visit.pop()
current_path_risk = cumulative_risk[current]
for point in risk_map.adjacent(current):
self_risk = risk_map[point]
candidate_path_risk = current_path_risk + self_risk
if (
point not in cumulative_risk
or cumulative_risk[point] > candidate_path_risk
):
cumulative_risk[point] = candidate_path_risk
to_visit.add(point)
return cumulative_risk[target]
def solve(task: str) -> int:
risk_map = RiskMap.from_task_a(task)
start = Point(0, 0)
target = Point(risk_map.max_x, risk_map.max_y)
return walk(start, target, risk_map)
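# Illustrative check against the published AoC 2021 day 15 sample grid
# (the part 1 answer for this grid is 40):
#
#     SAMPLE = "\n".join([
#         "1163751742", "1381373672", "2136511328", "3694931569",
#         "7463417111", "1319128137", "1359912421", "3125421639",
#         "1293138521", "2311944581",
#     ])
#     assert solve(SAMPLE) == 40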
| 25.563636
| 70
| 0.558321
|
5706d09702129703023b669ed0d6e0807a0b6ff6
| 9,835
|
py
|
Python
|
protocol-generator/bin/gen_protocol_ext.py
|
maxwell-dev/maxwell-protocol-rust
|
780d400757720cec46a6995b4d5cf10cc6b76de0
|
[
"Apache-2.0"
] | null | null | null |
protocol-generator/bin/gen_protocol_ext.py
|
maxwell-dev/maxwell-protocol-rust
|
780d400757720cec46a6995b4d5cf10cc6b76de0
|
[
"Apache-2.0"
] | null | null | null |
protocol-generator/bin/gen_protocol_ext.py
|
maxwell-dev/maxwell-protocol-rust
|
780d400757720cec46a6995b4d5cf10cc6b76de0
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
import argparse
import re
from os.path import basename
def parse():
parser = argparse.ArgumentParser(
description="The gernerator for maxwell protocol in rust."
)
parser.add_argument("--proto_file", required=True,
type=argparse.FileType("r"))
parser.add_argument("--enum_type_name", required=True)
args = parser.parse_args()
return args.proto_file, args.enum_type_name
def extract(content, enum_type_name):
enum_type_def_pattern = r"enum\s+" + enum_type_name + "\s+{([^}]+)}"
enum_type_def_match = re.search(enum_type_def_pattern, content)
if enum_type_def_match:
enum_pairs_pattern = r"([A-Z_0-9]+)\s*=\s*([0-9]+);"
enum_pairs = re.findall(
enum_pairs_pattern, enum_type_def_match.group(1))
return enum_pairs
else:
return []
def capitalize(name):
return "".join(map(lambda s: s.capitalize(), name.lower().split("_")))
def spaces(n):
return " " * n
def build_use_decls(module_name):
return f"""use super::{module_name}::*;\n""" \
f"""use bytes::{{BufMut, Bytes, BytesMut}};\n""" \
f"""pub use prost::DecodeError;\n""" \
f"""use prost::Message;\n""" \
f"""use std::fmt::{{Debug, Formatter, Result as FmtResult}};"""
def build_protocol_msg_enum_def(enum_pairs):
protocol_msg_variant_defs = []
for (enum_name, enum_value) in enum_pairs:
if enum_name[0:7] == "UNKNOWN":
continue
protocol_msg_variant_defs.append(
f"""{spaces(4)}{capitalize(enum_name)}({capitalize(enum_name)}),"""
)
protocol_msg_variant_defs_output = "\n".join(protocol_msg_variant_defs)
protocol_msg_enum_def_output = f"""pub enum ProtocolMsg {{\n{protocol_msg_variant_defs_output}\n}}"""
return protocol_msg_enum_def_output
def build_protocol_msg_debug_impl(enum_pairs):
match_arm_decls = []
for (enum_name, enum_value) in enum_pairs:
if enum_name[0:7] == "UNKNOWN":
continue
match_arm_decls.append(
f"""{spaces(12)}ProtocolMsg::{capitalize(enum_name)}(msg) => write!(f, "{{:?}}", msg),"""
)
match_arms_decls_output = "\n".join(match_arm_decls)
match_expr_decl_output = f"""{spaces(8)}match self {{\n{match_arms_decls_output}\n{spaces(8)}}}"""
fmt_output = f"""{spaces(4)}fn fmt(&self, f: &mut Formatter) -> FmtResult {{\n""" \
f"""{match_expr_decl_output}\n""" \
f"""{spaces(4)}}}"""
protocol_msg_debug_impl_output = f"""impl Debug for ProtocolMsg {{\n{fmt_output}\n}}"""
return protocol_msg_debug_impl_output
def build_into_protocol_trait_def():
return f"""pub trait IntoProtocol {{\n""" \
f"""{spaces(4)}fn into_protocol(self) -> ProtocolMsg;\n""" \
f"""}}"""
def build_into_protocol_impls(enum_pairs):
impls = []
for (enum_name, enum_value) in enum_pairs:
if enum_name[0:7] == "UNKNOWN":
continue
impls.append(
f"""impl IntoProtocol for {capitalize(enum_name)} {{\n"""
f"""{spaces(4)}#[inline]\n""" \
f"""{spaces(4)}fn into_protocol(self) -> ProtocolMsg {{\n"""
f"""{spaces(8)}ProtocolMsg::{capitalize(enum_name)}(self)\n"""
f"""{spaces(4)}}}\n"""
f"""}}"""
)
impls_output = "\n\n".join(impls)
return impls_output
def build_encode_into_trait_def():
return f"""pub trait EncodeInto: Message + Sized {{\n""" \
f"""{spaces(4)}fn encode_type(bytes: &mut BytesMut);\n\n""" \
f"""{spaces(4)}fn encode_into(&self) -> Bytes {{\n""" \
f"""{spaces(8)}let size = self.encoded_len() as usize;\n""" \
f"""{spaces(8)}let mut bytes = BytesMut::with_capacity(size + 1);\n""" \
f"""{spaces(8)}Self::encode_type(&mut bytes);\n""" \
f"""{spaces(8)}if let Err(err) = self.encode(&mut bytes) {{\n""" \
f"""{spaces(12)}panic!("Failed to encode msg: {{:?}}", err);\n""" \
f"""{spaces(8)}}}\n""" \
f"""{spaces(8)}return bytes.freeze();\n""" \
f"""{spaces(4)}}}\n""" \
f"""}}"""
def build_encode_into_impls(enum_pairs):
impls = []
for (enum_name, enum_value) in enum_pairs:
if enum_name[0:7] == "UNKNOWN":
continue
impls.append(
f"""impl EncodeInto for {capitalize(enum_name)} {{\n"""
f"""{spaces(4)}#[inline]\n""" \
f"""{spaces(4)}fn encode_type(bytes: &mut BytesMut) {{\n"""
f"""{spaces(8)}bytes.put_u8({enum_value});\n"""
f"""{spaces(4)}}}\n"""
f"""}}"""
)
impls_output = "\n\n".join(impls)
return impls_output
def build_encode_fn_def(enum_pairs):
match_arm_decls = []
for (enum_name, enum_value) in enum_pairs:
if enum_name[0:7] == "UNKNOWN":
continue
match_arm_decls.append(
f"""{spaces(8)}ProtocolMsg::{capitalize(enum_name)}(msg) => msg.encode_into(),"""
)
match_arms_decls_output = "\n".join(match_arm_decls)
match_expr_decl_output = f"""{spaces(4)}match protocol_msg {{\n{match_arms_decls_output}\n{spaces(4)}}}"""
encode_fn_def_output = f"""pub fn encode(protocol_msg: &ProtocolMsg) -> Bytes {{\n""" \
f"""{match_expr_decl_output}\n""" \
f"""}}"""
return encode_fn_def_output
def build_decode_fn_def(enum_pairs):
vars_decl_output = f"""{spaces(4)}let msg_type = bytes[0] as i8;\n""" \
f"""{spaces(4)}let msg_body = bytes.slice(1..);"""
case_decls = []
first = True
for (enum_name, enum_value) in enum_pairs:
if enum_name[0:7] == "UNKNOWN":
continue
if first:
case_name = f"""{spaces(4)}if"""
first = False
else:
case_name = f"""{spaces(1)}else if"""
case_decls.append(
f"""{case_name} msg_type == {enum_value} {{\n"""
f"""{spaces(8)}let res: Result<{capitalize(enum_name)}, DecodeError> = Message::decode(msg_body);\n"""
f"""{spaces(8)}match res {{\n"""
f"""{spaces(12)}Ok(msg) => Ok(ProtocolMsg::{capitalize(enum_name)}(msg)),\n"""
f"""{spaces(12)}Err(err) => Err(err),\n"""
f"""{spaces(8)}}}\n"""
f"""{spaces(4)}}}"""
)
case_decls.append(
f"""{spaces(1)}else {{\n"""
f"""{spaces(8)}Err(DecodeError::new(format!("Invalid msg type: {{}}", msg_type)))\n"""
f"""{spaces(4)}}}"""
)
cases_output = "".join(case_decls)
decode_fn_def_output = f"""pub fn decode(bytes: &Bytes) -> Result<ProtocolMsg, DecodeError> {{\n""" \
f"""{vars_decl_output}\n""" \
f"""{cases_output}\n""" \
f"""}}"""
return decode_fn_def_output
def build_set_round_ref_fn_def(enum_pairs):
match_arm_decls = []
for (enum_name, enum_value) in enum_pairs:
if enum_name[0:7] == "UNKNOWN":
continue
if enum_name in ["DO_REQ", "DO_REP", "DO2_REQ", "DO2_REP", "OK2_REP", "ERROR2_REP"]:
match_arm_decls.append(
f"""{spaces(8)}ProtocolMsg::{capitalize(enum_name)}(msg) => msg.traces[0].r#ref = round_ref,""")
else:
match_arm_decls.append(
f"""{spaces(8)}ProtocolMsg::{capitalize(enum_name)}(msg) => msg.r#ref = round_ref,""")
match_arms_decls_output = "\n".join(match_arm_decls)
match_expr_decl_output = f"""{spaces(4)}match protocol_msg {{\n{match_arms_decls_output}\n{spaces(4)}}}"""
set_round_ref_fn_def_output = f"""pub fn set_round_ref(protocol_msg: &mut ProtocolMsg, round_ref: u32) -> &ProtocolMsg {{\n""" \
f"""{match_expr_decl_output}\n""" \
f"""{spaces(4)}protocol_msg\n""" \
f"""}}"""
return set_round_ref_fn_def_output
def build_get_round_ref_fn_def(enum_pairs):
match_arm_decls = []
for (enum_name, enum_value) in enum_pairs:
if enum_name[0:7] == "UNKNOWN":
continue
if enum_name in ["DO_REQ", "DO_REP", "DO2_REQ", "DO2_REP", "OK2_REP", "ERROR2_REP"]:
match_arm_decls.append(
f"""{spaces(8)}ProtocolMsg::{capitalize(enum_name)}(msg) => msg.traces[0].r#ref,""")
else:
match_arm_decls.append(
f"""{spaces(8)}ProtocolMsg::{capitalize(enum_name)}(msg) => msg.r#ref,""")
match_arms_decls_output = "\n".join(match_arm_decls)
match_expr_decl_output = f"""{spaces(4)}match protocol_msg {{\n{match_arms_decls_output}\n{spaces(4)}}}"""
get_round_ref_fn_def_output = f"""pub fn get_round_ref(protocol_msg: &ProtocolMsg) -> u32 {{\n""" \
f"""{match_expr_decl_output}\n""" \
f"""}}"""
return get_round_ref_fn_def_output
def output(module_name, enum_pairs):
output = \
f"""{build_use_decls(module_name)}\n\n""" \
f"""{build_protocol_msg_enum_def(enum_pairs)}\n\n""" \
f"""{build_protocol_msg_debug_impl(enum_pairs)}\n\n""" \
f"""{build_into_protocol_trait_def()}\n\n""" \
f"""{build_into_protocol_impls(enum_pairs)}\n\n""" \
f"""{build_encode_into_trait_def()}\n\n""" \
f"""{build_encode_into_impls(enum_pairs)}\n\n""" \
f"""{build_encode_fn_def(enum_pairs)}\n\n""" \
f"""{build_decode_fn_def(enum_pairs)}\n\n""" \
f"""{build_set_round_ref_fn_def(enum_pairs)}\n\n""" \
f"""{build_get_round_ref_fn_def(enum_pairs)}"""
output_file_name = f"""../src/protocol/{module_name}_ext.rs"""
with open(output_file_name, "w") as output_file:
output_file.write(output)
if __name__ == "__main__":
proto_file, enum_type_name = parse()
    module_name = re.sub(r"([^.]+)\.normalized\.proto$", r"\1", basename(proto_file.name))
content = proto_file.read().replace("\n", "")
enum_pairs = extract(content, enum_type_name)
output(module_name, enum_pairs)
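# Hypothetical invocation (the file name is an assumption, not from the repo):
#   python3 gen_protocol_ext.py \
#       --proto_file maxwell_protocol.normalized.proto \
#       --enum_type_name MsgType
# would derive module_name "maxwell_protocol" and write the generated code to
# ../src/protocol/maxwell_protocol_ext.rs.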
| 38.720472
| 132
| 0.592679
|
adb0897a441f88e4518a56a143924d2f139526c8
| 5,849
|
py
|
Python
|
rgb_text2.py
|
slzatz/esp8266
|
687a0ff1cf2326f7c911e90026811036575932d1
|
[
"MIT"
] | 2
|
2017-05-25T23:15:22.000Z
|
2019-09-29T02:13:13.000Z
|
rgb_text2.py
|
slzatz/esp8266
|
687a0ff1cf2326f7c911e90026811036575932d1
|
[
"MIT"
] | null | null | null |
rgb_text2.py
|
slzatz/esp8266
|
687a0ff1cf2326f7c911e90026811036575932d1
|
[
"MIT"
] | 2
|
2019-09-29T02:13:14.000Z
|
2019-11-06T07:49:21.000Z
|
'''
This script is used in conjunction with ili9341_text2.py and font2.py to utilize
larger fonts on the Adafruit TFT FeatherWing - 2.4" 320x240 Touchscreen.
This script is imported by ili9341_text2.py -- both that script and this one
are being frozen into the Micropython firmware by being placed in the
micropython/esp8266/modules directory. If you don't freeze them into the firmware,
you run out of memory.
These scripts are modified from Adafruit's Tony DiCola's scripts at:
https://github.com/adafruit/micropython-adafruit-rgb-display
The Adafruit learning module is at:
https://learn.adafruit.com/micropython-hardware-ili9341-tft-and-featherwing/overview
'''
import font2 as font
import utime
import ustruct
def color565(r, g, b):
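    # Pack 8-bit R/G/B into one 16-bit RGB565 value: keep the top 5 bits of
    # red, 6 of green, 5 of blue. For example color565(255, 0, 0) == 0xF800
    # and color565(255, 255, 255) == 0xFFFF.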
return (r & 0xf8) << 8 | (g & 0xfc) << 3 | b >> 3
class DummyPin:
"""A fake gpio pin for when you want to skip pins."""
def init(self, *args, **kwargs):
pass
def off(self):
pass
def on(self):
pass
class Display:
_PAGE_SET = None
_COLUMN_SET = None
_RAM_WRITE = None
_RAM_READ = None
_INIT = ()
_ENCODE_PIXEL = ">H"
_ENCODE_POS = ">HH"
_DECODE_PIXEL = ">BBB"
def __init__(self, width, height):
self.width = width
self.height = height
self.init()
def init(self):
"""Run the initialization commands."""
for command, data in self._INIT:
self._write(command, data)
def _block(self, x0, y0, x1, y1, data=None):
"""Read or write a block of data."""
self._write(self._COLUMN_SET, self._encode_pos(x0, x1))
self._write(self._PAGE_SET, self._encode_pos(y0, y1))
if data is None:
size = ustruct.calcsize(self._DECODE_PIXEL)
return self._read(self._RAM_READ,
(x1 - x0 + 1) * (y1 - y0 + 1) * size)
self._write(self._RAM_WRITE, data)
def _encode_pos(self, a, b):
"""Encode a postion into bytes."""
return ustruct.pack(self._ENCODE_POS, a, b)
def _encode_pixel(self, color):
"""Encode a pixel color into bytes."""
return ustruct.pack(self._ENCODE_PIXEL, color)
def _decode_pixel(self, data):
"""Decode bytes into a pixel color."""
return color565(*ustruct.unpack(self._DECODE_PIXEL, data))
def pixel(self, x, y, color=None):
"""Read or write a pixel."""
if color is None:
return self._decode_pixel(self._block(x, y, x, y))
if not 0 <= x < self.width or not 0 <= y < self.height:
return
self._block(x, y, x, y, self._encode_pixel(color))
def fill_rectangle(self, x, y, width, height, color):
"""Draw a filled rectangle."""
x = min(self.width - 1, max(0, x))
y = min(self.height - 1, max(0, y))
w = min(self.width - x, max(1, width))
h = min(self.height - y, max(1, height))
self._block(x, y, x + w - 1, y + h - 1, b'')
chunks, rest = divmod(w * h, 512)
pixel = self._encode_pixel(color)
if chunks:
data = pixel * 512
for count in range(chunks):
self._write(None, data)
self._write(None, pixel * rest)
def fill(self, color=0):
"""Fill whole screen."""
self.fill_rectangle(0, 0, self.width, self.height, color)
def hline(self, x, y, width, color):
"""Draw a horizontal line."""
self.fill_rectangle(x, y, width, 1, color)
def vline(self, x, y, height, color):
"""Draw a vertical line."""
self.fill_rectangle(x, y, 1, height, color)
def draw_text(self, x, y, string, color=None): #, size=1, space=1):
def pixel_y(char_row):
char_offset = y - char_row + 1
return 12 + char_offset
def pixel_x(char_number, char_column):
char_offset = x + char_number * font.cols + char_number
pixel_offset = char_offset + char_column
return pixel_offset
def pixel_mask(char, char_row, char_column):
# eliminated first 32 non-printing chars
char_index_offset = (ord(char)-32) * font.rows
try:
return font.bytes_[char_index_offset + char_row] >> (8-char_column) & 0x1
except IndexError:
return 0
for char_number, char in enumerate(string):
for char_row in range(font.rows): #13
for char_column in range(font.cols): #8
if pixel_mask(char, char_row, char_column):
self.pixel(pixel_x(char_number,char_column),
pixel_y(char_row),
color)
class DisplaySPI(Display):
def __init__(self, spi, dc, cs, rst=None, width=1, height=1):
self.spi = spi
self.cs = cs
self.dc = dc
self.rst = rst
self.cs.init(self.cs.OUT, value=1)
self.dc.init(self.dc.OUT, value=0)
if self.rst:
self.rst.init(self.rst.OUT, value=0)
self.reset()
super().__init__(width, height)
def reset(self):
self.rst.off()
utime.sleep_ms(50)
self.rst.on()
utime.sleep_ms(50)
def _write(self, command=None, data=None):
if command is not None:
self.dc.off()
self.cs.off()
self.spi.write(bytearray([command]))
self.cs.on()
if data is not None:
self.dc.on()
self.cs.off()
self.spi.write(data)
self.cs.on()
def _read(self, command=None, count=0):
self.dc.off()
self.cs.off()
if command is not None:
self.spi.write(bytearray([command]))
if count:
data = self.spi.read(count)
self.cs.on()
return data
| 33.422857
| 89
| 0.574286
|
c44c816d93a99869a51752088824f12bb2b8857d
| 19,207
|
py
|
Python
|
buildscripts/hang_analyzer.py
|
EdwardPrentice/wrongo
|
1e7c9136f5fab7040b5bd5df51b4946876625c88
|
[
"Apache-2.0"
] | 2
|
2021-08-19T12:41:45.000Z
|
2021-08-19T12:48:10.000Z
|
buildscripts/hang_analyzer.py
|
EdwardPrentice/wrongo
|
1e7c9136f5fab7040b5bd5df51b4946876625c88
|
[
"Apache-2.0"
] | null | null | null |
buildscripts/hang_analyzer.py
|
EdwardPrentice/wrongo
|
1e7c9136f5fab7040b5bd5df51b4946876625c88
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
"""Hang Analyzer
A prototype hang analyzer for Evergreen integration to help investigate test timeouts
1. Script supports taking dumps, and/or dumping a summary of useful information about a process
2. Script will iterate through a list of interesting processes,
and run the tools from step 1. The list of processes can be provided as an option.
3. Java processes will be dumped using jstack, if available.
Supports Linux, MacOS X, Solaris, and Windows.
"""
import StringIO
import csv
import glob
import itertools
import os
import platform
import re
import signal
import subprocess
import sys
import tempfile
import threading
import time
from distutils import spawn
from optparse import OptionParser
if sys.platform == "win32":
import win32process
def call(a = []):
sys.stdout.write(str(a) + "\n")
sys.stdout.flush()
ret = subprocess.call(a)
if( ret != 0):
sys.stderr.write("Bad exit code %d\n" % (ret))
raise Exception()
# Copied from python 2.7 version of subprocess.py
def check_output(*popenargs, **kwargs):
r"""Run command with arguments and return its output as a byte string.
If the exit code was non-zero it raises a CalledProcessError. The
CalledProcessError object will have the return code in the returncode
attribute and output in the output attribute.
The arguments are the same as for the Popen constructor. Example:
>>> check_output(["ls", "-l", "/dev/null"])
'crw-rw-rw- 1 root root 1, 3 Oct 18 2007 /dev/null\n'
The stdout argument is not allowed as it is used internally.
To capture standard error in the result, use stderr=STDOUT.
>>> check_output(["/bin/sh", "-c",
... "ls -l non_existent_file ; exit 0"],
... stderr=STDOUT)
'ls: non_existent_file: No such file or directory\n'
"""
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
        raise subprocess.CalledProcessError(retcode, cmd, output=output)
return output
def callo(a = []):
sys.stdout.write(str(a) + "\n")
sys.stdout.flush()
return check_output(a)
def find_program(prog, paths):
"""Finds the specified program in env PATH, or tries a set of paths """
loc = spawn.find_executable(prog)
if(loc != None):
return loc
for loc in paths:
p = os.path.join(loc, prog)
if os.path.exists(p):
return p
return None
class WindowsDumper(object):
def __find_debugger(self):
"""Finds the installed debugger"""
# We are looking for c:\Program Files (x86)\Windows Kits\8.1\Debuggers\x64
cdb = spawn.find_executable('cdb.exe')
if(cdb != None):
return cdb
from win32com.shell import shell, shellcon
# Cygwin via sshd does not expose the normal environment variables
# Use the shell api to get the variable instead
rootDir = shell.SHGetFolderPath(0, shellcon.CSIDL_PROGRAM_FILESX86, None, 0)
for i in range(0,2):
pathToTest = os.path.join(rootDir, "Windows Kits", "8." + str(i), "Debuggers", "x64" )
sys.stdout.write("Checking for debugger in %s\n" % pathToTest)
if(os.path.exists(pathToTest)):
return os.path.join(pathToTest, "cdb.exe")
return None
def dump_info(self, pid, process_name, stream, take_dump = False):
"""Dump useful information to the console"""
dbg = self.__find_debugger()
if dbg is None:
stream.write("WARNING: Debugger cdb.exe not found, skipping dumping of %d\n" % (pid))
return
stream.write("INFO: Debugger %s, analyzing %d\n" % (dbg, pid))
cmds = [
".symfix", # Fixup symbol path
".symopt +0x10", # Enable line loading (off by default in CDB, on by default in WinDBG)
".reload", # Reload symbols
"!peb", # Dump current exe, & environment variables
"lm", # Dump loaded modules
"~* kp 100", # Dump All Threads with function arguments
".dump /ma /u dump_" + process_name + "." + str(pid) + "." + self.get_dump_ext() if take_dump else "",
# Dump to file, dump_<process name>_<time stamp>_<pid in hex>.<pid>.mdmp
".detach", # Detach
"q" # Quit
]
call([dbg, '-c', ";".join(cmds), '-p', str(pid)])
stream.write("INFO: Done analyzing process\n")
def get_dump_ext(self):
return "mdmp"
def dump_core(self, pid, output_file):
"""Take a dump of pid to specified file"""
dbg = self.__find_debugger()
if dbg is None:
sys.stdout.write("WARNING: Debugger cdb.exe not found, skipping dumping of %d to %s\n" % (pid, output_file))
return
sys.stdout.write("INFO: Debugger %s, analyzing %d to %s\n" % (dbg, pid, output_file))
call([dbg, '-c', ".dump /ma %s;.detach;q" % output_file, '-p', str(pid)] )
sys.stdout.write("INFO: Done analyzing process\n")
class WindowsProcessList(object):
def __find_ps(self):
"""Finds tasklist """
return os.path.join(os.environ["WINDIR"], "system32", "tasklist.exe")
def dump_processes(self):
"""Get list of [Pid, Process Name]"""
ps = self.__find_ps()
sys.stdout.write("INFO: Getting list of processes using %s\n" % ps)
ret = callo([ps, "/FO", "CSV"])
b = StringIO.StringIO(ret)
csvReader = csv.reader(b)
p = [[int(row[1]), row[0]] for row in csvReader if row[1] != "PID"]
sys.stdout.write("INFO: Done analyzing process\n")
return p
# LLDB dumper is for MacOS X
class LLDBDumper(object):
def __find_debugger(self):
"""Finds the installed debugger"""
return find_program('lldb', ['/usr/bin'])
def dump_info(self, pid, process_name, stream, take_dump = False):
dbg = self.__find_debugger()
if dbg is None:
stream.write("WARNING: Debugger lldb not found, skipping dumping of %d\n" % (pid))
return
stream.write("INFO: Debugger %s, analyzing %d\n" % (dbg, pid))
lldb_version = callo([dbg, "--version"])
stream.write(lldb_version)
# Do we have the XCode or LLVM version of lldb?
# Old versions of lldb do not work well when taking commands via a file
# XCode (7.2): lldb-340.4.119
# LLVM - lldb version 3.7.0 ( revision )
if 'version' not in lldb_version:
# We have XCode's lldb
lldb_version = lldb_version[lldb_version.index("lldb-"):]
lldb_version = lldb_version.replace('lldb-', '')
lldb_major_version = int(lldb_version[:lldb_version.index('.')])
if lldb_major_version < 340:
stream.write("WARNING: Debugger lldb is too old, please upgrade to XCode 7.2\n")
return
cmds = [
"attach -p %d" % pid,
"target modules list",
"thread backtrace all",
"process save-core dump_" + process_name + "." + str(pid) + "." + self.get_dump_ext() if take_dump else "",
"settings set interpreter.prompt-on-quit false",
"quit",
]
tf = tempfile.NamedTemporaryFile()
for c in cmds:
tf.write(c + "\n")
tf.flush()
        # Works on MacOS 10.9 & later
#call([dbg] + list( itertools.chain.from_iterable([['-o', b] for b in cmds])))
call(['cat', tf.name])
call([dbg, '--source', tf.name])
stream.write("INFO: Done analyzing process\n")
def get_dump_ext(self):
return "core"
def dump_core(self, pid, output_file):
"""Take a dump of pid to specified file"""
sys.stderr.write("ERROR: lldb does not support dumps, stupid debugger\n")
class DarwinProcessList(object):
def __find_ps(self):
"""Finds ps"""
return find_program('ps', ['/bin'])
def dump_processes(self):
"""Get list of [Pid, Process Name]"""
ps = self.__find_ps()
sys.stdout.write("INFO: Getting list of processes using %s\n" % ps)
ret = callo([ps, "-axco", "pid,comm"])
b = StringIO.StringIO(ret)
csvReader = csv.reader(b, delimiter=' ', quoting=csv.QUOTE_NONE, skipinitialspace=True)
p = [[int(row[0]), row[1]] for row in csvReader if row[0] != "PID"]
sys.stdout.write("INFO: Done analyzing process\n")
return p
# GDB dumper is for Linux & Solaris
class GDBDumper(object):
def __find_debugger(self):
"""Finds the installed debugger"""
return find_program('gdb', ['/opt/mongodbtoolchain/gdb/bin', '/usr/bin'])
def dump_info(self, pid, process_name, stream, take_dump = False):
dbg = self.__find_debugger()
if dbg is None:
stream.write("WARNING: Debugger gdb not found, skipping dumping of %d\n" % (pid))
return
stream.write("INFO: Debugger %s, analyzing %d\n" % (dbg, pid))
call([dbg, "--version"])
cmds = [
"set pagination off",
"attach %d" % pid,
"info sharedlibrary",
"thread apply all bt",
"gcore dump_" + process_name + "." + str(pid) + "." + self.get_dump_ext() if take_dump else "",
"set confirm off",
"quit",
]
call([dbg, "--quiet"] + list( itertools.chain.from_iterable([['-ex', b] for b in cmds])))
stream.write("INFO: Done analyzing process\n")
def get_dump_ext(self):
return "core"
def _find_gcore(self):
"""Finds the installed gcore"""
dbg = "/usr/bin/gcore"
if os.path.exists(dbg):
return dbg
return None
def dump_core(self, pid, output_file):
"""Take a dump of pid to specified file"""
dbg = self._find_gcore()
if dbg is None:
sys.stdout.write("WARNING: Debugger gcore not found, skipping dumping of %d to %s\n" % (pid, output_file))
return
sys.stdout.write("INFO: Debugger %s, analyzing %d to %s\n" % (dbg, pid, output_file))
call([dbg, "-o", output_file, str(pid)])
sys.stdout.write("INFO: Done analyzing process\n")
# GCore appends the pid to the output file name
return output_file + "." + str(pid)
class LinuxProcessList(object):
def __find_ps(self):
"""Finds ps"""
return find_program('ps', ['/bin', '/usr/bin'])
def dump_processes(self):
"""Get list of [Pid, Process Name]"""
ps = self.__find_ps()
sys.stdout.write("INFO: Getting list of processes using %s\n" % ps)
call([ps, "--version"])
ret = callo([ps, "-eo", "pid,args"])
b = StringIO.StringIO(ret)
csvReader = csv.reader(b, delimiter=' ', quoting=csv.QUOTE_NONE, skipinitialspace=True)
p = [[int(row[0]), os.path.split(row[1])[1]] for row in csvReader if row[0] != "PID"]
sys.stdout.write("INFO: Done analyzing process\n")
return p
class SolarisProcessList(object):
def __find_ps(self):
"""Finds ps"""
return find_program('ps', ['/bin', '/usr/bin'])
def dump_processes(self):
"""Get list of [Pid, Process Name]"""
ps = self.__find_ps()
sys.stdout.write("INFO: Getting list of processes using %s\n" % ps)
ret = callo([ps, "-eo", "pid,args"])
b = StringIO.StringIO(ret)
csvReader = csv.reader(b, delimiter=' ', quoting=csv.QUOTE_NONE, skipinitialspace=True)
p = [[int(row[0]), os.path.split(row[1])[1]] for row in csvReader if row[0] != "PID"]
sys.stdout.write("INFO: Done analyzing process\n")
return p
# jstack is a JDK utility
class JstackDumper(object):
def __find_debugger(self):
"""Finds the installed jstack debugger"""
return find_program('jstack', ['/usr/bin'])
def dump_info(self, pid, process_name, stream, take_dump = False):
"""Dump java thread stack traces to the console"""
jstack = self.__find_debugger()
if jstack is None:
stream.write("WARNING: Debugger jstack not found, skipping dumping of %d\n" % (pid))
return
stream.write("INFO: Debugger %s, analyzing %d\n" % (jstack, pid))
call([jstack, "-l", str(pid)])
stream.write("INFO: Done analyzing process\n")
def dump_core(self, pid, output_file):
"""Take a dump of pid to specified file"""
sys.stderr.write("ERROR: jstack does not support dumps\n")
# jstack is a JDK utility
class JstackWindowsDumper(object):
def dump_info(self, pid, process_name, stream):
"""Dump java thread stack traces to the console"""
stream.write("WARNING: Debugger jstack not supported, skipping dumping of %d\n" % (pid))
def get_hang_analyzers():
dbg = None
jstack = None
ps = None
if sys.platform.startswith("linux"):
dbg = GDBDumper()
jstack = JstackDumper()
ps = LinuxProcessList()
elif sys.platform.startswith("sunos"):
dbg = GDBDumper()
jstack = JstackDumper()
ps = SolarisProcessList()
elif os.name == 'nt' or (os.name == "posix" and sys.platform == "cygwin"):
dbg = WindowsDumper()
jstack = JstackWindowsDumper()
ps = WindowsProcessList()
elif sys.platform == "darwin":
dbg = LLDBDumper()
jstack = JstackDumper()
ps = DarwinProcessList()
return [ps, dbg, jstack]
def check_dump_quota(quota, ext):
"""Check if sum of the files with ext is within the specified quota in megabytes"""
files = glob.glob("*." + ext)
size_sum = 0
for file_name in files:
size_sum += os.path.getsize(file_name)
return (size_sum <= quota)
def signal_process(pid, signalnum):
"""Signal process with signal, N/A on Windows"""
try:
os.kill(pid, signalnum)
print "Waiting for process to report"
time.sleep(5)
except OSError,e:
print "Hit OS error trying to signal process: " + str(e)
except AttributeError:
print "Cannot send signal to a process on Windows"
def timeout_protector():
print "Script timeout has been hit, terminating"
if sys.platform == "win32":
# Have the process exit with code 9 when it terminates itself to closely match the exit code
# of the process when it sends itself a SIGKILL.
handle = win32process.GetCurrentProcess()
win32process.TerminateProcess(handle, 9)
else:
os.kill(os.getpid(), signal.SIGKILL)
# Basic procedure
#
# 1. Get a list of interesting processes
# 2. Dump useful information or take dumps
def main():
print "Python Version: " + sys.version
print "OS: " + platform.platform()
try:
distro = platform.linux_distribution()
print "Linux Distribution: " + str(distro)
except AttributeError:
print "Cannot determine Linux distro since Python is too old"
try:
uid = os.getuid()
print "Current User: " + str(uid)
current_login = os.getlogin()
print "Current Login: " + current_login
except OSError:
print "Cannot determine Unix Current Login"
except AttributeError:
print "Cannot determine Unix Current Login, not supported on Windows"
interesting_processes = ["mongo", "mongod", "mongos", "_test", "dbtest", "python", "java"]
go_processes = []
parser = OptionParser(description=__doc__)
parser.add_option('-p', '--process_names', dest='process_names', help='List of process names to analyze')
parser.add_option('-g', '--go_process_names', dest='go_process_names', help='List of go process names to analyze')
parser.add_option('-s', '--max_core_dumps_size', dest='max_core_dumps_size', default=10000, help='Maximum total size of core dumps to keep in megabytes')
(options, args) = parser.parse_args()
if options.process_names is not None:
interesting_processes = options.process_names.split(',')
if options.go_process_names is not None:
go_processes = options.go_process_names.split(',')
interesting_processes += go_processes
[ps, dbg, jstack] = get_hang_analyzers()
if( ps == None or (dbg == None and jstack == None)):
sys.stderr.write("hang_analyzer.py: Unsupported platform: %s\n" % (sys.platform))
exit(1)
# Make sure the script does not hang
timer = threading.Timer(120, timeout_protector)
timer.start()
processes_orig = ps.dump_processes()
# Find all running interesting processes by doing a substring match.
processes = [a for a in processes_orig
if any([a[1].find(ip) >= 0 for ip in interesting_processes]) and a[0] != os.getpid()]
sys.stdout.write("Found %d interesting processes\n" % len(processes))
max_dump_size_bytes = int(options.max_core_dumps_size) * 1024 * 1024
if( len(processes) == 0):
for process in processes_orig:
sys.stdout.write("Ignoring process %d of %s\n" % (process[0], process[1]))
else:
# Dump all other processes including go programs, except python & java.
for process in [a for a in processes if not re.match("^(java|python)", a[1])]:
sys.stdout.write("Dumping process %d of %s\n" % (process[0], process[1]))
dbg.dump_info(process[0], process[1], sys.stdout, check_dump_quota(max_dump_size_bytes, dbg.get_dump_ext()))
# Dump java processes using jstack.
for process in [a for a in processes if a[1].startswith("java")]:
sys.stdout.write("Dumping process %d of %s\n" % (process[0], process[1]))
jstack.dump_info(process[0], process[1], sys.stdout)
# Signal go processes to ensure they print out stack traces, and die on POSIX OSes.
# On Windows, this will simply kill the process since python emulates SIGABRT as
# TerminateProcess.
# Note: The stacktrace output may be captured elsewhere (i.e. resmoke).
for process in [a for a in processes if a[1] in go_processes]:
sys.stdout.write("Sending signal SIGABRT to go process %d of %s\n" % (process[0], process[1]))
signal_process(process[0], signal.SIGABRT)
# Dump python processes after signalling them.
for process in [a for a in processes if a[1].startswith("python")]:
sys.stdout.write("Sending signal SIGUSR1 to python process %d of %s\n" % (process[0], process[1]))
signal_process(process[0], signal.SIGUSR1)
dbg.dump_info(process[0], process[1], sys.stdout, check_dump_quota(max_dump_size_bytes, dbg.get_dump_ext()))
# Suspend the timer so we can exit cleanly
timer.cancel()
sys.stdout.write("Done analyzing processes for hangs\n")
if __name__ == "__main__":
main()
| 33.99469
| 157
| 0.614568
|
c0c669d109a5dcdeff2a354b0027abc710898a0a
| 2,716
|
py
|
Python
|
hackerearth/Algorithms/Where is Checkerboard/test.py
|
HBinhCT/Q-project
|
a9876c09b0bab096ef0c772edfa05427ae091e03
|
[
"MIT"
] | 4
|
2020-07-24T01:59:50.000Z
|
2021-07-24T15:14:08.000Z
|
hackerearth/Algorithms/Where is Checkerboard/test.py
|
HBinhCT/Q-project
|
a9876c09b0bab096ef0c772edfa05427ae091e03
|
[
"MIT"
] | null | null | null |
hackerearth/Algorithms/Where is Checkerboard/test.py
|
HBinhCT/Q-project
|
a9876c09b0bab096ef0c772edfa05427ae091e03
|
[
"MIT"
] | null | null | null |
import io
import unittest
from contextlib import redirect_stdout
from unittest.mock import patch
class TestQ(unittest.TestCase):
@patch('builtins.input', side_effect=[
'42 42',
'__________________________________________',
'__________________________________________',
'__________________________________________',
'__________________________________________',
'__________________________________________',
'______#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_____',
'_____#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#______',
'______#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_____',
'_____#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#______',
'______#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_____',
'_____#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#______',
'______#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_____',
'_____#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#______',
'______#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_____',
'_____#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#______',
'______#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_____',
'_____#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#______',
'______#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_____',
'_____#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#______',
'______#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_____',
'_____#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#______',
'______#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_____',
'_____#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#______',
'______#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_____',
'_____#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#______',
'______#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_____',
'_____#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#______',
'______#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_____',
'_____#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#______',
'______#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_____',
'_____#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#______',
'______#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_____',
'_____#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#______',
'______#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_____',
'_____#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#______',
'______#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_____',
'_____#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#______',
'__________________________________________',
'__________________________________________',
'__________________________________________',
'__________________________________________',
'__________________________________________',
])
def test_case_0(self, input_mock=None):
text_trap = io.StringIO()
with redirect_stdout(text_trap):
import solution
self.assertEqual(text_trap.getvalue(), '1\n')
if __name__ == '__main__':
unittest.main()
| 43.806452
| 53
| 0.571429
|
db54390e6f992ce8ac6c02b0c5d6e04d50097a89
| 1,375
|
py
|
Python
|
chatterbot/ext/telegram_chatterbot/chatterbot.py
|
19-1-skku-oss/2019-1-OSS-L3
|
46234df5431661a4c664f420e2f99ceccc4b39f7
|
[
"BSD-3-Clause"
] | 2
|
2019-04-16T10:15:39.000Z
|
2019-04-16T10:18:41.000Z
|
chatterbot/ext/telegram_chatterbot/chatterbot.py
|
19-1-skku-oss/2019-1-OSS-L3
|
46234df5431661a4c664f420e2f99ceccc4b39f7
|
[
"BSD-3-Clause"
] | 22
|
2019-05-28T07:13:25.000Z
|
2019-06-11T07:16:51.000Z
|
chatterbot/ext/telegram_chatterbot/chatterbot.py
|
19-1-skku-oss/2019-1-OSS-L3
|
46234df5431661a4c664f420e2f99ceccc4b39f7
|
[
"BSD-3-Clause"
] | 12
|
2019-05-29T11:20:15.000Z
|
2022-02-08T06:30:26.000Z
|
from chatterbot import ChatBot as ChatterBot
from chatterbot.conversation import Statement
class ChatBot:
def __init__(self, name='', chatbot=None):
self.name = name
self.chatbot = self.default_bot(name) if chatbot is None else chatbot
def get_name(self):
return self.name
def learn(self, ask_text, response_text):
input_statement = Statement(ask_text)
correct_response = Statement(response_text)
        self.chatbot.learn_response(correct_response, input_statement)
def response(self, input_text):
return self.chatbot.get_response(input_text).text
def default_bot(self, name):
bot = ChatterBot(
name,
storage_adapter='chatterbot.storage.SQLStorageAdapter',
logic_adapters=[
{
'import_path': 'chatterbot.logic.BestMatch'
},
{
'import_path': 'chatterbot.logic.SpecificResponseAdapter',
},
{
'import_path': 'chatterbot.logic.MathematicalEvaluation',
},
{
'import_path': 'chatterbot.logic.TimeLogicAdapter',
},
{
'import_path': 'chatterbot.logic.UnitConversion',
}
]
)
return bot
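# Minimal usage sketch (assumes chatterbot's default SQLite storage works in
# this environment; the strings are placeholders):
#   bot = ChatBot(name='demo')
#   bot.learn('hello', 'hi there')
#   print(bot.response('hello'))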
| 31.25
| 78
| 0.554909
|
5284c5b2a8bbec081d164a97cca212e28cf62db1
| 7,783
|
py
|
Python
|
tests/test_file.py
|
ActivisionGameScience/assertpy
|
c0989de171bcf3e21dbad9415ff9d3b8f5fe78fc
|
[
"BSD-3-Clause"
] | 246
|
2015-01-14T01:40:03.000Z
|
2021-08-03T02:50:50.000Z
|
tests/test_file.py
|
ActivisionGameScience/assertpy
|
c0989de171bcf3e21dbad9415ff9d3b8f5fe78fc
|
[
"BSD-3-Clause"
] | 98
|
2015-01-01T14:28:55.000Z
|
2019-11-14T21:36:18.000Z
|
tests/test_file.py
|
ActivisionGameScience/assertpy
|
c0989de171bcf3e21dbad9415ff9d3b8f5fe78fc
|
[
"BSD-3-Clause"
] | 54
|
2015-01-14T01:42:10.000Z
|
2019-11-18T10:04:42.000Z
|
# Copyright (c) 2015-2019, Activision Publishing, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import os
import tempfile
from assertpy import assert_that, contents_of, fail
class TestFile(object):
def setup(self):
self.tmp = tempfile.NamedTemporaryFile()
self.tmp.write('foobar'.encode('utf-8'))
self.tmp.seek(0)
def teardown(self):
self.tmp.close()
def test_contents_of_path(self):
contents = contents_of(self.tmp.name)
assert_that(contents).is_equal_to('foobar').starts_with('foo').ends_with('bar')
def test_contents_of_path_ascii(self):
contents = contents_of(self.tmp.name, 'ascii')
assert_that(contents).is_equal_to('foobar').starts_with('foo').ends_with('bar')
def test_contents_of_return_type(self):
if sys.version_info[0] == 3:
contents = contents_of(self.tmp.name)
assert_that(contents).is_type_of(str)
else:
contents = contents_of(self.tmp.name)
assert_that(contents).is_type_of(unicode)
def test_contents_of_return_type_ascii(self):
if sys.version_info[0] == 3:
contents = contents_of(self.tmp.name, 'ascii')
assert_that(contents).is_type_of(str)
else:
contents = contents_of(self.tmp.name, 'ascii')
assert_that(contents).is_type_of(str)
def test_contents_of_file(self):
contents = contents_of(self.tmp.file)
assert_that(contents).is_equal_to('foobar').starts_with('foo').ends_with('bar')
def test_contents_of_file_ascii(self):
contents = contents_of(self.tmp.file, 'ascii')
assert_that(contents).is_equal_to('foobar').starts_with('foo').ends_with('bar')
def test_contains_of_bad_type_failure(self):
try:
contents_of(123)
fail('should have raised error')
except ValueError as ex:
assert_that(str(ex)).is_equal_to('val must be file or path, but was type <int>')
def test_contains_of_missing_file_failure(self):
try:
contents_of('missing.txt')
fail('should have raised error')
except IOError as ex:
assert_that(str(ex)).contains_ignoring_case('no such file')
def test_exists(self):
assert_that(self.tmp.name).exists()
def test_exists_failure(self):
try:
assert_that('missing.txt').exists()
fail('should have raised error')
except AssertionError as ex:
assert_that(str(ex)).is_equal_to('Expected <missing.txt> to exist, but was not found.')
def test_exists_bad_val_failure(self):
try:
assert_that(123).exists()
fail('should have raised error')
except TypeError as ex:
assert_that(str(ex)).is_equal_to('val is not a path')
def test_does_not_exist(self):
assert_that('missing.txt').does_not_exist()
def test_does_not_exist_failure(self):
try:
assert_that(self.tmp.name).does_not_exist()
fail('should have raised error')
except AssertionError as ex:
assert_that(str(ex)).is_equal_to('Expected <{}> to not exist, but was found.'.format(self.tmp.name))
def test_does_not_exist_bad_val_failure(self):
try:
assert_that(123).does_not_exist()
fail('should have raised error')
except TypeError as ex:
assert_that(str(ex)).is_equal_to('val is not a path')
def test_is_file(self):
assert_that(self.tmp.name).is_file()
def test_is_file_exists_failure(self):
try:
assert_that('missing.txt').is_file()
fail('should have raised error')
except AssertionError as ex:
assert_that(str(ex)).is_equal_to('Expected <missing.txt> to exist, but was not found.')
def test_is_file_directory_failure(self):
try:
dirname = os.path.dirname(self.tmp.name)
assert_that(dirname).is_file()
fail('should have raised error')
except AssertionError as ex:
assert_that(str(ex)).matches('Expected <.*> to be a file, but was not.')
def test_is_directory(self):
dirname = os.path.dirname(self.tmp.name)
assert_that(dirname).is_directory()
def test_is_directory_exists_failure(self):
try:
assert_that('missing_dir').is_directory()
fail('should have raised error')
except AssertionError as ex:
assert_that(str(ex)).is_equal_to('Expected <missing_dir> to exist, but was not found.')
def test_is_directory_file_failure(self):
try:
assert_that(self.tmp.name).is_directory()
fail('should have raised error')
except AssertionError as ex:
assert_that(str(ex)).matches('Expected <.*> to be a directory, but was not.')
def test_is_named(self):
basename = os.path.basename(self.tmp.name)
assert_that(self.tmp.name).is_named(basename)
def test_is_named_failure(self):
try:
assert_that(self.tmp.name).is_named('foo.txt')
fail('should have raised error')
except AssertionError as ex:
assert_that(str(ex)).matches('Expected filename <.*> to be equal to <foo.txt>, but was not.')
def test_is_named_bad_arg_type_failure(self):
try:
assert_that(self.tmp.name).is_named(123)
fail('should have raised error')
except TypeError as ex:
assert_that(str(ex)).matches('given filename arg must be a path')
def test_is_child_of(self):
dirname = os.path.dirname(self.tmp.name)
assert_that(self.tmp.name).is_child_of(dirname)
def test_is_child_of_failure(self):
try:
assert_that(self.tmp.name).is_child_of('foo_dir')
fail('should have raised error')
except AssertionError as ex:
assert_that(str(ex)).matches('Expected file <.*> to be a child of <.*/foo_dir>, but was not.')
def test_is_child_of_bad_arg_type_failure(self):
try:
assert_that(self.tmp.name).is_child_of(123)
fail('should have raised error')
except TypeError as ex:
assert_that(str(ex)).matches('given parent directory arg must be a path')
| 39.912821
| 112
| 0.667223
|
a64355f8b84e97edd01e6595ac570ac17f27cdf5
| 28,194
|
py
|
Python
|
Utils/plot.py
|
kostyanoob/Power
|
df71c086e81966653674f74dc4eeb562e8eeadc0
|
[
"MIT"
] | 8
|
2021-02-15T07:34:31.000Z
|
2022-02-27T17:30:46.000Z
|
Utils/plot.py
|
kostyanoob/Power
|
df71c086e81966653674f74dc4eeb562e8eeadc0
|
[
"MIT"
] | 4
|
2021-05-27T10:32:38.000Z
|
2022-02-10T03:46:07.000Z
|
Utils/plot.py
|
kostyanoob/Power
|
df71c086e81966653674f74dc4eeb562e8eeadc0
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib
import matplotlib.patches as mpatches
from matplotlib.text import TextPath
matplotlib.use('Agg')
import matplotlib.pylab as pylab
import matplotlib.font_manager
import sys
import os
from matplotlib import colors as mcolors
from scipy.signal import savgol_filter
import logging
mpl_logger = logging.getLogger('matplotlib')
mpl_logger.setLevel(logging.WARNING)
def savefig(outputDir, filename, dpi=300, save_copy_as_eps=True, eps_dpi=1000, verbose=True):
"""
Saves the current figure to an image file.
:param outputDir: <str> a path to the directory where the new file will be created
:param filename: <str> the name of the filename to be created. Can be specified with an extension
such as "image1.jpg" and then the format will be inferred. Otherwise, when
no extension is specified - c.f. "image1", the default PNG format is used.
:param dpi: <int> (default 300) the dpi resolution of the image to be saved
:param save_copy_as_eps: <bool> (default True) True iff an eps (vector-graphics) copy should be created
    * Removes all dots from the filename except the final extension dot - crucial for LaTeX *
:param eps_dpi: <int> (default 1000) the dpi resolution of the eps image to be saved
:param verbose: <bool> True iff messages regarding file saving success should be displayed.
:return: nothing
"""
# pylab.rcParams.update({'figure.autolayout': True})
assert(len(filename)>0)
filename_list = filename.split(".")
if len(filename_list) >= 1 and not(filename_list[-1] in pylab.gcf().canvas.get_supported_filetypes().keys()):
filename = filename + ".png"
pylab.savefig(os.path.join(outputDir, filename), dpi=dpi, bbox_inches = "tight")
if verbose:
print("Saved plot to:" + os.path.join(outputDir, filename))
if save_copy_as_eps:
filename_list = filename.split(".")
if len(filename_list) > 1 and filename_list[-1] in pylab.gcf().canvas.get_supported_filetypes().keys():
filename_list[-1] = ".eps"
elif len(filename_list) == 1:
filename_list.append(".eps")
else:
raise Exception("Could not store the eps image: Illegal filename")
filename_eps = "".join(filename_list)
pylab.savefig(os.path.join(outputDir, filename_eps), format='eps', dpi=eps_dpi, bbox_inches = "tight")
if verbose:
print("Saved plot to:" + os.path.join(outputDir, filename_eps))
def plotOneLine(yAxisLst, xAxisLst, xtitleStr, ytitleStr, titleStr, outputDir, filename, stds=None,
customPointAnnotation=None, verbose=False):
'''
Creates a single line plot and stores it to a file.
: param stds - if is none, then the plot is a simple line, otherwise it
is interpreted as a list of standard deviations and added to the plot
: param customPointAnnotation - set to a particular x value to mark the plot-line
with an arrow at this x value.
                                   can be a list - if multiple annotated points are desired
'''
logging.basicConfig(level=logging.DEBUG, format='[%(asctime)s] - %(message)s')
ld = logging.debug
fig = pylab.figure()
pylab.xlabel(xtitleStr)
pylab.ylabel(ytitleStr)
pylab.suptitle(titleStr)
if stds is None:
pylab.plot(xAxisLst, yAxisLst)
else:
pylab.errorbar(xAxisLst, yAxisLst, stds, ecolor="#AAAAAA")
axes = pylab.gca()
axes.set_xlim([min(xAxisLst), max(xAxisLst)])
if not(customPointAnnotation is None):
if type(customPointAnnotation)!=list:
customPointAnnotation = [customPointAnnotation]
for pt in customPointAnnotation:
            if (pt >= min(xAxisLst) and pt <= max(xAxisLst)):
annotation_mark_x = pt
annotation_mark_y = min(yAxisLst)
pylab.plot([annotation_mark_x], [annotation_mark_y], '^', markersize=5)
pylab.annotate("", xy=(annotation_mark_x, annotation_mark_y),
arrowprops=dict(facecolor='orange', shrink=0.05))
else:
ld("Warning: cannot annotate plot at point {}, since it's out of the range of X-axis".format(pt))
# axes.set_ylim([min(lossVector),max(lossVector)+0.1])
if min(yAxisLst) != 0 and max(yAxisLst) / min(yAxisLst) > 1000:
axes.set_yscale('log')
try:
# Save to file both to the required format and to png
savefig(outputDir, filename, save_copy_as_eps=True, verbose=verbose)
except:
pass
pylab.close(fig)
def plotTwoLines(trainAcc, validAcc, xAxisLst, xtitleStr, ytitleStr, titleStr, outputDir, filename, isAnnotatedMax=False, isAnnotatedMin=False,
trainStr='Train', validStr='Validation', customPointAnnotation=None):
'''
: param customPointAnnotation - set to a particular x value to mark both of the plot-lines
with an arrow at this x value.
                                       can be a list - if multiple annotated points are desired
Legend "loc" arguments:
'best' : 0, (only implemented for axes legends)
'upper right' : 1,
'upper left' : 2, <--- we chose it
'lower left' : 3,
'lower right' : 4,
'right' : 5,
'center left' : 6,
'center right' : 7,
'lower center' : 8,
'upper center' : 9,
'center' : 10,
'''
logging.basicConfig(level=logging.DEBUG, format='[%(asctime)s] - %(message)s')
ld = logging.debug
fig = pylab.figure()
pylab.xlabel(xtitleStr)
pylab.ylabel(ytitleStr)
pylab.suptitle(titleStr)
plot_train, = pylab.plot(xAxisLst, trainAcc, label=trainStr, linestyle="--")
plot_valid, = pylab.plot(xAxisLst, validAcc, label=validStr, linestyle="-")
pylab.legend([plot_train, plot_valid], [trainStr, validStr], loc=0)
if (isAnnotatedMax or isAnnotatedMin):
annotationIdx = np.argmax(validAcc) if isAnnotatedMax else np.argmin(validAcc)
annotationVal = validAcc[annotationIdx]
minAcc_total = min(min(validAcc), min(trainAcc))
maxAcc_total = max(max(validAcc), max(trainAcc))
stry = validStr + " %.1f%%" % annotationVal + " at epoch " + str((annotationIdx + 1))
pylab.plot([annotationIdx + 1], [annotationVal], 'o')
pylab.annotate(stry, xy=(annotationIdx + 1, annotationVal),
xytext=(annotationIdx + 1 - len(xAxisLst) * 0.25, annotationVal - (maxAcc_total - minAcc_total) / 10),
arrowprops=dict(facecolor='orange', shrink=0.05))
if not(customPointAnnotation is None):
if type(customPointAnnotation)!=list:
customPointAnnotation = [customPointAnnotation]
for pt in customPointAnnotation:
if (pt in xAxisLst):
pylab.plot([pt, pt], [trainAcc[pt], validAcc[pt]], 'o', markersize=3)
pylab.annotate("", xy=(pt, trainAcc[pt]),
arrowprops=dict(facecolor='orange', shrink=0.05))
pylab.annotate("", xy=(pt, validAcc[pt]),
arrowprops=dict(facecolor='orange', shrink=0.05))
else:
ld("Warning: cannot annotate plot at point {}, since it's out of the range of X-axis".format(pt))
# Save to file both to the required format and to png
savefig(outputDir, filename, save_copy_as_eps=True)
pylab.close(fig)
def plotBars(matrix_of_bars, list_of_labels, xtitleStr, ytitleStr, titleStr, outputDir, filename):
'''
Displays the average of each row and its std - as a bar.
    :param matrix_of_bars: a 2D numpy array, each row holding one bar's measurements
:param list_of_labels: a list of strings,
corresponding to the number of rows in the matrix_of_bars.
:return:
'''
logging.basicConfig(level=logging.DEBUG, format='[%(asctime)s] - %(message)s')
ld = logging.debug
badInput = True
try:
badInput = len(matrix_of_bars.shape) != 2
except Exception:
badInput = True
if badInput:
raise Exception("Error at plotBars: matrix_of_bars argument must be a 2D numpy array")
n_groups = matrix_of_bars.shape[0]
badInput = True
try:
badInput = len(list_of_labels) != n_groups
except Exception:
badInput = True
if badInput:
raise Exception("Error at plotBars: list_of_labels argument must contain enough labels for all the rows in the a matrix_of_bars")
means = np.mean(matrix_of_bars, axis=1)
stds = np.std(matrix_of_bars, axis=1)
index = np.arange(n_groups)
bar_width = 0.35
opacity = 0.4
error_config = {'ecolor': '0.3'}
fig = pylab.figure()
rects1 = pylab.bar(index, means, bar_width,
alpha=opacity,
color='b',
yerr=stds,
error_kw=error_config)
#label='Accuracy')
pylab.xlabel(xtitleStr)
pylab.ylabel(ytitleStr)
pylab.title(titleStr)
pylab.xticks(index + bar_width / 2, list_of_labels, rotation = 'vertical')
pylab.legend()
pylab.tight_layout()
# Save to file both to the required format and to png
savefig(outputDir, filename, save_copy_as_eps=True)
pylab.close(fig)
def plotManyLines(common_x_lst, y_lst_of_lists, legends_lst, xtitleStr, ytitleStr, titleStr,
outputDir, filename, extras_dict=None, customPointAnnotation=None,
std_lst_of_lists=None):
'''
for python 3.5
:param common_x_lst:
:param y_lst_of_lists:
:param legends_lst:
:param xtitleStr:
:param ytitleStr:
:param titleStr:
:param outputDir:
:param filename:
:return:
'''
logging.basicConfig(level=logging.DEBUG, format='[%(asctime)s] - %(message)s')
ld = logging.debug
    if extras_dict is not None and 'font_size' in extras_dict:
        pylab.rcParams.update({'font.size': extras_dict['font_size']})
fig = pylab.figure()
pylab.xlabel(xtitleStr)
pylab.ylabel(ytitleStr)
pylab.suptitle(titleStr)
colors = ['r','g','b','c','m','y','k',"#96f97b",'#ae7181','#0504aa', '#c1f80a', '#b9a281', '#ff474c'][:len(legends_lst)]
if extras_dict is None:
linewidth_list = [2]*len(y_lst_of_lists)
legend_location = 0
pylab.yscale('linear')
marker_list = ["."] * len(y_lst_of_lists)
else:
linewidth_list = extras_dict["linewidth_list"]
legend_location = extras_dict["legend_location"]
if 'y_axis_scale' in extras_dict:
pylab.yscale(extras_dict["y_axis_scale"])
        if 'marker_list' not in extras_dict:
            marker_list = ['s', 'x', '*', '^', 'p', 'D', '>', '<', '+', 'o', '.'][:len(legends_lst)]
else:
marker_list = extras_dict['marker_list']
if std_lst_of_lists is None or 'kill_errorbar' in extras_dict and extras_dict['kill_errorbar']:
axes = [pylab.plot(common_x_lst, y_lst, label=legenda, linewidth=linewidth, color=cllr, marker=marker) for cllr,y_lst,marker,linewidth,legenda in zip(colors,y_lst_of_lists,marker_list,linewidth_list,legends_lst)]
else:
axes = [pylab.errorbar(common_x_lst, y_lst, std_, label=legenda, linewidth=linewidth, color=cllr, marker=marker, elinewidth=errorLineSize, capsize=errorLineSize)
for cllr, y_lst, std_,marker, linewidth, legenda, errorLineSize in zip(colors, y_lst_of_lists, std_lst_of_lists, marker_list,linewidth_list, legends_lst, list(range(2,len(legends_lst)+2)))]
#pylab.legend(handles = [mpatches.Patch(color =cllr, label=legenda) for cllr, legenda in zip(colors,legends_lst)])
pylab.legend(loc=legend_location, markerscale=2)
if not(customPointAnnotation is None):
if type(customPointAnnotation)!=list:
customPointAnnotation = [customPointAnnotation]
for pt in customPointAnnotation:
            if (pt >= min(common_x_lst) and pt <= max(common_x_lst)):
annotation_mark_x = pt
annotation_mark_y = min([min(t) for t in y_lst_of_lists])
pylab.plot([annotation_mark_x], [annotation_mark_y], '^', markersize=5)
pylab.annotate("", xy=(annotation_mark_x, annotation_mark_y),
arrowprops=dict(facecolor='#AAAAAA', shrink=0.05))
else:
ld("Warning: cannot annotate plot at point {}, since it's out of the range of X-axis".format(pt))
# Save to file both to the required format and to png
savefig(outputDir, filename, save_copy_as_eps=True)
pylab.close(fig)
def plotManyBars(common_x_lst, y_lst_of_lists, legends_lst, xtitleStr, ytitleStr, titleStr, outputDir, filename, customPointAnnotation=None, list_of_y_stds=None):
'''
    Creates a grouped bar plot: one family of bars per entry in legends_lst.
'''
logging.basicConfig(level=logging.DEBUG, format='[%(asctime)s] - %(message)s')
ld = logging.debug
fig = pylab.figure()
pylab.xlabel(xtitleStr)
pylab.ylabel(ytitleStr)
pylab.suptitle(titleStr)
#colors = list(mcolors.CSS4_COLORS.values())[14:14 + len(legends_lst)]
colors = ['r','g','b','c','m','y','k'][:len(legends_lst)]
if type(common_x_lst) == list:
common_x_lst = np.array(common_x_lst)
elif type(common_x_lst) != np.ndarray:
raise Exception("Bad common_x_lst. Must be either list of numpy 1D array.")
nBarsPerFamily = len(common_x_lst)
nFamilies = len(y_lst_of_lists)
nTotalBars = nFamilies * nBarsPerFamily
span = common_x_lst.max() - common_x_lst.min()
bar_width_including_delimiters = span / (nTotalBars)
cluster_width = nFamilies * bar_width_including_delimiters
delimiter_width = 0.1 * cluster_width
bar_width = bar_width_including_delimiters-delimiter_width/nFamilies
families_X_offset = cluster_width*0.9/2
ld("BPF:{} nFamilies:{} nTotalBars:{} span:{} bar_width:{}".format(nBarsPerFamily,nFamilies,nTotalBars,span,bar_width))
opacity = 0.7
if not list_of_y_stds is None:
error_config = {'ecolor': '0.3'}
axes = [pylab.bar(common_x_lst - families_X_offset + i*(delimiter_width*int(i==0)+bar_width), y_lst, bar_width,
alpha=opacity, color=cllr,
yerr=std_t, error_kw=error_config,
label=lgnd) for cllr, y_lst, lgnd, std_t,i in zip(colors, y_lst_of_lists, legends_lst, list_of_y_stds,range(len(y_lst_of_lists)))]
else:
axes = [pylab.bar(common_x_lst - families_X_offset +cluster_width/2 + i*(delimiter_width*int(i==0)+bar_width), y_lst, bar_width,
alpha=opacity, color=cllr,
label=lgnd) for cllr, y_lst, lgnd,i in zip(colors, y_lst_of_lists, legends_lst,range(len(y_lst_of_lists)))]
pylab.legend(handles=[mpatches.Patch(color=cllr, label=legenda, alpha=opacity) for cllr, legenda in zip(colors, legends_lst)])
if not(customPointAnnotation is None):
if type(customPointAnnotation)!=list:
customPointAnnotation = [customPointAnnotation]
for pt in customPointAnnotation:
            if (pt >= min(common_x_lst) and pt <= max(common_x_lst)):
annotation_mark_x = pt
annotation_mark_y = min([min(t) for t in y_lst_of_lists])
pylab.plot([annotation_mark_x], [annotation_mark_y], '^', markersize=5)
pylab.annotate("", xy=(annotation_mark_x, annotation_mark_y),
arrowprops=dict(facecolor='#AAAAAA', shrink=0.05))
else:
ld("Warning: cannot annotate plot at point {}, since it's out of the range of X-axis".format(pt))
# Save to file both to the required format and to png
savefig(outputDir, filename, save_copy_as_eps=True)
pylab.close(fig)
def plotListOfPlots(x_lst_of_lists, y_lst_of_lists, legends_lst, xtitleStr, ytitleStr, titleStr, outputDir, filename, lpf=None, colorLst=None, fontsize=None, showGrid=False):
'''
:param lpf: the window-length of averaging. This is used for smoothing, and implemented by the Savitzky-Golay
filter.
'''
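    # For example, lpf=11 smooths each curve with an 11-sample, degree-1
    # Savitzky-Golay window before plotting (the window length must be an odd
    # integer greater than the polynomial order).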
logging.basicConfig(level=logging.DEBUG, format='[%(asctime)s] - %(message)s')
ld = logging.debug
fig = pylab.figure()
pylab.xlabel(xtitleStr, fontsize=fontsize)
pylab.ylabel(ytitleStr, fontsize=fontsize)
if not titleStr is None and titleStr != "":
pylab.suptitle(titleStr)
#colors = list(mcolors.CSS4_COLORS.values())
if colorLst is None:
colorLst = ['red', 'orange', 'green', 'blue', 'darkblue', 'purple', 'black', 'yellow']
if lpf != None:
y_lst_of_lists_new =[savgol_filter(np.array(data), lpf, 1) for data in y_lst_of_lists]
y_lst_of_lists = y_lst_of_lists_new
if not fontsize is None:
#matplotlib.rcParams.update({'font.size': fontsize})
##matplotlib.rc('xtick', labelsize=fontsize)
#matplotlib.rc('ytick', labelsize=fontsize)
# pylab.rc('font', size=fontsize) # controls default text sizes
# pylab.rc('axes', titlesize=fontsize) # fontsize of the axes title
# pylab.rc('axes', labelsize=fontsize) # fontsize of the x and y labels
# pylab.rc('xtick', labelsize=fontsize) # fontsize of the tick labels
# pylab.rc('ytick', labelsize=fontsize) # fontsize of the tick labels
# pylab.rc('legend', fontsize=fontsize) # legend fontsize
# pylab.rc('figure', titlesize=fontsize) # fontsize of the figure title
pass
axes = [pylab.plot(x_lst, y_lst, color=cllr, linewidth=3.0) for cllr, x_lst,y_lst in zip(colorLst, x_lst_of_lists, y_lst_of_lists)]
if not legends_lst is None and len(legends_lst) == len(x_lst_of_lists):
pylab.legend(handles = [mpatches.Patch(color =cllr, label=legenda) for cllr, legenda in zip(colorLst, legends_lst)])
#pylab.legend(axes, legends_lst, loc=0) # old legend generation
if showGrid:
pylab.gca().grid(True, which='both', linestyle=':')
# Save to file both to the required format and to png
savefig(outputDir, filename, save_copy_as_eps=True)
pylab.close(fig)
def plotListOfScatters(x_lst_of_lists, y_lst_of_lists, legends_lst, xtitleStr, ytitleStr, titleStr, outputDir, filename, extras_dict={}):
"""
:param x_lst_of_lists:
:param y_lst_of_lists:
:param legends_lst:
:param xtitleStr:
:param ytitleStr:
:param titleStr:
:param outputDir:
:param filename:
:param extras_dict: can contain weird options such as
'legend_location' : 0,1,2,3,4... - <int> sets the location to be other than just "0", which is the "best" automatic
'marker_list' : <list> which must be of the size of the number of different plots in the figure. Describing a marker that will be applied for each plot.
:return:
"""
logging.basicConfig(level=logging.DEBUG, format='[%(asctime)s] - %(message)s')
ld = logging.debug
fig = pylab.figure()
pylab.xlabel(xtitleStr)
pylab.ylabel(ytitleStr)
if titleStr != "":
pylab.suptitle(titleStr)
"""
#colors = list(mcolors.CSS4_COLORS.values())[14:14 + len(legends_lst)]
markersize=10
subsampleFactor=4
colors = ['g','b','c','r','m','y','k',"#96f97b",'#ae7181','#0504aa', '#c1f80a', '#b9a281', '#ff474c'][:len(legends_lst)]
markerLst = ['s','x','+','*', '^', 'p', 'D', '>', '<']
axes = [pylab.plot(common_x_lst[::subsampleFactor], y_lst[::subsampleFactor], color=cllr, marker=mrkr, markersize=markersize) for cllr,mrkr,y_lst in zip(colors,markerLst,y_lst_of_lists)]
pylab.legend(handles = [mlines.Line2D([0], [0], color=cllr, marker=mrkr, lw=3.0, markersize=markersize+2, label=legenda) for cllr,mrkr,legenda in zip(colors,markerLst,legends_lst)])
"""
# extract extra fancy features for the plot
legend_location = 0 if 'legend_location' not in extras_dict else extras_dict['legend_location']
marker_list = ["."]*len(x_lst_of_lists) if 'marker_list' not in extras_dict else extras_dict['marker_list']
marker_scaler_list = [2] * len(x_lst_of_lists) if 'marker_scaler_list' not in extras_dict else extras_dict['marker_scaler_list']
    colorVocabulary = ['red', 'orange', 'green', 'lightgreen', 'darkblue', 'cyan', 'purple', 'pink', 'black', 'gray', 'brown', 'darkred']
    # Alternate between filled (facecolor only) and hollow (edgecolor only) markers so overlapping scatters stay distinguishable.
    colorLst = [(cllr, 'none') if i % 2 == 1 else ('none', cllr) for (i, cllr) in enumerate(colorVocabulary)]
if 'font_size' in extras_dict:
matplotlib.rcParams.update({'font.size': extras_dict['font_size']})
matplotlib.rcParams['legend.fontsize'] = extras_dict['font_size']
# create the actual plots
axes = [pylab.scatter(x_lst, y_lst, s=marker_size, facecolors=cllr[0], edgecolors=cllr[1], marker=marker) for x_lst,y_lst,cllr,marker,marker_size in zip(x_lst_of_lists, y_lst_of_lists,colorLst,marker_list,marker_scaler_list)]
pylab.legend(axes, legends_lst, loc=legend_location, markerscale=2)
# Save to file both to the required format and to png
savefig(outputDir, filename, save_copy_as_eps=True, verbose=False)
pylab.close(fig)
def plotBetweens(y_list, xAxisLst, xtitleStr, ytitleStr, titleStr, legendStrLst, outputDir, filename, verbose=True, hsv_colors=False):
# type: (list, list, str, str, str, list, str, str, bool, bool) -> None
'''
    The y_list contains multiple y-series, all consistent with the xAxisLst ticks.
The plot will consist of len(Y_list) plots, with a different color fill between two consecutive plots.
Pre-conditions:
1) The first array (or list) in y_list is assumed to be the lowest one in its height.
2) The first array (or list) in y_list will be filled downwards till the x-axis.
y
^ _________ y_list[2]
| /
| color=white / color2
| /
| ______/___________ y_list[1]
| /
|_____________________/ color1
| ______________________ y_list[0]
| color1 _______/
| / color0
|_________/
------------------------------------------->x
Legend "loc" arguments:
'best' : 0, (only implemented for axes legends)
'upper right' : 1,
'upper left' : 2, <--- we chose it
'lower left' : 3,
'lower right' : 4,
'right' : 5,
'center left' : 6,
'center right' : 7,
'lower center' : 8,
'upper center' : 9,
'center' : 10,
'''
if verbose:
logging.basicConfig(level=logging.DEBUG, format='[%(asctime)s] - %(message)s')
ld = logging.debug
fig = pylab.figure(figsize=(8,2.25))
pylab.xlabel(xtitleStr)
pylab.ylabel(ytitleStr)
pylab.suptitle(titleStr)
# Add the artificial "zero-plot" to be the reference plot
zerolst = len(xAxisLst) * [0]
y_arr_with_zerolst = [zerolst] + y_list
axis_lst = [pylab.plot(xAxisLst, zerolst, "k-")]
legend_handles = []
    # Color list for assigning different colors to different fills
if hsv_colors:
colors = dict(mcolors.BASE_COLORS, **mcolors.CSS4_COLORS)
# Sort colors by hue, saturation, value and name.
by_hsv = sorted((tuple(mcolors.rgb_to_hsv(mcolors.to_rgba(color)[:3])), name)
for name, color in colors.items())
color_lst = [name for hsv, name in by_hsv]
else:
        # Deduplicated relative to the original list; note 'tab:black' is not a valid Matplotlib color name.
        color_lst = ['tab:blue', 'tab:orange', 'tab:red', 'tab:brown', 'tab:purple', 'tab:pink',
                     'tab:olive', 'tab:green', 'tab:gray', 'tab:cyan']
num_plots = len(y_arr_with_zerolst)
num_colors = len(color_lst)
    # Add the plot-lines and fill the space between each two consecutive ones
for i in range(1, num_plots):
        fill_color = color_lst[i % num_colors]
axis_lst.append(pylab.plot(xAxisLst, y_arr_with_zerolst[i], "k-", linewidth=0.2))
pylab.fill_between(xAxisLst, y_arr_with_zerolst[i], y_arr_with_zerolst[i - 1], facecolor=fill_color,
interpolate=True)
legend_handles.append(mpatches.Patch(color=fill_color, label=legendStrLst[i - 1]))
# Legends
pylab.legend(handles=legend_handles[::-1])#, loc=2)
# Save to file both to the required format and to png
savefig(outputDir, filename, save_copy_as_eps=True, verbose=verbose)
pylab.close(fig)
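# Usage sketch for plotBetweens (output directory and file name are
# placeholders): the series must be ordered bottom-to-top on shared x ticks;
# the first one is filled down to the x-axis, as the diagram above shows.
#
#   x = list(range(10))
#   base = [i for i in x]       # lowest band
#   peak = [2 * i for i in x]   # band stacked above it
#   plotBetweens([base, peak], x, "time", "load", "Stacked load",
#                ["base", "peak"], "/tmp", "betweens_demo")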
# Subsample the bar data: thin the x ticks by `factor` and sum the y-values
# inside each window, so the total mass is preserved.
def subsample(x, y, factor):
    new_x = []
    new_y = []
    for i in range(0, len(x), factor):
        new_x.append(x[i])
        # Summing a slice also guards the last, possibly partial window, so a
        # length that is not a multiple of factor cannot raise an IndexError.
        new_y.append(sum(y[i:i + factor]))
    return new_x, new_y
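# Worked example: the x ticks are thinned by `factor` while the y-values in
# each window are summed, so the total is preserved.
#
#   subsample([0, 1, 2, 3], [10, 20, 30, 40], 2)  ->  ([0, 2], [30, 70])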
def plotListOfPlots_and_Bars(x_lst_of_lists, y_lst_of_lists, legends_lst, xtitleStr, ytitleStr, titleStr, outputDir, filename, n_lines, lpf=None, colorLst=None, fontsize=None, showGrid=False):
'''
    :param n_lines: how many of the y-series are drawn as lines; the remaining series are drawn as bars.
    :param lpf: the window-length of averaging. This is used for smoothing, and implemented by the Savitzky-Golay
    filter.
'''
logging.basicConfig(level=logging.DEBUG, format='[%(asctime)s] - %(message)s')
ld = logging.debug
fig = pylab.figure()
pylab.xlabel(xtitleStr, fontsize=fontsize)
pylab.ylabel(ytitleStr, fontsize=fontsize)
    if titleStr is not None and titleStr != "":
pylab.suptitle(titleStr)
#colors = list(mcolors.CSS4_COLORS.values())
if colorLst is None:
colorLst = ['red', 'orange', 'green', 'blue', 'darkblue', 'purple', 'black']
    if lpf is not None:
        y_lst_of_lists = [savgol_filter(np.array(data), lpf, 1) for data in y_lst_of_lists]
    # Global font-size overrides (matplotlib.rcParams / pylab.rc) were tried
    # here and abandoned; fontsize is applied per-label via xlabel/ylabel above.
axes = []
n_bars = len(y_lst_of_lists) - n_lines
    subsample_bar_factor = 10  # how much lower the bar plot's resolution is than the reliable accuracy's
    n_bins = len(y_lst_of_lists[0]) // subsample_bar_factor
    ld("Using " + str(n_bins) + " bins and " + str(n_bars) + " bars")
bar_width = (max(x_lst_of_lists[0])-min(x_lst_of_lists[0])) / float(n_bars*n_bins)
bin_offset = bar_width*(n_bars/2)
    for cllr, x_lst, y_lst, i in zip(colorLst, x_lst_of_lists, y_lst_of_lists, range(n_bars + n_lines)):
        if i < n_lines:
            axes.append(pylab.plot(x_lst, y_lst, color=cllr, linewidth=3.0))
        else:
            # Down-sample the bar series, then offset each bar group so bars
            # from different series sit side by side inside a bin.
            new_x, new_y = subsample(x_lst, y_lst, subsample_bar_factor)
            axes.append(pylab.bar(np.array(new_x) - bin_offset + (i - n_lines) * bar_width,
                                  new_y, color=cllr, align='center', width=bar_width))
    if legends_lst is not None and len(legends_lst) == len(x_lst_of_lists):
pylab.legend(handles = [mpatches.Patch(color =cllr, label=legenda) for cllr, legenda in zip(colorLst, legends_lst)])
#pylab.legend(axes, legends_lst, loc=0) # old legend generation
if showGrid:
pylab.gca().grid(True, which='both', linestyle=':')
# Save to file both to the required format and to png
savefig(outputDir, filename, save_copy_as_eps=True)
pylab.close(fig)
| 45.843902
| 229
| 0.648791
|
f45392bcbe7d284a44770cd9a3b556cf0ba1478e
| 2,432
|
py
|
Python
|
src/pandemy/_datetime.py
|
antonlydell/Pandemy
|
e64e836ca35dff926ee6abf93cc2cd9aa2feb798
|
[
"MIT"
] | null | null | null |
src/pandemy/_datetime.py
|
antonlydell/Pandemy
|
e64e836ca35dff926ee6abf93cc2cd9aa2feb798
|
[
"MIT"
] | 1
|
2022-02-12T21:21:00.000Z
|
2022-02-12T21:21:00.000Z
|
src/pandemy/_datetime.py
|
antonlydell/Pandemy
|
e64e836ca35dff926ee6abf93cc2cd9aa2feb798
|
[
"MIT"
] | null | null | null |
"""Internal module that contains functions to handle datetime related operations."""
# ===============================================================
# Imports
# ===============================================================
# Standard Library
import logging
from typing import Optional
# Third Party
import pandas as pd
# Local
import pandemy
# ===============================================================
# Set Logger
# ===============================================================
# Initiate the module logger
# Handlers and formatters will be inherited from the root logger
logger = logging.getLogger(__name__)
# ===============================================================
# Functions
# ===============================================================
def datetime_columns_to_timezone(df: pd.DataFrame, localize_tz: str = 'UTC',
target_tz: Optional[str] = 'CET') -> None:
r"""Set a timezone to naive datetime columns.
Localize naive datetime columns of DataFrame `df` to the desired timezone.
Optionally convert the localized columns to desired target timezone.
Modifies DataFrame `df` inplace.
Parameters
----------
df : pd.DataFrame
        The DataFrame whose naive datetime columns should be localized.
localize_tz : str, default 'UTC'
Name of the timezone which to localize naive datetime columns into.
target_tz : str or None, default 'CET'
Name of the target timezone to convert datetime columns into after
        they have been localized. If `target_tz` is None or equal to
        `localize_tz`, no timezone conversion will be performed.
Returns
-------
None
Raises
------
pandemy.InvalidInputError
If an unknown timezone is supplied.
"""
# The datetime columns of the DataFrame
cols = df.select_dtypes(include=['datetime']).columns
for col in cols:
try:
df.loc[:, col] = df[col].dt.tz_localize(localize_tz)
            if target_tz is not None and target_tz != localize_tz:
df.loc[:, col] = df[col].dt.tz_convert(target_tz)
except Exception as e:
raise pandemy.InvalidInputError(f'{type(e).__name__}: {e.args[0]}. '
f'localize_tz={localize_tz}, target_tz={target_tz}',
data=(e.args[0], localize_tz, target_tz)) from None
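# Usage sketch (the column name is illustrative): localize a naive datetime
# column to UTC, then convert it to CET, in place.
#
#   df = pd.DataFrame({'created_at': pd.to_datetime(['2021-06-01 12:00'])})
#   datetime_columns_to_timezone(df, localize_tz='UTC', target_tz='CET')
#   df['created_at'].dt.tz  # now the CET timezone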
| 32.426667
| 96
| 0.536595
|
a54155ee4e32b18110e93ceb3a5064cd71e1fec0
| 2,489
|
py
|
Python
|
qa/rpc-tests/mempool_spendcoinbase.py
|
Patrick-W-McMahon/MagMellDollar
|
cb5a139e3a1d8f3196d7f1d25321d9839b51295b
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/mempool_spendcoinbase.py
|
Patrick-W-McMahon/MagMellDollar
|
cb5a139e3a1d8f3196d7f1d25321d9839b51295b
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/mempool_spendcoinbase.py
|
Patrick-W-McMahon/MagMellDollar
|
cb5a139e3a1d8f3196d7f1d25321d9839b51295b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Magmelldollar Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test spending coinbase transactions.
# The coinbase transaction in block N can appear in block
# N+100... so is valid in the mempool when the best block
# height is N+99.
# This test makes sure coinbase spends that will be mature
# in the next block are accepted into the memory pool,
# but less mature coinbase spends are NOT.
#
from test_framework.test_framework import MagmelldollarTestFramework
from test_framework.util import *
# Create one-input, one-output, no-fee transaction:
class MempoolSpendCoinbaseTest(MagmelldollarTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 1
self.setup_clean_chain = False
def setup_network(self):
# Just need one node for this test
args = ["-checkmempool", "-debug=mempool"]
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, args))
self.is_network_split = False
def run_test(self):
chain_height = self.nodes[0].getblockcount()
assert_equal(chain_height, 200)
node0_address = self.nodes[0].getnewaddress()
# Coinbase at height chain_height-100+1 ok in mempool, should
        # get mined. Coinbase at height chain_height-100+2
        # is too immature to spend.
b = [ self.nodes[0].getblockhash(n) for n in range(101, 103) ]
coinbase_txids = [ self.nodes[0].getblock(h)['tx'][0] for h in b ]
spends_raw = [ create_tx(self.nodes[0], txid, node0_address, 49.99) for txid in coinbase_txids ]
spend_101_id = self.nodes[0].sendrawtransaction(spends_raw[0])
# coinbase at height 102 should be too immature to spend
assert_raises(JSONRPCException, self.nodes[0].sendrawtransaction, spends_raw[1])
# mempool should have just spend_101:
assert_equal(self.nodes[0].getrawmempool(), [ spend_101_id ])
# mine a block, spend_101 should get confirmed
self.nodes[0].generate(1)
assert_equal(set(self.nodes[0].getrawmempool()), set())
# ... and now height 102 can be spent:
spend_102_id = self.nodes[0].sendrawtransaction(spends_raw[1])
assert_equal(self.nodes[0].getrawmempool(), [ spend_102_id ])
if __name__ == '__main__':
MempoolSpendCoinbaseTest().main()
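# The maturity rule exercised above, as a standalone sketch (assumption:
# COINBASE_MATURITY == 100, matching the N+100 comment in the header):
#
#   COINBASE_MATURITY = 100
#   def coinbase_spendable(coinbase_height, next_block_height):
#       return next_block_height - coinbase_height >= COINBASE_MATURITY
#   assert coinbase_spendable(101, 201)       # matches spend_101 above
#   assert not coinbase_spendable(102, 201)   # matches the rejected spend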
| 38.890625
| 104
| 0.691844
|
04635d24125f9acefb530cb90b2fa6c94a4f5691
| 2,087
|
py
|
Python
|
migrations_azure/versions/65c5753b57e0_cm_354_add_signup_table.py
|
ThembiNsele/ClimateMind-Backend
|
0e418000b2a0141a1e4a7c11dbe3564082a3f4bb
|
[
"MIT"
] | 6
|
2020-08-20T10:49:59.000Z
|
2022-01-24T16:49:46.000Z
|
migrations_azure/versions/65c5753b57e0_cm_354_add_signup_table.py
|
ThembiNsele/ClimateMind-Backend
|
0e418000b2a0141a1e4a7c11dbe3564082a3f4bb
|
[
"MIT"
] | 95
|
2020-07-24T22:32:34.000Z
|
2022-03-05T15:01:16.000Z
|
migrations_azure/versions/65c5753b57e0_cm_354_add_signup_table.py
|
ThembiNsele/ClimateMind-Backend
|
0e418000b2a0141a1e4a7c11dbe3564082a3f4bb
|
[
"MIT"
] | 5
|
2020-07-30T17:29:09.000Z
|
2021-01-10T19:46:15.000Z
|
"""CM-354 add signup table
Revision ID: 65c5753b57e0
Revises: 153174730e99
Create Date: 2021-01-10 16:40:00.200713
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mssql
# revision identifiers, used by Alembic.
revision = "65c5753b57e0"
down_revision = "153174730e99"
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"signup",
sa.Column("email", sa.String(length=254), nullable=False),
sa.Column("signup_timestamp", sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint("email"),
)
op.drop_index("ix_lrf_data_postal_code", table_name="lrf_data")
op.drop_table("lrf_data")
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"lrf_data",
sa.Column("postal_code", sa.BIGINT(), autoincrement=False, nullable=True),
sa.Column(
"http://webprotege.stanford.edu/R9vkBr0EApzeMGfa0rJGo9G",
mssql.BIT(),
autoincrement=False,
nullable=True,
),
sa.Column(
"http://webprotege.stanford.edu/RJAL6Zu9F3EHB35HCs3cYD",
mssql.BIT(),
autoincrement=False,
nullable=True,
),
sa.Column(
"http://webprotege.stanford.edu/RcIHdxpjQwjr8EG8yMhEYV",
mssql.BIT(),
autoincrement=False,
nullable=True,
),
sa.Column(
"http://webprotege.stanford.edu/RDudF9SBo28CKqKpRN9poYL",
mssql.BIT(),
autoincrement=False,
nullable=True,
),
sa.Column(
"http://webprotege.stanford.edu/RLc1ySxaRs4HWkW4m5w2Me",
mssql.BIT(),
autoincrement=False,
nullable=True,
),
)
op.create_index(
"ix_lrf_data_postal_code", "lrf_data", ["postal_code"], unique=False
)
op.drop_table("signup")
# ### end Alembic commands ###
| 28.589041
| 82
| 0.599904
|
d1cb7af9c77ba40280e3a6e45c2798486d92edd9
| 798
|
py
|
Python
|
data/contacts.py
|
anatoly-zubov/python_training
|
1bb99584c8d947f2c676ed55487863055c4db14b
|
[
"Apache-2.0"
] | null | null | null |
data/contacts.py
|
anatoly-zubov/python_training
|
1bb99584c8d947f2c676ed55487863055c4db14b
|
[
"Apache-2.0"
] | null | null | null |
data/contacts.py
|
anatoly-zubov/python_training
|
1bb99584c8d947f2c676ed55487863055c4db14b
|
[
"Apache-2.0"
] | null | null | null |
import random
import string
from model.group_contact import Group_contact
testdata = [
Group_contact(firstname="firstname1", lastname="lastname1", mobile="11111111", email="email1"),
Group_contact(firstname="firstname2", lastname="lastname2", mobile="22222222", email="email2")
]
"""
def random_string(prefix,maxlen):
symbols = string.ascii_letters + string.digits + string.punctuation + " "*10
return prefix + "".join([random.choice(symbols) for i in range (random.randrange(maxlen))])
testdata = [Group_contact(firstname="", lastname="", mobile="", email="")] + [
Group_contact(firstname=random_string("firstname", 8), lastname=random_string("lastname", 6),
mobile=random_string("mobile", 10),email=random_string("email", 10))
for i in range (5)
]
"""
| 39.9
| 99
| 0.703008
|
e632eccffbd74a38d7c62d632516014e2e33b397
| 5,929
|
py
|
Python
|
experiments/Set-005/X-002/generate-userdata.py
|
danielfdickinson/ivc-in-the-wtg-experiments
|
1f08ff5a5a09380f2b228f661553b6db35c20b5b
|
[
"MIT"
] | null | null | null |
experiments/Set-005/X-002/generate-userdata.py
|
danielfdickinson/ivc-in-the-wtg-experiments
|
1f08ff5a5a09380f2b228f661553b6db35c20b5b
|
[
"MIT"
] | null | null | null |
experiments/Set-005/X-002/generate-userdata.py
|
danielfdickinson/ivc-in-the-wtg-experiments
|
1f08ff5a5a09380f2b228f661553b6db35c20b5b
|
[
"MIT"
] | null | null | null |
import base64
import gzip
import os
import sys
import configparser
from jinja2 import Environment, FileSystemLoader, select_autoescape, StrictUndefined
from jinja2.exceptions import UndefinedError
def read_config(
defaults={
"delete_if_exists": "no",
"remember_password": "yes",
"userdata": "userdata-default.yaml.jinja", # This can be overridden in the INI file, globally or per-instance
"security_groups": "default",
"config_drive": "no",
},
configfile="create-instances.ini",
):
config = configparser.ConfigParser(defaults=defaults, interpolation=None)
readfile = config.read(configfile)
if len(readfile) < 1:
print("Failed to read config file. Bailing...", file=sys.stderr)
sys.exit(1)
return config
def apply_userdata_template(userdatafile, userdata_vars, server_name):
jinja_env = Environment(
loader=FileSystemLoader(os.getcwd()),
autoescape=select_autoescape(),
undefined=StrictUndefined,
)
jinja_template = jinja_env.get_template(userdatafile)
userdata_vars["server_name"] = server_name
return jinja_template.render(userdata_vars)
def get_file_data(config, section, userdata_vars):
verbatim_files_dirs = config[section]["verbatim_files_dirs"].split(":")
userdata_vars["files_to_write"] = []
userdata_vars["write_files"] = {}
for verbatim_files_dir in verbatim_files_dirs:
for verbatim_dirpath, __, verbatim_filenames in os.walk(verbatim_files_dir):
for verbatim_filename in verbatim_filenames:
local_path = os.path.join(verbatim_dirpath, verbatim_filename)
target_path = os.path.join(
verbatim_dirpath.removeprefix(verbatim_files_dir), verbatim_filename
)
local_path_size = os.path.getsize(local_path)
if local_path_size > 10240:
print(" Error: Files greater than 10k can't be part of userdata")
return None
target_file = open(local_path, "rb")
target_base_content = target_file.read()
target_file.close()
target_base_len = len(target_base_content)
target_gz_content = gzip.compress(target_base_content)
target_gz_len = len(target_gz_content)
target_gzipped = target_gz_len < target_base_len
target_var_name = (
target_path.replace("/", "-").replace(".", "-").removeprefix("-")
)
userdata_vars["write_files"][target_var_name] = {}
userdata_vars["write_files"][target_var_name]["path"] = target_path
if userdata_vars.get(target_var_name + "-permissions"):
userdata_vars["write_files"][target_var_name]["permissions"] = (
'"' + userdata_vars[target_var_name + "-permissions"] + '"'
)
else:
userdata_vars["write_files"][target_var_name]["permissions"] = ""
if userdata_vars.get(target_var_name + "-owner"):
userdata_vars["write_files"][target_var_name]["owner"] = (
'"' + userdata_vars[target_var_name + "-owner"] + '"'
)
else:
userdata_vars["write_files"][target_var_name]["owner"] = ""
if userdata_vars.get(target_var_name + "-append"):
userdata_vars["write_files"][target_var_name][
"append"
] = userdata_vars[target_var_name + "-append"]
else:
userdata_vars["write_files"][target_var_name]["append"] = False
if target_gzipped:
userdata_vars["write_files"][target_var_name][
"content"
] = base64.b64encode(target_gz_content).decode("utf-8")
userdata_vars["write_files"][target_var_name]["encoding"] = "gz+b64"
else:
userdata_vars["write_files"][target_var_name][
"content"
] = base64.b64encode(target_base_content).decode("utf-8")
userdata_vars["write_files"][target_var_name]["encoding"] = "b64"
userdata_vars["files_to_write"].append(target_var_name)
return userdata_vars
def copy_userdata_vars(userdata_vars):
new_dict = {}
for key, val in userdata_vars.items():
new_dict[key] = val
return new_dict
def main():
print("Generating userdata")
config = read_config()
for section in config.sections():
if not section.endswith("-userdata-vars"):
server_name = section
userdata_vars = {}
if (section + "-userdata-vars") in config:
userdata_vars = config[section + "-userdata-vars"]
else:
userdata_vars = config[config.default_section]
userdatafile = config[section]["userdata"]
userdata_vars = get_file_data(
config, section, copy_userdata_vars(userdata_vars)
)
if userdata_vars is None:
continue
print(
" Userdata for server {server_name}:".format(server_name=server_name)
)
try:
userdata = apply_userdata_template(
userdatafile, userdata_vars, server_name
)
if len(userdata) > 16384:
print(" Error: userdata is >16k")
continue
print(userdata)
except UndefinedError as ue:
print(" Error: {msg}".format(msg=ue.message))
continue
else:
continue
if __name__ == "__main__":
main()
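# The "gzip only when it actually shrinks the payload" decision made in
# get_file_data above, demonstrated standalone:
#
#   import base64, gzip
#   payload = b"A" * 400                       # highly compressible
#   gz = gzip.compress(payload)
#   encoding = "gz+b64" if len(gz) < len(payload) else "b64"
#   content = base64.b64encode(gz if encoding == "gz+b64" else payload).decode("utf-8")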
| 38.251613
| 118
| 0.578006
|
dd26206d53ca057d9016d3d4d82265f04b72cf37
| 5,645
|
py
|
Python
|
SBaaS_thermodynamics/stage03_quantification_dG_p_postgresql_models.py
|
dmccloskey/SBaaS_thermodynamics
|
0eeed0191f952ea0226ab8bbc234a30638fb2f9f
|
[
"MIT"
] | null | null | null |
SBaaS_thermodynamics/stage03_quantification_dG_p_postgresql_models.py
|
dmccloskey/SBaaS_thermodynamics
|
0eeed0191f952ea0226ab8bbc234a30638fb2f9f
|
[
"MIT"
] | null | null | null |
SBaaS_thermodynamics/stage03_quantification_dG_p_postgresql_models.py
|
dmccloskey/SBaaS_thermodynamics
|
0eeed0191f952ea0226ab8bbc234a30638fb2f9f
|
[
"MIT"
] | null | null | null |
#SBaaS base
from SBaaS_base.postgresql_orm_base import *
import json  # used by __repr__json__; made explicit rather than relying on the star import
class data_stage03_quantification_dG_p(Base):
__tablename__ = 'data_stage03_quantification_dG_p'
id = Column(Integer, Sequence('data_stage03_quantification_dG_p_id_seq'), primary_key=True)
experiment_id = Column(String(50))
model_id = Column(String(50))
sample_name_abbreviation = Column(String(100))
time_point = Column(String(10))
pathway_id = Column(String(100))
dG_p = Column(Float);
dG_p_var = Column(Float);
dG_p_units = Column(String(50));
dG_p_lb = Column(Float);
dG_p_ub = Column(Float);
used_ = Column(Boolean);
comment_ = Column(Text);
__table_args__ = (UniqueConstraint('experiment_id','sample_name_abbreviation','time_point','pathway_id'),
)
def __init__(self,
row_dict_I,
):
self.pathway_id=row_dict_I['pathway_id'];
self.time_point=row_dict_I['time_point'];
self.sample_name_abbreviation=row_dict_I['sample_name_abbreviation'];
self.model_id=row_dict_I['model_id'];
self.experiment_id=row_dict_I['experiment_id'];
self.used_=row_dict_I['used_'];
self.dG_p_ub=row_dict_I['dG_p_ub'];
self.comment_=row_dict_I['comment_'];
self.dG_p_lb=row_dict_I['dG_p_lb'];
self.dG_p_units=row_dict_I['dG_p_units'];
self.dG_p_var=row_dict_I['dG_p_var'];
self.dG_p=row_dict_I['dG_p'];
def __set__row__(self,experiment_id_I,model_id_I,sample_name_abbreviation_I,
time_point_I,pathway_id_I,
dG_p_I,dG_p_var_I,dG_p_units_I,dG_p_lb_I,
dG_p_ub_I,used_I,comment_I,):
self.experiment_id = experiment_id_I;
self.model_id = model_id_I;
self.sample_name_abbreviation = sample_name_abbreviation_I;
self.time_point = time_point_I;
self.pathway_id = pathway_id_I;
self.dG_p = dG_p_I;
self.dG_p_var = dG_p_var_I;
self.dG_p_units = dG_p_units_I;
self.dG_p_lb = dG_p_lb_I;
self.dG_p_ub = dG_p_ub_I;
self.used_ = used_I;
self.comment_ = comment_I;
def __repr__dict__(self):
return {'id':self.id,
'experiment_id':self.experiment_id,
'model_id':self.model_id,
'sample_name_abbreviation':self.sample_name_abbreviation,
'time_point':self.time_point,
'pathway_id':self.pathway_id,
'dG_p':self.dG_p,
'dG_p_var':self.dG_p_var,
'dG_p_units':self.dG_p_units,
'dG_p_lb':self.dG_p_lb,
'dG_p_ub':self.dG_p_ub,
'used_':self.used_,
'comment_':self.comment_}
def __repr__json__(self):
return json.dumps(self.__repr__dict__())
class data_stage03_quantification_dG0_p(Base):
__tablename__ = 'data_stage03_quantification_dG0_p'
id = Column(Integer, Sequence('data_stage03_quantification_dG0_p_id_seq'), primary_key=True)
experiment_id = Column(String(50))
model_id = Column(String(50))
sample_name_abbreviation = Column(String(100))
time_point = Column(String(10))
pathway_id = Column(String(100))
dG0_p = Column(Float);
dG0_p_var = Column(Float);
dG0_p_units = Column(String(50));
dG0_p_lb = Column(Float);
dG0_p_ub = Column(Float);
used_ = Column(Boolean);
comment_ = Column(Text);
__table_args__ = (UniqueConstraint('experiment_id','sample_name_abbreviation','time_point','pathway_id'),
)
def __init__(self,
row_dict_I,
):
self.comment_=row_dict_I['comment_'];
self.used_=row_dict_I['used_'];
self.dG0_p_ub=row_dict_I['dG0_p_ub'];
self.dG0_p_lb=row_dict_I['dG0_p_lb'];
self.dG0_p_units=row_dict_I['dG0_p_units'];
self.dG0_p_var=row_dict_I['dG0_p_var'];
self.dG0_p=row_dict_I['dG0_p'];
self.pathway_id=row_dict_I['pathway_id'];
self.time_point=row_dict_I['time_point'];
self.sample_name_abbreviation=row_dict_I['sample_name_abbreviation'];
self.model_id=row_dict_I['model_id'];
self.experiment_id=row_dict_I['experiment_id'];
def __set__row__(self,experiment_id_I,model_id_I,sample_name_abbreviation_I,
time_point_I,pathway_id_I,
dG0_p_I,dG0_p_var_I,dG0_p_units_I,dG0_p_lb_I,
dG0_p_ub_I,used_I,comment_I,):
self.experiment_id = experiment_id_I;
self.model_id = model_id_I;
self.sample_name_abbreviation = sample_name_abbreviation_I;
self.time_point = time_point_I;
self.pathway_id = pathway_id_I;
self.dG0_p = dG0_p_I;
self.dG0_p_var = dG0_p_var_I;
self.dG0_p_units = dG0_p_units_I;
self.dG0_p_lb = dG0_p_lb_I;
self.dG0_p_ub = dG0_p_ub_I;
self.used_ = used_I;
self.comment_ = comment_I;
def __repr__dict__(self):
return {'id':self.id,
'experiment_id':self.experiment_id,
'model_id':self.model_id,
'sample_name_abbreviation':self.sample_name_abbreviation,
'time_point':self.time_point,
'pathway_id':self.pathway_id,
'dG0_p':self.dG0_p,
'dG0_p_var':self.dG0_p_var,
'dG0_p_units':self.dG0_p_units,
'dG0_p_lb':self.dG0_p_lb,
'dG0_p_ub':self.dG0_p_ub,
'used_':self.used_,
'comment_':self.comment_}
def __repr__json__(self):
return json.dumps(self.__repr__dict__())
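# Construction sketch: __init__ expects a dict keyed exactly like
# __repr__dict__ minus 'id' (the values below are placeholders):
#
#   row = data_stage03_quantification_dG_p({
#       'experiment_id': 'exp01', 'model_id': 'model01',
#       'sample_name_abbreviation': 'WT', 'time_point': '0',
#       'pathway_id': 'pathway01', 'dG_p': -35.2, 'dG_p_var': 1.4,
#       'dG_p_units': 'kJ/mol', 'dG_p_lb': -38.0, 'dG_p_ub': -32.0,
#       'used_': True, 'comment_': None})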
| 40.321429
| 109
| 0.638264
|
35846e80984f6abefee6390e1138ada02a93f181
| 790
|
py
|
Python
|
parser/utils.py
|
qmzrjf/parser_Work
|
d3825401dc79ec37809dcd2391c417aeb538a792
|
[
"MIT"
] | null | null | null |
parser/utils.py
|
qmzrjf/parser_Work
|
d3825401dc79ec37809dcd2391c417aeb538a792
|
[
"MIT"
] | null | null | null |
parser/utils.py
|
qmzrjf/parser_Work
|
d3825401dc79ec37809dcd2391c417aeb538a792
|
[
"MIT"
] | null | null | null |
import random
from time import sleep
import json
import sqlite3
from config import config
def write_json(dict_js):
try:
data = json.load(open(config.JSON_PATH))
    except (FileNotFoundError, json.JSONDecodeError):
        # Missing or corrupt file: start a fresh list.
        data = []
data.append(dict_js)
with open(config.JSON_PATH, 'w') as f:
json.dump(data, f, indent=2, ensure_ascii=False)
def save_info_txt(array: list) -> None:
with open(config.TEXT_PATH, 'a') as file:
for line in array:
file.write(' | '.join(line) + '\n')
def save_info_to_db(array: list) -> None:
conn = sqlite3.connect(config.DB_PATH)
cursor = conn.cursor()
cursor.executemany('INSERT INTO vacancy VALUES (?,?,?,?,?,?,?,?,?,?,?,?)', array)
conn.commit()
conn.close()
def random_sleep():
sleep(random.randint(1, 4))
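# Usage sketch (the vacancy table is assumed to have 12 columns, matching the
# 12 placeholders in the INSERT above):
#
#   write_json({"title": "Python developer", "company": "Acme"})
#   save_info_txt([["Python developer", "Acme", "remote"]])
#   save_info_to_db([tuple(str(i) for i in range(12))])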
| 18.809524
| 85
| 0.618987
|
1eb37c01b1c2de1e0177807ea60ebaa116390907
| 2,265
|
py
|
Python
|
tests/formats/mysql/file_reader/parsers/test_index_unique.py
|
cmancone/mygrations
|
30d1d568ca7d6c38dbc5211834dd2d04c0bcf078
|
[
"MIT"
] | 10
|
2018-04-09T08:39:42.000Z
|
2022-03-14T15:36:05.000Z
|
tests/formats/mysql/file_reader/parsers/test_index_unique.py
|
cmancone/mygrations
|
30d1d568ca7d6c38dbc5211834dd2d04c0bcf078
|
[
"MIT"
] | 14
|
2018-05-02T11:14:08.000Z
|
2022-01-15T18:48:54.000Z
|
tests/formats/mysql/file_reader/parsers/test_index_unique.py
|
cmancone/mygrations
|
30d1d568ca7d6c38dbc5211834dd2d04c0bcf078
|
[
"MIT"
] | 5
|
2018-07-18T02:20:48.000Z
|
2022-02-19T09:32:07.000Z
|
import unittest
from mygrations.formats.mysql.file_reader.parsers.index_unique import index_unique
class test_index_unique(unittest.TestCase):
def test_simple(self):
# parse a typical UNIQUE KEY
parser = index_unique()
returned = parser.parse('UNIQUE KEY `users_email` (`email`),')
# we should have matched
self.assertTrue(parser.matched)
# and we should have some data now
self.assertEquals('', returned)
# we should have lots of data now
self.assertEquals('users_email', parser.name)
self.assertEquals(['email'], parser.columns)
self.assertEquals(parser.index_type, 'UNIQUE')
self.assertTrue(parser.has_comma)
self.assertEquals('UNIQUE KEY `users_email` (`email`)', str(parser))
def test_optional_comma(self):
# ending comma is optional
parser = index_unique()
returned = parser.parse('UNIQUE KEY `users_email` (`email`)')
# we should have matched
self.assertTrue(parser.matched)
self.assertFalse(parser.has_comma)
self.assertEquals('UNIQUE KEY `users_email` (`email`)', str(parser))
def test_optional_quotes(self):
# key name quotes are optional
parser = index_unique()
returned = parser.parse('UNIQUE KEY users_email (`email`)')
# we should have matched
self.assertTrue(parser.matched)
self.assertEquals('users_email', parser.name)
self.assertEquals('UNIQUE KEY `users_email` (`email`)', str(parser))
def test_multiple_columns(self):
# multi column index
parser = index_unique()
returned = parser.parse('UNIQUE KEY `users_email` (`email`,`username`,`password`),')
# we should have matched
self.assertTrue(parser.matched)
# and we should have some data now
self.assertEquals('', returned)
# we should have lots of data now
self.assertEquals('users_email', parser.name)
self.assertEquals(['email', 'username', 'password'], parser.columns)
self.assertEquals(parser.index_type, 'UNIQUE')
self.assertTrue(parser.has_comma)
self.assertEquals('UNIQUE KEY `users_email` (`email`,`username`,`password`)', str(parser))
| 35.390625
| 98
| 0.653863
|
2c460bddd3a4e4ed6cbb6d4ec6e2eb7b40b56012
| 603
|
py
|
Python
|
survey/actions.py
|
TheWITProject/MentorApp
|
2f08b87a7cde6d180e16d6f37d0b8019b8361638
|
[
"MIT"
] | null | null | null |
survey/actions.py
|
TheWITProject/MentorApp
|
2f08b87a7cde6d180e16d6f37d0b8019b8361638
|
[
"MIT"
] | 65
|
2020-02-04T22:31:07.000Z
|
2022-01-13T02:39:19.000Z
|
survey/actions.py
|
TheWITProject/MentorApp
|
2f08b87a7cde6d180e16d6f37d0b8019b8361638
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from django.utils.translation import gettext_lazy as _
from django.utils.translation import ungettext
def make_published(modeladmin, request, queryset):
"""
Mark the given survey as published
"""
count = queryset.update(is_published=True)
message = ungettext(
"%(count)d survey was successfully marked as published.",
"%(count)d surveys were successfully marked as published",
count,
) % {"count": count}
modeladmin.message_user(request, message)
make_published.short_description = _("Mark selected surveys as published")
| 28.714286
| 74
| 0.701493
|
028e28a3cd348f7b0b9ed8f2665b62d01387af9b
| 604
|
py
|
Python
|
sleekxmpp/features/feature_mechanisms/stanza/success.py
|
calendar42/SleekXMPP--XEP-0080-
|
d7bd5fd29f26a5d7de872a49ff63a353b8043e49
|
[
"BSD-3-Clause"
] | 1
|
2016-10-24T05:30:25.000Z
|
2016-10-24T05:30:25.000Z
|
sleekxmpp/features/feature_mechanisms/stanza/success.py
|
vijayp/SleekXMPP
|
b2e7f57334d27f140f079213c2016615b7168742
|
[
"BSD-3-Clause"
] | null | null | null |
sleekxmpp/features/feature_mechanisms/stanza/success.py
|
vijayp/SleekXMPP
|
b2e7f57334d27f140f079213c2016615b7168742
|
[
"BSD-3-Clause"
] | 1
|
2020-05-06T18:46:53.000Z
|
2020-05-06T18:46:53.000Z
|
"""
SleekXMPP: The Sleek XMPP Library
Copyright (C) 2011 Nathanael C. Fritz
This file is part of SleekXMPP.
See the file LICENSE for copying permission.
"""
from sleekxmpp.stanza import StreamFeatures
from sleekxmpp.xmlstream import ElementBase, StanzaBase, ET
from sleekxmpp.xmlstream import register_stanza_plugin
class Success(StanzaBase):
"""
"""
name = 'success'
namespace = 'urn:ietf:params:xml:ns:xmpp-sasl'
interfaces = set()
plugin_attrib = name
def setup(self, xml):
StanzaBase.setup(self, xml)
self.xml.tag = self.tag_name()
| 22.37037
| 59
| 0.688742
|
e207637a16fd1a3716b50d824d41d07d85a27111
| 3,684
|
py
|
Python
|
tests/core/image/test_get_dominant_colors.py
|
nekto-nekto/imgfind
|
2e0c3853c564f58e514dd4b30c2fed951ec8fde0
|
[
"WTFPL"
] | 1
|
2020-09-29T11:50:11.000Z
|
2020-09-29T11:50:11.000Z
|
tests/core/image/test_get_dominant_colors.py
|
nekto-nekto/imgfind
|
2e0c3853c564f58e514dd4b30c2fed951ec8fde0
|
[
"WTFPL"
] | null | null | null |
tests/core/image/test_get_dominant_colors.py
|
nekto-nekto/imgfind
|
2e0c3853c564f58e514dd4b30c2fed951ec8fde0
|
[
"WTFPL"
] | 1
|
2020-10-02T19:53:24.000Z
|
2020-10-02T19:53:24.000Z
|
# coding=utf-8
import numpy as np
import pytest
from imgfind.core.image import get_dominant_colors
from tests.conftest import asset
@pytest.mark.parametrize(
"asset_name,n_dominants,downscale_to,exp_palette,exp_freqs,exp_shape",
(
pytest.param(
"sample_1280×853.jpeg",
2,
None,
((52, 45, 58), (178, 146, 169)),
(0.53, 0.47),
(853, 1280, 3),
id="JPEG",
),
pytest.param(
"sample_640×426.png",
2,
None,
((52, 45, 56), (177, 144, 169)),
(0.52, 0.48),
(426, 640, 3),
id="PNG",
),
pytest.param(
"sample_640×426.bmp",
2,
None,
((52, 45, 56), (177, 144, 169)),
(0.52, 0.48),
(426, 640, 3),
id="BMP",
),
pytest.param(
"sample_640×426.tiff",
2,
None,
((52, 45, 56), (176, 144, 168)),
(0.52, 0.48),
(426, 640, 3),
id="TIFF",
),
pytest.param(
"sample_170×256.ico",
2,
None,
((55, 48, 57), (173, 141, 167)),
(0.52, 0.48),
(170, 256, 3),
id="ICO",
),
pytest.param(
"sample_640×426.gif",
2,
None,
((53, 45, 56), (176, 144, 169)),
(0.52, 0.48),
(426, 640, 3),
id="GIF",
),
pytest.param(
"sample_640×426.png",
2,
1000,
((52, 45, 56), (177, 144, 169)),
(0.52, 0.48),
(426, 640, 3),
id="PNG (shouldn't downscale)",
),
pytest.param(
"tiger_001.png",
2,
300,
((247, 245, 244), (143, 122, 96)),
(0.58, 0.42),
(234, 300, 3),
id="PNG (transparent)",
),
pytest.param(
"8bit_grayscale.png",
2,
None,
((255, 255, 255), (28, 28, 28)),
(0.96, 0.04),
(756, 1233, 3),
id="PNG-8",
),
pytest.param(
"8bit_grayscale.png",
2,
100,
((251, 251, 251), (208, 208, 208)),
(0.87, 0.13),
(61, 99, 3),
id="PNG-8 (downscale)",
),
pytest.param(
"grayscale.gif",
2,
100,
((255, 255, 255), (255, 255, 255)),
(1, 0),
(100, 100, 3),
id="GIF grayscale",
),
),
)
def test_supported_images(asset_name, n_dominants, downscale_to, exp_palette, exp_freqs, exp_shape):
image_path = asset(asset_name)
shape, palette, freqs = get_dominant_colors(image_path, n_dominants, downscale_to)
assert len(palette) == n_dominants
assert len(shape) == 3
assert len(freqs) == n_dominants
np.testing.assert_equal(np.uint8(palette), exp_palette)
np.testing.assert_allclose(freqs, exp_freqs, atol=0.01)
assert shape == exp_shape
@pytest.mark.parametrize(
"asset_name",
(pytest.param("sample1.webp", id="WEBP"),),
)
def test_unsupported_image(asset_name):
image_path = asset(asset_name)
try:
get_dominant_colors(image_path, 2)
except ValueError:
pass
except Exception as e:
raise AssertionError(f"Expected ValueError exception, but got {e.__class__.__name__}")
else:
raise AssertionError(f"Unsupported image format passed, but got no exception")
| 26.12766
| 100
| 0.446254
|
b2c1f8434ca3e7bfa2c52e4c8ceb4fd990f8ca67
| 1,150
|
py
|
Python
|
tests/CSIRO_depth_validation.py
|
BillMills/AutoQC
|
cb56fa5bb2115170ec204edd84e2d69ce84be820
|
[
"MIT"
] | 17
|
2015-01-31T00:35:58.000Z
|
2020-10-26T19:01:46.000Z
|
tests/CSIRO_depth_validation.py
|
castelao/AutoQC
|
eb85422c1a6a5ff965a1ef96b3cb29240a66b506
|
[
"MIT"
] | 163
|
2015-01-21T03:44:42.000Z
|
2022-01-09T22:03:12.000Z
|
tests/CSIRO_depth_validation.py
|
BillMills/AutoQC
|
cb56fa5bb2115170ec204edd84e2d69ce84be820
|
[
"MIT"
] | 11
|
2015-06-04T14:32:22.000Z
|
2021-04-11T05:18:09.000Z
|
import qctests.CSIRO_depth
import util.testingProfile
import numpy
##### CSIRO_depth_test ---------------------------------------------------
def test_CSIRO_depth():
'''
Spot-check the nominal behavior of the CSIRO depth test.
'''
# too shallow for an xbt
p = util.testingProfile.fakeProfile([0,0,0], [0,1,20], probe_type=2)
qc = qctests.CSIRO_depth.test(p, None)
truth = numpy.zeros(3, dtype=bool)
truth[0] = True
truth[1] = True
assert numpy.array_equal(qc, truth), 'failed to flag a too-shallow xbt measurement'
# shallow but not an xbt - don't flag
p = util.testingProfile.fakeProfile([0,0,0], [0,1,20], probe_type=1)
qc = qctests.CSIRO_depth.test(p, None)
truth = numpy.zeros(3, dtype=bool)
assert numpy.array_equal(qc, truth), 'flagged a non-xbt measurement'
    # measurement at exactly the 3.6 m threshold - don't flag it (the 0 m level is still flagged)
p = util.testingProfile.fakeProfile([0,0,0], [0,3.6,20], probe_type=2)
qc = qctests.CSIRO_depth.test(p, None)
truth = numpy.zeros(3, dtype=bool)
truth[0] = True
print(qc)
assert numpy.array_equal(qc, truth), "shouldn't flag measurements at threshold"
| 34.848485
| 87
| 0.638261
|
63470e586d35a199bd6645f66c262e5d4de26a7d
| 673
|
py
|
Python
|
src/state/DayState.py
|
fuqinshen/Python--
|
aaa5230354258e1bba761e483c8b9fb6be00402a
|
[
"MIT"
] | 31
|
2018-10-19T15:28:36.000Z
|
2022-02-14T03:01:25.000Z
|
src/state/DayState.py
|
fuqinshen/Python--
|
aaa5230354258e1bba761e483c8b9fb6be00402a
|
[
"MIT"
] | null | null | null |
src/state/DayState.py
|
fuqinshen/Python--
|
aaa5230354258e1bba761e483c8b9fb6be00402a
|
[
"MIT"
] | 10
|
2019-01-10T04:02:12.000Z
|
2021-11-17T01:52:15.000Z
|
from state.State import State
class DayState(State):
def __new__(cls, *args, **kwargs):
if not hasattr(cls, '_instance'):
cls._instance = super(DayState, cls).__new__(cls)
return cls._instance
def doClouck(self, context, hour):
from state.NightState import NightState
if hour < 9 or 17 <= hour:
context.changeState(NightState())
    def doUse(self, context):
        context.recordLog("Using the safe (daytime)")
    def doAlarm(self, context):
        context.callSecurityCenter("Alarm bell pressed (daytime)")
    def doPhone(self, context):
        context.callSecurityCenter("Normal call (daytime)")
    def __str__(self):
        return "[ Daytime ]"
| 25.884615
| 61
| 0.619614
|
d3d0020405abb1d725826db2bd651127dfd6f65e
| 2,741
|
py
|
Python
|
Jogo Pontos.py
|
marcusviniciusteixeira/PythonGames
|
bab6b5433a61b30f5096a06de37d2e16f5f75fc2
|
[
"MIT"
] | 1
|
2021-11-24T00:00:51.000Z
|
2021-11-24T00:00:51.000Z
|
Jogo Pontos.py
|
marcusviniciusteixeira/PythonGames
|
bab6b5433a61b30f5096a06de37d2e16f5f75fc2
|
[
"MIT"
] | null | null | null |
Jogo Pontos.py
|
marcusviniciusteixeira/PythonGames
|
bab6b5433a61b30f5096a06de37d2e16f5f75fc2
|
[
"MIT"
] | null | null | null |
import pygame
from pygame.locals import *
from sys import exit
from random import randint
pygame.init()
largura = 1000
altura = 800
x = largura/2
y = altura/2
w = largura/3
z = altura/2
Xcircle = randint(40, 600)  # random target position within the screen bounds
Ycircle = randint(50, 400)
tela = pygame.display.set_mode((largura, altura))
pygame.display.set_caption(('meliodas'))
relogio = pygame.time.Clock()
contador1 = 0
contador2 = 0
fonte = pygame.font.SysFont('arial', 30, True, True)
#wav = pygame.mixer.Sound('')
while True:
relogio.tick(20)
tela.fill((255,255,255))
    mensagem1 = ('Points: {}'.format(contador1))
    mensagem2 = ('Points: {}'.format(contador2))
textoFormat1 = fonte.render(mensagem1, True, (0,255,0))
textoFormat2 = fonte.render(mensagem2, True, (0,0,255))
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
exit()
'''
if event.type == KEYDOWN:
if event.key == K_a:
x = x - 20
if event.key == K_d:
x = x + 20
if event.key == K_w:
y = y - 20
if event.key == K_s:
y = y + 20
'''
if pygame.key.get_pressed()[K_LEFT]:
x = x - 20
if pygame.key.get_pressed()[K_RIGHT]:
x = x + 20
if pygame.key.get_pressed()[K_UP]:
y = y - 20
if pygame.key.get_pressed()[K_DOWN]:
y = y + 20
if pygame.key.get_pressed()[K_a]:
w = w - 20
if pygame.key.get_pressed()[K_d]:
w = w + 20
if pygame.key.get_pressed()[K_w]:
z = z - 20
if pygame.key.get_pressed()[K_s]:
z = z + 20
player1 = pygame.draw.rect(tela, (0,255,0), (x,y,40,50))
player2 = pygame.draw.rect(tela, (0,0,255), (w,z,40,50))
colisor = pygame.draw.circle(tela, (255,0,0), (Xcircle,Ycircle,), 10)
if player1.colliderect(colisor) or player2.colliderect(colisor):
Xcircle = randint(40, 600)
Ycircle = randint(50, 430)
if player1.colliderect(colisor):
contador1 = contador1 + 1
        print('Green player: {}'.format(contador1))  # player1 is the green rectangle
#wav.play()
if player2.colliderect(colisor):
contador2 = contador2 + 1
        print('Blue player: {}'.format(contador2))
#wav.play()
if x > largura:
x = 0
if x < 0:
x = largura
if y > altura:
y = 0
if y < 0:
y = altura
if w > largura:
w = 0
if w < 0:
w = largura
if z > altura:
z = 0
if z < 0:
z = altura
tela.blit(textoFormat1, (450,40))
tela.blit(textoFormat2, (40,40))
#pygame.draw.line(tela, (96,0,189),(390,0),(390,600),5)
pygame.display.update()
| 23.62931
| 73
| 0.545421
|
84c222ec86ed0d5265aba617b7458e5dc5e6b9df
| 992
|
py
|
Python
|
dice_roller/views.py
|
czambuk/dnd
|
a1aaa76c2e5108b6a55fe53a5974f6f584208736
|
[
"MIT"
] | null | null | null |
dice_roller/views.py
|
czambuk/dnd
|
a1aaa76c2e5108b6a55fe53a5974f6f584208736
|
[
"MIT"
] | 5
|
2021-03-19T03:49:43.000Z
|
2021-09-22T19:04:33.000Z
|
dice_roller/views.py
|
czambuk/dnd
|
a1aaa76c2e5108b6a55fe53a5974f6f584208736
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from django.views import View
from .forms import DiceRollForm
from dice_roll import throw_decode, roll
class DiceRollView(View):
def get(self, request):
form = DiceRollForm()
return render(request,
"dice_roller/dice_roller.html",
{'form': form}
)
#
# def post(self, request):
# form = DiceRollForm(request.POST)
# if form.is_valid():
# roll_code = form.cleaned_data['roll_data']
# roll_data = throw_decode(roll_code)
    #             result = f"""Rolled {roll_data[0]} times with a d{roll_data[1]} die and a {roll_data[2]} modifier
    #                          and got... {roll(roll_code)}"""
# return render(request,
# "dice_roller/dice_roller.html",
# {'form': form,
# 'result': result,
# }
# )
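# throw_decode and roll are imported from the dice_roll module, which is not
# shown here. A minimal sketch of what they might do for "NdS+M" codes such
# as "2d6+1" (the names and the 3-tuple shape are inferred from the usage
# above, not confirmed by the source):
#
#   import random, re
#   def throw_decode(code):
#       count, sides, mod = re.fullmatch(r"(\d+)d(\d+)([+-]\d+)?", code).groups()
#       return int(count), int(sides), int(mod or 0)
#   def roll(code):
#       count, sides, mod = throw_decode(code)
#       return sum(random.randint(1, sides) for _ in range(count)) + mod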
| 34.206897
| 107
| 0.506048
|
e7678bd580ca5da906658549b1ed6e53d8da96e3
| 4,631
|
py
|
Python
|
qa/rpc-tests/signrawtransactions.py
|
cruzezy/bitcoin
|
7767d558c5047e9f11e2e5eb9bf3f885a03adb08
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
qa/rpc-tests/signrawtransactions.py
|
cruzezy/bitcoin
|
7767d558c5047e9f11e2e5eb9bf3f885a03adb08
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
qa/rpc-tests/signrawtransactions.py
|
cruzezy/bitcoin
|
7767d558c5047e9f11e2e5eb9bf3f885a03adb08
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
#!/usr/bin/env python2
# Copyright (c) 2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import NetgoldTestFramework
from test_framework.util import *
class SignRawTransactionsTest(NetgoldTestFramework):
"""Tests transaction signing via RPC command "signrawtransaction"."""
def setup_chain(self):
print('Initializing test directory ' + self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 1)
def setup_network(self, split=False):
self.nodes = start_nodes(1, self.options.tmpdir)
self.is_network_split = False
def successful_signing_test(self):
"""Creates and signs a valid raw transaction with one input.
Expected results:
1) The transaction has a complete set of signatures
2) No script verification error occurred"""
privKeys = ['cUeKHd5orzT3mz8P9pxyREHfsWtVfgsfDjiZZBcjUBAaGk1BTj7N']
inputs = [
# Valid pay-to-pubkey script
{'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0,
'scriptPubKey': '76a91460baa0f494b38ce3c940dea67f3804dc52d1fb9488ac'}
]
outputs = {'mpLQjfK79b7CCV4VMJWEWAj5Mpx8Up5zxB': 0.1}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
rawTxSigned = self.nodes[0].signrawtransaction(rawTx, inputs, privKeys)
# 1) The transaction has a complete set of signatures
assert 'complete' in rawTxSigned
assert_equal(rawTxSigned['complete'], True)
# 2) No script verification error occurred
assert 'errors' not in rawTxSigned
def script_verification_error_test(self):
"""Creates and signs a raw transaction with valid (vin 0), invalid (vin 1) and one missing (vin 2) input script.
Expected results:
3) The transaction has no complete set of signatures
4) Two script verification errors occurred
5) Script verification errors have certain properties ("txid", "vout", "scriptSig", "sequence", "error")
6) The verification errors refer to the invalid (vin 1) and missing input (vin 2)"""
privKeys = ['cUeKHd5orzT3mz8P9pxyREHfsWtVfgsfDjiZZBcjUBAaGk1BTj7N']
inputs = [
# Valid pay-to-pubkey script
{'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0},
# Invalid script
{'txid': '5b8673686910442c644b1f4993d8f7753c7c8fcb5c87ee40d56eaeef25204547', 'vout': 7},
# Missing scriptPubKey
{'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 1},
]
scripts = [
# Valid pay-to-pubkey script
{'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0,
'scriptPubKey': '76a91460baa0f494b38ce3c940dea67f3804dc52d1fb9488ac'},
# Invalid script
{'txid': '5b8673686910442c644b1f4993d8f7753c7c8fcb5c87ee40d56eaeef25204547', 'vout': 7,
'scriptPubKey': 'badbadbadbad'}
]
outputs = {'mpLQjfK79b7CCV4VMJWEWAj5Mpx8Up5zxB': 0.1}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
rawTxSigned = self.nodes[0].signrawtransaction(rawTx, scripts, privKeys)
# 3) The transaction has no complete set of signatures
assert 'complete' in rawTxSigned
assert_equal(rawTxSigned['complete'], False)
# 4) Two script verification errors occurred
assert 'errors' in rawTxSigned
assert_equal(len(rawTxSigned['errors']), 2)
# 5) Script verification errors have certain properties
assert 'txid' in rawTxSigned['errors'][0]
assert 'vout' in rawTxSigned['errors'][0]
assert 'scriptSig' in rawTxSigned['errors'][0]
assert 'sequence' in rawTxSigned['errors'][0]
assert 'error' in rawTxSigned['errors'][0]
# 6) The verification errors refer to the invalid (vin 1) and missing input (vin 2)
assert_equal(rawTxSigned['errors'][0]['txid'], inputs[1]['txid'])
assert_equal(rawTxSigned['errors'][0]['vout'], inputs[1]['vout'])
assert_equal(rawTxSigned['errors'][1]['txid'], inputs[2]['txid'])
assert_equal(rawTxSigned['errors'][1]['vout'], inputs[2]['vout'])
def run_test(self):
self.successful_signing_test()
self.script_verification_error_test()
if __name__ == '__main__':
SignRawTransactionsTest().main()
| 42.1
| 120
| 0.677607
|
9ad85b379359b66bb0b7dc5c67583f1036378b64
| 2,942
|
py
|
Python
|
tests/syngenta_digital_alc/s3/test_event_client.py
|
syngenta-digital/package-python-alc
|
74c712d8a94078b922aca22e319a0cb4b035228b
|
[
"Apache-2.0"
] | null | null | null |
tests/syngenta_digital_alc/s3/test_event_client.py
|
syngenta-digital/package-python-alc
|
74c712d8a94078b922aca22e319a0cb4b035228b
|
[
"Apache-2.0"
] | 10
|
2021-10-19T23:08:46.000Z
|
2022-01-12T23:17:19.000Z
|
tests/syngenta_digital_alc/s3/test_event_client.py
|
syngenta-digital/package-python-alc
|
74c712d8a94078b922aca22e319a0cb4b035228b
|
[
"Apache-2.0"
] | null | null | null |
import unittest
from syngenta_digital_alc.s3.event_client import EventClient
from syngenta_digital_alc.s3.record_client import RecordClient
from tests.syngenta_digital_alc.s3 import mock_data
class S3EventClientTest(unittest.TestCase):
def test_s3_record_parses(self):
framework = EventClient(mock_data.get_s3_event(), None)
s3_record = framework.records[0]
self.assertIsInstance(s3_record, RecordClient)
def test_s3_parsed_object(self):
framework = EventClient(mock_data.get_s3_event(), None)
record = framework.records[0]
self.assertDictEqual(
record.s3_object,
{
'key': '123456789/3c8e97105d5f462f8896a7189910ee16-original.jpg',
'size': 17545,
'eTag': 'b79ac2ef68c08fa9ac6013d53038a26c',
'sequencer': '005BA40CB5BD42013A'
}
)
self.assertDictEqual(
record.s3_bucket,
{
'name': 'deploy-workers-poc-photos',
'ownerIdentity': {'principalId': 'A32KFL0DQ3MH8X'},
'arn': 'arn:aws:s3:::deploy-workers-poc-photos'
}
)
def test_s3_record_doesnt_parse(self):
framework = EventClient(mock_data.get_s3_event(), None)
record = framework.records[0]
self.assertDictEqual(
record._record,
{
'eventVersion': '2.0',
'eventSource': 'aws:s3',
'awsRegion': 'us-east-1',
'eventTime': '2018-09-20T21:10:13.821Z',
'eventName': 'ObjectCreated:Put',
'userIdentity': {
'principalId': 'AWS:AROAI7Z5ZQEQ3UETKKYGQ:deploy-workers-poc-put-v1-photo'
},
'requestParameters': {
'sourceIPAddress': '172.20.133.36'
},
'responseElements': {
'x-amz-request-id': '6B859DD0CE613FAE',
'x-amz-id-2': 'EXLMfc9aiXZFzNwLKXpw35iaVvl/DkEA6GtbuxjfmuLN3kLPL/aGoa7NMSwpl3m7ICAtNbjJX4w='
},
's3': {
's3SchemaVersion': '1.0',
'configurationId': 'exS3-v2--7cde234c7ff76c53c44990396aeddc6d',
'bucket': {
'name': 'deploy-workers-poc-photos',
'ownerIdentity': {
'principalId': 'A32KFL0DQ3MH8X'
},
'arn': 'arn:aws:s3:::deploy-workers-poc-photos'
},
'object': {
'key': '123456789/3c8e97105d5f462f8896a7189910ee16-original.jpg',
'size': 17545,
'eTag': 'b79ac2ef68c08fa9ac6013d53038a26c',
'sequencer': '005BA40CB5BD42013A'
}
}
}
)
| 38.710526
| 112
| 0.516315
|
465ae67a47287fbaf39de316ca58cf94f2ec4c9e
| 1,899
|
py
|
Python
|
neo/VM/RandomAccessStack.py
|
nickfujita/neo-python
|
42e0bb41866d0b2c7aa3ee460a8f8ce3ae2f1f22
|
[
"MIT"
] | 1
|
2021-06-19T04:06:56.000Z
|
2021-06-19T04:06:56.000Z
|
neo/VM/RandomAccessStack.py
|
nickfujita/neo-python
|
42e0bb41866d0b2c7aa3ee460a8f8ce3ae2f1f22
|
[
"MIT"
] | 1
|
2018-09-26T17:50:24.000Z
|
2018-09-26T18:38:42.000Z
|
neo/VM/RandomAccessStack.py
|
nickfujita/neo-python
|
42e0bb41866d0b2c7aa3ee460a8f8ce3ae2f1f22
|
[
"MIT"
] | null | null | null |
from neo.VM.InteropService import StackItem
class RandomAccessStack:
_list = []
_size = 0 # cache the size for performance
_name = 'Stack'
def __init__(self, name='Stack'):
self._list = []
self._size = 0
self._name = name
@property
def Count(self):
return self._size
@property
def Items(self):
return self._list
def Clear(self):
self._list = []
self._size = 0
def GetEnumerator(self):
return enumerate(self._list)
def Insert(self, index, item):
index = int(index)
if index < 0 or index > self._size:
raise Exception("Invalid list operation")
self._list.insert(index, item)
self._size += 1
# @TODO can be optimized
def Peek(self, index=0):
index = int(index)
if index >= self._size:
raise Exception("Invalid list operation")
return self._list[self._size - 1 - index]
def Pop(self):
# self.PrintList("POPSTACK <- ")
return self.Remove(0)
def PushT(self, item):
if not type(item) is StackItem and not issubclass(type(item), StackItem):
item = StackItem.New(item)
self._list.append(item)
self._size += 1
# @TODO can be optimized
def Remove(self, index):
index = int(index)
if index < 0 or index >= self._size:
raise Exception("Invalid list operation")
item = self._list.pop(self._size - 1 - index)
self._size -= 1
return item
def Set(self, index, item):
index = int(index)
if index < 0 or index > self._size:
raise Exception("Invalid list operation")
if not type(item) is StackItem and not issubclass(type(item), StackItem):
item = StackItem.New(item)
self._list[self._size - index - 1] = item
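# Usage sketch: indexes count down from the top of the stack, so Peek(0) is
# the most recently pushed item (PushT wraps plain values in StackItem):
#
#   stack = RandomAccessStack('Evaluation')
#   stack.PushT(1); stack.PushT(2); stack.PushT(3)
#   stack.Peek(0)   # StackItem wrapping 3 (top of the stack)
#   stack.Peek(2)   # StackItem wrapping 1 (bottom)
#   stack.Pop()     # removes and returns the top item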
| 23.444444
| 81
| 0.57188
|
338d3584e9ac3a4a3b5e0b2aa75508228320ff5e
| 15
|
py
|
Python
|
config.py
|
raym0ndev/Passwordgen
|
e29471edf3df14f1c1b7931aa62a2ead2f7f340a
|
[
"Unlicense"
] | null | null | null |
config.py
|
raym0ndev/Passwordgen
|
e29471edf3df14f1c1b7931aa62a2ead2f7f340a
|
[
"Unlicense"
] | null | null | null |
config.py
|
raym0ndev/Passwordgen
|
e29471edf3df14f1c1b7931aa62a2ead2f7f340a
|
[
"Unlicense"
] | null | null | null |
key = '*4['
| 5
| 12
| 0.266667
|
64d485440d391b1c4e56d9f2911f5fee4ebbe3bf
| 4,330
|
py
|
Python
|
userDB.py
|
sven-oly/LanguageTools
|
8c1e0bbae274232064e9796aa401c906797af452
|
[
"Apache-2.0"
] | 3
|
2021-02-02T12:11:27.000Z
|
2021-12-28T03:58:05.000Z
|
userDB.py
|
sven-oly/LanguageTools
|
8c1e0bbae274232064e9796aa401c906797af452
|
[
"Apache-2.0"
] | 7
|
2020-12-11T00:44:52.000Z
|
2022-03-01T18:00:00.000Z
|
userDB.py
|
sven-oly/LanguageTools
|
8c1e0bbae274232064e9796aa401c906797af452
|
[
"Apache-2.0"
] | 3
|
2019-06-08T17:46:47.000Z
|
2021-09-16T02:03:56.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
import main
import json
import logging
import os
import urllib
import webapp2
from google.appengine.api import users
from google.appengine.ext.webapp import template
from google.appengine.ext import blobstore
from google.appengine.ext.webapp import blobstore_handlers
from google.appengine.ext import db
class UserDB(db.Model):
userName = db.StringProperty(u'')
userEMail = db.StringProperty(u'')
userId = db.StringProperty(u'')
userLevel = db.StringProperty(u'')
privileges = db.ListProperty(str, default=[])
lastUpdate = db.DateTimeProperty(auto_now=True, auto_now_add=True)
defaultDB = db.StringProperty(u'')
comment = db.StringProperty('')
affiliation = db.StringProperty('')
# Return info based on current user.
def getUserInfo(login_target='/', logout_target='/'):
current_user = users.get_current_user()
user_nickname = None
user_logout = None
user_login = None
isAdmin = None
user_login = users.create_login_url(login_target)
if current_user:
user_logout = users.create_logout_url('/')
user_nickname = current_user.nickname()
user_login = users.create_login_url('/words/getWords/')
isAdmin = users.is_current_user_admin()
# logging.info('%s, %s, %s, %s' % (current_user, user_nickname, user_logout, user_login))
return (current_user, user_nickname, user_logout, user_login, isAdmin)
class manageUsers(webapp2.RequestHandler):
def get(self):
user_info = getUserInfo(self.request.url)
q = UserDB.all()
userCount = 0
userList = []
roleList = ['Admin', 'Edit', 'View']
for p in q.run():
userCount += 1
userList.append(p)
template_values = {
'language': main.Language,
'roleList': roleList,
'userList': userList,
'userInfo': user_info,
'user_nickname': user_info[1],
'user_logout': user_info[2],
'user_login_url': user_info[3],
}
path = os.path.join(os.path.dirname(__file__), 'users.html')
self.response.out.write(template.render(path, template_values))
class addUser(webapp2.RequestHandler):
def get(self):
newUserEmail = self.request.get('userEmail', None)
userRole = self.request.get('role', None)
privileges = self.request.GET.getall('privileges')
userName = self.request.get('userName', None)
self.response.out.write('\nArguments = %s' % self.request.arguments())
self.response.out.write('\nEMail = %s' % newUserEmail)
self.response.out.write('\nuserName = %s' % userName)
self.response.out.write('\nprivileges = %s' % privileges)
q = UserDB.all()
q.filter('userEMail =', newUserEmail)
        p = q.get() # Fetch the first matching user, if any.
if p:
self.response.out.write('\n!!!: User %s already in database: %s\n' % (
p, p.userLevel))
else:
newUser = UserDB(userEMail=newUserEmail,
userName=userName,
userLevel=userRole,
privileges=privileges,)
newUser.put()
self.response.out.write('\n!!!: Added User %s (%s) in role %s' % (
newUser.userName, newUser.userEMail, newUser.userLevel))
class deleteUser(webapp2.RequestHandler):
def get(self):
userEMails = self.request.GET.getall('userDelete')
logging.info('Emails to delete: %s' % userEMails)
confirm = self.request.get('confirmDelete', None)
        if not confirm:
            self.response.out.write('\n!!!: Delete not confirmed!')
            return
numDeleted = 0
for email in userEMails:
q = UserDB.all()
q.filter('userEMail =', email)
self.response.out.write('\n!!!: email = %s\n' % (email))
            p = q.get() # Fetch the first matching user, if any.
self.response.out.write('\n!!!: p=: %s)\n' % (p))
if p is not None:
self.response.out.write('\n!!!: User %s to be deleted from database: %s \n' % (p.userName, p.userEMail))
UserDB.delete(p)
numDeleted += 1
else:
self.response.out.write('\n!!!: No such email in database: %s\n' % (
email))
self.response.out.write('\n!!!: %d users were deleted from database\n' % (numDeleted))
class clearUsers(webapp2.RequestHandler):
def get(self):
q = UserDB.all()
numDeleted = 0
for p in q.run():
UserDB.delete(p)
numDeleted += 1
self.response.out.write('\n%d users deleted' % numDeleted)
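# getUserInfo returns a 5-tuple that the handlers above unpack by position:
#
#   current_user, nickname, logout_url, login_url, is_admin = getUserInfo('/words/')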
avg_line_length: 29.862069 | max_line_length: 112 | alphanum_fraction: 0.658661

hexsha: b05f3e81a12d74c722b970c93b0b3b9dddfffe82 | size: 103661 | ext: py | lang: Python
max_stars_repo_path: code_for_batchgradient_descent/venv/Lib/site-packages/matplotlib/widgets.py
max_stars_repo_name: Sibasish-Padhy/Algo-ScriptML
max_stars_repo_head_hexsha: c0d80a2968ffac2c8d8e3f891144dd91da353f5a
max_stars_repo_licenses: ["MIT"] | max_stars_count: 4
max_stars_repo_stars_event_min_datetime: 2021-03-25T15:49:56.000Z | max_stars_repo_stars_event_max_datetime: 2021-12-15T09:10:04.000Z
max_issues_repo_path: code_for_batchgradient_descent/venv/Lib/site-packages/matplotlib/widgets.py
max_issues_repo_name: Sibasish-Padhy/Algo-ScriptML
max_issues_repo_head_hexsha: c0d80a2968ffac2c8d8e3f891144dd91da353f5a
max_issues_repo_licenses: ["MIT"] | max_issues_count: 14
max_issues_repo_issues_event_min_datetime: 2021-03-26T20:54:22.000Z | max_issues_repo_issues_event_max_datetime: 2021-04-06T17:18:53.000Z
max_forks_repo_path: code_for_batchgradient_descent/venv/Lib/site-packages/matplotlib/widgets.py
max_forks_repo_name: Sibasish-Padhy/Algo-ScriptML
max_forks_repo_head_hexsha: c0d80a2968ffac2c8d8e3f891144dd91da353f5a
max_forks_repo_licenses: ["MIT"] | max_forks_count: 3
max_forks_repo_forks_event_min_datetime: 2021-03-28T16:13:00.000Z | max_forks_repo_forks_event_max_datetime: 2021-07-16T10:27:25.000Z
"""
GUI neutral widgets
===================
Widgets that are designed to work for any of the GUI backends.
All of these widgets require you to predefine a `matplotlib.axes.Axes`
instance and pass that as the first parameter. Matplotlib doesn't try to
be too smart with respect to layout -- you will have to figure out how
wide and tall you want your Axes to be to accommodate your widget.
"""
from contextlib import ExitStack
import copy
from numbers import Integral, Number
import numpy as np
import matplotlib as mpl
from . import _api, cbook, colors, ticker
from .lines import Line2D
from .patches import Circle, Rectangle, Ellipse
class LockDraw:
"""
Some widgets, like the cursor, draw onto the canvas, and this is not
desirable under all circumstances, like when the toolbar is in zoom-to-rect
mode and drawing a rectangle. To avoid this, a widget can acquire a
canvas' lock with ``canvas.widgetlock(widget)`` before drawing on the
canvas; this will prevent other widgets from doing so at the same time (if
they also try to acquire the lock first).
"""
def __init__(self):
self._owner = None
def __call__(self, o):
"""Reserve the lock for *o*."""
if not self.available(o):
raise ValueError('already locked')
self._owner = o
def release(self, o):
"""Release the lock from *o*."""
if not self.available(o):
raise ValueError('you do not own this lock')
self._owner = None
def available(self, o):
"""Return whether drawing is available to *o*."""
return not self.locked() or self.isowner(o)
def isowner(self, o):
"""Return whether *o* owns this lock."""
return self._owner is o
def locked(self):
"""Return whether the lock is currently held by an owner."""
return self._owner is not None
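# Illustrative usage sketch (editor's addition, not part of the original
# module); ``_demo_lockdraw`` is a hypothetical helper and the bare
# ``object()`` stands in for a widget instance.
def _demo_lockdraw():
    lock = LockDraw()
    owner = object()
    lock(owner)                          # reserve the lock for *owner*
    assert lock.isowner(owner) and lock.locked()
    assert not lock.available(object())  # other widgets are refused
    lock.release(owner)
    assert not lock.locked()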
class Widget:
"""
Abstract base class for GUI neutral widgets.
"""
drawon = True
eventson = True
_active = True
def set_active(self, active):
"""Set whether the widget is active."""
self._active = active
def get_active(self):
"""Get whether the widget is active."""
return self._active
# set_active is overridden by SelectorWidgets.
active = property(get_active, set_active, doc="Is the widget active?")
def ignore(self, event):
"""
Return whether *event* should be ignored.
This method should be called at the beginning of any event callback.
"""
return not self.active
class AxesWidget(Widget):
"""
Widget connected to a single `~matplotlib.axes.Axes`.
To guarantee that the widget remains responsive and not garbage-collected,
a reference to the object should be maintained by the user.
This is necessary because the callback registry
maintains only weak-refs to the functions, which are member
functions of the widget. If there are no references to the widget
object it may be garbage collected which will disconnect the callbacks.
Attributes
----------
ax : `~matplotlib.axes.Axes`
The parent axes for the widget.
canvas : `~matplotlib.backend_bases.FigureCanvasBase`
The parent figure canvas for the widget.
active : bool
If False, the widget does not respond to events.
"""
cids = _api.deprecated("3.4")(property(lambda self: self._cids))
def __init__(self, ax):
self.ax = ax
self.canvas = ax.figure.canvas
self._cids = []
def connect_event(self, event, callback):
"""
Connect a callback function with an event.
This should be used in lieu of ``figure.canvas.mpl_connect`` since this
function stores callback ids for later clean up.
"""
cid = self.canvas.mpl_connect(event, callback)
self._cids.append(cid)
def disconnect_events(self):
"""Disconnect all events created by this widget."""
for c in self._cids:
self.canvas.mpl_disconnect(c)
class Button(AxesWidget):
"""
A GUI neutral button.
For the button to remain responsive you must keep a reference to it.
Call `.on_clicked` to connect to the button.
Attributes
----------
ax
The `matplotlib.axes.Axes` the button renders into.
label
A `matplotlib.text.Text` instance.
color
The color of the button when not hovering.
hovercolor
The color of the button when hovering.
"""
cnt = _api.deprecated("3.4")(property( # Not real, but close enough.
lambda self: len(self._observers.callbacks['clicked'])))
observers = _api.deprecated("3.4")(property(
lambda self: self._observers.callbacks['clicked']))
def __init__(self, ax, label, image=None,
color='0.85', hovercolor='0.95'):
"""
Parameters
----------
ax : `~matplotlib.axes.Axes`
The `~.axes.Axes` instance the button will be placed into.
label : str
The button text.
image : array-like or PIL Image
The image to place in the button, if not *None*. The parameter is
directly forwarded to `~matplotlib.axes.Axes.imshow`.
color : color
The color of the button when not activated.
hovercolor : color
The color of the button when the mouse is over it.
"""
super().__init__(ax)
if image is not None:
ax.imshow(image)
self.label = ax.text(0.5, 0.5, label,
verticalalignment='center',
horizontalalignment='center',
transform=ax.transAxes)
self._observers = cbook.CallbackRegistry()
self.connect_event('button_press_event', self._click)
self.connect_event('button_release_event', self._release)
self.connect_event('motion_notify_event', self._motion)
ax.set_navigate(False)
ax.set_facecolor(color)
ax.set_xticks([])
ax.set_yticks([])
self.color = color
self.hovercolor = hovercolor
def _click(self, event):
if self.ignore(event) or event.inaxes != self.ax or not self.eventson:
return
if event.canvas.mouse_grabber != self.ax:
event.canvas.grab_mouse(self.ax)
def _release(self, event):
if self.ignore(event) or event.canvas.mouse_grabber != self.ax:
return
event.canvas.release_mouse(self.ax)
if self.eventson and event.inaxes == self.ax:
self._observers.process('clicked', event)
def _motion(self, event):
if self.ignore(event):
return
c = self.hovercolor if event.inaxes == self.ax else self.color
if not colors.same_color(c, self.ax.get_facecolor()):
self.ax.set_facecolor(c)
if self.drawon:
self.ax.figure.canvas.draw()
def on_clicked(self, func):
"""
Connect the callback function *func* to button click events.
Returns a connection id, which can be used to disconnect the callback.
"""
return self._observers.connect('clicked', lambda event: func(event))
def disconnect(self, cid):
"""Remove the callback function with connection id *cid*."""
self._observers.disconnect(cid)
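# Illustrative usage sketch (editor's addition, not part of the original
# module); ``_demo_button`` is a hypothetical helper. It shows the minimal
# Button wiring: a dedicated axes, a callback, and keeping a reference so the
# widget stays responsive.
def _demo_button():
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    fig.subplots_adjust(bottom=0.25)
    bax = fig.add_axes([0.4, 0.05, 0.2, 0.075])  # a small axes for the button
    button = Button(bax, 'Press me')
    button.on_clicked(lambda event: print('clicked'))
    plt.show()
    return button  # keep the widget alive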
class SliderBase(AxesWidget):
"""
The base class for constructing Slider widgets. Not intended for direct
usage.
For the slider to remain responsive you must maintain a reference to it.
"""
def __init__(self, ax, orientation, closedmin, closedmax,
valmin, valmax, valfmt, dragging, valstep):
if ax.name == '3d':
raise ValueError('Sliders cannot be added to 3D Axes')
super().__init__(ax)
_api.check_in_list(['horizontal', 'vertical'], orientation=orientation)
self.orientation = orientation
self.closedmin = closedmin
self.closedmax = closedmax
self.valmin = valmin
self.valmax = valmax
self.valstep = valstep
self.drag_active = False
self.valfmt = valfmt
if orientation == "vertical":
ax.set_ylim((valmin, valmax))
axis = ax.yaxis
else:
ax.set_xlim((valmin, valmax))
axis = ax.xaxis
self._fmt = axis.get_major_formatter()
if not isinstance(self._fmt, ticker.ScalarFormatter):
self._fmt = ticker.ScalarFormatter()
self._fmt.set_axis(axis)
self._fmt.set_useOffset(False) # No additive offset.
self._fmt.set_useMathText(True) # x sign before multiplicative offset.
ax.set_xticks([])
ax.set_yticks([])
ax.set_navigate(False)
self.connect_event("button_press_event", self._update)
self.connect_event("button_release_event", self._update)
if dragging:
self.connect_event("motion_notify_event", self._update)
self._observers = cbook.CallbackRegistry()
def _stepped_value(self, val):
"""Return *val* coerced to closest number in the ``valstep`` grid."""
if isinstance(self.valstep, Number):
val = (self.valmin
+ round((val - self.valmin) / self.valstep) * self.valstep)
elif self.valstep is not None:
valstep = np.asanyarray(self.valstep)
if valstep.ndim != 1:
raise ValueError(
f"valstep must have 1 dimension but has {valstep.ndim}"
)
val = valstep[np.argmin(np.abs(valstep - val))]
return val
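    # Worked example of the snapping above (editor's illustration): with
    # valmin=0 and valstep=0.25, a raw value of 0.61 snaps to
    # 0 + round(0.61 / 0.25) * 0.25 == 0.5; with an array-valued valstep such
    # as [0, 1, 10], the nearest grid entry is returned instead.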
def disconnect(self, cid):
"""
Remove the observer with connection id *cid*.
Parameters
----------
cid : int
Connection id of the observer to be removed.
"""
self._observers.disconnect(cid)
def reset(self):
"""Reset the slider to the initial value."""
if self.val != self.valinit:
self.set_val(self.valinit)
class Slider(SliderBase):
"""
A slider representing a floating point range.
Create a slider from *valmin* to *valmax* in axes *ax*. For the slider to
remain responsive you must maintain a reference to it. Call
:meth:`on_changed` to connect to the slider event.
Attributes
----------
val : float
Slider value.
"""
cnt = _api.deprecated("3.4")(property( # Not real, but close enough.
lambda self: len(self._observers.callbacks['changed'])))
observers = _api.deprecated("3.4")(property(
lambda self: self._observers.callbacks['changed']))
def __init__(self, ax, label, valmin, valmax, valinit=0.5, valfmt=None,
closedmin=True, closedmax=True, slidermin=None,
slidermax=None, dragging=True, valstep=None,
orientation='horizontal', *, initcolor='r', **kwargs):
"""
Parameters
----------
ax : Axes
The Axes to put the slider in.
label : str
Slider label.
valmin : float
The minimum value of the slider.
valmax : float
The maximum value of the slider.
valinit : float, default: 0.5
The slider initial position.
valfmt : str, default: None
%-format string used to format the slider value. If None, a
`.ScalarFormatter` is used instead.
closedmin : bool, default: True
Whether the slider interval is closed on the bottom.
closedmax : bool, default: True
Whether the slider interval is closed on the top.
slidermin : Slider, default: None
Do not allow the current slider to have a value less than
the value of the Slider *slidermin*.
slidermax : Slider, default: None
Do not allow the current slider to have a value greater than
the value of the Slider *slidermax*.
dragging : bool, default: True
If True the slider can be dragged by the mouse.
valstep : float or array-like, default: None
If a float, the slider will snap to multiples of *valstep*.
If an array the slider will snap to the values in the array.
orientation : {'horizontal', 'vertical'}, default: 'horizontal'
The orientation of the slider.
initcolor : color, default: 'r'
The color of the line at the *valinit* position. Set to ``'none'``
for no line.
Notes
-----
Additional kwargs are passed on to ``self.poly`` which is the
`~matplotlib.patches.Rectangle` that draws the slider knob. See the
`.Rectangle` documentation for valid property names (``facecolor``,
``edgecolor``, ``alpha``, etc.).
"""
super().__init__(ax, orientation, closedmin, closedmax,
valmin, valmax, valfmt, dragging, valstep)
if slidermin is not None and not hasattr(slidermin, 'val'):
raise ValueError(
f"Argument slidermin ({type(slidermin)}) has no 'val'")
if slidermax is not None and not hasattr(slidermax, 'val'):
raise ValueError(
f"Argument slidermax ({type(slidermax)}) has no 'val'")
self.slidermin = slidermin
self.slidermax = slidermax
valinit = self._value_in_bounds(valinit)
if valinit is None:
valinit = valmin
self.val = valinit
self.valinit = valinit
if orientation == 'vertical':
self.poly = ax.axhspan(valmin, valinit, 0, 1, **kwargs)
self.hline = ax.axhline(valinit, 0, 1, color=initcolor, lw=1)
else:
self.poly = ax.axvspan(valmin, valinit, 0, 1, **kwargs)
self.vline = ax.axvline(valinit, 0, 1, color=initcolor, lw=1)
if orientation == 'vertical':
self.label = ax.text(0.5, 1.02, label, transform=ax.transAxes,
verticalalignment='bottom',
horizontalalignment='center')
self.valtext = ax.text(0.5, -0.02, self._format(valinit),
transform=ax.transAxes,
verticalalignment='top',
horizontalalignment='center')
else:
self.label = ax.text(-0.02, 0.5, label, transform=ax.transAxes,
verticalalignment='center',
horizontalalignment='right')
self.valtext = ax.text(1.02, 0.5, self._format(valinit),
transform=ax.transAxes,
verticalalignment='center',
horizontalalignment='left')
self.set_val(valinit)
def _value_in_bounds(self, val):
"""Makes sure *val* is with given bounds."""
val = self._stepped_value(val)
if val <= self.valmin:
if not self.closedmin:
return
val = self.valmin
elif val >= self.valmax:
if not self.closedmax:
return
val = self.valmax
if self.slidermin is not None and val <= self.slidermin.val:
if not self.closedmin:
return
val = self.slidermin.val
if self.slidermax is not None and val >= self.slidermax.val:
if not self.closedmax:
return
val = self.slidermax.val
return val
def _update(self, event):
"""Update the slider position."""
if self.ignore(event) or event.button != 1:
return
if event.name == 'button_press_event' and event.inaxes == self.ax:
self.drag_active = True
event.canvas.grab_mouse(self.ax)
if not self.drag_active:
return
elif ((event.name == 'button_release_event') or
(event.name == 'button_press_event' and
event.inaxes != self.ax)):
self.drag_active = False
event.canvas.release_mouse(self.ax)
return
if self.orientation == 'vertical':
val = self._value_in_bounds(event.ydata)
else:
val = self._value_in_bounds(event.xdata)
if val not in [None, self.val]:
self.set_val(val)
def _format(self, val):
"""Pretty-print *val*."""
if self.valfmt is not None:
return self.valfmt % val
else:
_, s, _ = self._fmt.format_ticks([self.valmin, val, self.valmax])
# fmt.get_offset is actually the multiplicative factor, if any.
return s + self._fmt.get_offset()
def set_val(self, val):
"""
Set slider value to *val*.
Parameters
----------
val : float
"""
xy = self.poly.xy
if self.orientation == 'vertical':
xy[1] = 0, val
xy[2] = 1, val
else:
xy[2] = val, 1
xy[3] = val, 0
self.poly.xy = xy
self.valtext.set_text(self._format(val))
if self.drawon:
self.ax.figure.canvas.draw_idle()
self.val = val
if self.eventson:
self._observers.process('changed', val)
def on_changed(self, func):
"""
Connect *func* as callback function to changes of the slider value.
Parameters
----------
func : callable
Function to call when slider is changed.
The function must accept a single float as its arguments.
Returns
-------
int
Connection id (which can be used to disconnect *func*).
"""
return self._observers.connect('changed', lambda val: func(val))
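# Illustrative usage sketch (editor's addition; ``_demo_slider`` is a
# hypothetical helper): a Slider driving a sine plot's frequency.
def _demo_slider():
    import matplotlib.pyplot as plt
    import numpy as np
    fig, ax = plt.subplots()
    fig.subplots_adjust(bottom=0.25)
    t = np.linspace(0, 1, 200)
    line, = ax.plot(t, np.sin(2 * np.pi * 3 * t))
    sax = fig.add_axes([0.15, 0.1, 0.7, 0.05])
    freq = Slider(sax, 'freq', 0.1, 10.0, valinit=3.0)
    def update(val):  # *val* is the new slider value (a float)
        line.set_ydata(np.sin(2 * np.pi * val * t))
        fig.canvas.draw_idle()
    freq.on_changed(update)
    plt.show()
    return freq  # keep the widget alive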
class RangeSlider(SliderBase):
"""
A slider representing a range of floating point values. Defines the min and
max of the range via the *val* attribute as a tuple of (min, max).
Create a slider that defines a range contained within [*valmin*, *valmax*]
in axes *ax*. For the slider to remain responsive you must maintain a
reference to it. Call :meth:`on_changed` to connect to the slider event.
Attributes
----------
val : tuple of float
Slider value.
"""
def __init__(
self,
ax,
label,
valmin,
valmax,
valinit=None,
valfmt=None,
closedmin=True,
closedmax=True,
dragging=True,
valstep=None,
orientation="horizontal",
**kwargs,
):
"""
Parameters
----------
ax : Axes
The Axes to put the slider in.
label : str
Slider label.
valmin : float
The minimum value of the slider.
valmax : float
The maximum value of the slider.
valinit : tuple of float or None, default: None
The initial positions of the slider. If None the initial positions
will be at the 25th and 75th percentiles of the range.
valfmt : str, default: None
%-format string used to format the slider values. If None, a
`.ScalarFormatter` is used instead.
closedmin : bool, default: True
Whether the slider interval is closed on the bottom.
closedmax : bool, default: True
Whether the slider interval is closed on the top.
dragging : bool, default: True
If True the slider can be dragged by the mouse.
valstep : float, default: None
If given, the slider will snap to multiples of *valstep*.
orientation : {'horizontal', 'vertical'}, default: 'horizontal'
The orientation of the slider.
Notes
-----
Additional kwargs are passed on to ``self.poly`` which is the
`~matplotlib.patches.Rectangle` that draws the slider knob. See the
`.Rectangle` documentation for valid property names (``facecolor``,
``edgecolor``, ``alpha``, etc.).
"""
super().__init__(ax, orientation, closedmin, closedmax,
valmin, valmax, valfmt, dragging, valstep)
if valinit is None:
# Place at the 25th and 75th percentiles
extent = valmax - valmin
valinit = np.array(
[valmin + extent * 0.25, valmin + extent * 0.75]
)
else:
valinit = self._value_in_bounds(valinit)
self.val = valinit
self.valinit = valinit
if orientation == "vertical":
self.poly = ax.axhspan(valinit[0], valinit[1], 0, 1, **kwargs)
else:
self.poly = ax.axvspan(valinit[0], valinit[1], 0, 1, **kwargs)
if orientation == "vertical":
self.label = ax.text(
0.5,
1.02,
label,
transform=ax.transAxes,
verticalalignment="bottom",
horizontalalignment="center",
)
self.valtext = ax.text(
0.5,
-0.02,
self._format(valinit),
transform=ax.transAxes,
verticalalignment="top",
horizontalalignment="center",
)
else:
self.label = ax.text(
-0.02,
0.5,
label,
transform=ax.transAxes,
verticalalignment="center",
horizontalalignment="right",
)
self.valtext = ax.text(
1.02,
0.5,
self._format(valinit),
transform=ax.transAxes,
verticalalignment="center",
horizontalalignment="left",
)
self.set_val(valinit)
def _min_in_bounds(self, min):
"""Ensure the new min value is between valmin and self.val[1]."""
if min <= self.valmin:
if not self.closedmin:
return self.val[0]
min = self.valmin
if min > self.val[1]:
min = self.val[1]
return self._stepped_value(min)
def _max_in_bounds(self, max):
"""Ensure the new max value is between valmax and self.val[0]."""
if max >= self.valmax:
if not self.closedmax:
return self.val[1]
max = self.valmax
if max <= self.val[0]:
max = self.val[0]
return self._stepped_value(max)
def _value_in_bounds(self, val):
return (self._min_in_bounds(val[0]), self._max_in_bounds(val[1]))
def _update_val_from_pos(self, pos):
"""Update the slider value based on a given position."""
idx = np.argmin(np.abs(self.val - pos))
if idx == 0:
val = self._min_in_bounds(pos)
self.set_min(val)
else:
val = self._max_in_bounds(pos)
self.set_max(val)
def _update(self, event):
"""Update the slider position."""
if self.ignore(event) or event.button != 1:
return
if event.name == "button_press_event" and event.inaxes == self.ax:
self.drag_active = True
event.canvas.grab_mouse(self.ax)
if not self.drag_active:
return
elif (event.name == "button_release_event") or (
event.name == "button_press_event" and event.inaxes != self.ax
):
self.drag_active = False
event.canvas.release_mouse(self.ax)
return
if self.orientation == "vertical":
self._update_val_from_pos(event.ydata)
else:
self._update_val_from_pos(event.xdata)
def _format(self, val):
"""Pretty-print *val*."""
if self.valfmt is not None:
return f"({self.valfmt % val[0]}, {self.valfmt % val[1]})"
else:
_, s1, s2, _ = self._fmt.format_ticks(
[self.valmin, *val, self.valmax]
)
# fmt.get_offset is actually the multiplicative factor, if any.
s1 += self._fmt.get_offset()
s2 += self._fmt.get_offset()
# Use f string to avoid issues with backslashes when cast to a str
return f"({s1}, {s2})"
def set_min(self, min):
"""
Set the lower value of the slider to *min*.
Parameters
----------
min : float
"""
self.set_val((min, self.val[1]))
def set_max(self, max):
"""
        Set the upper value of the slider to *max*.
Parameters
----------
max : float
"""
self.set_val((self.val[0], max))
def set_val(self, val):
"""
Set slider value to *val*.
Parameters
----------
val : tuple or array-like of float
"""
val = np.sort(np.asanyarray(val))
if val.shape != (2,):
raise ValueError(
f"val must have shape (2,) but has shape {val.shape}"
)
val[0] = self._min_in_bounds(val[0])
val[1] = self._max_in_bounds(val[1])
xy = self.poly.xy
if self.orientation == "vertical":
xy[0] = 0, val[0]
xy[1] = 0, val[1]
xy[2] = 1, val[1]
xy[3] = 1, val[0]
xy[4] = 0, val[0]
else:
xy[0] = val[0], 0
xy[1] = val[0], 1
xy[2] = val[1], 1
xy[3] = val[1], 0
xy[4] = val[0], 0
self.poly.xy = xy
self.valtext.set_text(self._format(val))
if self.drawon:
self.ax.figure.canvas.draw_idle()
self.val = val
if self.eventson:
self._observers.process("changed", val)
def on_changed(self, func):
"""
Connect *func* as callback function to changes of the slider value.
Parameters
----------
func : callable
Function to call when slider is changed. The function
must accept a numpy array with shape (2,) as its argument.
Returns
-------
int
Connection id (which can be used to disconnect *func*).
"""
return self._observers.connect('changed', lambda val: func(val))
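# Illustrative usage sketch (editor's addition; ``_demo_range_slider`` is a
# hypothetical helper): the callback receives a (min, max) array of shape (2,).
def _demo_range_slider():
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    fig.subplots_adjust(bottom=0.25)
    sax = fig.add_axes([0.15, 0.1, 0.7, 0.05])
    # valinit defaults to the 25th/75th percentiles of [valmin, valmax].
    rs = RangeSlider(sax, 'range', 0.0, 10.0)
    rs.on_changed(lambda val: print('low=%.2f high=%.2f' % tuple(val)))
    plt.show()
    return rs  # keep the widget alive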
class CheckButtons(AxesWidget):
r"""
A GUI neutral set of check buttons.
For the check buttons to remain responsive you must keep a
reference to this object.
Connect to the CheckButtons with the `.on_clicked` method.
Attributes
----------
ax : `~matplotlib.axes.Axes`
The parent axes for the widget.
labels : list of `.Text`
rectangles : list of `.Rectangle`
lines : list of (`.Line2D`, `.Line2D`) pairs
List of lines for the x's in the check boxes. These lines exist for
each box, but have ``set_visible(False)`` when its box is not checked.
"""
cnt = _api.deprecated("3.4")(property( # Not real, but close enough.
lambda self: len(self._observers.callbacks['clicked'])))
observers = _api.deprecated("3.4")(property(
lambda self: self._observers.callbacks['clicked']))
def __init__(self, ax, labels, actives=None):
"""
Add check buttons to `matplotlib.axes.Axes` instance *ax*.
Parameters
----------
ax : `~matplotlib.axes.Axes`
The parent axes for the widget.
labels : list of str
The labels of the check buttons.
actives : list of bool, optional
The initial check states of the buttons. The list must have the
same length as *labels*. If not given, all buttons are unchecked.
"""
super().__init__(ax)
ax.set_xticks([])
ax.set_yticks([])
ax.set_navigate(False)
if actives is None:
actives = [False] * len(labels)
if len(labels) > 1:
dy = 1. / (len(labels) + 1)
ys = np.linspace(1 - dy, dy, len(labels))
else:
dy = 0.25
ys = [0.5]
axcolor = ax.get_facecolor()
self.labels = []
self.lines = []
self.rectangles = []
lineparams = {'color': 'k', 'linewidth': 1.25,
'transform': ax.transAxes, 'solid_capstyle': 'butt'}
for y, label, active in zip(ys, labels, actives):
t = ax.text(0.25, y, label, transform=ax.transAxes,
horizontalalignment='left',
verticalalignment='center')
w, h = dy / 2, dy / 2
x, y = 0.05, y - h / 2
p = Rectangle(xy=(x, y), width=w, height=h, edgecolor='black',
facecolor=axcolor, transform=ax.transAxes)
l1 = Line2D([x, x + w], [y + h, y], **lineparams)
l2 = Line2D([x, x + w], [y, y + h], **lineparams)
l1.set_visible(active)
l2.set_visible(active)
self.labels.append(t)
self.rectangles.append(p)
self.lines.append((l1, l2))
ax.add_patch(p)
ax.add_line(l1)
ax.add_line(l2)
self.connect_event('button_press_event', self._clicked)
self._observers = cbook.CallbackRegistry()
def _clicked(self, event):
if self.ignore(event) or event.button != 1 or event.inaxes != self.ax:
return
for i, (p, t) in enumerate(zip(self.rectangles, self.labels)):
if (t.get_window_extent().contains(event.x, event.y) or
p.get_window_extent().contains(event.x, event.y)):
self.set_active(i)
break
def set_active(self, index):
"""
Toggle (activate or deactivate) a check button by index.
Callbacks will be triggered if :attr:`eventson` is True.
Parameters
----------
index : int
Index of the check button to toggle.
Raises
------
ValueError
If *index* is invalid.
"""
if index not in range(len(self.labels)):
raise ValueError(f'Invalid CheckButton index: {index}')
l1, l2 = self.lines[index]
l1.set_visible(not l1.get_visible())
l2.set_visible(not l2.get_visible())
if self.drawon:
self.ax.figure.canvas.draw()
if self.eventson:
self._observers.process('clicked', self.labels[index].get_text())
def get_status(self):
"""
        Return a list of the status (True/False) of all of the check buttons.
"""
return [l1.get_visible() for (l1, l2) in self.lines]
def on_clicked(self, func):
"""
Connect the callback function *func* to button click events.
Returns a connection id, which can be used to disconnect the callback.
"""
return self._observers.connect('clicked', lambda text: func(text))
def disconnect(self, cid):
"""Remove the observer with connection id *cid*."""
self._observers.disconnect(cid)
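# Illustrative usage sketch (editor's addition; ``_demo_check_buttons`` is a
# hypothetical helper): the callback receives the clicked label's text.
def _demo_check_buttons():
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    cax = fig.add_axes([0.05, 0.4, 0.2, 0.2])
    checks = CheckButtons(cax, ['red', 'green'], actives=[True, False])
    checks.on_clicked(lambda label: print(label, checks.get_status()))
    plt.show()
    return checks  # keep the widget alive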
class TextBox(AxesWidget):
"""
A GUI neutral text input box.
For the text box to remain responsive you must keep a reference to it.
Call `.on_text_change` to be updated whenever the text changes.
Call `.on_submit` to be updated whenever the user hits enter or
leaves the text entry field.
Attributes
----------
ax : `~matplotlib.axes.Axes`
The parent axes for the widget.
label : `.Text`
color : color
The color of the text box when not hovering.
hovercolor : color
The color of the text box when hovering.
"""
params_to_disable = _api.deprecated("3.3")(property(
lambda self: [key for key in mpl.rcParams if 'keymap' in key]))
cnt = _api.deprecated("3.4")(property( # Not real, but close enough.
lambda self: sum(len(d) for d in self._observers.callbacks.values())))
change_observers = _api.deprecated("3.4")(property(
lambda self: self._observers.callbacks['change']))
submit_observers = _api.deprecated("3.4")(property(
lambda self: self._observers.callbacks['submit']))
def __init__(self, ax, label, initial='',
color='.95', hovercolor='1', label_pad=.01):
"""
Parameters
----------
ax : `~matplotlib.axes.Axes`
The `~.axes.Axes` instance the button will be placed into.
label : str
Label for this text box.
initial : str
Initial value in the text box.
color : color
The color of the box.
hovercolor : color
The color of the box when the mouse is over it.
label_pad : float
The distance between the label and the right side of the textbox.
"""
super().__init__(ax)
self.DIST_FROM_LEFT = .05
self.label = ax.text(
-label_pad, 0.5, label, transform=ax.transAxes,
verticalalignment='center', horizontalalignment='right')
self.text_disp = self.ax.text(
self.DIST_FROM_LEFT, 0.5, initial, transform=self.ax.transAxes,
verticalalignment='center', horizontalalignment='left')
self._observers = cbook.CallbackRegistry()
ax.set(
xlim=(0, 1), ylim=(0, 1), # s.t. cursor appears from first click.
navigate=False, facecolor=color,
xticks=[], yticks=[])
self.cursor_index = 0
self.cursor = ax.vlines(0, 0, 0, visible=False,
transform=mpl.transforms.IdentityTransform())
self.connect_event('button_press_event', self._click)
self.connect_event('button_release_event', self._release)
self.connect_event('motion_notify_event', self._motion)
self.connect_event('key_press_event', self._keypress)
self.connect_event('resize_event', self._resize)
self.color = color
self.hovercolor = hovercolor
self.capturekeystrokes = False
@property
def text(self):
return self.text_disp.get_text()
def _rendercursor(self):
# this is a hack to figure out where the cursor should go.
# we draw the text up to where the cursor should go, measure
# and save its dimensions, draw the real text, then put the cursor
# at the saved dimensions
# This causes a single extra draw if the figure has never been rendered
# yet, which should be fine as we're going to repeatedly re-render the
# figure later anyways.
if self.ax.figure._cachedRenderer is None:
self.ax.figure.canvas.draw()
text = self.text_disp.get_text() # Save value before overwriting it.
widthtext = text[:self.cursor_index]
self.text_disp.set_text(widthtext or ",")
bb = self.text_disp.get_window_extent()
if not widthtext: # Use the comma for the height, but keep width to 0.
bb.x1 = bb.x0
self.cursor.set(
segments=[[(bb.x1, bb.y0), (bb.x1, bb.y1)]], visible=True)
self.text_disp.set_text(text)
self.ax.figure.canvas.draw()
def _release(self, event):
if self.ignore(event):
return
if event.canvas.mouse_grabber != self.ax:
return
event.canvas.release_mouse(self.ax)
def _keypress(self, event):
if self.ignore(event):
return
if self.capturekeystrokes:
key = event.key
text = self.text
if len(key) == 1:
text = (text[:self.cursor_index] + key +
text[self.cursor_index:])
self.cursor_index += 1
elif key == "right":
if self.cursor_index != len(text):
self.cursor_index += 1
elif key == "left":
if self.cursor_index != 0:
self.cursor_index -= 1
elif key == "home":
self.cursor_index = 0
elif key == "end":
self.cursor_index = len(text)
elif key == "backspace":
if self.cursor_index != 0:
text = (text[:self.cursor_index - 1] +
text[self.cursor_index:])
self.cursor_index -= 1
elif key == "delete":
if self.cursor_index != len(self.text):
text = (text[:self.cursor_index] +
text[self.cursor_index + 1:])
self.text_disp.set_text(text)
self._rendercursor()
if self.eventson:
self._observers.process('change', self.text)
if key in ["enter", "return"]:
self._observers.process('submit', self.text)
def set_val(self, val):
newval = str(val)
if self.text == newval:
return
self.text_disp.set_text(newval)
self._rendercursor()
if self.eventson:
self._observers.process('change', self.text)
self._observers.process('submit', self.text)
def begin_typing(self, x):
self.capturekeystrokes = True
# Disable keypress shortcuts, which may otherwise cause the figure to
# be saved, closed, etc., until the user stops typing. The way to
# achieve this depends on whether toolmanager is in use.
stack = ExitStack() # Register cleanup actions when user stops typing.
self._on_stop_typing = stack.close
toolmanager = getattr(
self.ax.figure.canvas.manager, "toolmanager", None)
if toolmanager is not None:
# If using toolmanager, lock keypresses, and plan to release the
# lock when typing stops.
toolmanager.keypresslock(self)
stack.push(toolmanager.keypresslock.release, self)
else:
# If not using toolmanager, disable all keypress-related rcParams.
# Avoid spurious warnings if keymaps are getting deprecated.
with _api.suppress_matplotlib_deprecation_warning():
stack.enter_context(mpl.rc_context(
{k: [] for k in mpl.rcParams if k.startswith("keymap.")}))
def stop_typing(self):
if self.capturekeystrokes:
self._on_stop_typing()
self._on_stop_typing = None
notifysubmit = True
else:
notifysubmit = False
self.capturekeystrokes = False
self.cursor.set_visible(False)
self.ax.figure.canvas.draw()
if notifysubmit and self.eventson:
# Because process() might throw an error in the user's code, only
# call it once we've already done our cleanup.
self._observers.process('submit', self.text)
def position_cursor(self, x):
        # Now figure out where the cursor goes: approximate its position by
        # assuming all characters have the same width.
if len(self.text) == 0:
self.cursor_index = 0
else:
bb = self.text_disp.get_window_extent()
ratio = np.clip((x - bb.x0) / bb.width, 0, 1)
self.cursor_index = int(len(self.text) * ratio)
self._rendercursor()
def _click(self, event):
if self.ignore(event):
return
if event.inaxes != self.ax:
self.stop_typing()
return
if not self.eventson:
return
if event.canvas.mouse_grabber != self.ax:
event.canvas.grab_mouse(self.ax)
if not self.capturekeystrokes:
self.begin_typing(event.x)
self.position_cursor(event.x)
def _resize(self, event):
self.stop_typing()
def _motion(self, event):
if self.ignore(event):
return
c = self.hovercolor if event.inaxes == self.ax else self.color
if not colors.same_color(c, self.ax.get_facecolor()):
self.ax.set_facecolor(c)
if self.drawon:
self.ax.figure.canvas.draw()
def on_text_change(self, func):
"""
When the text changes, call this *func* with event.
A connection id is returned which can be used to disconnect.
"""
return self._observers.connect('change', lambda text: func(text))
def on_submit(self, func):
"""
When the user hits enter or leaves the submission box, call this
*func* with event.
A connection id is returned which can be used to disconnect.
"""
return self._observers.connect('submit', lambda text: func(text))
def disconnect(self, cid):
"""Remove the observer with connection id *cid*."""
self._observers.disconnect(cid)
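# Illustrative usage sketch (editor's addition; ``_demo_text_box`` is a
# hypothetical helper): on_submit fires on Enter or when focus leaves the box.
def _demo_text_box():
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    fig.subplots_adjust(bottom=0.25)
    tax = fig.add_axes([0.2, 0.05, 0.6, 0.075])
    box = TextBox(tax, 'expr', initial='x ** 2')
    box.on_submit(lambda text: print('submitted:', text))
    plt.show()
    return box  # keep the widget alive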
class RadioButtons(AxesWidget):
"""
A GUI neutral radio button.
For the buttons to remain responsive you must keep a reference to this
object.
Connect to the RadioButtons with the `.on_clicked` method.
Attributes
----------
ax : `~matplotlib.axes.Axes`
The parent axes for the widget.
activecolor : color
The color of the selected button.
labels : list of `.Text`
The button labels.
circles : list of `~.patches.Circle`
The buttons.
value_selected : str
The label text of the currently selected button.
"""
def __init__(self, ax, labels, active=0, activecolor='blue'):
"""
Add radio buttons to an `~.axes.Axes`.
Parameters
----------
ax : `~matplotlib.axes.Axes`
The axes to add the buttons to.
labels : list of str
The button labels.
active : int
The index of the initially selected button.
activecolor : color
The color of the selected button.
"""
super().__init__(ax)
self.activecolor = activecolor
self.value_selected = None
ax.set_xticks([])
ax.set_yticks([])
ax.set_navigate(False)
dy = 1. / (len(labels) + 1)
ys = np.linspace(1 - dy, dy, len(labels))
cnt = 0
axcolor = ax.get_facecolor()
# scale the radius of the circle with the spacing between each one
circle_radius = dy / 2 - 0.01
# default to hard-coded value if the radius becomes too large
circle_radius = min(circle_radius, 0.05)
self.labels = []
self.circles = []
for y, label in zip(ys, labels):
t = ax.text(0.25, y, label, transform=ax.transAxes,
horizontalalignment='left',
verticalalignment='center')
if cnt == active:
self.value_selected = label
facecolor = activecolor
else:
facecolor = axcolor
p = Circle(xy=(0.15, y), radius=circle_radius, edgecolor='black',
facecolor=facecolor, transform=ax.transAxes)
self.labels.append(t)
self.circles.append(p)
ax.add_patch(p)
cnt += 1
self.connect_event('button_press_event', self._clicked)
self._observers = cbook.CallbackRegistry()
cnt = _api.deprecated("3.4")(property( # Not real, but close enough.
lambda self: len(self._observers.callbacks['clicked'])))
observers = _api.deprecated("3.4")(property(
lambda self: self._observers.callbacks['clicked']))
def _clicked(self, event):
if self.ignore(event) or event.button != 1 or event.inaxes != self.ax:
return
pclicked = self.ax.transAxes.inverted().transform((event.x, event.y))
distances = {}
for i, (p, t) in enumerate(zip(self.circles, self.labels)):
if (t.get_window_extent().contains(event.x, event.y)
or np.linalg.norm(pclicked - p.center) < p.radius):
distances[i] = np.linalg.norm(pclicked - p.center)
if len(distances) > 0:
closest = min(distances, key=distances.get)
self.set_active(closest)
def set_active(self, index):
"""
Select button with number *index*.
Callbacks will be triggered if :attr:`eventson` is True.
"""
if index not in range(len(self.labels)):
raise ValueError(f'Invalid RadioButton index: {index}')
self.value_selected = self.labels[index].get_text()
for i, p in enumerate(self.circles):
if i == index:
color = self.activecolor
else:
color = self.ax.get_facecolor()
p.set_facecolor(color)
if self.drawon:
self.ax.figure.canvas.draw()
if self.eventson:
self._observers.process('clicked', self.labels[index].get_text())
def on_clicked(self, func):
"""
Connect the callback function *func* to button click events.
Returns a connection id, which can be used to disconnect the callback.
"""
return self._observers.connect('clicked', func)
def disconnect(self, cid):
"""Remove the observer with connection id *cid*."""
self._observers.disconnect(cid)
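# Illustrative usage sketch (editor's addition; ``_demo_radio_buttons`` is a
# hypothetical helper): switching an axes between y-scales.
def _demo_radio_buttons():
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    fig.subplots_adjust(left=0.35)
    ax.plot([1, 10, 100])
    rax = fig.add_axes([0.05, 0.4, 0.2, 0.25])
    radio = RadioButtons(rax, ['linear', 'log'], active=0)
    def set_scale(label):  # *label* is the selected button's text
        ax.set_yscale(label)
        fig.canvas.draw_idle()
    radio.on_clicked(set_scale)
    plt.show()
    return radio  # keep the widget alive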
class SubplotTool(Widget):
"""
A tool to adjust the subplot params of a `matplotlib.figure.Figure`.
"""
def __init__(self, targetfig, toolfig):
"""
Parameters
----------
targetfig : `.Figure`
The figure instance to adjust.
toolfig : `.Figure`
The figure instance to embed the subplot tool into.
"""
self.figure = toolfig
self.targetfig = targetfig
toolfig.subplots_adjust(left=0.2, right=0.9)
toolfig.suptitle("Click on slider to adjust subplot param")
self._sliders = []
names = ["left", "bottom", "right", "top", "wspace", "hspace"]
# The last subplot, removed below, keeps space for the "Reset" button.
for name, ax in zip(names, toolfig.subplots(len(names) + 1)):
ax.set_navigate(False)
slider = Slider(ax, name,
0, 1, getattr(targetfig.subplotpars, name))
slider.on_changed(self._on_slider_changed)
self._sliders.append(slider)
toolfig.axes[-1].remove()
(self.sliderleft, self.sliderbottom, self.sliderright, self.slidertop,
self.sliderwspace, self.sliderhspace) = self._sliders
for slider in [self.sliderleft, self.sliderbottom,
self.sliderwspace, self.sliderhspace]:
slider.closedmax = False
for slider in [self.sliderright, self.slidertop]:
slider.closedmin = False
# constraints
self.sliderleft.slidermax = self.sliderright
self.sliderright.slidermin = self.sliderleft
self.sliderbottom.slidermax = self.slidertop
self.slidertop.slidermin = self.sliderbottom
bax = toolfig.add_axes([0.8, 0.05, 0.15, 0.075])
self.buttonreset = Button(bax, 'Reset')
# During reset there can be a temporary invalid state depending on the
# order of the reset so we turn off validation for the resetting
with cbook._setattr_cm(toolfig.subplotpars, validate=False):
self.buttonreset.on_clicked(self._on_reset)
def _on_slider_changed(self, _):
self.targetfig.subplots_adjust(
**{slider.label.get_text(): slider.val
for slider in self._sliders})
if self.drawon:
self.targetfig.canvas.draw()
def _on_reset(self, event):
with ExitStack() as stack:
# Temporarily disable drawing on self and self's sliders.
stack.enter_context(cbook._setattr_cm(self, drawon=False))
for slider in self._sliders:
stack.enter_context(cbook._setattr_cm(slider, drawon=False))
# Reset the slider to the initial position.
for slider in self._sliders:
slider.reset()
# Draw the canvas.
if self.drawon:
event.canvas.draw()
self.targetfig.canvas.draw()
axleft = _api.deprecated("3.3")(
property(lambda self: self.sliderleft.ax))
axright = _api.deprecated("3.3")(
property(lambda self: self.sliderright.ax))
axbottom = _api.deprecated("3.3")(
property(lambda self: self.sliderbottom.ax))
axtop = _api.deprecated("3.3")(
property(lambda self: self.slidertop.ax))
axwspace = _api.deprecated("3.3")(
property(lambda self: self.sliderwspace.ax))
axhspace = _api.deprecated("3.3")(
property(lambda self: self.sliderhspace.ax))
@_api.deprecated("3.3")
def funcleft(self, val):
self.targetfig.subplots_adjust(left=val)
if self.drawon:
self.targetfig.canvas.draw()
@_api.deprecated("3.3")
def funcright(self, val):
self.targetfig.subplots_adjust(right=val)
if self.drawon:
self.targetfig.canvas.draw()
@_api.deprecated("3.3")
def funcbottom(self, val):
self.targetfig.subplots_adjust(bottom=val)
if self.drawon:
self.targetfig.canvas.draw()
@_api.deprecated("3.3")
def functop(self, val):
self.targetfig.subplots_adjust(top=val)
if self.drawon:
self.targetfig.canvas.draw()
@_api.deprecated("3.3")
def funcwspace(self, val):
self.targetfig.subplots_adjust(wspace=val)
if self.drawon:
self.targetfig.canvas.draw()
@_api.deprecated("3.3")
def funchspace(self, val):
self.targetfig.subplots_adjust(hspace=val)
if self.drawon:
self.targetfig.canvas.draw()
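# Illustrative usage sketch (editor's addition; ``_demo_subplot_tool`` is a
# hypothetical helper): adjust one figure's subplot params from another figure.
def _demo_subplot_tool():
    import matplotlib.pyplot as plt
    fig, axs = plt.subplots(2, 2)
    toolfig = plt.figure(figsize=(6, 3))
    tool = SubplotTool(fig, toolfig)  # keep the reference alive
    plt.show()
    return tool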
class Cursor(AxesWidget):
"""
A crosshair cursor that spans the axes and moves with mouse cursor.
For the cursor to remain responsive you must keep a reference to it.
Parameters
----------
ax : `matplotlib.axes.Axes`
The `~.axes.Axes` to attach the cursor to.
horizOn : bool, default: True
Whether to draw the horizontal line.
vertOn : bool, default: True
Whether to draw the vertical line.
useblit : bool, default: False
Use blitting for faster drawing if supported by the backend.
Other Parameters
----------------
**lineprops
`.Line2D` properties that control the appearance of the lines.
See also `~.Axes.axhline`.
Examples
--------
See :doc:`/gallery/widgets/cursor`.
"""
def __init__(self, ax, horizOn=True, vertOn=True, useblit=False,
**lineprops):
super().__init__(ax)
self.connect_event('motion_notify_event', self.onmove)
self.connect_event('draw_event', self.clear)
self.visible = True
self.horizOn = horizOn
self.vertOn = vertOn
self.useblit = useblit and self.canvas.supports_blit
if self.useblit:
lineprops['animated'] = True
self.lineh = ax.axhline(ax.get_ybound()[0], visible=False, **lineprops)
self.linev = ax.axvline(ax.get_xbound()[0], visible=False, **lineprops)
self.background = None
self.needclear = False
def clear(self, event):
"""Internal event handler to clear the cursor."""
if self.ignore(event):
return
if self.useblit:
self.background = self.canvas.copy_from_bbox(self.ax.bbox)
self.linev.set_visible(False)
self.lineh.set_visible(False)
def onmove(self, event):
"""Internal event handler to draw the cursor when the mouse moves."""
if self.ignore(event):
return
if not self.canvas.widgetlock.available(self):
return
if event.inaxes != self.ax:
self.linev.set_visible(False)
self.lineh.set_visible(False)
if self.needclear:
self.canvas.draw()
self.needclear = False
return
self.needclear = True
if not self.visible:
return
self.linev.set_xdata((event.xdata, event.xdata))
self.lineh.set_ydata((event.ydata, event.ydata))
self.linev.set_visible(self.visible and self.vertOn)
self.lineh.set_visible(self.visible and self.horizOn)
self._update()
def _update(self):
if self.useblit:
if self.background is not None:
self.canvas.restore_region(self.background)
self.ax.draw_artist(self.linev)
self.ax.draw_artist(self.lineh)
self.canvas.blit(self.ax.bbox)
else:
self.canvas.draw_idle()
return False
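# Illustrative usage sketch (editor's addition; ``_demo_cursor`` is a
# hypothetical helper): extra kwargs are Line2D properties for the crosshair.
def _demo_cursor():
    import matplotlib.pyplot as plt
    import numpy as np
    fig, ax = plt.subplots()
    ax.plot(np.random.default_rng(0).random(20))
    cursor = Cursor(ax, useblit=True, color='red', linewidth=1)
    plt.show()
    return cursor  # keep the widget alive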
class MultiCursor(Widget):
"""
Provide a vertical (default) and/or horizontal line cursor shared between
multiple axes.
For the cursor to remain responsive you must keep a reference to it.
Example usage::
from matplotlib.widgets import MultiCursor
import matplotlib.pyplot as plt
import numpy as np
fig, (ax1, ax2) = plt.subplots(nrows=2, sharex=True)
t = np.arange(0.0, 2.0, 0.01)
ax1.plot(t, np.sin(2*np.pi*t))
ax2.plot(t, np.sin(4*np.pi*t))
multi = MultiCursor(fig.canvas, (ax1, ax2), color='r', lw=1,
horizOn=False, vertOn=True)
plt.show()
"""
def __init__(self, canvas, axes, useblit=True, horizOn=False, vertOn=True,
**lineprops):
self.canvas = canvas
self.axes = axes
self.horizOn = horizOn
self.vertOn = vertOn
xmin, xmax = axes[-1].get_xlim()
ymin, ymax = axes[-1].get_ylim()
xmid = 0.5 * (xmin + xmax)
ymid = 0.5 * (ymin + ymax)
self.visible = True
self.useblit = useblit and self.canvas.supports_blit
self.background = None
self.needclear = False
if self.useblit:
lineprops['animated'] = True
if vertOn:
self.vlines = [ax.axvline(xmid, visible=False, **lineprops)
for ax in axes]
else:
self.vlines = []
if horizOn:
self.hlines = [ax.axhline(ymid, visible=False, **lineprops)
for ax in axes]
else:
self.hlines = []
self.connect()
def connect(self):
"""Connect events."""
self._cidmotion = self.canvas.mpl_connect('motion_notify_event',
self.onmove)
self._ciddraw = self.canvas.mpl_connect('draw_event', self.clear)
def disconnect(self):
"""Disconnect events."""
self.canvas.mpl_disconnect(self._cidmotion)
self.canvas.mpl_disconnect(self._ciddraw)
def clear(self, event):
"""Clear the cursor."""
if self.ignore(event):
return
if self.useblit:
self.background = (
self.canvas.copy_from_bbox(self.canvas.figure.bbox))
for line in self.vlines + self.hlines:
line.set_visible(False)
def onmove(self, event):
if self.ignore(event):
return
if event.inaxes is None:
return
if not self.canvas.widgetlock.available(self):
return
self.needclear = True
if not self.visible:
return
if self.vertOn:
for line in self.vlines:
line.set_xdata((event.xdata, event.xdata))
line.set_visible(self.visible)
if self.horizOn:
for line in self.hlines:
line.set_ydata((event.ydata, event.ydata))
line.set_visible(self.visible)
self._update()
def _update(self):
if self.useblit:
if self.background is not None:
self.canvas.restore_region(self.background)
if self.vertOn:
for ax, line in zip(self.axes, self.vlines):
ax.draw_artist(line)
if self.horizOn:
for ax, line in zip(self.axes, self.hlines):
ax.draw_artist(line)
self.canvas.blit()
else:
self.canvas.draw_idle()
class _SelectorWidget(AxesWidget):
def __init__(self, ax, onselect, useblit=False, button=None,
state_modifier_keys=None):
super().__init__(ax)
self.visible = True
self.onselect = onselect
self.useblit = useblit and self.canvas.supports_blit
self.connect_default_events()
self.state_modifier_keys = dict(move=' ', clear='escape',
square='shift', center='control')
self.state_modifier_keys.update(state_modifier_keys or {})
self.background = None
self.artists = []
if isinstance(button, Integral):
self.validButtons = [button]
else:
self.validButtons = button
# will save the data (position at mouseclick)
self.eventpress = None
# will save the data (pos. at mouserelease)
self.eventrelease = None
self._prev_event = None
self.state = set()
def set_active(self, active):
super().set_active(active)
if active:
self.update_background(None)
def update_background(self, event):
"""Force an update of the background."""
# If you add a call to `ignore` here, you'll want to check edge case:
# `release` can call a draw event even when `ignore` is True.
if not self.useblit:
return
# Make sure that widget artists don't get accidentally included in the
# background, by re-rendering the background if needed (and then
# re-re-rendering the canvas with the visible widget artists).
needs_redraw = any(artist.get_visible() for artist in self.artists)
with ExitStack() as stack:
if needs_redraw:
for artist in self.artists:
stack.callback(artist.set_visible, artist.get_visible())
artist.set_visible(False)
self.canvas.draw()
self.background = self.canvas.copy_from_bbox(self.ax.bbox)
if needs_redraw:
self.update()
def connect_default_events(self):
"""Connect the major canvas events to methods."""
self.connect_event('motion_notify_event', self.onmove)
self.connect_event('button_press_event', self.press)
self.connect_event('button_release_event', self.release)
self.connect_event('draw_event', self.update_background)
self.connect_event('key_press_event', self.on_key_press)
self.connect_event('key_release_event', self.on_key_release)
self.connect_event('scroll_event', self.on_scroll)
def ignore(self, event):
# docstring inherited
if not self.active or not self.ax.get_visible():
return True
# If canvas was locked
if not self.canvas.widgetlock.available(self):
return True
if not hasattr(event, 'button'):
event.button = None
# Only do rectangle selection if event was triggered
# with a desired button
if (self.validButtons is not None
and event.button not in self.validButtons):
return True
# If no button was pressed yet ignore the event if it was out
# of the axes
if self.eventpress is None:
return event.inaxes != self.ax
# If a button was pressed, check if the release-button is the same.
if event.button == self.eventpress.button:
return False
        # Otherwise, ignore the event if it left the axes or used a different button.
return (event.inaxes != self.ax or
event.button != self.eventpress.button)
def update(self):
"""Draw using blit() or draw_idle(), depending on ``self.useblit``."""
if not self.ax.get_visible():
return False
if self.useblit:
if self.background is not None:
self.canvas.restore_region(self.background)
for artist in self.artists:
self.ax.draw_artist(artist)
self.canvas.blit(self.ax.bbox)
else:
self.canvas.draw_idle()
return False
def _get_data(self, event):
"""Get the xdata and ydata for event, with limits."""
if event.xdata is None:
return None, None
xdata = np.clip(event.xdata, *self.ax.get_xbound())
ydata = np.clip(event.ydata, *self.ax.get_ybound())
return xdata, ydata
def _clean_event(self, event):
"""
Preprocess an event:
- Replace *event* by the previous event if *event* has no ``xdata``.
- Clip ``xdata`` and ``ydata`` to the axes limits.
- Update the previous event.
"""
if event.xdata is None:
event = self._prev_event
else:
event = copy.copy(event)
event.xdata, event.ydata = self._get_data(event)
self._prev_event = event
return event
def press(self, event):
"""Button press handler and validator."""
if not self.ignore(event):
event = self._clean_event(event)
self.eventpress = event
self._prev_event = event
key = event.key or ''
key = key.replace('ctrl', 'control')
# move state is locked in on a button press
if key == self.state_modifier_keys['move']:
self.state.add('move')
self._press(event)
return True
return False
def _press(self, event):
"""Button press event handler."""
def release(self, event):
"""Button release event handler and validator."""
if not self.ignore(event) and self.eventpress:
event = self._clean_event(event)
self.eventrelease = event
self._release(event)
self.eventpress = None
self.eventrelease = None
self.state.discard('move')
return True
return False
def _release(self, event):
"""Button release event handler."""
def onmove(self, event):
"""Cursor move event handler and validator."""
if not self.ignore(event) and self.eventpress:
event = self._clean_event(event)
self._onmove(event)
return True
return False
def _onmove(self, event):
"""Cursor move event handler."""
def on_scroll(self, event):
"""Mouse scroll event handler and validator."""
if not self.ignore(event):
self._on_scroll(event)
def _on_scroll(self, event):
"""Mouse scroll event handler."""
def on_key_press(self, event):
"""Key press event handler and validator for all selection widgets."""
if self.active:
key = event.key or ''
key = key.replace('ctrl', 'control')
if key == self.state_modifier_keys['clear']:
for artist in self.artists:
artist.set_visible(False)
self.update()
return
for (state, modifier) in self.state_modifier_keys.items():
if modifier in key:
self.state.add(state)
self._on_key_press(event)
def _on_key_press(self, event):
"""Key press event handler - for widget-specific key press actions."""
def on_key_release(self, event):
"""Key release event handler and validator."""
if self.active:
key = event.key or ''
for (state, modifier) in self.state_modifier_keys.items():
if modifier in key:
self.state.discard(state)
self._on_key_release(event)
def _on_key_release(self, event):
"""Key release event handler."""
def set_visible(self, visible):
"""Set the visibility of our artists."""
self.visible = visible
for artist in self.artists:
artist.set_visible(visible)
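# Editor's note (illustrative, not part of the original module): concrete
# selectors override the private hooks (_press, _onmove, _release); the
# public press/onmove/release wrappers validate and clean the event first.
# A minimal hypothetical subclass:
class _DemoPointLogger(_SelectorWidget):
    """Toy selector reporting the press/release event pair to *onselect*."""
    def _press(self, event):
        print('pressed at', event.xdata, event.ydata)
    def _release(self, event):
        # eventpress/eventrelease were filled in by the validating wrappers.
        self.onselect(self.eventpress, self.eventrelease)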
class SpanSelector(_SelectorWidget):
"""
Visually select a min/max range on a single axis and call a function with
those values.
To guarantee that the selector remains responsive, keep a reference to it.
In order to turn off the SpanSelector, set ``span_selector.active`` to
False. To turn it back on, set it to True.
Parameters
----------
ax : `matplotlib.axes.Axes`
onselect : func(min, max), min/max are floats
direction : {"horizontal", "vertical"}
The direction along which to draw the span selector.
minspan : float, default: None
If selection is less than *minspan*, do not call *onselect*.
useblit : bool, default: False
If True, use the backend-dependent blitting features for faster
canvas updates.
rectprops : dict, default: None
Dictionary of `matplotlib.patches.Patch` properties.
onmove_callback : func(min, max), min/max are floats, default: None
Called on mouse move while the span is being selected.
span_stays : bool, default: False
If True, the span stays visible after the mouse is released.
button : `.MouseButton` or list of `.MouseButton`
The mouse buttons which activate the span selector.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> import matplotlib.widgets as mwidgets
>>> fig, ax = plt.subplots()
>>> ax.plot([1, 2, 3], [10, 50, 100])
>>> def onselect(vmin, vmax):
... print(vmin, vmax)
>>> rectprops = dict(facecolor='blue', alpha=0.5)
>>> span = mwidgets.SpanSelector(ax, onselect, 'horizontal',
... rectprops=rectprops)
>>> fig.show()
See also: :doc:`/gallery/widgets/span_selector`
"""
def __init__(self, ax, onselect, direction, minspan=None, useblit=False,
rectprops=None, onmove_callback=None, span_stays=False,
button=None):
super().__init__(ax, onselect, useblit=useblit, button=button)
if rectprops is None:
rectprops = dict(facecolor='red', alpha=0.5)
rectprops['animated'] = self.useblit
_api.check_in_list(['horizontal', 'vertical'], direction=direction)
self.direction = direction
self.rect = None
self.pressv = None
self.rectprops = rectprops
self.onmove_callback = onmove_callback
self.minspan = minspan
self.span_stays = span_stays
# Needed when dragging out of axes
self.prev = (0, 0)
# Reset canvas so that `new_axes` connects events.
self.canvas = None
self.new_axes(ax)
def new_axes(self, ax):
"""Set SpanSelector to operate on a new Axes."""
self.ax = ax
if self.canvas is not ax.figure.canvas:
if self.canvas is not None:
self.disconnect_events()
self.canvas = ax.figure.canvas
self.connect_default_events()
if self.direction == 'horizontal':
trans = ax.get_xaxis_transform()
w, h = 0, 1
else:
trans = ax.get_yaxis_transform()
w, h = 1, 0
self.rect = Rectangle((0, 0), w, h,
transform=trans,
visible=False,
**self.rectprops)
if self.span_stays:
self.stay_rect = Rectangle((0, 0), w, h,
transform=trans,
visible=False,
**self.rectprops)
self.stay_rect.set_animated(False)
self.ax.add_patch(self.stay_rect)
self.ax.add_patch(self.rect)
self.artists = [self.rect]
def ignore(self, event):
# docstring inherited
return super().ignore(event) or not self.visible
def _press(self, event):
"""Button press event handler."""
self.rect.set_visible(self.visible)
if self.span_stays:
self.stay_rect.set_visible(False)
# really force a draw so that the stay rect is not in
# the blit background
if self.useblit:
self.canvas.draw()
xdata, ydata = self._get_data(event)
if self.direction == 'horizontal':
self.pressv = xdata
else:
self.pressv = ydata
self._set_span_xy(event)
return False
def _release(self, event):
"""Button release event handler."""
if self.pressv is None:
return
self.rect.set_visible(False)
if self.span_stays:
self.stay_rect.set_x(self.rect.get_x())
self.stay_rect.set_y(self.rect.get_y())
self.stay_rect.set_width(self.rect.get_width())
self.stay_rect.set_height(self.rect.get_height())
self.stay_rect.set_visible(True)
self.canvas.draw_idle()
vmin = self.pressv
xdata, ydata = self._get_data(event)
if self.direction == 'horizontal':
vmax = xdata or self.prev[0]
else:
vmax = ydata or self.prev[1]
if vmin > vmax:
vmin, vmax = vmax, vmin
span = vmax - vmin
if self.minspan is not None and span < self.minspan:
return
self.onselect(vmin, vmax)
self.pressv = None
return False
def _onmove(self, event):
"""Motion notify event handler."""
if self.pressv is None:
return
self._set_span_xy(event)
if self.onmove_callback is not None:
vmin = self.pressv
xdata, ydata = self._get_data(event)
if self.direction == 'horizontal':
vmax = xdata or self.prev[0]
else:
vmax = ydata or self.prev[1]
if vmin > vmax:
vmin, vmax = vmax, vmin
self.onmove_callback(vmin, vmax)
self.update()
return False
def _set_span_xy(self, event):
"""Set the span coordinates."""
x, y = self._get_data(event)
if x is None:
return
self.prev = x, y
if self.direction == 'horizontal':
v = x
else:
v = y
minv, maxv = v, self.pressv
if minv > maxv:
minv, maxv = maxv, minv
if self.direction == 'horizontal':
self.rect.set_x(minv)
self.rect.set_width(maxv - minv)
else:
self.rect.set_y(minv)
self.rect.set_height(maxv - minv)
class ToolHandles:
"""
Control handles for canvas tools.
Parameters
----------
ax : `matplotlib.axes.Axes`
Matplotlib axes where tool handles are displayed.
x, y : 1D arrays
Coordinates of control handles.
marker : str
Shape of marker used to display handle. See `matplotlib.pyplot.plot`.
marker_props : dict
Additional marker properties. See `matplotlib.lines.Line2D`.
"""
def __init__(self, ax, x, y, marker='o', marker_props=None, useblit=True):
self.ax = ax
props = {'marker': marker, 'markersize': 7, 'markerfacecolor': 'w',
'linestyle': 'none', 'alpha': 0.5, 'visible': False,
'label': '_nolegend_',
**cbook.normalize_kwargs(marker_props, Line2D._alias_map)}
self._markers = Line2D(x, y, animated=useblit, **props)
self.ax.add_line(self._markers)
self.artist = self._markers
@property
def x(self):
return self._markers.get_xdata()
@property
def y(self):
return self._markers.get_ydata()
def set_data(self, pts, y=None):
"""Set x and y positions of handles."""
if y is not None:
x = pts
pts = np.array([x, y])
self._markers.set_data(pts)
def set_visible(self, val):
self._markers.set_visible(val)
def set_animated(self, val):
self._markers.set_animated(val)
def closest(self, x, y):
"""Return index and pixel distance to closest index."""
pts = np.column_stack([self.x, self.y])
# Transform data coordinates to pixel coordinates.
pts = self.ax.transData.transform(pts)
diff = pts - [x, y]
dist = np.hypot(*diff.T)
min_index = np.argmin(dist)
return min_index, dist[min_index]
class RectangleSelector(_SelectorWidget):
"""
Select a rectangular region of an axes.
For the cursor to remain responsive you must keep a reference to it.
Examples
--------
:doc:`/gallery/widgets/rectangle_selector`
"""
_shape_klass = Rectangle
def __init__(self, ax, onselect, drawtype='box',
minspanx=0, minspany=0, useblit=False,
lineprops=None, rectprops=None, spancoords='data',
button=None, maxdist=10, marker_props=None,
interactive=False, state_modifier_keys=None):
r"""
Parameters
----------
ax : `~matplotlib.axes.Axes`
The parent axes for the widget.
onselect : function
A callback function that is called after a selection is completed.
It must have the signature::
def onselect(eclick: MouseEvent, erelease: MouseEvent)
where *eclick* and *erelease* are the mouse click and release
`.MouseEvent`\s that start and complete the selection.
drawtype : {"box", "line", "none"}, default: "box"
Whether to draw the full rectangle box, the diagonal line of the
rectangle, or nothing at all.
minspanx : float, default: 0
Selections with an x-span less than *minspanx* are ignored.
minspany : float, default: 0
Selections with an y-span less than *minspany* are ignored.
useblit : bool, default: False
Whether to use blitting for faster drawing (if supported by the
backend).
lineprops : dict, optional
Properties with which the line is drawn, if ``drawtype == "line"``.
Default::
dict(color="black", linestyle="-", linewidth=2, alpha=0.5)
rectprops : dict, optional
Properties with which the rectangle is drawn, if ``drawtype ==
"box"``. Default::
dict(facecolor="red", edgecolor="black", alpha=0.2, fill=True)
spancoords : {"data", "pixels"}, default: "data"
Whether to interpret *minspanx* and *minspany* in data or in pixel
coordinates.
button : `.MouseButton`, list of `.MouseButton`, default: all buttons
Button(s) that trigger rectangle selection.
maxdist : float, default: 10
Distance in pixels within which the interactive tool handles can be
activated.
marker_props : dict
Properties with which the interactive handles are drawn. Currently
not implemented and ignored.
interactive : bool, default: False
Whether to draw a set of handles that allow interaction with the
widget after it is drawn.
state_modifier_keys : dict, optional
Keyboard modifiers which affect the widget's behavior. Values
amend the defaults.
- "move": Move the existing shape, default: no modifier.
- "clear": Clear the current shape, default: "escape".
- "square": Makes the shape square, default: "shift".
- "center": Make the initial point the center of the shape,
default: "ctrl".
"square" and "center" can be combined.
"""
super().__init__(ax, onselect, useblit=useblit, button=button,
state_modifier_keys=state_modifier_keys)
self.to_draw = None
self.visible = True
self.interactive = interactive
if drawtype == 'none': # draw a line but make it invisible
drawtype = 'line'
self.visible = False
if drawtype == 'box':
if rectprops is None:
rectprops = dict(facecolor='red', edgecolor='black',
alpha=0.2, fill=True)
rectprops['animated'] = self.useblit
self.rectprops = rectprops
self.to_draw = self._shape_klass((0, 0), 0, 1, visible=False,
**self.rectprops)
self.ax.add_patch(self.to_draw)
if drawtype == 'line':
if lineprops is None:
lineprops = dict(color='black', linestyle='-',
linewidth=2, alpha=0.5)
lineprops['animated'] = self.useblit
self.lineprops = lineprops
self.to_draw = Line2D([0, 0], [0, 0], visible=False,
**self.lineprops)
self.ax.add_line(self.to_draw)
self.minspanx = minspanx
self.minspany = minspany
_api.check_in_list(['data', 'pixels'], spancoords=spancoords)
self.spancoords = spancoords
self.drawtype = drawtype
self.maxdist = maxdist
if rectprops is None:
props = dict(markeredgecolor='r')
else:
props = dict(markeredgecolor=rectprops.get('edgecolor', 'r'))
props.update(cbook.normalize_kwargs(marker_props, Line2D._alias_map))
self._corner_order = ['NW', 'NE', 'SE', 'SW']
xc, yc = self.corners
self._corner_handles = ToolHandles(self.ax, xc, yc, marker_props=props,
useblit=self.useblit)
self._edge_order = ['W', 'N', 'E', 'S']
xe, ye = self.edge_centers
self._edge_handles = ToolHandles(self.ax, xe, ye, marker='s',
marker_props=props,
useblit=self.useblit)
xc, yc = self.center
self._center_handle = ToolHandles(self.ax, [xc], [yc], marker='s',
marker_props=props,
useblit=self.useblit)
self.active_handle = None
self.artists = [self.to_draw, self._center_handle.artist,
self._corner_handles.artist,
self._edge_handles.artist]
if not self.interactive:
self.artists = [self.to_draw]
self._extents_on_press = None
def _press(self, event):
"""Button press event handler."""
        # Make the drawn box/line visible and record the click coordinates,
        # button, etc.
if self.interactive and self.to_draw.get_visible():
self._set_active_handle(event)
else:
self.active_handle = None
if self.active_handle is None or not self.interactive:
# Clear previous rectangle before drawing new rectangle.
self.update()
if not self.interactive:
x = event.xdata
y = event.ydata
self.extents = x, x, y, y
self.set_visible(self.visible)
def _release(self, event):
"""Button release event handler."""
if not self.interactive:
self.to_draw.set_visible(False)
# update the eventpress and eventrelease with the resulting extents
x1, x2, y1, y2 = self.extents
self.eventpress.xdata = x1
self.eventpress.ydata = y1
xy1 = self.ax.transData.transform([x1, y1])
self.eventpress.x, self.eventpress.y = xy1
self.eventrelease.xdata = x2
self.eventrelease.ydata = y2
xy2 = self.ax.transData.transform([x2, y2])
self.eventrelease.x, self.eventrelease.y = xy2
# calculate dimensions of box or line
if self.spancoords == 'data':
spanx = abs(self.eventpress.xdata - self.eventrelease.xdata)
spany = abs(self.eventpress.ydata - self.eventrelease.ydata)
elif self.spancoords == 'pixels':
spanx = abs(self.eventpress.x - self.eventrelease.x)
spany = abs(self.eventpress.y - self.eventrelease.y)
else:
_api.check_in_list(['data', 'pixels'],
spancoords=self.spancoords)
# check if drawn distance (if it exists) is not too small in
# either x or y-direction
if (self.drawtype != 'none'
and (self.minspanx is not None and spanx < self.minspanx
or self.minspany is not None and spany < self.minspany)):
for artist in self.artists:
artist.set_visible(False)
self.update()
return
# call desired function
self.onselect(self.eventpress, self.eventrelease)
self.update()
return False
def _onmove(self, event):
"""Motion notify event handler."""
# resize an existing shape
if self.active_handle and self.active_handle != 'C':
x1, x2, y1, y2 = self._extents_on_press
if self.active_handle in ['E', 'W'] + self._corner_order:
x2 = event.xdata
if self.active_handle in ['N', 'S'] + self._corner_order:
y2 = event.ydata
# move existing shape
elif (('move' in self.state or self.active_handle == 'C')
and self._extents_on_press is not None):
x1, x2, y1, y2 = self._extents_on_press
dx = event.xdata - self.eventpress.xdata
dy = event.ydata - self.eventpress.ydata
x1 += dx
x2 += dx
y1 += dy
y2 += dy
# new shape
else:
center = [self.eventpress.xdata, self.eventpress.ydata]
center_pix = [self.eventpress.x, self.eventpress.y]
dx = (event.xdata - center[0]) / 2.
dy = (event.ydata - center[1]) / 2.
# square shape
if 'square' in self.state:
dx_pix = abs(event.x - center_pix[0])
dy_pix = abs(event.y - center_pix[1])
if not dx_pix:
return
maxd = max(abs(dx_pix), abs(dy_pix))
if abs(dx_pix) < maxd:
dx *= maxd / (abs(dx_pix) + 1e-6)
if abs(dy_pix) < maxd:
dy *= maxd / (abs(dy_pix) + 1e-6)
# from center
if 'center' in self.state:
dx *= 2
dy *= 2
# from corner
else:
center[0] += dx
center[1] += dy
x1, x2, y1, y2 = (center[0] - dx, center[0] + dx,
center[1] - dy, center[1] + dy)
self.extents = x1, x2, y1, y2
@property
def _rect_bbox(self):
if self.drawtype == 'box':
x0 = self.to_draw.get_x()
y0 = self.to_draw.get_y()
width = self.to_draw.get_width()
height = self.to_draw.get_height()
return x0, y0, width, height
else:
x, y = self.to_draw.get_data()
x0, x1 = min(x), max(x)
y0, y1 = min(y), max(y)
return x0, y0, x1 - x0, y1 - y0
@property
def corners(self):
"""Corners of rectangle from lower left, moving clockwise."""
x0, y0, width, height = self._rect_bbox
xc = x0, x0 + width, x0 + width, x0
yc = y0, y0, y0 + height, y0 + height
return xc, yc
@property
def edge_centers(self):
"""Midpoint of rectangle edges from left, moving anti-clockwise."""
x0, y0, width, height = self._rect_bbox
w = width / 2.
h = height / 2.
xe = x0, x0 + w, x0 + width, x0 + w
ye = y0 + h, y0, y0 + h, y0 + height
return xe, ye
@property
def center(self):
"""Center of rectangle."""
x0, y0, width, height = self._rect_bbox
return x0 + width / 2., y0 + height / 2.
@property
def extents(self):
"""Return (xmin, xmax, ymin, ymax)."""
x0, y0, width, height = self._rect_bbox
xmin, xmax = sorted([x0, x0 + width])
ymin, ymax = sorted([y0, y0 + height])
return xmin, xmax, ymin, ymax
@extents.setter
def extents(self, extents):
# Update displayed shape
self.draw_shape(extents)
# Update displayed handles
self._corner_handles.set_data(*self.corners)
self._edge_handles.set_data(*self.edge_centers)
self._center_handle.set_data(*self.center)
self.set_visible(self.visible)
self.update()
def draw_shape(self, extents):
x0, x1, y0, y1 = extents
xmin, xmax = sorted([x0, x1])
ymin, ymax = sorted([y0, y1])
xlim = sorted(self.ax.get_xlim())
ylim = sorted(self.ax.get_ylim())
xmin = max(xlim[0], xmin)
ymin = max(ylim[0], ymin)
xmax = min(xmax, xlim[1])
ymax = min(ymax, ylim[1])
if self.drawtype == 'box':
self.to_draw.set_x(xmin)
self.to_draw.set_y(ymin)
self.to_draw.set_width(xmax - xmin)
self.to_draw.set_height(ymax - ymin)
elif self.drawtype == 'line':
self.to_draw.set_data([xmin, xmax], [ymin, ymax])
def _set_active_handle(self, event):
"""Set active handle based on the location of the mouse event."""
# Note: event.xdata/ydata in data coordinates, event.x/y in pixels
c_idx, c_dist = self._corner_handles.closest(event.x, event.y)
e_idx, e_dist = self._edge_handles.closest(event.x, event.y)
m_idx, m_dist = self._center_handle.closest(event.x, event.y)
if 'move' in self.state:
self.active_handle = 'C'
self._extents_on_press = self.extents
# Set active handle as closest handle, if mouse click is close enough.
elif m_dist < self.maxdist * 2:
self.active_handle = 'C'
elif c_dist > self.maxdist and e_dist > self.maxdist:
self.active_handle = None
return
elif c_dist < e_dist:
self.active_handle = self._corner_order[c_idx]
else:
self.active_handle = self._edge_order[e_idx]
# Save coordinates of rectangle at the start of handle movement.
x1, x2, y1, y2 = self.extents
# Switch variables so that only x2 and/or y2 are updated on move.
if self.active_handle in ['W', 'SW', 'NW']:
x1, x2 = x2, event.xdata
if self.active_handle in ['N', 'NW', 'NE']:
y1, y2 = y2, event.ydata
self._extents_on_press = x1, x2, y1, y2
@property
def geometry(self):
"""
Return an array of shape (2, 5) containing the
x (``RectangleSelector.geometry[1, :]``) and
y (``RectangleSelector.geometry[0, :]``) coordinates
of the four corners of the rectangle starting and ending
in the top left corner.
"""
if hasattr(self.to_draw, 'get_verts'):
xfm = self.ax.transData.inverted()
y, x = xfm.transform(self.to_draw.get_verts()).T
return np.array([x, y])
else:
return np.array(self.to_draw.get_data())
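# A minimal usage sketch (hypothetical helper, not part of this module),
# mirroring the gallery example referenced in the RectangleSelector docstring
# above. Keep a reference to the selector so it stays responsive.
def _rectangle_selector_example():
    import matplotlib.pyplot as plt

    def onselect(eclick, erelease):
        # eclick and erelease are the press and release MouseEvents.
        print('(%.2f, %.2f) -> (%.2f, %.2f)'
              % (eclick.xdata, eclick.ydata, erelease.xdata, erelease.ydata))

    fig, ax = plt.subplots()
    ax.plot([0, 1, 2], [0, 1, 0])
    selector = RectangleSelector(ax, onselect, drawtype='box',
                                 interactive=True)
    plt.show()
    return selector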
class EllipseSelector(RectangleSelector):
"""
Select an elliptical region of an axes.
For the cursor to remain responsive you must keep a reference to it.
Example usage::
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import EllipseSelector
def onselect(eclick, erelease):
"eclick and erelease are matplotlib events at press and release."
print('startposition: (%f, %f)' % (eclick.xdata, eclick.ydata))
print('endposition : (%f, %f)' % (erelease.xdata, erelease.ydata))
print('used button : ', eclick.button)
def toggle_selector(event):
print(' Key pressed.')
if event.key in ['Q', 'q'] and toggle_selector.ES.active:
print('EllipseSelector deactivated.')
            toggle_selector.ES.set_active(False)
if event.key in ['A', 'a'] and not toggle_selector.ES.active:
print('EllipseSelector activated.')
toggle_selector.ES.set_active(True)
x = np.arange(100.) / 99
y = np.sin(x)
fig, ax = plt.subplots()
ax.plot(x, y)
toggle_selector.ES = EllipseSelector(ax, onselect, drawtype='line')
fig.canvas.mpl_connect('key_press_event', toggle_selector)
plt.show()
"""
_shape_klass = Ellipse
def draw_shape(self, extents):
x1, x2, y1, y2 = extents
xmin, xmax = sorted([x1, x2])
ymin, ymax = sorted([y1, y2])
center = [x1 + (x2 - x1) / 2., y1 + (y2 - y1) / 2.]
a = (xmax - xmin) / 2.
b = (ymax - ymin) / 2.
if self.drawtype == 'box':
self.to_draw.center = center
self.to_draw.width = 2 * a
self.to_draw.height = 2 * b
else:
rad = np.deg2rad(np.arange(31) * 12)
x = a * np.cos(rad) + center[0]
y = b * np.sin(rad) + center[1]
self.to_draw.set_data(x, y)
@property
def _rect_bbox(self):
if self.drawtype == 'box':
x, y = self.to_draw.center
width = self.to_draw.width
height = self.to_draw.height
return x - width / 2., y - height / 2., width, height
else:
x, y = self.to_draw.get_data()
x0, x1 = min(x), max(x)
y0, y1 = min(y), max(y)
return x0, y0, x1 - x0, y1 - y0
class LassoSelector(_SelectorWidget):
"""
Selection curve of an arbitrary shape.
For the selector to remain responsive you must keep a reference to it.
The selected path can be used in conjunction with `~.Path.contains_point`
to select data points from an image.
In contrast to `Lasso`, `LassoSelector` is written with an interface
similar to `RectangleSelector` and `SpanSelector`, and will continue to
interact with the axes until disconnected.
Example usage::
ax = plt.subplot()
ax.plot(x, y)
def onselect(verts):
print(verts)
lasso = LassoSelector(ax, onselect)
Parameters
----------
ax : `~matplotlib.axes.Axes`
The parent axes for the widget.
onselect : function
Whenever the lasso is released, the *onselect* function is called and
passed the vertices of the selected path.
button : `.MouseButton` or list of `.MouseButton`, optional
The mouse buttons used for rectangle selection. Default is ``None``,
which corresponds to all buttons.
"""
def __init__(self, ax, onselect=None, useblit=True, lineprops=None,
button=None):
super().__init__(ax, onselect, useblit=useblit, button=button)
self.verts = None
if lineprops is None:
lineprops = dict()
# self.useblit may be != useblit, if the canvas doesn't support blit.
lineprops.update(animated=self.useblit, visible=False)
self.line = Line2D([], [], **lineprops)
self.ax.add_line(self.line)
self.artists = [self.line]
def onpress(self, event):
self.press(event)
def _press(self, event):
self.verts = [self._get_data(event)]
self.line.set_visible(True)
def onrelease(self, event):
self.release(event)
def _release(self, event):
if self.verts is not None:
self.verts.append(self._get_data(event))
self.onselect(self.verts)
self.line.set_data([[], []])
self.line.set_visible(False)
self.verts = None
def _onmove(self, event):
if self.verts is None:
return
self.verts.append(self._get_data(event))
self.line.set_data(list(zip(*self.verts)))
self.update()
class PolygonSelector(_SelectorWidget):
"""
Select a polygon region of an axes.
Place vertices with each mouse click, and make the selection by completing
the polygon (clicking on the first vertex). Hold the *ctrl* key and click
and drag a vertex to reposition it (the *ctrl* key is not necessary if the
polygon has already been completed). Hold the *shift* key and click and
drag anywhere in the axes to move all vertices. Press the *esc* key to
start a new polygon.
For the selector to remain responsive you must keep a reference to it.
Parameters
----------
ax : `~matplotlib.axes.Axes`
The parent axes for the widget.
onselect : function
When a polygon is completed or modified after completion,
the *onselect* function is called and passed a list of the vertices as
``(xdata, ydata)`` tuples.
useblit : bool, default: False
lineprops : dict, default: \
``dict(color='k', linestyle='-', linewidth=2, alpha=0.5)``.
Artist properties for the line representing the edges of the polygon.
markerprops : dict, default: \
``dict(marker='o', markersize=7, mec='k', mfc='k', alpha=0.5)``.
Artist properties for the markers drawn at the vertices of the polygon.
vertex_select_radius : float, default: 15px
A vertex is selected (to complete the polygon or to move a vertex) if
the mouse click is within *vertex_select_radius* pixels of the vertex.
Examples
--------
:doc:`/gallery/widgets/polygon_selector_demo`
"""
def __init__(self, ax, onselect, useblit=False,
lineprops=None, markerprops=None, vertex_select_radius=15):
# The state modifiers 'move', 'square', and 'center' are expected by
# _SelectorWidget but are not supported by PolygonSelector
# Note: could not use the existing 'move' state modifier in-place of
# 'move_all' because _SelectorWidget automatically discards 'move'
# from the state on button release.
state_modifier_keys = dict(clear='escape', move_vertex='control',
move_all='shift', move='not-applicable',
square='not-applicable',
center='not-applicable')
super().__init__(ax, onselect, useblit=useblit,
state_modifier_keys=state_modifier_keys)
self._xs, self._ys = [0], [0]
self._polygon_completed = False
if lineprops is None:
lineprops = dict(color='k', linestyle='-', linewidth=2, alpha=0.5)
lineprops['animated'] = self.useblit
self.line = Line2D(self._xs, self._ys, **lineprops)
self.ax.add_line(self.line)
if markerprops is None:
markerprops = dict(markeredgecolor='k',
markerfacecolor=lineprops.get('color', 'k'))
self._polygon_handles = ToolHandles(self.ax, self._xs, self._ys,
useblit=self.useblit,
marker_props=markerprops)
self._active_handle_idx = -1
self.vertex_select_radius = vertex_select_radius
self.artists = [self.line, self._polygon_handles.artist]
self.set_visible(True)
def _press(self, event):
"""Button press event handler."""
# Check for selection of a tool handle.
if ((self._polygon_completed or 'move_vertex' in self.state)
and len(self._xs) > 0):
h_idx, h_dist = self._polygon_handles.closest(event.x, event.y)
if h_dist < self.vertex_select_radius:
self._active_handle_idx = h_idx
# Save the vertex positions at the time of the press event (needed to
# support the 'move_all' state modifier).
self._xs_at_press, self._ys_at_press = self._xs.copy(), self._ys.copy()
def _release(self, event):
"""Button release event handler."""
# Release active tool handle.
if self._active_handle_idx >= 0:
self._active_handle_idx = -1
# Complete the polygon.
elif (len(self._xs) > 3
and self._xs[-1] == self._xs[0]
and self._ys[-1] == self._ys[0]):
self._polygon_completed = True
# Place new vertex.
elif (not self._polygon_completed
and 'move_all' not in self.state
and 'move_vertex' not in self.state):
self._xs.insert(-1, event.xdata)
self._ys.insert(-1, event.ydata)
if self._polygon_completed:
self.onselect(self.verts)
def onmove(self, event):
"""Cursor move event handler and validator."""
# Method overrides _SelectorWidget.onmove because the polygon selector
# needs to process the move callback even if there is no button press.
        # _SelectorWidget.onmove includes logic to ignore the move event if
        # eventpress is None.
if not self.ignore(event):
event = self._clean_event(event)
self._onmove(event)
return True
return False
def _onmove(self, event):
"""Cursor move event handler."""
# Move the active vertex (ToolHandle).
if self._active_handle_idx >= 0:
idx = self._active_handle_idx
self._xs[idx], self._ys[idx] = event.xdata, event.ydata
# Also update the end of the polygon line if the first vertex is
# the active handle and the polygon is completed.
if idx == 0 and self._polygon_completed:
self._xs[-1], self._ys[-1] = event.xdata, event.ydata
# Move all vertices.
elif 'move_all' in self.state and self.eventpress:
dx = event.xdata - self.eventpress.xdata
dy = event.ydata - self.eventpress.ydata
for k in range(len(self._xs)):
self._xs[k] = self._xs_at_press[k] + dx
self._ys[k] = self._ys_at_press[k] + dy
# Do nothing if completed or waiting for a move.
elif (self._polygon_completed
or 'move_vertex' in self.state or 'move_all' in self.state):
return
# Position pending vertex.
else:
# Calculate distance to the start vertex.
x0, y0 = self.line.get_transform().transform((self._xs[0],
self._ys[0]))
v0_dist = np.hypot(x0 - event.x, y0 - event.y)
# Lock on to the start vertex if near it and ready to complete.
if len(self._xs) > 3 and v0_dist < self.vertex_select_radius:
self._xs[-1], self._ys[-1] = self._xs[0], self._ys[0]
else:
self._xs[-1], self._ys[-1] = event.xdata, event.ydata
self._draw_polygon()
def _on_key_press(self, event):
"""Key press event handler."""
# Remove the pending vertex if entering the 'move_vertex' or
# 'move_all' mode
if (not self._polygon_completed
and ('move_vertex' in self.state or 'move_all' in self.state)):
self._xs, self._ys = self._xs[:-1], self._ys[:-1]
self._draw_polygon()
def _on_key_release(self, event):
"""Key release event handler."""
# Add back the pending vertex if leaving the 'move_vertex' or
# 'move_all' mode (by checking the released key)
if (not self._polygon_completed
and
(event.key == self.state_modifier_keys.get('move_vertex')
or event.key == self.state_modifier_keys.get('move_all'))):
self._xs.append(event.xdata)
self._ys.append(event.ydata)
self._draw_polygon()
# Reset the polygon if the released key is the 'clear' key.
elif event.key == self.state_modifier_keys.get('clear'):
event = self._clean_event(event)
self._xs, self._ys = [event.xdata], [event.ydata]
self._polygon_completed = False
self.set_visible(True)
def _draw_polygon(self):
"""Redraw the polygon based on the new vertex positions."""
self.line.set_data(self._xs, self._ys)
# Only show one tool handle at the start and end vertex of the polygon
# if the polygon is completed or the user is locked on to the start
# vertex.
if (self._polygon_completed
or (len(self._xs) > 3
and self._xs[-1] == self._xs[0]
and self._ys[-1] == self._ys[0])):
self._polygon_handles.set_data(self._xs[:-1], self._ys[:-1])
else:
self._polygon_handles.set_data(self._xs, self._ys)
self.update()
@property
def verts(self):
"""The polygon vertices, as a list of ``(x, y)`` pairs."""
return list(zip(self._xs[:-1], self._ys[:-1]))
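# A minimal usage sketch (hypothetical helper, not part of this module): the
# onselect callback receives the completed polygon's vertices as
# (xdata, ydata) tuples, as described in the PolygonSelector docstring above.
def _polygon_selector_example():
    import matplotlib.pyplot as plt

    def onselect(verts):
        print('polygon with %d vertices: %s' % (len(verts), verts))

    fig, ax = plt.subplots()
    ax.plot([0, 1, 2], [0, 1, 0])
    selector = PolygonSelector(ax, onselect)  # keep a reference
    plt.show()
    return selector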
class Lasso(AxesWidget):
"""
Selection curve of an arbitrary shape.
The selected path can be used in conjunction with
`~matplotlib.path.Path.contains_point` to select data points from an image.
Unlike `LassoSelector`, this must be initialized with a starting
point *xy*, and the `Lasso` events are destroyed upon release.
Parameters
----------
ax : `~matplotlib.axes.Axes`
The parent axes for the widget.
xy : (float, float)
Coordinates of the start of the lasso.
callback : callable
Whenever the lasso is released, the *callback* function is called and
passed the vertices of the selected path.
"""
def __init__(self, ax, xy, callback=None, useblit=True):
super().__init__(ax)
self.useblit = useblit and self.canvas.supports_blit
if self.useblit:
self.background = self.canvas.copy_from_bbox(self.ax.bbox)
x, y = xy
self.verts = [(x, y)]
self.line = Line2D([x], [y], linestyle='-', color='black', lw=2)
self.ax.add_line(self.line)
self.callback = callback
self.connect_event('button_release_event', self.onrelease)
self.connect_event('motion_notify_event', self.onmove)
def onrelease(self, event):
if self.ignore(event):
return
if self.verts is not None:
self.verts.append((event.xdata, event.ydata))
if len(self.verts) > 2:
self.callback(self.verts)
self.ax.lines.remove(self.line)
self.verts = None
self.disconnect_events()
def onmove(self, event):
if self.ignore(event):
return
if self.verts is None:
return
if event.inaxes != self.ax:
return
if event.button != 1:
return
self.verts.append((event.xdata, event.ydata))
self.line.set_data(list(zip(*self.verts)))
if self.useblit:
self.canvas.restore_region(self.background)
self.ax.draw_artist(self.line)
self.canvas.blit(self.ax.bbox)
else:
self.canvas.draw_idle()
| 34.611352
| 79
| 0.572549
|
37fc3a04d2f4cb44469f9b2fbbc53b473a5ec26e
| 2,927
|
py
|
Python
|
scons-local/scons-local-2.3.6/SCons/Tool/pdflatex.py
|
rdadolf/msms
|
ef1dcf345234a8241f12b7952801bde9d7b406b8
|
[
"BSD-3-Clause"
] | null | null | null |
scons-local/scons-local-2.3.6/SCons/Tool/pdflatex.py
|
rdadolf/msms
|
ef1dcf345234a8241f12b7952801bde9d7b406b8
|
[
"BSD-3-Clause"
] | null | null | null |
scons-local/scons-local-2.3.6/SCons/Tool/pdflatex.py
|
rdadolf/msms
|
ef1dcf345234a8241f12b7952801bde9d7b406b8
|
[
"BSD-3-Clause"
] | null | null | null |
"""SCons.Tool.pdflatex
Tool-specific initialization for pdflatex.
Generates .pdf files from .latex or .ltx files
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001 - 2015 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/pdflatex.py rel_2.3.5:3347:d31d5a4e74b6 2015/07/31 14:36:10 bdbaddog"
import SCons.Action
import SCons.Util
import SCons.Tool.pdf
import SCons.Tool.tex
PDFLaTeXAction = None
def PDFLaTeXAuxFunction(target = None, source= None, env=None):
result = SCons.Tool.tex.InternalLaTeXAuxAction( PDFLaTeXAction, target, source, env )
if result != 0:
SCons.Tool.tex.check_file_error_message(env['PDFLATEX'])
return result
PDFLaTeXAuxAction = None
def generate(env):
"""Add Builders and construction variables for pdflatex to an Environment."""
global PDFLaTeXAction
if PDFLaTeXAction is None:
PDFLaTeXAction = SCons.Action.Action('$PDFLATEXCOM', '$PDFLATEXCOMSTR')
global PDFLaTeXAuxAction
if PDFLaTeXAuxAction is None:
PDFLaTeXAuxAction = SCons.Action.Action(PDFLaTeXAuxFunction,
strfunction=SCons.Tool.tex.TeXLaTeXStrFunction)
env.AppendUnique(LATEXSUFFIXES=SCons.Tool.LaTeXSuffixes)
    SCons.Tool.pdf.generate(env)
bld = env['BUILDERS']['PDF']
bld.add_action('.ltx', PDFLaTeXAuxAction)
bld.add_action('.latex', PDFLaTeXAuxAction)
bld.add_emitter('.ltx', SCons.Tool.tex.tex_pdf_emitter)
bld.add_emitter('.latex', SCons.Tool.tex.tex_pdf_emitter)
SCons.Tool.tex.generate_common(env)
def exists(env):
SCons.Tool.tex.generate_darwin(env)
return env.Detect('pdflatex')
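# A minimal usage sketch (hypothetical, not part of SCons): an SConstruct
# would typically request this tool explicitly and declare a PDF target.
# The file names are illustrative and pdflatex must be installed.
def _pdflatex_usage_example():
    from SCons.Environment import Environment
    env = Environment(tools=['pdflatex'])
    env.PDF(target='paper.pdf', source='paper.ltx')
    return env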
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| 34.435294
| 107
| 0.748206
|
fcff98a0b2c2533f7e12bea2f9b0e779ea278c79
| 1,936
|
py
|
Python
|
queueos/broker/Environment.py
|
ecmwf/queueos
|
aa6b4f2e683194c2b2955b47337120d594e374db
|
[
"Apache-2.0"
] | null | null | null |
queueos/broker/Environment.py
|
ecmwf/queueos
|
aa6b4f2e683194c2b2955b47337120d594e374db
|
[
"Apache-2.0"
] | null | null | null |
queueos/broker/Environment.py
|
ecmwf/queueos
|
aa6b4f2e683194c2b2955b47337120d594e374db
|
[
"Apache-2.0"
] | null | null | null |
# (C) Copyright 2021 ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation
# nor does it submit to any jurisdiction.
#
import threading
from functools import wraps
UNDEF = object()
def locked(method):
@wraps(method)
def wrapped(self, *args, **kwargs):
with self.lock:
return method(self, *args, **kwargs)
return wrapped
class Environment:
def __init__(self):
self.lock = threading.RLock()
self._enabled = {}
self._values = {}
self._observers = []
@locked
def set(self, resource, value):
self._values[resource] = value
self._notify_observers()
@locked
def get(self, resource, value=UNDEF):
if value is UNDEF:
return self._values[resource]
else:
return self._values.get(resource, value)
@locked
def resource_enabled(self, resource):
return self._enabled.get(resource, True)
@locked
def enable_resource(self, resource):
self._enabled[resource] = True
self._notify_observers()
@locked
def disable_resource(self, resource):
self._enabled[resource] = False
self._notify_observers()
@locked
def add_observer(self, observer):
self._observers.append(observer)
@locked
def remove_observer(self, observer):
self._observers.remove(observer)
def _notify_observers(self, *args, **kwargs):
def notify(o):
o.notify_environment_changed(*args, **kwargs)
for o in self._observers:
# Notify in a thread so we don't create deadlocks
            threading.Thread(target=notify, args=(o,), daemon=True).start()
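# A minimal usage sketch (hypothetical, not part of this module): observers
# implement notify_environment_changed and are notified from a daemon thread
# whenever a value or a resource flag changes.
class _PrintingObserver:
    def notify_environment_changed(self, *args, **kwargs):
        print("environment changed")

def _environment_example():
    env = Environment()
    env.add_observer(_PrintingObserver())
    env.set("workers", 4)            # triggers a notification
    assert env.get("workers") == 4
    env.disable_resource("workers")  # triggers another notification
    assert not env.resource_enabled("workers")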
| 26.520548
| 78
| 0.649793
|
beaf67ac204f8f565fa8d83f46436709e4ea063b
| 947
|
py
|
Python
|
SDKs/Aspose.Imaging-Cloud-SDK-for-Python/asposeimagingcloud/models/PsdProperties.py
|
naeem244/Aspose.Imaging-for-Cloud
|
20585a2163f34624d7a46641092444747360f3e3
|
[
"MIT"
] | null | null | null |
SDKs/Aspose.Imaging-Cloud-SDK-for-Python/asposeimagingcloud/models/PsdProperties.py
|
naeem244/Aspose.Imaging-for-Cloud
|
20585a2163f34624d7a46641092444747360f3e3
|
[
"MIT"
] | null | null | null |
SDKs/Aspose.Imaging-Cloud-SDK-for-Python/asposeimagingcloud/models/PsdProperties.py
|
naeem244/Aspose.Imaging-for-Cloud
|
20585a2163f34624d7a46641092444747360f3e3
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
class PsdProperties(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually."""
def __init__(self):
"""
Attributes:
swaggerTypes (dict): The key is attribute name and the value is attribute type.
attributeMap (dict): The key is attribute name and the value is json key in definition.
"""
self.swaggerTypes = {
'BitsPerChannel': 'int',
'ChannelsCount': 'int',
'ColorMode': 'str',
'Compression': 'str'
}
        self.attributeMap = {
            'BitsPerChannel': 'BitsPerChannel',
            'ChannelsCount': 'ChannelsCount',
            'ColorMode': 'ColorMode',
            'Compression': 'Compression'}
self.BitsPerChannel = None # int
self.ChannelsCount = None # int
self.ColorMode = None # str
self.Compression = None # str
| 31.566667
| 141
| 0.588173
|
116753dbb1c6059a34d583810f80fada81e222f1
| 908
|
py
|
Python
|
classgrade/gradapp/migrations/0024_auto_20161109_0854.py
|
classgrade/classgrade
|
144dcfc9579e6858ff4aa79835c76b9611ed73b2
|
[
"MIT"
] | 5
|
2016-11-15T17:46:27.000Z
|
2022-01-10T08:06:17.000Z
|
classgrade/gradapp/migrations/0024_auto_20161109_0854.py
|
classgrade/classgrade
|
144dcfc9579e6858ff4aa79835c76b9611ed73b2
|
[
"MIT"
] | 21
|
2016-11-07T14:58:22.000Z
|
2021-02-02T21:41:12.000Z
|
classgrade/gradapp/migrations/0024_auto_20161109_0854.py
|
classgrade/classgrade
|
144dcfc9579e6858ff4aa79835c76b9611ed73b2
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-11-09 08:54
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('gradapp', '0023_auto_20161107_1309'),
]
operations = [
migrations.AddField(
model_name='evalassignment',
name='evaluator_bis',
field=models.ForeignKey(null=True,
on_delete=django.db.models.deletion.CASCADE,
to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='evalassignment',
name='is_supereval',
field=models.BooleanField(default=False),
),
]
| 29.290323
| 80
| 0.609031
|
c7e520d169b8a598c94f1160000251515b087f1c
| 4,137
|
py
|
Python
|
packages/core/minos-microservice-networks/tests/test_networks/test_decorators/test_collectors.py
|
bhardwajRahul/minos-python
|
bad7a280ad92680abdeab01d1214688279cf6316
|
[
"MIT"
] | null | null | null |
packages/core/minos-microservice-networks/tests/test_networks/test_decorators/test_collectors.py
|
bhardwajRahul/minos-python
|
bad7a280ad92680abdeab01d1214688279cf6316
|
[
"MIT"
] | null | null | null |
packages/core/minos-microservice-networks/tests/test_networks/test_decorators/test_collectors.py
|
bhardwajRahul/minos-python
|
bad7a280ad92680abdeab01d1214688279cf6316
|
[
"MIT"
] | null | null | null |
import unittest
from minos.common import (
classname,
)
from minos.networks import (
BrokerCommandEnrouteDecorator,
BrokerEventEnrouteDecorator,
BrokerQueryEnrouteDecorator,
EnrouteCollector,
PeriodicEventEnrouteDecorator,
RestCommandEnrouteDecorator,
RestQueryEnrouteDecorator,
)
from tests.utils import (
FakeService,
FakeServiceWithGetEnroute,
)
class TestEnrouteCollector(unittest.IsolatedAsyncioTestCase):
def test_decorated_str(self):
analyzer = EnrouteCollector(classname(FakeService))
self.assertEqual(FakeService, analyzer.decorated)
def test_get_all(self):
analyzer = EnrouteCollector(FakeService)
observed = analyzer.get_all()
expected = {
"get_tickets": {BrokerQueryEnrouteDecorator("GetTickets"), RestQueryEnrouteDecorator("tickets/", "GET")},
"create_ticket": {
BrokerCommandEnrouteDecorator("CreateTicket"),
BrokerCommandEnrouteDecorator("AddTicket"),
RestCommandEnrouteDecorator("orders/", "GET"),
},
"ticket_added": {BrokerEventEnrouteDecorator("TicketAdded")},
"delete_ticket": {
BrokerCommandEnrouteDecorator("DeleteTicket"),
RestCommandEnrouteDecorator("orders/", "DELETE"),
},
"send_newsletter": {PeriodicEventEnrouteDecorator("@daily")},
"check_inactive_users": {PeriodicEventEnrouteDecorator("@daily")},
}
self.assertEqual(expected, observed)
def test_get_rest_command_query(self):
analyzer = EnrouteCollector(FakeService)
observed = analyzer.get_rest_command_query()
expected = {
"get_tickets": {RestQueryEnrouteDecorator("tickets/", "GET")},
"create_ticket": {RestCommandEnrouteDecorator("orders/", "GET")},
"delete_ticket": {RestCommandEnrouteDecorator("orders/", "DELETE")},
}
self.assertEqual(expected, observed)
def test_get_broker_command_query_event(self):
analyzer = EnrouteCollector(FakeService)
observed = analyzer.get_broker_command_query_event()
expected = {
"get_tickets": {BrokerQueryEnrouteDecorator("GetTickets")},
"create_ticket": {
BrokerCommandEnrouteDecorator("CreateTicket"),
BrokerCommandEnrouteDecorator("AddTicket"),
},
"delete_ticket": {BrokerCommandEnrouteDecorator("DeleteTicket")},
"ticket_added": {BrokerEventEnrouteDecorator("TicketAdded")},
}
self.assertEqual(expected, observed)
def test_get_broker_command_query(self):
analyzer = EnrouteCollector(FakeService)
observed = analyzer.get_broker_command_query()
expected = {
"get_tickets": {BrokerQueryEnrouteDecorator("GetTickets")},
"create_ticket": {
BrokerCommandEnrouteDecorator("CreateTicket"),
BrokerCommandEnrouteDecorator("AddTicket"),
},
"delete_ticket": {BrokerCommandEnrouteDecorator("DeleteTicket")},
}
self.assertEqual(expected, observed)
def test_get_broker_event(self):
analyzer = EnrouteCollector(FakeService)
observed = analyzer.get_broker_event()
expected = {"ticket_added": {BrokerEventEnrouteDecorator("TicketAdded")}}
self.assertEqual(expected, observed)
def test_get_periodic_event(self):
analyzer = EnrouteCollector(FakeService)
observed = analyzer.get_periodic_event()
expected = {
"send_newsletter": {PeriodicEventEnrouteDecorator("@daily")},
"check_inactive_users": {PeriodicEventEnrouteDecorator("@daily")},
}
self.assertEqual(expected, observed)
def test_with_get_enroute(self):
analyzer = EnrouteCollector(FakeServiceWithGetEnroute)
observed = analyzer.get_all()
expected = {"create_foo": {BrokerCommandEnrouteDecorator("CreateFoo")}}
self.assertEqual(expected, observed)
if __name__ == "__main__":
unittest.main()
| 34.190083
| 117
| 0.659657
|
c53d68c3be4ece4d51008e001e3a84d227ee3e2c
| 454
|
py
|
Python
|
app/common/cli_tools/db_operation.py
|
MU-Software/bca_backend
|
282db3908ee0b2c95c5e6bd6d6458077e941f343
|
[
"MIT"
] | 6
|
2021-08-20T01:35:01.000Z
|
2021-12-22T18:23:17.000Z
|
app/common/cli_tools/db_operation.py
|
MU-Software/bca_backend
|
282db3908ee0b2c95c5e6bd6d6458077e941f343
|
[
"MIT"
] | null | null | null |
app/common/cli_tools/db_operation.py
|
MU-Software/bca_backend
|
282db3908ee0b2c95c5e6bd6d6458077e941f343
|
[
"MIT"
] | null | null | null |
import click
import flask
import flask.cli
import app.database
@click.command('drop-db')
@flask.cli.with_appcontext
def drop_db():
try:
if flask.current_app.config.get('RESTAPI_VERSION', 'prod') != 'dev':
print('Cannot drop DB: RESTAPI_VERSION is not \'dev\'')
return
app.database.db.drop_all()
print('Successfully dropped DB')
except Exception:
print('Error raised while dropping DB')
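# A minimal usage sketch (hypothetical; this command is assumed to be
# registered on the Flask app elsewhere in this repo). The drop only
# proceeds when the app config sets RESTAPI_VERSION to 'dev':
#
#   $ flask drop-db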
| 22.7
| 76
| 0.64978
|
798879e50453c4f0865df6189bd23f070a296a0c
| 255
|
py
|
Python
|
PyPrograms/tkinter/box/box.py
|
JymPatel/Python
|
6c643339ce12d26071336c87a88e94550cb13ea8
|
[
"MIT"
] | null | null | null |
PyPrograms/tkinter/box/box.py
|
JymPatel/Python
|
6c643339ce12d26071336c87a88e94550cb13ea8
|
[
"MIT"
] | null | null | null |
PyPrograms/tkinter/box/box.py
|
JymPatel/Python
|
6c643339ce12d26071336c87a88e94550cb13ea8
|
[
"MIT"
] | null | null | null |
# This program uses the GPL V3 Licence. Read LICENCE to see it.
# By @bupboi1337
# Editors can put their names down here:
#
import tkinter # Import Tkinter, the library used to make a GUI.
tk = tkinter.Tk() # Initialize a GUI window
tk.mainloop() # Run the GUI event loop
| 31.875
| 65
| 0.709804
|
8e1aaabb90e682b144b34afa8b1d42cde52899f2
| 2,668
|
py
|
Python
|
tendermint/utils.py
|
davebryson/py-tendermint
|
ec6a38a54950d9841759b0f2ed93659b58948a03
|
[
"Apache-2.0"
] | 24
|
2017-08-18T20:36:27.000Z
|
2020-03-27T08:55:39.000Z
|
tendermint/utils.py
|
davebryson/py-tendermint
|
ec6a38a54950d9841759b0f2ed93659b58948a03
|
[
"Apache-2.0"
] | 6
|
2017-10-14T05:50:34.000Z
|
2019-06-03T08:39:49.000Z
|
tendermint/utils.py
|
davebryson/py-tendermint
|
ec6a38a54950d9841759b0f2ed93659b58948a03
|
[
"Apache-2.0"
] | 5
|
2018-01-09T11:07:06.000Z
|
2019-06-02T14:34:34.000Z
|
import os
import os.path
from pathlib import Path
import collections.abc
from math import ceil
from sha3 import keccak_256
from rlp.utils import decode_hex, encode_hex
def home_dir(*paths):
"""
Create a path to dirs/file in OS home dir
Ex: home_dir('temp', 'ex.txt') is:
~/temp/ex.txt
"""
home = str(Path.home())
return os.path.join(home,*paths)
def is_integer(value):
return isinstance(value, int) and not isinstance(value, bool)
def is_bytes(value):
return isinstance(value, (bytes, bytearray))
def is_string(value):
return isinstance(value, (str,bytes, bytearray))
def is_text(value):
return isinstance(value, str)
def is_boolean(value):
return isinstance(value, bool)
def is_dict(obj):
    return isinstance(obj, collections.abc.Mapping)
def is_list_like(obj):
    return not is_string(obj) and isinstance(obj, collections.abc.Sequence)
def force_text(value):
if is_string(value):
return value
elif is_bytes(value):
return bytes_to_str(value)
else:
raise TypeError("Unsupported type: {0}".format(type(value)))
def obj_to_bytes(obj):
if is_string(obj):
return str_to_bytes(obj)
elif is_dict(obj):
return {
k: obj_to_bytes(v) for k, v in obj.items()
}
elif is_list_like(obj):
return type(obj)(obj_to_bytes(v) for v in obj)
else:
return obj
def obj_to_str(obj):
if is_string(obj):
return bytes_to_str(obj)
elif is_dict(obj):
return {
k: obj_to_str(v) for k, v in obj.items()
}
elif is_list_like(obj):
return type(obj)(obj_to_str(v) for v in obj)
else:
return obj
def int_to_big_endian(value):
byte_length = max(ceil(value.bit_length() / 8), 1)
return (value).to_bytes(byte_length, byteorder='big')
def big_endian_to_int(value):
return int.from_bytes(value, byteorder='big')
def str_to_bytes(data):
if isinstance(data, str):
return data.encode('utf-8')
return data
def bytes_to_str(value):
if isinstance(value, str):
return value
return value.decode('utf-8')
def remove_0x_head(s):
return s[2:] if s[:2] in (b'0x', '0x') else s
def is_hex(s):
return (isinstance(s, str) and s[:2] == '0x')
def to_hex(value):
return '0x' + encode_hex(value)
def from_hex(value):
v = remove_0x_head(value)
return decode_hex(v)
def keccak(value):
value = str_to_bytes(value)
return keccak_256(value).digest()
assert keccak(b'') == b"\xc5\xd2F\x01\x86\xf7#<\x92~}\xb2\xdc\xc7\x03\xc0\xe5\x00\xb6S\xca\x82';{\xfa\xd8\x04]\x85\xa4p", "Incorrect sha3. Make sure it's keccak" # noqa: E501
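# A few illustrative round trips (hypothetical, not part of this module):
def _utils_examples():
    # big-endian integer encoding and decoding are inverses
    assert int_to_big_endian(1024) == b'\x04\x00'
    assert big_endian_to_int(b'\x04\x00') == 1024
    # the hex helpers strip the 0x prefix before decoding
    assert remove_0x_head('0xdead') == 'dead'
    assert from_hex('0x01ff') == b'\x01\xff'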
| 24.934579
| 176
| 0.657421
|
bedd1ea3528d1d139acf9ebd2d00bf77bd600c1b
| 5,488
|
py
|
Python
|
dkb2homebank.py
|
stefreschke/dkb2homebank
|
8446837d558bbbcd84fa38629ef91ac944b5bf40
|
[
"MIT"
] | 1
|
2020-11-30T10:36:16.000Z
|
2020-11-30T10:36:16.000Z
|
dkb2homebank.py
|
stefreschke/dkb2homebank
|
8446837d558bbbcd84fa38629ef91ac944b5bf40
|
[
"MIT"
] | null | null | null |
dkb2homebank.py
|
stefreschke/dkb2homebank
|
8446837d558bbbcd84fa38629ef91ac944b5bf40
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python3
import argparse
import csv
from datetime import datetime
class DKB(csv.Dialect):
delimiter = ';'
quotechar = '"'
doublequote = True
skipinitialspace = False
lineterminator = '\r\n'
quoting = csv.QUOTE_MINIMAL
csv.register_dialect("dkb", DKB)
dkb_field_names = ["buchungstag",
"wertstellung",
"buchungstext",
"beguenstigter",
"verwendungszweck",
"kontonummer",
"blz",
"betrag",
"glaeubigerID",
"mandatsreferenz",
"kundenreferenz"]
visa_field_names = ["abgerechnet",
"wertstellung",
"belegdatum",
"umsatzbeschreibung",
"betrag",
"urspruenglicherBetrag"]
homebank_field_names = ["date",
"paymode",
"info",
"payee",
"memo",
"amount",
"category",
"tags"]
def convert_DKB_cash(filename, output_file="cashHomebank.csv"):
"""
Write a CSV with output consumable by Homebank's import functionality.
:param filename: the input file path as a string
:param output_file: the output file path as a string
"""
with open(filename, 'r', encoding='iso-8859-1') as csvfile:
dialect = csv.Sniffer().sniff(csvfile.read(1024))
csvfile.seek(0)
reader = csv.DictReader(find_transaction_lines(csvfile), dialect=dialect, fieldnames=dkb_field_names)
with open(output_file, 'w') as outfile:
writer = csv.DictWriter(outfile, dialect='dkb', fieldnames=homebank_field_names)
for row in reader:
writer.writerow(
{
'date': convert_date(row["buchungstag"]),
'paymode': 8,
'info': None,
'payee': row["beguenstigter"],
'memo': row["verwendungszweck"],
'amount': row["betrag"],
'category': None,
'tags': None
})
def convert_visa(filename, output_file="visaHomebank.csv"):
"""
Convert a DKB visa file to a homebank-readable import CSV.
:param filename: Path to the file to be converted
"""
with open(filename, 'r', encoding='iso-8859-1') as csvfile:
dialect = csv.Sniffer().sniff(csvfile.read(1024))
csvfile.seek(0)
reader = csv.DictReader(find_transaction_lines(csvfile), dialect=dialect, fieldnames=visa_field_names)
with open(output_file, 'w') as outfile:
writer = csv.DictWriter(outfile, dialect='dkb', fieldnames=homebank_field_names)
for row in reader:
writer.writerow(
{
'date': convert_date(row["wertstellung"]),
'paymode': 1,
'info': None,
'payee': None,
'memo': row["umsatzbeschreibung"],
'amount': row["betrag"],
'category': None,
'tags': None
})
def find_transaction_lines(file):
"""
Reduce the csv lines to the lines containing actual data relevant for the conversion.
:param file: The export CSV from DKB to be converted
:return: The lines containing the actual transaction data
"""
lines = file.readlines()
i = 1
for line in lines:
# simple heuristic to find the csv header line. Both these strings
# appear in headers of the cash and visa CSVs.
if "Betrag" in line and "Wertstellung" in line:
return lines[i:]
i = i + 1
raise ValueError("Can't convert CSV file without header line")
def convert_date(date_string):
"""Convert the date_string to dd-mm-YYYY format."""
date = datetime.strptime(date_string, "%d.%m.%Y")
return date.strftime('%d-%m-%Y')
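# A quick illustration of the conversion above (hypothetical example value,
# not taken from a real DKB export):
def _convert_date_example():
    assert convert_date("09.11.2016") == "09-11-2016"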
def setup_parser():
parser = argparse.ArgumentParser(description=
"Convert a CSV export file from DKB online banking "
"to a Homebank compatible CSV format.")
parser.add_argument("filename", help="The CSV file to convert.")
group = parser.add_mutually_exclusive_group()
group.add_argument("-v", "--visa", action="store_true", help="convert a DKB Visa account CSV file")
group.add_argument("-c", "--cash", action="store_true", help="convert a DKB Cash account CSV file")
    parser.add_argument('-o', '--output-file', help='choose where to store the output file (default: working directory)')
return parser.parse_args()
def main():
args = setup_parser()
if args.visa:
output = args.output_file or "visaHomebank.csv"
convert_visa(args.filename, output)
print(f"DKB Visa file converted. Output file: {output}")
elif args.cash:
output = args.output_file or "cashHomebank.csv"
convert_DKB_cash(args.filename, output)
print(f"DKB Cash file converted. Output file: {output}")
else:
print("You must provide the type of the CSV file (--cash for DKB Cash, --visa for DKB Visa)")
if __name__ == '__main__':
main()
| 33.876543
| 120
| 0.550656
|
bf6efabc69e80936539d2346388c51cd3909920d
| 753
|
py
|
Python
|
nbextensions/magics/__init__.py
|
agilestacks/kubeflow-extensions
|
92b7575c21af00dcb2a42c001c802ebed8d128dd
|
[
"Apache-2.0"
] | 10
|
2019-12-27T08:51:44.000Z
|
2020-12-07T01:33:13.000Z
|
nbextensions/magics/__init__.py
|
agilestacks/kubeflow-extensions
|
92b7575c21af00dcb2a42c001c802ebed8d128dd
|
[
"Apache-2.0"
] | 1
|
2019-12-23T08:24:00.000Z
|
2019-12-23T12:36:40.000Z
|
nbextensions/magics/__init__.py
|
agilestacks/kubeflow-extensions
|
92b7575c21af00dcb2a42c001c802ebed8d128dd
|
[
"Apache-2.0"
] | 4
|
2019-09-12T03:23:17.000Z
|
2020-10-23T09:37:32.000Z
|
from .templates import TemplateMagics
from .argo import ArgoMagics
from .nbvars import load_nbvars, NBVarsMagics
_loaded = False
def load_ipython_extension(ipython, **kwargs):
global _loaded
if not _loaded:
ipython.register_magics(TemplateMagics(ipython, **kwargs))
ipython.register_magics(ArgoMagics(ipython, **kwargs))
ipython.register_magics(NBVarsMagics(ipython, **kwargs))
_loaded = True
def unload_ipython_extension(ipython):
global _loaded
if _loaded:
        ipython.magics_manager.registry.pop('TemplateMagics')
        ipython.magics_manager.registry.pop('ArgoMagics')
        ipython.magics_manager.registry.pop('NBVarsMagics')
_loaded = False
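# A minimal usage sketch (hypothetical; the actual extension name depends on
# how this package is installed and exposed to IPython):
#
#   In [1]: %load_ext magics      # calls load_ipython_extension above
#   In [2]: %unload_ext magics    # calls unload_ipython_extension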
| 34.227273
| 74
| 0.714475
|
4daab5333861b95a11894f01194825743c90da5b
| 276
|
py
|
Python
|
#3 Mundo/#17/78.py
|
Henrique-Navarro/phyton
|
26d66847afa2b15c254677a36eb22f7558816b59
|
[
"MIT"
] | null | null | null |
#3 Mundo/#17/78.py
|
Henrique-Navarro/phyton
|
26d66847afa2b15c254677a36eb22f7558816b59
|
[
"MIT"
] | null | null | null |
#3 Mundo/#17/78.py
|
Henrique-Navarro/phyton
|
26d66847afa2b15c254677a36eb22f7558816b59
|
[
"MIT"
] | null | null | null |
lista = list()
for c in range(0, 5):
    lista.append(int(input('Enter a value: ')))
print(f'Largest: \033[32m{max(lista)}\033[m at position: \033[34m{lista.index(max(lista))}\033[m')
print(f'Smallest: \033[32m{min(lista)}\033[m at position: \033[34m{lista.index(min(lista))}\033[m')
| 55.2
| 95
| 0.673913
|
edbea3fd550f416c0e3b07301c447f785492598c
| 2,047
|
py
|
Python
|
explorer/forms.py
|
drummonds/django-sql-explorer
|
0c2c642008fa04a309edfa07011d2238e57d4e64
|
[
"MIT"
] | null | null | null |
explorer/forms.py
|
drummonds/django-sql-explorer
|
0c2c642008fa04a309edfa07011d2238e57d4e64
|
[
"MIT"
] | null | null | null |
explorer/forms.py
|
drummonds/django-sql-explorer
|
0c2c642008fa04a309edfa07011d2238e57d4e64
|
[
"MIT"
] | 1
|
2018-04-19T20:52:23.000Z
|
2018-04-19T20:52:23.000Z
|
from django.db import DatabaseError
from django.forms import ModelForm, Field, ValidationError, BooleanField, CharField
from django.forms.widgets import CheckboxInput, Select
from explorer import app_settings
from explorer.models import Query, MSG_FAILED_BLACKLIST
class SqlField(Field):
def validate(self, value):
"""
Ensure that the SQL passes the blacklist.
:param value: The SQL for this Query model.
"""
query = Query(sql=value)
passes_blacklist, failing_words = query.passes_blacklist()
error = MSG_FAILED_BLACKLIST % ', '.join(failing_words) if not passes_blacklist else None
if error:
raise ValidationError(
error,
code="InvalidSql"
)
class QueryForm(ModelForm):
sql = SqlField()
snapshot = BooleanField(widget=CheckboxInput, required=False)
connection = CharField(widget=Select, required=False)
def __init__(self, *args, **kwargs):
super(QueryForm, self).__init__(*args, **kwargs)
self.fields['connection'].widget.choices = self.connections
if not self.instance.connection:
self.initial['connection'] = app_settings.EXPLORER_DEFAULT_CONNECTION
self.fields['connection'].widget.attrs['class'] = 'form-control'
def clean(self):
if self.instance and self.data.get('created_by_user', None):
self.cleaned_data['created_by_user'] = self.instance.created_by_user
return super(QueryForm, self).clean()
@property
def created_by_user_email(self):
return self.instance.created_by_user.email if self.instance.created_by_user else '--'
@property
def created_at_time(self):
return self.instance.created_at.strftime('%Y-%m-%d')
@property
def connections(self):
return zip(app_settings.EXPLORER_CONNECTIONS.values(), app_settings.EXPLORER_CONNECTIONS.keys())
class Meta:
model = Query
fields = ['title', 'sql', 'description', 'snapshot', 'connection']
| 31.984375
| 104
| 0.676111
|
ed6180477adc1ac799e6a1f117e19b0833547171
| 13,887
|
py
|
Python
|
utils/box_utils.py
|
GuoQuanhao/RetinaFace-Paddle
|
5f61a952109969a921e0486d6f565036e6c1fe8c
|
[
"MIT"
] | 2
|
2021-09-30T11:50:38.000Z
|
2021-10-11T06:45:14.000Z
|
utils/box_utils.py
|
GuoQuanhao/RetinaFace-Paddle
|
5f61a952109969a921e0486d6f565036e6c1fe8c
|
[
"MIT"
] | null | null | null |
utils/box_utils.py
|
GuoQuanhao/RetinaFace-Paddle
|
5f61a952109969a921e0486d6f565036e6c1fe8c
|
[
"MIT"
] | null | null | null |
import paddle
import numpy as np
def index_fill(input, index, update):
'''
    Reimplement the Tensor.index_fill method.
    Intended only for this repo; not for general use.
'''
for i in range(len(index)):
input[index[i]] = update
return input
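# A quick illustration (hypothetical, not part of the original file):
# index_fill writes `update` into `input` at every position listed in `index`.
def _index_fill_example():
    data = [0, 0, 0, 0]
    assert index_fill(data, [1, 3], 9) == [0, 9, 0, 9]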
def point_form(boxes):
""" Convert prior_boxes to (xmin, ymin, xmax, ymax)
representation for comparison to point form ground truth data.
Args:
boxes: (tensor) center-size default boxes from priorbox layers.
Return:
boxes: (tensor) Converted xmin, ymin, xmax, ymax form of boxes.
"""
return paddle.concat((boxes[:, :2] - boxes[:, 2:]/2, # xmin, ymin
boxes[:, :2] + boxes[:, 2:]/2), 1) # xmax, ymax
def center_size(boxes):
""" Convert prior_boxes to (cx, cy, w, h)
representation for comparison to center-size form ground truth data.
Args:
boxes: (tensor) point_form boxes
Return:
        boxes: (tensor) Converted cx, cy, w, h form of boxes.
"""
    return paddle.concat(((boxes[:, 2:] + boxes[:, :2]) / 2,  # cx, cy
                          boxes[:, 2:] - boxes[:, :2]), 1)    # w, h
def intersect(box_a, box_b):
""" We resize both tensors to [A,B,2] without new malloc:
[A,2] -> [A,1,2] -> [A,B,2]
[B,2] -> [1,B,2] -> [A,B,2]
Then we compute the area of intersect between box_a and box_b.
Args:
box_a: (tensor) bounding boxes, Shape: [A,4].
box_b: (tensor) bounding boxes, Shape: [B,4].
Return:
(tensor) intersection area, Shape: [A,B].
"""
A = box_a.shape[0]
B = box_b.shape[0]
max_xy = paddle.minimum(box_a[:, 2:].unsqueeze(1).expand([A, B, 2]),
box_b[:, 2:].unsqueeze(0).expand([A, B, 2]))
min_xy = paddle.maximum(box_a[:, :2].unsqueeze(1).expand([A, B, 2]),
box_b[:, :2].unsqueeze(0).expand([A, B, 2]))
inter = paddle.clip(max_xy - min_xy, min=0)
return inter[:, :, 0] * inter[:, :, 1]
def jaccard(box_a, box_b):
"""Compute the jaccard overlap of two sets of boxes. The jaccard overlap
is simply the intersection over union of two boxes. Here we operate on
ground truth boxes and default boxes.
E.g.:
A ∩ B / A ∪ B = A ∩ B / (area(A) + area(B) - A ∩ B)
Args:
box_a: (tensor) Ground truth bounding boxes, Shape: [num_objects,4]
box_b: (tensor) Prior boxes from priorbox layers, Shape: [num_priors,4]
Return:
jaccard overlap: (tensor) Shape: [box_a.shape[0], box_b.shape[0]]
"""
inter = intersect(box_a, box_b)
area_a = ((box_a[:, 2]-box_a[:, 0]) *
(box_a[:, 3]-box_a[:, 1])).unsqueeze(1).expand_as(inter) # [A,B]
area_b = ((box_b[:, 2]-box_b[:, 0]) *
(box_b[:, 3]-box_b[:, 1])).unsqueeze(0).expand_as(inter) # [A,B]
union = area_a + area_b - inter
return inter / union # [A,B]
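# A quick sanity check (hypothetical, not part of the original file): two
# unit squares overlapping by half have IoU = 0.5 / (1 + 1 - 0.5) = 1/3.
def _jaccard_example():
    a = paddle.to_tensor([[0.0, 0.0, 1.0, 1.0]])
    b = paddle.to_tensor([[0.5, 0.0, 1.5, 1.0]])
    return jaccard(a, b)  # tensor [[0.3333...]]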
def matrix_iou(a, b):
"""
    return iou of a and b, numpy version for data augmentation
"""
lt = np.maximum(a[:, np.newaxis, :2], b[:, :2])
rb = np.minimum(a[:, np.newaxis, 2:], b[:, 2:])
area_i = np.prod(rb - lt, axis=2) * (lt < rb).all(axis=2)
area_a = np.prod(a[:, 2:] - a[:, :2], axis=1)
area_b = np.prod(b[:, 2:] - b[:, :2], axis=1)
return area_i / (area_a[:, np.newaxis] + area_b - area_i)
def matrix_iof(a, b):
"""
    return iof of a and b, numpy version for data augmentation
"""
lt = np.maximum(a[:, np.newaxis, :2], b[:, :2])
rb = np.minimum(a[:, np.newaxis, 2:], b[:, 2:])
area_i = np.prod(rb - lt, axis=2) * (lt < rb).all(axis=2)
area_a = np.prod(a[:, 2:] - a[:, :2], axis=1)
return area_i / np.maximum(area_a[:, np.newaxis], 1)
def match(threshold, truths, priors, variances, labels, landms, loc_t, conf_t, landm_t, idx):
"""Match each prior box with the ground truth box of the highest jaccard
overlap, encode the bounding boxes, then return the matched indices
corresponding to both confidence and location preds.
Args:
        threshold: (float) The overlap threshold used when matching boxes.
truths: (tensor) Ground truth boxes, Shape: [num_obj, 4].
priors: (tensor) Prior boxes from priorbox layers, Shape: [n_priors,4].
variances: (tensor) Variances corresponding to each prior coord,
Shape: [num_priors, 4].
labels: (tensor) All the class labels for the image, Shape: [num_obj].
landms: (tensor) Ground truth landms, Shape [num_obj, 10].
loc_t: (tensor) Tensor to be filled w/ endcoded location targets.
conf_t: (tensor) Tensor to be filled w/ matched indices for conf preds.
landm_t: (tensor) Tensor to be filled w/ endcoded landm targets.
idx: (int) current batch index
Return:
The matched indices corresponding to 1)location 2)confidence 3)landm preds.
"""
# jaccard index
overlaps = jaccard(
truths,
point_form(priors)
)
# (Bipartite Matching)
# [1,num_objects] best prior for each ground truth
best_prior_overlap, best_prior_idx = overlaps.max(1, keepdim=True), overlaps.argmax(1, keepdim=True)
# ignore hard gt
valid_gt_idx = best_prior_overlap[:, 0] >= 0.2
best_prior_idx_filter = best_prior_idx.masked_select(valid_gt_idx.unsqueeze(1)).unsqueeze(1)
if best_prior_idx_filter.shape[0] <= 0:
loc_t[idx] = 0
conf_t[idx] = 0
return
# [1,num_priors] best ground truth for each prior
best_truth_overlap, best_truth_idx = overlaps.max(0, keepdim=True), overlaps.argmax(0, keepdim=True)
best_truth_idx = best_truth_idx.squeeze(0)
best_truth_overlap = best_truth_overlap.squeeze(0)
best_prior_idx = best_prior_idx.squeeze(1)
best_prior_idx_filter = best_prior_idx_filter.squeeze(1)
best_prior_overlap = best_prior_overlap.squeeze(1)
best_truth_overlap = index_fill(best_truth_overlap, best_prior_idx_filter, 2) # ensure best prior
# TODO refactor: index best_prior_idx with long tensor
# ensure every gt matches with its prior of max overlap
    for j in range(best_prior_idx.shape[0]):  # decide which ground-truth box each anchor predicts
        best_truth_idx[best_prior_idx[j]] = j
    matches = paddle.to_tensor(truths.numpy()[best_truth_idx.numpy()])  # Shape: [num_priors,4]; gather the matched bbox for every anchor
    conf = paddle.to_tensor(labels.numpy()[best_truth_idx.numpy()])  # Shape: [num_priors]; gather the matched label for every anchor
    temp_conf = conf.numpy()
    temp_conf[(best_truth_overlap < threshold).numpy()] = 0  # label as background: anchors with overlap < threshold become negatives
conf = paddle.to_tensor(temp_conf).astype('int32')
loc = encode(matches, priors, variances)
matches_landm = paddle.to_tensor(landms.numpy()[best_truth_idx.numpy()])
landm = encode_landm(matches_landm, priors, variances)
loc_t[idx] = loc # [num_priors,4] encoded offsets to learn
conf_t[idx] = conf # [num_priors] top class label for each prior
landm_t[idx] = landm
def encode(matched, priors, variances):
"""Encode the variances from the priorbox layers into the ground truth boxes
we have matched (based on jaccard overlap) with the prior boxes.
Args:
matched: (tensor) Coords of ground truth for each prior in point-form
Shape: [num_priors, 4].
priors: (tensor) Prior boxes in center-offset form
Shape: [num_priors,4].
variances: (list[float]) Variances of priorboxes
Return:
encoded boxes (tensor), Shape: [num_priors, 4]
"""
# dist b/t match center and prior's center
g_cxcy = (matched[:, :2] + matched[:, 2:])/2 - priors[:, :2]
# encode variance
g_cxcy /= (variances[0] * priors[:, 2:])
# match wh / prior wh
g_wh = (matched[:, 2:] - matched[:, :2]) / priors[:, 2:]
g_wh = paddle.log(g_wh) / variances[1]
# return target for smooth_l1_loss
return paddle.concat([g_cxcy, g_wh], 1) # [num_priors,4]
def encode_landm(matched, priors, variances):
"""Encode the variances from the priorbox layers into the ground truth boxes
we have matched (based on jaccard overlap) with the prior boxes.
Args:
matched: (tensor) Coords of ground truth for each prior in point-form
Shape: [num_priors, 10].
priors: (tensor) Prior boxes in center-offset form
Shape: [num_priors,4].
variances: (list[float]) Variances of priorboxes
Return:
encoded landm (tensor), Shape: [num_priors, 10]
"""
# dist b/t match center and prior's center
matched = paddle.reshape(matched, [matched.shape[0], 5, 2])
priors_cx = priors[:, 0].unsqueeze(1).expand([matched.shape[0], 5]).unsqueeze(2)
priors_cy = priors[:, 1].unsqueeze(1).expand([matched.shape[0], 5]).unsqueeze(2)
priors_w = priors[:, 2].unsqueeze(1).expand([matched.shape[0], 5]).unsqueeze(2)
priors_h = priors[:, 3].unsqueeze(1).expand([matched.shape[0], 5]).unsqueeze(2)
priors = paddle.concat([priors_cx, priors_cy, priors_w, priors_h], axis=2)
g_cxcy = matched[:, :, :2] - priors[:, :, :2]
# encode variance
g_cxcy /= (variances[0] * priors[:, :, 2:])
# g_cxcy /= priors[:, :, 2:]
g_cxcy = g_cxcy.reshape([g_cxcy.shape[0], -1])
# return target for smooth_l1_loss
return g_cxcy
# Adapted from https://github.com/Hakuyume/chainer-ssd
def decode(loc, priors, variances):
"""Decode locations from predictions using priors to undo
the encoding we did for offset regression at train time.
Args:
loc (tensor): location predictions for loc layers,
Shape: [num_priors,4]
priors (tensor): Prior boxes in center-offset form.
Shape: [num_priors,4].
variances: (list[float]) Variances of priorboxes
Return:
decoded bounding box predictions
"""
boxes = paddle.concat((
priors[:, :2] + loc[:, :2] * variances[0] * priors[:, 2:],
priors[:, 2:] * paddle.exp(loc[:, 2:] * variances[1])), 1)
boxes[:, :2] -= boxes[:, 2:] / 2
boxes[:, 2:] += boxes[:, :2]
return boxes
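# Hedged sanity-check sketch (not part of the original module): for matching
# variances, decode() should invert encode(); the tensor values are illustrative.
def _encode_decode_roundtrip_demo():
    priors = paddle.to_tensor([[0.5, 0.5, 0.2, 0.2]])  # center-offset form
    gt = paddle.to_tensor([[0.4, 0.4, 0.6, 0.6]])      # point form
    variances = [0.1, 0.2]
    loc = encode(gt, priors, variances)
    recovered = decode(loc, priors, variances)
    return paddle.allclose(recovered, gt, atol=1e-5)   # expected: True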
def decode_landm(pre, priors, variances):
"""Decode landm from predictions using priors to undo
the encoding we did for offset regression at train time.
Args:
pre (tensor): landm predictions for loc layers,
Shape: [num_priors,10]
priors (tensor): Prior boxes in center-offset form.
Shape: [num_priors,4].
variances: (list[float]) Variances of priorboxes
Return:
decoded landm predictions
"""
landms = paddle.concat((priors[:, :2] + pre[:, :2] * variances[0] * priors[:, 2:],
priors[:, :2] + pre[:, 2:4] * variances[0] * priors[:, 2:],
priors[:, :2] + pre[:, 4:6] * variances[0] * priors[:, 2:],
priors[:, :2] + pre[:, 6:8] * variances[0] * priors[:, 2:],
priors[:, :2] + pre[:, 8:10] * variances[0] * priors[:, 2:],
), axis=1)
return landms
def log_sum_exp(x):
"""Utility function for computing log_sum_exp while determining
This will be used to determine unaveraged confidence loss across
all examples in a batch.
Args:
x (Variable(tensor)): conf_preds from conf layers
"""
x_max = x.max()
return paddle.log(paddle.sum(paddle.exp(x-x_max), 1, keepdim=True)) + x_max
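# Hedged sketch (not in the original module): the stabilised log_sum_exp stays
# finite for large inputs where a naive paddle.exp would overflow to inf.
def _log_sum_exp_demo():
    x = paddle.to_tensor([[1000.0, 1000.0]])
    # naive: paddle.log(paddle.sum(paddle.exp(x), 1, keepdim=True)) -> inf
    return log_sum_exp(x)  # ~1000.6931 (= 1000 + log 2)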
# Original author: Francisco Massa:
# https://github.com/fmassa/object-detection.torch
# Ported to PyTorch by Max deGroot (02/01/2017)
def nms(boxes, scores, overlap=0.5, top_k=200):
"""Apply non-maximum suppression at test time to avoid detecting too many
overlapping bounding boxes for a given object.
Args:
boxes: (tensor) The location preds for the img, Shape: [num_priors,4].
        scores: (tensor) The class prediction scores for the img, Shape:[num_priors].
        overlap: (float) The overlap thresh for suppressing unnecessary boxes.
        top_k: (int) The maximum number of box preds to consider.
Return:
The indices of the kept boxes with respect to num_priors.
"""
    keep = paddle.zeros([scores.shape[0]], dtype='int64')
    if boxes.numel() == 0:
        return keep, 0
    x1 = boxes[:, 0]
    y1 = boxes[:, 1]
    x2 = boxes[:, 2]
    y2 = boxes[:, 3]
    area = paddle.multiply(x2 - x1, y2 - y1)
    idx = paddle.argsort(scores, axis=0)  # sort in ascending order
    idx = idx[-top_k:]  # indices of the top-k largest vals
    count = 0
    while idx.numel() > 0:
        i = idx[-1]  # index of current largest val
        keep[count] = i
        count += 1
        if idx.shape[0] == 1:
            break
        idx = idx[:-1]  # remove kept element from view
        # load bboxes of next highest vals
        xx1 = paddle.index_select(x1, idx)
        yy1 = paddle.index_select(y1, idx)
        xx2 = paddle.index_select(x2, idx)
        yy2 = paddle.index_select(y2, idx)
        # clip coordinates to the intersection with the current box
        xx1 = paddle.maximum(xx1, x1[i])
        yy1 = paddle.maximum(yy1, y1[i])
        xx2 = paddle.minimum(xx2, x2[i])
        yy2 = paddle.minimum(yy2, y2[i])
        w = paddle.clip(xx2 - xx1, min=0.0)
        h = paddle.clip(yy2 - yy1, min=0.0)
        inter = w * h
        # IoU = i / (area(a) + area(b) - i)
        rem_areas = paddle.index_select(area, idx)  # load remaining areas
        union = (rem_areas - inter) + area[i]
        IoU = inter / union
        # keep only elements with an IoU <= overlap
        idx = paddle.masked_select(idx, IoU <= overlap)
return keep, count
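# Hedged usage sketch for nms() (illustrative boxes/scores, not from the
# original code): box 1 overlaps box 0 above the threshold and is suppressed.
def _nms_demo():
    boxes = paddle.to_tensor([[0.0, 0.0, 1.0, 1.0],
                              [0.1, 0.1, 1.1, 1.1],
                              [2.0, 2.0, 3.0, 3.0]])
    scores = paddle.to_tensor([0.9, 0.8, 0.7])
    keep, count = nms(boxes, scores, overlap=0.5)
    return keep[:count]  # expected: indices [0, 2]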
| 40.369186
| 122
| 0.611795
|
b290f8626e523d33fb5888c4e17758d914080367
| 4,672
|
py
|
Python
|
scripts/run_parallel.py
|
michaelhabeck/isdhic
|
35ccec0621c815c77e683bcce7d26e1e6c82b53b
|
[
"MIT"
] | 3
|
2017-01-12T12:02:17.000Z
|
2020-12-28T07:15:16.000Z
|
scripts/run_parallel.py
|
michaelhabeck/isdhic
|
35ccec0621c815c77e683bcce7d26e1e6c82b53b
|
[
"MIT"
] | 5
|
2018-06-23T21:48:31.000Z
|
2019-08-09T17:45:18.000Z
|
scripts/run_parallel.py
|
michaelhabeck/isdhic
|
35ccec0621c815c77e683bcce7d26e1e6c82b53b
|
[
"MIT"
] | 2
|
2017-01-07T22:42:17.000Z
|
2019-07-25T07:11:54.000Z
|
"""
Run parallel replica simulation using multiprocessing
"""
import isdhic
import numpy as np
import multiprocessing as mp
from copy import deepcopy
from isdhic import utils
from isdhic.rex import ReplicaState
from run_rex import Replica
def create_posterior(resolution = 500):
filename = './chrX_cell1_{0}kb.py'.format(resolution)
with open(filename) as script:
exec script
return posterior
def create_replica(q, beta, posterior=None):
posterior = posterior or create_posterior()
diameter = posterior['tsallis'].forcefield.d[0,0]
coords = posterior.params['coordinates']
n_particles = len(coords)/3
extended = np.multiply.outer(np.arange(n_particles), np.eye(3)[0]) * diameter
coords.set(extended)
return Replica(posterior, n_steps=10, n_leaps=250, q=q, beta=beta)
def sample(initial, q, beta, stepsize):
sampler = create_replica(q, beta)
sampler.activate()
sampler.state = deepcopy(initial)
sampler.stepsize = stepsize
state = sampler.next()
return deepcopy(state), sampler.stepsize, sampler.history
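## Example (sketch, not from the original script): drawing one new state from
## a single replica without the worker pool; the q/beta values are illustrative.
##
##     replica = create_replica(q=1.03, beta=0.5)
##     state, stepsize, history = sample(replica.state, replica.q,
##                                       replica.beta, replica.stepsize)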
## class Bridge(object):
## """
## List of replicas
## """
## def __init__(self, samplers):
## self._items = samplers
## def __len__(self):
## return len(self._items)
## def __getitem__(self, i):
## return self._items[i]
## def __iter__(self):
## return iter(self._items)
## def __call__(self, i):
## sampler = self[i]
## return sampler.next()
## def propose(self, workers):
## return ReplicaState(workers.map(self, range(len(self))))
class ParallelReplicaExchange(isdhic.ReplicaExchange):
def __init__(self, samplers, n_cpus=1):
super(ParallelReplicaExchange, self).__init__(samplers)
self.workers = mp.Pool(processes=n_cpus)
def move_parallel(self):
tasks = []
for state, replica in zip(self.state, self.samplers):
tasks.append((state, replica.q, replica.beta, replica.stepsize))
print 'Start parallel computation...'
results = [self.workers.apply_async(sample, task) for task in tasks]
states = []
for i, result in enumerate(results):
state, stepsize, history = result.get()
map(self[i].history.update, history)
self[i].stepsize = stepsize
states.append(state)
print 'Parallel computation finished.'
self.state = ReplicaState(states)
return self.state
def next(self):
state = self.move_parallel()
accept = {}
for i, j in self._swaps.next():
accept[(i,j)] = self.sample_swap(state, i, j)
self.history.update(accept)
self.state = state
        if not len(self.history) % 1:  # modulus 1: report after every iteration
print '-' * 25, len(self.history), '-' * 25
print self.history
return state
def create_samples(n_samples, rex, samples):
while len(samples) < n_samples:
samples.append(rex.next())
if __name__ == '__main__':
from csb.io import load, dump
import os
n_replicas = 100
schedule = np.transpose([
np.logspace(0., np.log10(1.06), n_replicas),
np.linspace(1., 0.1, n_replicas)])
posterior = create_posterior()
replicas = [create_replica(q,beta,posterior) for q, beta in schedule]
rex = ParallelReplicaExchange(replicas, n_cpus=25)
samples = []
if False:
from isd.ro import threaded
threaded(create_samples, 1e4, rex, samples)
if False:
while len(samples) < 1000:
samples.append(rex.next())
if False:
E = np.array([[s.potential_energy for s in S] for S in samples])
out = '{0:.3e}, {1:.3e}, {2:.3e} : {3}, -log_prob={4:.3e}'
for sampler in rex.samplers:
print out.format(sampler.q, sampler.beta, sampler.stepsize, sampler.history,
sampler.state.potential_energy)
rates = np.array([rex.history[pair].acceptance_rate() for pair in rex.history.pairs])
if False:
from csb.bio.utils import rmsd
from scipy.spatial.distance import squareform
burnin = -400
        thinning = 1  # or e.g. 10 to thin the chain
mask = np.ones(333,'i')
mask[:11] = 0
mask[47:66] = 0
        x = X[burnin::thinning].reshape(-1,333,3)
x = np.compress(mask,x,1)
d = [rmsd(xx,x[j]) for i, xx in enumerate(x) for j in range(i+1,len(x))]
from sklearn.cluster import spectral_clustering
K = 4
membership = spectral_clustering(np.exp(-squareform(d)), n_clusters=K, eigen_solver='arpack')
i = np.argsort(membership)
matshow(squareform(d)[i][:,i],origin='lower')
| 24.460733
| 97
| 0.624572
|
aee9aa1bc144304e4a8b72da848db37b77bc92e3
| 3,358
|
py
|
Python
|
serial/src/serial_test2.py
|
Choi-Laboratory/SOBIT-Bringup
|
89a921ac5922b2963155d80739013c7ef4c67abf
|
[
"Apache-2.0"
] | null | null | null |
serial/src/serial_test2.py
|
Choi-Laboratory/SOBIT-Bringup
|
89a921ac5922b2963155d80739013c7ef4c67abf
|
[
"Apache-2.0"
] | null | null | null |
serial/src/serial_test2.py
|
Choi-Laboratory/SOBIT-Bringup
|
89a921ac5922b2963155d80739013c7ef4c67abf
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
import serial
import rospy
from sensor_msgs.msg import *
ser=serial.Serial(
port = '/dev/vsrc',
baudrate = 115200,
parity = serial.PARITY_NONE,
bytesize = serial.EIGHTBITS,
stopbits = serial.STOPBITS_ONE,
timeout = None,
xonxoff = 0,
rtscts = 0,
# interCharTimeout = None
)
joint_name = ["","L_wheel","R_wheel","L_shoulder_roll","L_shoulder_pitch","L_elbow_yaw","L_elbow_pitch", "R_shoulder_roll","R_shoulder_pitch","R_elbow_yaw","R_elbow_pitch", "neck_pitch","neck_roll\t","neck_yaw\t","nouse","nouse","nouse","nouse","nouse","L_hand_yaw","L_hand_thumb","L_hand_index","L_hand_mid","L_hand_ring","L_hand_pinky","R_hand_yaw","R_hand_thumb","R_hand_index","R_hand_mid","R_hand_ring","R_hand_pinky"]
#ser.open()
while True:
try:
print("\n1.pose_init 2.R 3.get_enc 4.gain_on 5.gain_off 6.motion 0.exit")
command = input("select>>")
if command == 1: #<pose_init>
ser.write("@00c8:T8000:T8000:T8000:T0000:T8000:T8000:T8000:T8000:T8000:T8000:T8000:T8000:T8000::::::T8000:T8000:T6800:T9800:T6800:T9800:T8000:T8000:T9800:T6800:T9800:T6800\n")
print ser.readline(),
elif command == 2: #<R>
ser.write("R\n")
print ser.readline(),
elif command == 3: #<get_enc>
ser.write(":Cxxxx:Cxxxx:Cxxxx:Cxxxx:Cxxxx:Cxxxx:Cxxxx:Cxxxx:Cxxxx:Cxxxx:Cxxxx:Cxxxx:Cxxxx::::::T8000:Cxxxx:Cxxxx:Cxxxx:Cxxxx:Cxxxx:T8000:Cxxxx:Cxxxx:Cxxxx:Cxxxx:Cxxxx\n");
            resp = ser.readline()
            print resp
            position_list = resp.split(";")
            position_list_16 = []
            position_list_10 = []
            #print resp.split(";")
#print "left_wheel:",position_list[1]
for i in range(1,31):
position_list[i] = position_list[i][1:]
#print type(position_list[i])
#position_list[i] = hex(position_list[i])
#print type(position_list[i])
if len(position_list[i]) != 0:
position_list[i] = int(position_list[i],16)
else:
pass
#print '%2d'%i,")", joint_name[i] ,"\t:", position_list_16[i], position_list_10[i]
print '%2d'%i,")", joint_name[i] ,"\t:", position_list[i]
elif command == 4: #<gain_on>
ser.write(":P0100:P0100:P0040:P0080:P0045:P0040:P0040:P0080:P0045:P0040:P0080:P0200:P0016::::::P0001:P0001:P0001:P0001:P0001:P0001:P0001:P0001:P0001:P0001:P0001:P0001\n")
print ser.readline(),
elif command == 5: #<gain_off>
ser.write("P0000\n")
print ser.readline(),
elif command == 6: #<motion>
print "move left_shoulder_roll"
            motion_deg = input("angle in (0-180)>")
            if motion_deg < 0:
                print "range out"
                motion_deg = 0
            elif motion_deg > 180:
                print "range out"
                motion_deg = 180
motion_shoulder_roll = 32768 + motion_deg * 97
print motion_shoulder_roll
motion_shoulder_roll = '%x' % motion_shoulder_roll
print motion_shoulder_roll
#motion = "@00c8:T8000:T8000:T" + motion_shoulder_roll + ":T0000:T8000:T8000:T8000:T8000:T8000:T8000:T8000:T8000:T8000::::::T8000:T8000:T6800:T9800:T6800:T9800:T8000:T8000:T9800:T6800:T9800:T6800\n"
#print motion
ser.write("@00c8:T8000:T8000:T" + motion_shoulder_roll + ":T0000:T8000:T8000:T8000:T8000:T8000:T8000:T8000:T8000:T8000::::::T8000:T8000:T6800:T9800:T6800:T9800:T8000:T8000:T9800:T6800:T9800:T6800\n")
print ser.readline(),
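            # Sketch of the degree-to-count conversion used above (assumption
            # read off this script: 0x8000 is the servo centre, ~97 counts/deg):
            #     deg_to_target = lambda deg: '%x' % (32768 + deg * 97)
            #     deg_to_target(90)   # -> 'a21a'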
elif command == 0:
ser.close()
break
elif command == "":
ser.close()
break
else:
ser.close()
break
    except Exception as e:
        print "except:", e
break
| 31.679245
| 427
| 0.678082
|
60575ab696619be0500e50ca788e7fa0ffca024f
| 1,875
|
py
|
Python
|
planet/ForTesting6.py
|
createamind/000planet_A
|
1fda74b693f85d59ae03192b9365de118efd2a17
|
[
"Apache-2.0"
] | 5
|
2019-03-29T10:13:30.000Z
|
2021-05-02T10:26:15.000Z
|
planet/ForTesting6.py
|
createamind/000planet_A
|
1fda74b693f85d59ae03192b9365de118efd2a17
|
[
"Apache-2.0"
] | 5
|
2020-01-28T22:10:24.000Z
|
2022-02-09T23:30:15.000Z
|
planet/ForTesting6.py
|
createamind/000planet_A
|
1fda74b693f85d59ae03192b9365de118efd2a17
|
[
"Apache-2.0"
] | 4
|
2019-03-15T16:25:51.000Z
|
2019-05-09T05:31:39.000Z
|
import tensorflow as tf
import numpy as np
import tensorflow.contrib.eager as tfe
tfe.enable_eager_execution()
dataset1 = tf.data.Dataset.from_tensor_slices(np.array([1.0, 2.0, 3.0, 4.0, 5.0])) # <TensorSliceDataset shapes: (), types: tf.float64>
dataset2 = tf.data.Dataset.from_tensor_slices(
(np.array([1.0, 2.0, 3.0, 4.0, 5.0]), np.random.uniform(size=(5, 2)))
) # <TensorSliceDataset shapes: ((), (2,)), types: (tf.float64, tf.float64)>
dataset3 = tf.data.Dataset.from_tensor_slices(
{
"a": np.array([1.0, 2.0, 3.0, 4.0, 5.0]),
"b": np.random.uniform(size=(5, 2))
}
) # <TensorSliceDataset shapes: {a: (), b: (2,)}, types: {a: tf.float64, b: tf.float64}>
a = [[1,2,3,4,5], [6,7,8,1,2], [10,1,2,3,2]]
a1 = tf.data.Dataset.from_tensors(a)
b = [[1,2,3,4,5], [6,7,8,1,2], [10,1,2,3,2]]
b1 = tf.data.Dataset.from_tensor_slices(b)
a2 = a1.flat_map(lambda x: tf.data.Dataset.from_tensor_slices(x))
b2 = b1.flat_map(lambda x: tf.data.Dataset.from_tensor_slices(x))
# a2/b2 flatten the nested rows into a stream of scalars: 1,2,3,4,5,6,7,8,1,2,10,...
for one_element in tfe.Iterator(a1):
print(one_element)
print('\n===============')
for one_element in tfe.Iterator(b1):
print(one_element)
print('\n===============')
for one_element in tfe.Iterator(a2):
print(one_element)
print('\n===============')
for one_element in tfe.Iterator(b2):
print(one_element)
print('\n===============')
for one_element in tfe.Iterator(dataset2):
print(one_element)
print('\n===============')
for one_element in tfe.Iterator(dataset2.batch(2)):
print(one_element)
print('\n===============')
import itertools
# eager execution was already enabled above; calling tf.enable_eager_execution()
# a second time after ops have run raises an error, so it is omitted here.
def gen():
for i in itertools.count(1):
yield (i, [[1] * i])
ds = tf.data.Dataset.from_generator(
gen, (tf.int64, tf.int64), (tf.TensorShape([]), tf.TensorShape([1,None,])))
for value in ds.take(2):
print (value)
print()
| 26.041667
| 136
| 0.618133
|
c2f8faae777f70c2ba9473804bfb2f0cf8c5380c
| 7,543
|
py
|
Python
|
nikola/plugins/command/serve.py
|
bnmnetp/nikola
|
79e2878de421c99f1835a853d690a8a5a35c3481
|
[
"MIT"
] | null | null | null |
nikola/plugins/command/serve.py
|
bnmnetp/nikola
|
79e2878de421c99f1835a853d690a8a5a35c3481
|
[
"MIT"
] | null | null | null |
nikola/plugins/command/serve.py
|
bnmnetp/nikola
|
79e2878de421c99f1835a853d690a8a5a35c3481
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright © 2012-2015 Roberto Alsina and others.
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import print_function
import os
import socket
import webbrowser
try:
from BaseHTTPServer import HTTPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
except ImportError:
from http.server import HTTPServer # NOQA
from http.server import SimpleHTTPRequestHandler # NOQA
from nikola.plugin_categories import Command
from nikola.utils import get_logger
class IPv6Server(HTTPServer):
"""An IPv6 HTTPServer."""
address_family = socket.AF_INET6
class CommandServe(Command):
"""Start test server."""
name = "serve"
doc_usage = "[options]"
doc_purpose = "start the test webserver"
logger = None
cmd_options = (
{
'name': 'port',
'short': 'p',
'long': 'port',
'default': 8000,
'type': int,
'help': 'Port number (default: 8000)',
},
{
'name': 'address',
'short': 'a',
'long': 'address',
'type': str,
'default': '',
'help': 'Address to bind (default: 0.0.0.0 – all local IPv4 interfaces)',
},
{
'name': 'browser',
'short': 'b',
'long': 'browser',
'type': bool,
'default': False,
'help': 'Open the test server in a web browser',
},
{
'name': 'ipv6',
'short': '6',
'long': 'ipv6',
'type': bool,
'default': False,
'help': 'Use IPv6',
},
)
def _execute(self, options, args):
"""Start test server."""
self.logger = get_logger('serve', self.site.loghandlers)
out_dir = self.site.config['OUTPUT_FOLDER']
if not os.path.isdir(out_dir):
self.logger.error("Missing '{0}' folder?".format(out_dir))
else:
os.chdir(out_dir)
if '[' in options['address']:
options['address'] = options['address'].strip('[').strip(']')
ipv6 = True
OurHTTP = IPv6Server
elif options['ipv6']:
ipv6 = True
OurHTTP = IPv6Server
else:
ipv6 = False
OurHTTP = HTTPServer
httpd = OurHTTP((options['address'], options['port']),
OurHTTPRequestHandler)
sa = httpd.socket.getsockname()
self.logger.info("Serving HTTP on {0} port {1}...".format(*sa))
if options['browser']:
if ipv6:
server_url = "http://[{0}]:{1}/".format(*sa)
else:
server_url = "http://{0}:{1}/".format(*sa)
self.logger.info("Opening {0} in the default web browser...".format(server_url))
webbrowser.open(server_url)
try:
httpd.serve_forever()
except KeyboardInterrupt:
self.logger.info("Server is shutting down.")
exit(130)
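# Example invocations (sketch; assumes the site has already been built into
# OUTPUT_FOLDER):
#
#     nikola serve --port 8001 --browser
#     nikola serve -6 -a ::1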
class OurHTTPRequestHandler(SimpleHTTPRequestHandler):
extensions_map = dict(SimpleHTTPRequestHandler.extensions_map)
extensions_map[""] = "text/plain"
# NOTICE: this is a patched version of send_head() to disable all sorts of
# caching. `nikola serve` is a development server, hence caching should
# not happen to have access to the newest resources.
#
# The original code was copy-pasted from Python 2.7. Python 3.3 contains
# the same code, missing the binary mode comment.
#
# Note that it might break in future versions of Python, in which case we
# would need to do even more magic.
def send_head(self):
"""Common code for GET and HEAD commands.
This sends the response code and MIME headers.
Return value is either a file object (which has to be copied
to the outputfile by the caller unless the command was HEAD,
and must be closed by the caller under all circumstances), or
None, in which case the caller has nothing further to do.
"""
path = self.translate_path(self.path)
f = None
if os.path.isdir(path):
if not self.path.endswith('/'):
# redirect browser - doing basically what apache does
self.send_response(301)
self.send_header("Location", self.path + "/")
# begin no-cache patch
# For redirects. With redirects, caching is even worse and can
# break more. Especially with 301 Moved Permanently redirects,
# like this one.
self.send_header("Cache-Control", "no-cache, no-store, "
"must-revalidate")
self.send_header("Pragma", "no-cache")
self.send_header("Expires", "0")
# end no-cache patch
self.end_headers()
return None
for index in "index.html", "index.htm":
index = os.path.join(path, index)
if os.path.exists(index):
path = index
break
else:
return self.list_directory(path)
ctype = self.guess_type(path)
try:
# Always read in binary mode. Opening files in text mode may cause
# newline translations, making the actual size of the content
# transmitted *less* than the content-length!
f = open(path, 'rb')
except IOError:
self.send_error(404, "File not found")
return None
self.send_response(200)
self.send_header("Content-type", ctype)
if os.path.splitext(path)[1] == '.svgz':
# Special handling for svgz to make it work nice with browsers.
self.send_header("Content-Encoding", 'gzip')
fs = os.fstat(f.fileno())
self.send_header("Content-Length", str(fs[6]))
self.send_header("Last-Modified", self.date_time_string(fs.st_mtime))
# begin no-cache patch
# For standard requests.
self.send_header("Cache-Control", "no-cache, no-store, "
"must-revalidate")
self.send_header("Pragma", "no-cache")
self.send_header("Expires", "0")
# end no-cache patch
self.end_headers()
return f
| 37.341584
| 96
| 0.57855
|
1a10f6612434deae9a193aa883c46a729776ab87
| 565
|
py
|
Python
|
setup.py
|
andypalmer9669/74_series_computer
|
0b8a4776b49a2380a51863634b48bcc441bf74ec
|
[
"MIT"
] | null | null | null |
setup.py
|
andypalmer9669/74_series_computer
|
0b8a4776b49a2380a51863634b48bcc441bf74ec
|
[
"MIT"
] | 46
|
2019-02-22T16:46:02.000Z
|
2020-03-08T20:26:37.000Z
|
setup.py
|
andypalmer9669/74_series_computer
|
0b8a4776b49a2380a51863634b48bcc441bf74ec
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
setup(
name="eight-bit-computer",
version="0.0.2",
author="Andy Palmer",
author_email="contactninezerozeronine@gmail.com",
description="Tools to build a physical eight bit computer.",
url="https://github.com/ninezerozeronine/eight-bit-computer",
packages=find_packages('src'),
package_dir={'': 'src'},
entry_points={
"console_scripts": [
"ebc-assemble=eight_bit_computer.cli:assemble",
"ebc-gen-roms=eight_bit_computer.cli:gen_roms",
]
}
)
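# Example (sketch): after an editable install such as `pip install -e .`, the
# console scripts declared above land on PATH; the arguments are illustrative:
#
#     ebc-assemble program.asm
#     ebc-gen-roms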
| 29.736842
| 65
| 0.660177
|
28520c11c6f7c23685d5e05c10a40726a767c49a
| 6,040
|
py
|
Python
|
packages/python/plotly/plotly/validators/choropleth/hoverlabel/__init__.py
|
pragyagarg642/plotly.py
|
141aa6dcb3f838b2102db6ecc9ae1bdb70daf20b
|
[
"MIT"
] | 2
|
2020-04-11T19:28:30.000Z
|
2020-05-04T03:16:20.000Z
|
packages/python/plotly/plotly/validators/choropleth/hoverlabel/__init__.py
|
pragyagarg642/plotly.py
|
141aa6dcb3f838b2102db6ecc9ae1bdb70daf20b
|
[
"MIT"
] | null | null | null |
packages/python/plotly/plotly/validators/choropleth/hoverlabel/__init__.py
|
pragyagarg642/plotly.py
|
141aa6dcb3f838b2102db6ecc9ae1bdb70daf20b
|
[
"MIT"
] | null | null | null |
import _plotly_utils.basevalidators
class NamelengthsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="namelengthsrc", parent_name="choropleth.hoverlabel", **kwargs
):
super(NamelengthsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class NamelengthValidator(_plotly_utils.basevalidators.IntegerValidator):
def __init__(
self, plotly_name="namelength", parent_name="choropleth.hoverlabel", **kwargs
):
super(NamelengthValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "none"),
min=kwargs.pop("min", -1),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class FontValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(
self, plotly_name="font", parent_name="choropleth.hoverlabel", **kwargs
):
super(FontValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Font"),
data_docs=kwargs.pop(
"data_docs",
"""
color
colorsrc
Sets the source reference on Chart Studio Cloud
for color .
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on Chart Studio Cloud
for family .
size
sizesrc
Sets the source reference on Chart Studio Cloud
for size .
""",
),
**kwargs
)
import _plotly_utils.basevalidators
class BordercolorsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self,
plotly_name="bordercolorsrc",
parent_name="choropleth.hoverlabel",
**kwargs
):
super(BordercolorsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class BordercolorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self, plotly_name="bordercolor", parent_name="choropleth.hoverlabel", **kwargs
):
super(BordercolorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class BgcolorsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="bgcolorsrc", parent_name="choropleth.hoverlabel", **kwargs
):
super(BgcolorsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class BgcolorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self, plotly_name="bgcolor", parent_name="choropleth.hoverlabel", **kwargs
):
super(BgcolorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class AlignsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="alignsrc", parent_name="choropleth.hoverlabel", **kwargs
):
super(AlignsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class AlignValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self, plotly_name="align", parent_name="choropleth.hoverlabel", **kwargs
):
super(AlignValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "style"),
values=kwargs.pop("values", ["left", "right", "auto"]),
**kwargs
)
| 32.473118
| 88
| 0.604636
|
d74507bfe9215f4156bd4d18d088455d2328115e
| 4,502
|
py
|
Python
|
hypnopyze/sequencer.py
|
garstka/hypnopyze
|
eae751f42c168a5b57dbbb72745ed1c2eaa6556b
|
[
"MIT"
] | 5
|
2017-05-30T20:40:53.000Z
|
2021-10-06T17:10:00.000Z
|
hypnopyze/sequencer.py
|
garstka/hypnopyze
|
eae751f42c168a5b57dbbb72745ed1c2eaa6556b
|
[
"MIT"
] | null | null | null |
hypnopyze/sequencer.py
|
garstka/hypnopyze
|
eae751f42c168a5b57dbbb72745ed1c2eaa6556b
|
[
"MIT"
] | 3
|
2017-08-30T19:47:27.000Z
|
2021-11-29T12:00:29.000Z
|
from math import ceil
from hypnopyze.patterns.pattern import *
from hypnopyze.styles.manager import StyleManager
# For creating sequences of notes.
class Sequencer:
# - beats_per_bar - how many beats in a bar (maximum pattern resolution)
# - perturb_velocity_cap - randomly perturbs the base velocities by +- this
def __init__(self,
beats_per_bar: int,
perturb_velocity_cap: int = 0):
# The midi channel to use. Any change takes effect from the next bar.
self.channel = 0
# How many beats in a bar to allow (maximum pattern resolution)
self.beats_per_bar = beats_per_bar
# How much to randomly perturb velocities
self.perturb_velocity_cap = perturb_velocity_cap
# The output notes
self.__notes = []
# Current time
self.__t = 0
# Returns the current time point.
@property
def time(self):
return self.__t
# Sets the current time point.
@time.setter
def time(self, value):
self.__t = value if value >= 0 else 0
# Returns the generated notes.
@property
def notes(self):
return self.__notes
# Returns true, if the pattern is compatible
def compatible(self, pattern: Pattern):
# if bad pattern
if not pattern:
return False
# if pattern resolution is too high
if self.beats_per_bar < pattern.min_beats_per_bar:
return False
# should be fine
return True
# If compatible, returns how much the pattern will be scaled up.
# Not applicable to patterns with pattern.real_time == True
def time_scale(self, pattern: Pattern) -> int:
if pattern.real_time:
return 1
#
# Example 1:
# e.g. beats_per_bar = 16
# pattern.min_beats_per_bar = 4
#
# time_scale = beats_per_bar // p.mbpb = 4
#
        # also, can be repeated, as beats_per_bar % p.mbpb == 0
#
# Example 2:
# e.g. beats_per_bar = 20
# pattern.min_beats_per_bar = 3
#
# time_scale = beats_per_bar // p.mbpb = 6
#
# shouldn't be repeated, as beats_per_bar % p.mbpb != 0
return self.beats_per_bar // pattern.min_beats_per_bar
# Returns true, if the pattern can be looped at this time scale,
# see time_scale().
def repeatable(self, pattern: Pattern) -> bool:
return pattern.repeatable \
and self.beats_per_bar % pattern.min_beats_per_bar == 0
# Appends the pattern at the current time point, if it's compatible.
# Returns the number of bars appended
def append(self, pattern: Pattern) -> int:
if not self.compatible(pattern):
print("Pattern is not compatible")
return 0
prng = StyleManager().prng
time_scale = self.time_scale(pattern)
bar_count = pattern.bars
fill = pattern.real_time and self.repeatable(pattern)
repeats = 1 if not fill \
else self.beats_per_bar // pattern.min_beats_per_bar
perturb_range = list(range(-abs(self.perturb_velocity_cap),
abs(self.perturb_velocity_cap) + 1))
perturb_range_len = len(perturb_range)
p = pattern
t = self.__t
full_step = time_scale
for _ in range(0, repeats):
for (index, velocity, duration) in p.ivd:
# ignore silence
if is_silence(index):
t += full_step # update the time
continue
# perturb the velocity if required
if perturb_range:
velocity += perturb_range[
prng.binomial(perturb_range_len - 1,
0.5)]
velocity = max(0, min(velocity, MAX_VELOCITY))
# correct the duration given scale
duration *= full_step
# append the note
self.__notes.append([[t, index, velocity, duration],
self.channel])
# update the time
t += full_step
lower_bound = bar_count * self.beats_per_bar
if t - self.__t >= lower_bound:
self.__t = t
else:
self.__t += lower_bound
return bar_count
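# Usage sketch (hypothetical; assumes a Pattern instance `p` built elsewhere):
#
#     seq = Sequencer(beats_per_bar=16, perturb_velocity_cap=5)
#     if seq.compatible(p):
#         bars_added = seq.append(p)
#         notes = seq.notes  # [[time, index, velocity, duration], channel] pairs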
| 31.048276
| 79
| 0.574856
|
5f0b731be9ab8240652f9e00192a3456f1ae63ab
| 8,122
|
py
|
Python
|
nc/loops/loops.py
|
migalkin/NodePiece
|
a2189801657d41797a3db99c38f071a6468be2c5
|
[
"MIT"
] | 84
|
2021-06-24T14:42:01.000Z
|
2022-03-29T06:28:43.000Z
|
nc/loops/loops.py
|
migalkin/NodePiece
|
a2189801657d41797a3db99c38f071a6468be2c5
|
[
"MIT"
] | 3
|
2021-07-02T05:11:28.000Z
|
2022-02-07T06:19:56.000Z
|
nc/loops/loops.py
|
migalkin/NodePiece
|
a2189801657d41797a3db99c38f071a6468be2c5
|
[
"MIT"
] | 8
|
2021-06-24T22:43:24.000Z
|
2022-03-28T03:42:07.000Z
|
import wandb
import torch
import numpy as np
from tqdm.autonotebook import tqdm
from typing import Callable, Dict, Union
from loops.sampler import NodeClSampler
from utils.utils_mytorch import *
from torch_geometric.data import Data
def training_loop_pyg_nc(epochs: int,
opt: torch.optim,
model: Callable,
train_graph: Data,
val_graph: Data,
device: torch.device = torch.device('cpu'),
data_fn: Callable = NodeClSampler,
eval_fn: Callable = None,
eval_every: int = 1,
log_wandb: bool = True,
run_trn_testbench: bool = True,
savedir: str = None,
save_content: Dict[str, list] = None,
grad_clipping: bool = True,
scheduler: Callable = None,
criterion: Callable = None,
                         **kwargs) -> Dict[str, list]:
train_loss = []
train_rocauc, train_prcauc, train_ap, train_hard_acc = [], [], [], []
valid_rocauc, valid_prcauc, valid_ap, valid_hard_acc = [], [], [], []
# Epoch level
for e in tqdm(range(epochs)):
# Train
with Timer() as timer:
# Get masks and labels
train_mask, train_y, val_mask, val_y = data_fn()
model.train()
opt.zero_grad()
train_mask_ = torch.tensor(train_mask, dtype=torch.long, device=device)
train_y_ = torch.tensor(train_y, dtype=torch.float, device=device)
val_mask_ = torch.tensor(val_mask, dtype=torch.long, device=device)
val_y_ = torch.tensor(val_y, dtype=torch.float, device=device)
pred = model(train_graph.to(device=device), train_mask_)
loss = criterion(pred, train_y_)
per_epoch_loss = loss.item()
loss.backward()
if grad_clipping:
torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
opt.step()
if hasattr(model, "post_parameter_update"):
model.post_parameter_update()
# Log this stuff
print(f"[Epoch: {e} ] Loss: {per_epoch_loss}")
train_loss.append(per_epoch_loss)
if e % eval_every == 0 and e >= 1:
with torch.no_grad():
model.eval()
val_preds = torch.sigmoid(model(val_graph.to(device=device), val_mask_))
val_res = eval_fn(val_y_, val_preds)
valid_rocauc.append(val_res["rocauc"])
valid_prcauc.append(val_res["prcauc"])
valid_ap.append(val_res["ap"])
valid_hard_acc.append(val_res["hard_acc"])
if run_trn_testbench:
# Also run train testbench
train_preds = torch.sigmoid(model(train_graph.to(device=device), train_mask_))
unsmoothed_labels = (train_y_ > 0.5).float()
tr_res = eval_fn(unsmoothed_labels, train_preds)
train_rocauc.append(tr_res["rocauc"])
train_prcauc.append(tr_res["prcauc"])
train_ap.append(tr_res["ap"])
train_hard_acc.append(tr_res["hard_acc"])
# Print statement here
print("Epoch: %(epo)03d | Loss: %(loss).5f | Tr_rocauc: %(tr_rocauc)0.5f | "
"Tr_prcauc: %(tr_prcauc)0.5f | Tr_AP: %(tr_ap)0.5f | Tr_hard_acc: %(tr_hard_acc)0.5f |"
"Vl_rocauc: %(val_rocauc)0.5f | Vl_prcauc: %(val_prcauc)0.5f | Vl_AP: %(val_ap)0.5f | "
"Vl_hard_acc: %(val_hard_acc)0.5f | Time_trn: %(time).3f min"
% {'epo': e,
'loss': float(per_epoch_loss),
'tr_rocauc': float(tr_res["rocauc"]),
'tr_prcauc': float(tr_res["prcauc"]),
'tr_ap': float(tr_res["ap"]),
'tr_hard_acc': float(tr_res["hard_acc"]),
'val_rocauc': float(val_res["rocauc"]),
'val_prcauc': float(val_res["prcauc"]),
'val_ap': float(val_res["ap"]),
'val_hard_acc': float(val_res["hard_acc"]),
'time': timer.interval / 60.0})
if log_wandb:
# Wandb stuff
wandb.log({
'epoch': e,
'loss': float(np.mean(per_epoch_loss)),
'tr_rocauc': float(tr_res["rocauc"]),
'tr_prcauc': float(tr_res["prcauc"]),
'tr_ap': float(tr_res["ap"]),
'tr_hard_acc': float(tr_res["hard_acc"]),
'val_rocauc': float(val_res["rocauc"]),
'val_prcauc': float(val_res["prcauc"]),
'val_ap': float(val_res["ap"]),
'val_hard_acc': float(val_res["hard_acc"]),
})
else:
# Don't benchmark over train
# Print Statement here
print("Epoch: %(epo)03d | Loss: %(loss).5f | "
"Vl_rocauc: %(val_rocauc)0.5f | Vl_prcauc: %(val_prcauc)0.5f | Vl_AP: %(val_ap)0.5f | "
"Vl_hard_acc: %(val_hard_acc)0.5f | time_trn: %(time).3f min"
% {'epo': e,
'loss': float(per_epoch_loss),
'val_rocauc': float(val_res["rocauc"]),
'val_prcauc': float(val_res["prcauc"]),
'val_ap': float(val_res["ap"]),
'val_hard_acc': float(val_res["hard_acc"]),
'time': timer.interval / 60.0})
if log_wandb:
# Wandb stuff
wandb.log({
'epoch': e,
'loss': float(per_epoch_loss),
'val_rocauc': float(val_res["rocauc"]),
'val_prcauc': float(val_res["prcauc"]),
'val_ap': float(val_res["ap"]),
'val_hard_acc': float(val_res["hard_acc"]),
})
# We might wanna save the model, too
if savedir is not None:
mt_save(
savedir,
torch_stuff=[tosave(obj=save_content['model'].state_dict(), fname='model.torch')],
pickle_stuff=[tosave(fname='traces.pkl',
obj=[train_loss, valid_rocauc])],
json_stuff=[tosave(obj=save_content['config'], fname='config.json')])
else:
# No test benches this time around
print("Epoch: %(epo)03d | Loss: %(loss).5f | "
"Time_Train: %(time).3f min"
% {'epo': e,
'loss': float(per_epoch_loss),
# 'tracc': float(np.mean(per_epoch_tr_acc)),
'time': timer.interval / 60.0})
if log_wandb:
# Wandb stuff
wandb.log({
'epoch': e,
'loss': float(per_epoch_loss),
# 'trn_acc': float(np.mean(per_epoch_tr_acc))
})
if scheduler is not None:
scheduler.step()
return {
"loss": train_loss,
"train_rocauc": train_rocauc,
"train_prcauc": train_prcauc,
"train_ap": train_ap,
"train_hard_acc": train_hard_acc,
"valid_rocauc": valid_rocauc,
"valid_prcauc": valid_prcauc,
"valid_ap": valid_ap,
"valid_hard_acc": valid_hard_acc
}
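# Usage sketch (the argument values below are assumptions; wire in your own
# model, graphs, sampler and metric function):
#
#     traces = training_loop_pyg_nc(
#         epochs=100, opt=opt, model=model,
#         train_graph=train_graph, val_graph=val_graph,
#         data_fn=sampler, eval_fn=metric_fn,
#         criterion=torch.nn.BCEWithLogitsLoss(),
#         log_wandb=False, run_trn_testbench=False)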
| 43.666667
| 113
| 0.466264
|
c94d11588751c930be682982c686e36e220cd99a
| 1,508
|
py
|
Python
|
tests/components/zwave_js/common.py
|
edofullin/core
|
106dc4d28ad59cb192c60fc7a354cafa86899ea4
|
[
"Apache-2.0"
] | 1
|
2021-04-28T09:51:08.000Z
|
2021-04-28T09:51:08.000Z
|
tests/components/zwave_js/common.py
|
edofullin/core
|
106dc4d28ad59cb192c60fc7a354cafa86899ea4
|
[
"Apache-2.0"
] | 60
|
2020-08-03T07:32:56.000Z
|
2022-03-31T06:02:07.000Z
|
tests/components/zwave_js/common.py
|
edofullin/core
|
106dc4d28ad59cb192c60fc7a354cafa86899ea4
|
[
"Apache-2.0"
] | 4
|
2017-01-10T04:17:33.000Z
|
2021-09-02T16:37:24.000Z
|
"""Provide common test tools for Z-Wave JS."""
AIR_TEMPERATURE_SENSOR = "sensor.multisensor_6_air_temperature"
HUMIDITY_SENSOR = "sensor.multisensor_6_humidity"
ENERGY_SENSOR = "sensor.smart_plug_with_two_usb_ports_value_electric_consumed_2"
POWER_SENSOR = "sensor.smart_plug_with_two_usb_ports_value_electric_consumed"
SWITCH_ENTITY = "switch.smart_plug_with_two_usb_ports"
LOW_BATTERY_BINARY_SENSOR = "binary_sensor.multisensor_6_low_battery_level"
ENABLED_LEGACY_BINARY_SENSOR = "binary_sensor.z_wave_door_window_sensor_any"
DISABLED_LEGACY_BINARY_SENSOR = "binary_sensor.multisensor_6_any"
NOTIFICATION_MOTION_BINARY_SENSOR = (
"binary_sensor.multisensor_6_home_security_motion_detection"
)
NOTIFICATION_MOTION_SENSOR = "sensor.multisensor_6_home_security_motion_sensor_status"
PROPERTY_DOOR_STATUS_BINARY_SENSOR = (
"binary_sensor.august_smart_lock_pro_3rd_gen_the_current_status_of_the_door"
)
CLIMATE_RADIO_THERMOSTAT_ENTITY = "climate.z_wave_thermostat"
CLIMATE_DANFOSS_LC13_ENTITY = "climate.living_connect_z_thermostat"
CLIMATE_EUROTRONICS_SPIRIT_Z_ENTITY = "climate.thermostatic_valve"
CLIMATE_FLOOR_THERMOSTAT_ENTITY = "climate.floor_thermostat"
CLIMATE_MAIN_HEAT_ACTIONNER = "climate.main_heat_actionner"
BULB_6_MULTI_COLOR_LIGHT_ENTITY = "light.bulb_6_multi_color"
EATON_RF9640_ENTITY = "light.allloaddimmer"
AEON_SMART_SWITCH_LIGHT_ENTITY = "light.smart_switch_6"
ID_LOCK_CONFIG_PARAMETER_SENSOR = (
"sensor.z_wave_module_for_id_lock_150_and_101_config_parameter_door_lock_mode"
)
| 53.857143
| 86
| 0.8813
|
a0507a8c611e70f77ab5aa920ec357527cdeb198
| 221
|
py
|
Python
|
examples/dictknife/library/src/deepequal02.py
|
podhmo/dictknife
|
a172220c1adc8411b69f31646ea2154932d71516
|
[
"MIT"
] | 13
|
2018-11-23T15:55:18.000Z
|
2021-11-24T02:42:44.000Z
|
examples/dictknife/library/src/deepequal02.py
|
podhmo/dictknife
|
a172220c1adc8411b69f31646ea2154932d71516
|
[
"MIT"
] | 105
|
2017-01-09T02:05:48.000Z
|
2021-07-26T03:39:22.000Z
|
examples/dictknife/library/src/deepequal02.py
|
podhmo/dictknife
|
a172220c1adc8411b69f31646ea2154932d71516
|
[
"MIT"
] | 4
|
2017-07-19T12:34:47.000Z
|
2019-06-20T10:32:13.000Z
|
from dictknife import deepequal
d0 = [[[1, 2, 3], [1]], [[1, 2], [2, 3], [3, 4]]]
d1 = [[[1], [1, 2, 3]], [[1, 2], [3, 4], [2, 3]]]
print("=")
print(d0 == d1)
print("deepequal")
print(deepequal(d0, d1, normalize=True))
| 22.1
| 49
| 0.502262
|
1d87344fd5f04b7fc73f92f14ea6dddbbb2cf486
| 94
|
py
|
Python
|
other/sumitb2019_b.py
|
ryosuke0825/atcoder_python
|
185cdbe7db44ecca1aaf357858d16d31ce515ddb
|
[
"MIT"
] | null | null | null |
other/sumitb2019_b.py
|
ryosuke0825/atcoder_python
|
185cdbe7db44ecca1aaf357858d16d31ce515ddb
|
[
"MIT"
] | null | null | null |
other/sumitb2019_b.py
|
ryosuke0825/atcoder_python
|
185cdbe7db44ecca1aaf357858d16d31ce515ddb
|
[
"MIT"
] | null | null | null |
n = int(input())
x = int((n+1)/1.08)       # largest candidate pre-tax price
if int(x*1.08) == n:      # does adding 8% tax (rounded down) give n?
    print(x)
else:
    print(':(')
| 11.75
| 20
| 0.457447
|
c57e796555b3e34042969c7b07134fdfad9fd5b1
| 14,416
|
py
|
Python
|
nltk/test/unit/lm/test_models.py
|
FGDBTKD/nltk
|
384e46e82789c7f47a7fb521ef976f82c3c4c3fb
|
[
"Apache-2.0"
] | null | null | null |
nltk/test/unit/lm/test_models.py
|
FGDBTKD/nltk
|
384e46e82789c7f47a7fb521ef976f82c3c4c3fb
|
[
"Apache-2.0"
] | null | null | null |
nltk/test/unit/lm/test_models.py
|
FGDBTKD/nltk
|
384e46e82789c7f47a7fb521ef976f82c3c4c3fb
|
[
"Apache-2.0"
] | 1
|
2019-10-18T08:58:45.000Z
|
2019-10-18T08:58:45.000Z
|
# -*- coding: utf-8 -*-
# Natural Language Toolkit: Language Model Unit Tests
#
# Copyright (C) 2001-2018 NLTK Project
# Author: Ilia Kurenkov <ilia.kurenkov@gmail.com>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
from __future__ import division
import math
import sys
import unittest
from six import add_metaclass
from nltk.lm import (
Vocabulary,
MLE,
Lidstone,
Laplace,
WittenBellInterpolated,
KneserNeyInterpolated,
)
from nltk.lm.preprocessing import padded_everygrams
def _prepare_test_data(ngram_order):
return (
Vocabulary(["a", "b", "c", "d", "z", "<s>", "</s>"], unk_cutoff=1),
[
list(padded_everygrams(ngram_order, sent))
for sent in (list("abcd"), list("egadbe"))
],
)
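# For reference: padded_everygrams(2, list("abcd")) pads the sentence with
# "<s>"/"</s>" and yields every 1- and 2-gram of the padded sequence, e.g.
# ('<s>',), ('<s>', 'a'), ('a',), ('a', 'b'), ... ('</s>',)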
class ParametrizeTestsMeta(type):
"""Metaclass for generating parametrized tests."""
def __new__(cls, name, bases, dct):
contexts = (
("a",),
("c",),
(u"<s>",),
("b",),
(u"<UNK>",),
("d",),
("e",),
("r",),
("w",),
)
for i, c in enumerate(contexts):
dct["test_sumto1_{0}".format(i)] = cls.add_sum_to_1_test(c)
scores = dct.get("score_tests", [])
for i, (word, context, expected_score) in enumerate(scores):
dct["test_score_{0}".format(i)] = cls.add_score_test(
word, context, expected_score
)
return super(ParametrizeTestsMeta, cls).__new__(cls, name, bases, dct)
@classmethod
def add_score_test(cls, word, context, expected_score):
if sys.version_info > (3, 5):
message = "word='{word}', context={context}"
else:
# Python 2 doesn't report the mismatched values if we pass a custom
# message, so we have to report them manually.
message = (
"{score} != {expected_score} within 4 places, "
"word='{word}', context={context}"
)
def test_method(self):
score = self.model.score(word, context)
self.assertAlmostEqual(
score,
expected_score,
msg=message.format(**locals()),
places=4,
)
return test_method
@classmethod
def add_sum_to_1_test(cls, context):
def test(self):
s = sum(self.model.score(w, context) for w in self.model.vocab)
self.assertAlmostEqual(s, 1.0, msg="The context is {}".format(context))
return test
@add_metaclass(ParametrizeTestsMeta)
class MleBigramTests(unittest.TestCase):
"""unit tests for MLENgramModel class"""
score_tests = [
("d", ["c"], 1),
# Unseen ngrams should yield 0
("d", ["e"], 0),
# Unigrams should also be 0
("z", None, 0),
# N unigrams = 14
# count('a') = 2
("a", None, 2. / 14),
# count('y') = 3
("y", None, 3. / 14),
]
def setUp(self):
vocab, training_text = _prepare_test_data(2)
self.model = MLE(2, vocabulary=vocab)
self.model.fit(training_text)
def test_logscore_zero_score(self):
# logscore of unseen ngrams should be -inf
logscore = self.model.logscore("d", ["e"])
self.assertTrue(math.isinf(logscore))
def test_entropy_perplexity_seen(self):
# ngrams seen during training
trained = [
("<s>", "a"),
("a", "b"),
("b", "<UNK>"),
("<UNK>", "a"),
("a", "d"),
("d", "</s>"),
]
# Ngram = Log score
# <s>, a = -1
# a, b = -1
# b, UNK = -1
# UNK, a = -1.585
# a, d = -1
# d, </s> = -1
# TOTAL logscores = -6.585
# - AVG logscores = 1.0975
H = 1.0975
perplexity = 2.1398
self.assertAlmostEqual(H, self.model.entropy(trained), places=4)
self.assertAlmostEqual(perplexity, self.model.perplexity(trained), places=4)
def test_entropy_perplexity_unseen(self):
# In MLE, even one unseen ngram should make entropy and perplexity infinite
untrained = [("<s>", "a"), ("a", "c"), ("c", "d"), ("d", "</s>")]
self.assertTrue(math.isinf(self.model.entropy(untrained)))
self.assertTrue(math.isinf(self.model.perplexity(untrained)))
def test_entropy_perplexity_unigrams(self):
# word = score, log score
# <s> = 0.1429, -2.8074
# a = 0.1429, -2.8074
# c = 0.0714, -3.8073
# UNK = 0.2143, -2.2224
# d = 0.1429, -2.8074
# c = 0.0714, -3.8073
# </s> = 0.1429, -2.8074
# TOTAL logscores = -21.6243
# - AVG logscores = 3.0095
H = 3.0095
perplexity = 8.0529
text = [("<s>",), ("a",), ("c",), ("-",), ("d",), ("c",), ("</s>",)]
self.assertAlmostEqual(H, self.model.entropy(text), places=4)
self.assertAlmostEqual(perplexity, self.model.perplexity(text), places=4)
@add_metaclass(ParametrizeTestsMeta)
class MleTrigramTests(unittest.TestCase):
"""MLE trigram model tests"""
score_tests = [
# count(d | b, c) = 1
# count(b, c) = 1
("d", ("b", "c"), 1),
# count(d | c) = 1
# count(c) = 1
("d", ["c"], 1),
        # total number of tokens is 18, of which "a" occurred 2 times
("a", None, 2.0 / 18),
# in vocabulary but unseen
("z", None, 0),
# out of vocabulary should use "UNK" score
("y", None, 3.0 / 18),
]
def setUp(self):
vocab, training_text = _prepare_test_data(3)
self.model = MLE(3, vocabulary=vocab)
self.model.fit(training_text)
@add_metaclass(ParametrizeTestsMeta)
class LidstoneBigramTests(unittest.TestCase):
"""unit tests for Lidstone class"""
score_tests = [
# count(d | c) = 1
# *count(d | c) = 1.1
# Count(w | c for w in vocab) = 1
# *Count(w | c for w in vocab) = 1.8
("d", ["c"], 1.1 / 1.8),
# Total unigrams: 14
# Vocab size: 8
# Denominator: 14 + 0.8 = 14.8
# count("a") = 2
# *count("a") = 2.1
("a", None, 2.1 / 14.8),
# in vocabulary but unseen
# count("z") = 0
# *count("z") = 0.1
("z", None, 0.1 / 14.8),
# out of vocabulary should use "UNK" score
# count("<UNK>") = 3
# *count("<UNK>") = 3.1
("y", None, 3.1 / 14.8),
]
def setUp(self):
vocab, training_text = _prepare_test_data(2)
self.model = Lidstone(0.1, 2, vocabulary=vocab)
self.model.fit(training_text)
def test_gamma(self):
self.assertEqual(0.1, self.model.gamma)
def test_entropy_perplexity(self):
text = [
("<s>", "a"),
("a", "c"),
("c", "<UNK>"),
("<UNK>", "d"),
("d", "c"),
("c", "</s>"),
]
# Unlike MLE this should be able to handle completely novel ngrams
# Ngram = score, log score
# <s>, a = 0.3929, -1.3479
# a, c = 0.0357, -4.8074
# c, UNK = 0.0(5), -4.1699
# UNK, d = 0.0263, -5.2479
# d, c = 0.0357, -4.8074
# c, </s> = 0.0(5), -4.1699
# TOTAL logscore: −24.5504
# - AVG logscore: 4.0917
H = 4.0917
perplexity = 17.0504
self.assertAlmostEqual(H, self.model.entropy(text), places=4)
self.assertAlmostEqual(perplexity, self.model.perplexity(text), places=4)
@add_metaclass(ParametrizeTestsMeta)
class LidstoneTrigramTests(unittest.TestCase):
score_tests = [
# Logic behind this is the same as for bigram model
("d", ["c"], 1.1 / 1.8),
# if we choose a word that hasn't appeared after (b, c)
("e", ["c"], 0.1 / 1.8),
# Trigram score now
("d", ["b", "c"], 1.1 / 1.8),
("e", ["b", "c"], 0.1 / 1.8),
]
def setUp(self):
vocab, training_text = _prepare_test_data(3)
self.model = Lidstone(0.1, 3, vocabulary=vocab)
self.model.fit(training_text)
@add_metaclass(ParametrizeTestsMeta)
class LaplaceBigramTests(unittest.TestCase):
"""unit tests for Laplace class"""
score_tests = [
# basic sanity-check:
# count(d | c) = 1
# *count(d | c) = 2
# Count(w | c for w in vocab) = 1
# *Count(w | c for w in vocab) = 9
("d", ["c"], 2. / 9),
# Total unigrams: 14
# Vocab size: 8
# Denominator: 14 + 8 = 22
# count("a") = 2
# *count("a") = 3
("a", None, 3. / 22),
# in vocabulary but unseen
# count("z") = 0
# *count("z") = 1
("z", None, 1. / 22),
# out of vocabulary should use "UNK" score
# count("<UNK>") = 3
# *count("<UNK>") = 4
("y", None, 4. / 22),
]
def setUp(self):
vocab, training_text = _prepare_test_data(2)
self.model = Laplace(2, vocabulary=vocab)
self.model.fit(training_text)
def test_gamma(self):
# Make sure the gamma is set to 1
self.assertEqual(1, self.model.gamma)
def test_entropy_perplexity(self):
text = [
("<s>", "a"),
("a", "c"),
("c", "<UNK>"),
("<UNK>", "d"),
("d", "c"),
("c", "</s>"),
]
# Unlike MLE this should be able to handle completely novel ngrams
# Ngram = score, log score
# <s>, a = 0.2, -2.3219
# a, c = 0.1, -3.3219
# c, UNK = 0.(1), -3.1699
        # UNK, d = 0.(09), -3.4594
        # d, c = 0.1, -3.3219
# c, </s> = 0.(1), -3.1699
# Total logscores: −18.7651
# - AVG logscores: 3.1275
H = 3.1275
perplexity = 8.7393
self.assertAlmostEqual(H, self.model.entropy(text), places=4)
self.assertAlmostEqual(perplexity, self.model.perplexity(text), places=4)
@add_metaclass(ParametrizeTestsMeta)
class WittenBellInterpolatedTrigramTests(unittest.TestCase):
def setUp(self):
vocab, training_text = _prepare_test_data(3)
self.model = WittenBellInterpolated(3, vocabulary=vocab)
self.model.fit(training_text)
score_tests = [
# For unigram scores by default revert to MLE
# Total unigrams: 18
# count('c'): 1
("c", None, 1. / 18),
# in vocabulary but unseen
# count("z") = 0
("z", None, 0. / 18),
# out of vocabulary should use "UNK" score
# count("<UNK>") = 3
("y", None, 3. / 18),
# gamma(['b']) = 0.1111
# mle.score('c', ['b']) = 0.5
# (1 - gamma) * mle + gamma * mle('c') ~= 0.45 + .3 / 18
("c", ["b"], (1 - 0.1111) * 0.5 + 0.1111 * 1 / 18),
# building on that, let's try 'a b c' as the trigram
# gamma(['a', 'b']) = 0.0667
# mle("c", ["a", "b"]) = 1
("c", ["a", "b"], (1 - 0.0667) + 0.0667 * ((1 - 0.1111) * 0.5 + 0.1111 / 18)),
]
@add_metaclass(ParametrizeTestsMeta)
class KneserNeyInterpolatedTrigramTests(unittest.TestCase):
def setUp(self):
vocab, training_text = _prepare_test_data(3)
self.model = KneserNeyInterpolated(3, vocabulary=vocab)
self.model.fit(training_text)
score_tests = [
# For unigram scores revert to uniform
# Vocab size: 8
# count('c'): 1
("c", None, 1. / 8),
# in vocabulary but unseen, still uses uniform
("z", None, 1 / 8),
# out of vocabulary should use "UNK" score, i.e. again uniform
("y", None, 1. / 8),
# alpha = count('bc') - discount = 1 - 0.1 = 0.9
# gamma(['b']) = discount * number of unique words that follow ['b'] = 0.1 * 2
# normalizer = total number of bigrams with this context = 2
# the final should be: (alpha + gamma * unigram_score("c"))
("c", ["b"], (0.9 + 0.2 * (1 / 8)) / 2),
# building on that, let's try 'a b c' as the trigram
# alpha = count('abc') - discount = 1 - 0.1 = 0.9
# gamma(['a', 'b']) = 0.1 * 1
# normalizer = total number of trigrams with prefix "ab" = 1 => we can ignore it!
("c", ["a", "b"], 0.9 + 0.1 * ((0.9 + 0.2 * (1 / 8)) / 2)),
]
class NgramModelTextGenerationTests(unittest.TestCase):
"""Using MLE estimator, generate some text."""
def setUp(self):
vocab, training_text = _prepare_test_data(3)
self.model = MLE(3, vocabulary=vocab)
self.model.fit(training_text)
def test_generate_one_no_context(self):
self.assertEqual(self.model.generate(random_seed=3), "<UNK>")
def test_generate_one_limiting_context(self):
# We don't need random_seed for contexts with only one continuation
self.assertEqual(self.model.generate(text_seed=["c"]), "d")
self.assertEqual(self.model.generate(text_seed=["b", "c"]), "d")
self.assertEqual(self.model.generate(text_seed=["a", "c"]), "d")
def test_generate_one_varied_context(self):
# When context doesn't limit our options enough, seed the random choice
self.assertEqual(
self.model.generate(text_seed=("a", "<s>"), random_seed=2), "a"
)
def test_generate_no_seed_unigrams(self):
self.assertEqual(
self.model.generate(5, random_seed=3),
["<UNK>", "</s>", "</s>", "</s>", "</s>"],
)
def test_generate_with_text_seed(self):
self.assertEqual(
self.model.generate(5, text_seed=("<s>", "e"), random_seed=3),
["<UNK>", "a", "d", "b", "<UNK>"],
)
def test_generate_oov_text_seed(self):
self.assertEqual(
self.model.generate(text_seed=("aliens",), random_seed=3),
self.model.generate(text_seed=("<UNK>",), random_seed=3),
)
def test_generate_None_text_seed(self):
# should crash with type error when we try to look it up in vocabulary
with self.assertRaises(TypeError):
self.model.generate(text_seed=(None,))
# This will work
self.assertEqual(
self.model.generate(text_seed=None, random_seed=3),
self.model.generate(random_seed=3),
)
| 32.32287
| 89
| 0.521435
|
ae51d62dc4e3f156719b55fca4d0c4ec9583397a
| 848
|
py
|
Python
|
django/bitcoin_monitor/pydevd.py
|
chanhosuh/bitcoin-monitor
|
acecfcf020cf2debfdf3a2e8c446007d7412d8e1
|
[
"MIT"
] | 1
|
2020-01-01T15:54:45.000Z
|
2020-01-01T15:54:45.000Z
|
django/bitcoin_monitor/pydevd.py
|
chanhosuh/bitcoin-monitor
|
acecfcf020cf2debfdf3a2e8c446007d7412d8e1
|
[
"MIT"
] | 13
|
2019-02-28T03:24:54.000Z
|
2021-09-22T17:50:00.000Z
|
django/bitcoin_monitor/pydevd.py
|
chanhosuh/bitcoin-monitor
|
acecfcf020cf2debfdf3a2e8c446007d7412d8e1
|
[
"MIT"
] | null | null | null |
import pydevd
def set_trace():
"""
To use Eclipse's Pydev debugging in the Django docker
container:
1. in the docker container:
pip install pydevd
or add it to container's requirements.
2. set this in environment-dev-local.env:
PYTHONBREAKPOINT=bitcoin_monitor.pydevd.set_trace
The reason we don't use pydevd.settrace directly is
that it requires the host argument so Docker knows
the IP to reach out to from the container, but
it's annoying to have to always do:
breakpoint('docker.for.mac.localhost')
An alternative is to use a PyDev editor template,
which is how the above 'pydev' autocomplete works.
3. set a Python breakpoint in your code, start
up the Pydev debug server in Eclipse, debug away!
"""
pydevd.settrace('docker.for.mac.localhost')
| 25.69697
| 57
| 0.696934
|
2ce0cc7a53e74581c87ae0633b90979bbb3d348f
| 1,492
|
py
|
Python
|
datasets/mk_subimgnet_val.py
|
RowanAI/BayesianDefense
|
c4c0be9b258f40130b40d6a6e009c459666f2722
|
[
"MIT"
] | 63
|
2018-10-21T09:52:45.000Z
|
2022-03-11T02:04:17.000Z
|
datasets/mk_subimgnet_val.py
|
RowanAI/BayesianDefense
|
c4c0be9b258f40130b40d6a6e009c459666f2722
|
[
"MIT"
] | 10
|
2018-11-21T13:44:15.000Z
|
2022-03-01T13:24:52.000Z
|
datasets/mk_subimgnet_val.py
|
RowanAI/BayesianDefense
|
c4c0be9b258f40130b40d6a6e009c459666f2722
|
[
"MIT"
] | 16
|
2018-12-25T06:52:21.000Z
|
2021-08-08T05:04:07.000Z
|
import os
import glob  # kept for the commented-out glob lookup below
from shutil import copyfile
def read_f():
id_classes = {}
for l in open('./dirname_to_label.txt', 'r'):
name, id = l.split(' ')
id = int(id)
id_classes[id] = name
return id_classes
if __name__ == "__main__":
#source_dir = '/mnt/lustre/wuchongruo/projects/xq/data/prepare/val'
source_dir = '/mnt/lustre/share/images/val'
target_dir = '/mnt/lustre/wuchongruo/projects/xq/data/val'
id_classes = read_f()
#all_files = glob.glob(source_dir+'/*.JPEG')
all_files_id = {}
with open('/mnt/lustre/share/images/meta/val.txt','r') as f:
lines = f.readlines()
for line in lines:
line = line[:-1]
file_name, class_id = line.split(' ')
class_id = int(class_id)
all_files_id[file_name] = class_id
for ind, cur_file_name in enumerate(list(all_files_id.keys())):
cur_file_id = all_files_id[ cur_file_name ]
if cur_file_id >= 151 and cur_file_id < 294:
cur_file_class_name = id_classes[cur_file_id]
class_dir_path = os.path.join(target_dir, cur_file_class_name)
if not os.path.exists(class_dir_path):
os.mkdir(class_dir_path)
source_file_path = os.path.join(source_dir, cur_file_name)
dest_file_path = os.path.join(target_dir, cur_file_class_name, cur_file_name)
copyfile(source_file_path, dest_file_path)
| 27.127273
| 89
| 0.628016
|
ca5105f562396eb5970d49e3eee2f8c4b5247c6f
| 4,217
|
py
|
Python
|
test/test_edf.py
|
johnnycakes79/pyops
|
9eeda939e3f0d65a5dd220b3e439c8d2ba880d98
|
[
"BSD-3-Clause"
] | 2
|
2016-03-23T14:35:16.000Z
|
2016-05-17T04:56:28.000Z
|
test/test_edf.py
|
johnnycakes79/pyops
|
9eeda939e3f0d65a5dd220b3e439c8d2ba880d98
|
[
"BSD-3-Clause"
] | null | null | null |
test/test_edf.py
|
johnnycakes79/pyops
|
9eeda939e3f0d65a5dd220b3e439c8d2ba880d98
|
[
"BSD-3-Clause"
] | null | null | null |
from pyops import EDF
import os
def test_add_none_to_empty_fields():
edf_test = EDF()
test_dict = {'a': ['a'], 'b': [1, 3], 'c': [1]}
output = edf_test._add_none_to_empty_fields(test_dict)
assert output == {'a': ['a', None], 'b': [1, 3], 'c': [1, None]}
test_dict = {'a': [], 'b': [], 'c': []}
output = edf_test._add_none_to_empty_fields(test_dict)
assert output == {'a': [], 'b': [], 'c': []}
test_dict = {'a': [1, 2], 'b': [1, 3], 'c': [1, 'a']}
output = edf_test._add_none_to_empty_fields(test_dict)
assert output == {'a': [1, 2], 'b': [1, 3], 'c': [1, 'a']}
def test_how_many_brackets_following():
edf_test = EDF()
test_line = ['aa', 'bbadsf', '[asdf]', '[sdfs2]', 'asdfas', '[asfsddf]']
output = edf_test._how_many_brackets_following(test_line)
assert output == 0
test_line = ['aa', 'bbadsf', '[asdf]', '[sdfs2]', 'asdfas', '[asfsddf]']
output = edf_test._how_many_brackets_following(test_line[2:])
assert output == 2
test_line = ['aa', 'bbadsf', '[asdf]', '[sdfs2]', 'asdfas', '[asfsddf]']
output = edf_test._how_many_brackets_following(test_line[5:])
assert output == 1
def test_read_variables():
edf_test = EDF()
test_line = "Experiment: MERTIS 'MERTIS'".split()
edf_test._read_variables(test_line)
assert edf_test.experiment == "MERTIS 'MERTIS'"
test_line = "Include: mertis.edf".split()
edf_test._read_variables(test_line)
output = False
for files in edf_test.include_files:
if 'mertis.edf' in files:
output = True
assert output is True
test_line = "Include_file: bela.edf".split()
edf_test._read_variables(test_line)
output = False
output2 = False
for files in edf_test.include_files:
if 'bela.edf' in files:
output = True
if 'mertis.edf' in files:
output2 = True
assert output is True and output2 is True
test_line = "Random_variable: test".split()
edf_test._read_variables(test_line)
output = False
for variable in edf_test.variables:
if "Random_variable" in variable:
if edf_test.variables["Random_variable"] == 'test':
output = True
assert output is True
def test_concatenate_lines():
edf_test = EDF()
test_content = ['#abc\\\n', 'abcd\\ #Comment\n', 'b\\\n', 'aa\n',
'bcd\\\n', 'aa\\\\\n', 'c\n']
output = edf_test._concatenate_lines(test_content)
assert output == ['#abc\\', 'abcd b aa', 'bcd aa\\\\', 'c']
def test_load_edf_file():
this_dir, this_filename = os.path.split(__file__)
parent_dir = os.path.abspath(os.path.join(this_dir, os.pardir))
test_file = os.path.join(parent_dir, "test/data/test.edf")
edf = EDF(test_file)
assert edf.experiment == 'SSMM "MassMemory"'
for key in edf.GLOBAL_PROPERTIES:
if key == 'Global_actions':
assert edf.GLOBAL_PROPERTIES[key] == \
'COPY_DV UPDATE_FTS_TABLE ALLOCATE_PID'
else:
assert edf.GLOBAL_PROPERTIES[key] is None
assert len(edf.DATA_STORES.Table) == 28
assert edf.DATA_STORES.Table.loc[2]['Memory size'] == '1 [Gbits]'
assert len(edf.PIDS.Table) == 6
assert edf.PIDS.Table.loc[3]['Data Store ID'] == '30'
assert len(edf.FTS.Table) == 8
assert edf.FTS.Table.loc[6]['Data Volume'] == '150'
assert len(edf.FOVS.Table) == 2
assert edf.FOVS.Table.loc[1]['FOV_type'] == 'RECTANGULAR'
assert len(edf.MODES.Table) == 5
assert edf.MODES.Table.loc[3]['Nominal_power'] == '0 [Watts]'
assert len(edf.MODULES.Table) == 4
assert edf.MODULES.Table.loc[2]['Module_level'] is None
assert len(edf.MODULES.Module_states_Table) == 10
assert edf.MODULES.Module_states_Table.loc[5]['MS_data_rate_parameter'] ==\
'VIHI_DATA_RATE'
assert len(edf.PARAMETERS.Table) == 11
assert edf.PARAMETERS.Table.loc[7]['Eng_type'] == 'REAL'
assert len(edf.PARAMETERS.Parameter_values_Table) == 6
assert edf.PARAMETERS.Parameter_values_Table.loc[3]['Parameter_value'] == \
'FTS_ENABLE_FLAG - 1 ENABLE'
assert len(edf.ACTIONS.Table) == 3
assert edf.ACTIONS.Table.loc[1]['Duration'] is None
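# A minimal sketch of what EDF._add_none_to_empty_fields presumably does,
# inferred from the assertions above (hypothetical, not the real pyops code):
#
#     def _add_none_to_empty_fields(self, fields):
#         longest = max((len(v) for v in fields.values()), default=0)
#         return {k: v + [None] * (longest - len(v)) if v else v
#                 for k, v in fields.items()}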
| 32.689922
| 79
| 0.624378
|
b2d48d2e0312f14d5a1e321e3db31e8fe69a859f
| 681
|
py
|
Python
|
src/1616_split_two_strings_to_make_palindrome.py
|
hariharanragothaman/leetcode-solutions
|
44e759f80d3c9df382fdf8d694d6378881e3649d
|
[
"Apache-2.0"
] | 2
|
2021-04-21T07:59:42.000Z
|
2021-06-17T17:14:26.000Z
|
src/1616_split_two_strings_to_make_palindrome.py
|
hariharanragothaman/pyrevise-leetcode
|
44e759f80d3c9df382fdf8d694d6378881e3649d
|
[
"Apache-2.0"
] | null | null | null |
src/1616_split_two_strings_to_make_palindrome.py
|
hariharanragothaman/pyrevise-leetcode
|
44e759f80d3c9df382fdf8d694d6378881e3649d
|
[
"Apache-2.0"
] | null | null | null |
class Solution:
def checkPalindromeFormation(self, a: str, b: str) -> bool:
"""
        Grow the split while a's prefix mirrors b's suffix: advance i from
        the front of a and j from the back of b while a[i] == b[j]. The
        leftover middles of a and b are the only parts that still need to
        be palindromic. Try both orderings, (a, b) and (b, a); if any
        leftover middle is a palindrome, a valid split exists.
"""
i = 0
j = len(a) - 1
while i < j and a[i] == b[j]:
i = i + 1
j = j - 1
s1 = a[i : j + 1]
s2 = b[i : j + 1]
i, j = 0, len(a) - 1
while i < j and b[i] == a[j]:
i, j = i + 1, j - 1
s3, s4 = a[i : j + 1], b[i : j + 1]
return any(s == s[::-1] for s in (s1, s2, s3, s4))
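# Usage sketch (hypothetical inputs):
#     s = Solution()
#     s.checkPalindromeFormation("ulacfd", "jizalu")  # True: "ula" + "alu"
#     s.checkPalindromeFormation("x", "y")            # True: empty prefix + "y"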
| 26.192308
| 66
| 0.400881
|
f79b568345f11b3af3b82d3b719d52d13aa69775
| 1,778
|
py
|
Python
|
sktime/classification/all/__init__.py
|
alwinw/sktime
|
a6f17bd586df6bbc8e6c783f08eda4c30d2353f9
|
[
"BSD-3-Clause"
] | 1
|
2020-11-05T18:47:32.000Z
|
2020-11-05T18:47:32.000Z
|
sktime/classification/all/__init__.py
|
alwinw/sktime
|
a6f17bd586df6bbc8e6c783f08eda4c30d2353f9
|
[
"BSD-3-Clause"
] | null | null | null |
sktime/classification/all/__init__.py
|
alwinw/sktime
|
a6f17bd586df6bbc8e6c783f08eda4c30d2353f9
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3 -u
# -*- coding: utf-8 -*-
__author__ = ["Markus Löning"]
__all__ = [
"ShapeletTransformClassifier",
"MrSEQLClassifier",
"BOSSEnsemble",
"BOSSIndividual",
"KNeighborsTimeSeriesClassifier",
"TemporalDictionaryEnsemble",
"ProximityStump",
"ProximityTree",
"ProximityForest",
"TimeSeriesForest",
"TimeSeriesForestClassifier",
"RandomIntervalSpectralForest",
"ColumnEnsembleClassifier",
"ElasticEnsemble",
"pd",
"np",
"load_gunpoint",
"load_osuleaf",
"load_basic_motions",
"load_arrow_head",
]
import numpy as np
import pandas as pd
from sktime.classification.compose import ColumnEnsembleClassifier
from sktime.classification.compose import TimeSeriesForestClassifier
from sktime.classification.dictionary_based import BOSSEnsemble
from sktime.classification.dictionary_based import BOSSIndividual
from sktime.classification.dictionary_based import TemporalDictionaryEnsemble
from sktime.classification.distance_based import ElasticEnsemble
from sktime.classification.distance_based import KNeighborsTimeSeriesClassifier
from sktime.classification.distance_based import ProximityForest
from sktime.classification.distance_based import ProximityStump
from sktime.classification.distance_based import ProximityTree
from sktime.classification.frequency_based import RandomIntervalSpectralForest
from sktime.classification.interval_based import TimeSeriesForest
from sktime.classification.shapelet_based import MrSEQLClassifier
from sktime.classification.shapelet_based import ShapeletTransformClassifier
from sktime.datasets import load_arrow_head
from sktime.datasets import load_basic_motions
from sktime.datasets import load_gunpoint
from sktime.datasets import load_osuleaf
| 36.285714
| 79
| 0.823397
|
b0721b36819da2a0f9db56ef58b00a3a9ce43b9e
| 4,838
|
py
|
Python
|
plyer/platforms/macosx/wifi.py
|
seanodonnell/plyer
|
9b9c523cbef3e7ad04e231bbabad36652d88b45e
|
[
"MIT"
] | null | null | null |
plyer/platforms/macosx/wifi.py
|
seanodonnell/plyer
|
9b9c523cbef3e7ad04e231bbabad36652d88b45e
|
[
"MIT"
] | null | null | null |
plyer/platforms/macosx/wifi.py
|
seanodonnell/plyer
|
9b9c523cbef3e7ad04e231bbabad36652d88b45e
|
[
"MIT"
] | null | null | null |
from plyer.facades import Wifi
from pyobjus.dylib_manager import load_framework, INCLUDE
from pyobjus import autoclass
load_framework(INCLUDE.Foundation)
load_framework(INCLUDE.CoreWLAN)
CWInterface = autoclass('CWInterface')
CWNetwork = autoclass('CWNetwork')
CWWiFiClient = autoclass('CWWiFiClient')
NSArray = autoclass('NSArray')
NSDictionary = autoclass('NSDictionary')
NSString = autoclass('NSString')
class OSXWifi(Wifi):
names = {}
def _is_enabled(self):
'''
Returns `True` if the Wifi is enabled else returns `False`.
'''
return CWWiFiClient.sharedWiFiClient().interface().powerOn()
def _get_network_info(self, name):
'''
Returns all the network information.
'''
accessNetworkType = self.names[name].accessNetworkType
aggregateRSSI = self.names[name].aggregateRSSI
beaconInterval = self.names[name].beaconInterval
bssid = self.names[name].bssid.UTF8String()
countryCode = self.names[name].countryCode
hasInternet = self.names[name].hasInternet
hasInterworkingIE = self.names[name].hasInterworkingIE
hessid = self.names[name].hessid
ibss = self.names[name].ibss
isAdditionalStepRequiredForAccess = \
self.names[name].isAdditionalStepRequiredForAccess
isCarPlayNetwork = self.names[name].isCarPlayNetwork
isEmergencyServicesReachable = \
self.names[name].isEmergencyServicesReachable
isPasspoint = self.names[name].isPasspoint
isPersonalHotspot = self.names[name].isPersonalHotspot
isUnauthenticatedEmergencyServiceAccessible = \
self.names[name].isUnauthenticatedEmergencyServiceAccessible
noiseMeasurement = self.names[name].noiseMeasurement
physicalLayerMode = self.names[name].physicalLayerMode
rssiValue = self.names[name].rssiValue
securityType = self.names[name].securityType
ssid = self.names[name].ssid.UTF8String()
supportsEasyConnect = self.names[name].supportsEasyConnect
supportsWPS = self.names[name].supportsWPS
venueGroup = self.names[name].venueGroup
venueType = self.names[name].venueType
return {'accessNetworkType': accessNetworkType,
'aggregateRSSI': aggregateRSSI,
'beaconInterval': beaconInterval,
'bssid': bssid,
'countryCode': countryCode,
'hasInternet': hasInternet,
'hasInterworkingIE': hasInterworkingIE,
'hessid': hessid,
'ibss': ibss,
'isAdditionalStepRequiredForAccess':
isAdditionalStepRequiredForAccess,
'isCarPlayNetwork': isCarPlayNetwork,
'isEmergencyServicesReachable': isEmergencyServicesReachable,
'isPasspoint': isPasspoint,
'isPersonalHotspot': isPersonalHotspot,
'isUnauthenticatedEmergencyServiceAccessible':
isUnauthenticatedEmergencyServiceAccessible,
'noiseMeasurement': noiseMeasurement,
'physicalLayerMode': physicalLayerMode,
'rssiValue': rssiValue,
'securityType': securityType,
'ssid': ssid,
'supportsEasyConnect': supportsEasyConnect,
'supportsWPS': supportsWPS,
'venueGroup': venueGroup,
'venueType': venueType}
def _start_scanning(self):
'''
Starts scanning for available Wi-Fi networks.
'''
if self._is_enabled():
self.names = {}
c = CWInterface.interface()
scan = c.scanForNetworksWithName_error_(None, None)
cnt = scan.allObjects().count()
for i in range(cnt):
self.names[
scan.allObjects().objectAtIndex_(i).ssid.UTF8String()
] = scan.allObjects().objectAtIndex_(i)
else:
raise Exception("Wifi not enabled.")
def _get_available_wifi(self):
'''
Returns the name of available networks.
'''
return self.names.keys()
def _connect(self, network, parameters):
'''
        Expects the name/ssid of the network and a `parameters`
        dict containing a 'password' key.
'''
password = parameters['password']
network_object = self.names[network]
CWInterface.interface().associateToNetwork_password_error_(
network_object,
password,
None)
return
def _disconnect(self):
'''
Disconnect from network.
'''
CWInterface.interface().disassociate()
return
def instance():
return OSXWifi()
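# Usage sketch (illustrative only; assumes a macOS host with CoreWLAN
# available, and the SSID/password values are made up):
#
#     wifi = instance()
#     if wifi._is_enabled():
#         wifi._start_scanning()
#         for ssid in wifi._get_available_wifi():
#             print(ssid, wifi._get_network_info(ssid)['rssiValue'])
#         wifi._connect('MyNetwork', {'password': 'hunter2'})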
| 36.651515
| 77
| 0.621331
|
2e615eb0f76f8a6c9d7be7348534790d04b892c1
| 1,552
|
py
|
Python
|
stream/readPubsubMessage/readPubsub.py
|
sanjeevkanabargi/python
|
25f43cf20f92104951d5d1d3b2af535450085e72
|
[
"Apache-2.0"
] | null | null | null |
stream/readPubsubMessage/readPubsub.py
|
sanjeevkanabargi/python
|
25f43cf20f92104951d5d1d3b2af535450085e72
|
[
"Apache-2.0"
] | null | null | null |
stream/readPubsubMessage/readPubsub.py
|
sanjeevkanabargi/python
|
25f43cf20f92104951d5d1d3b2af535450085e72
|
[
"Apache-2.0"
] | null | null | null |
import argparse
import os
"""Receives messages from a pull subscription."""
import datetime, time
from google.cloud import pubsub_v1
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--projectID', help="Project ID for GCloud, e.g. <ba-qe-da7e1252>", default='ba-qe-da7e1252')
parser.add_argument('-s','--subscriber', help="Subscriber of a pubsub topic", required=True)
parser.add_argument('-k','--serviceKey', help="GCP service account key, json file path", required=True)
args = parser.parse_args()
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = str(args.serviceKey)
project_id = args.projectID
subscription_name = args.subscriber
print('Pulling data from: ' + str(project_id) + "/" + str(subscription_name))
subscriber = pubsub_v1.SubscriberClient()
# The `subscription_path` method creates a fully qualified identifier
# in the form `projects/{project_id}/subscriptions/{subscription_name}`
subscription_path = subscriber.subscription_path(
project_id, subscription_name)
def callback(message):
    print('======================================\nReceived message:\t{}\nReceived time:\t{}\n'.format(message.data.decode("utf-8"), datetime.datetime.now()))
    # message.ack()  # left commented out: unacked messages are redelivered
subscriber.subscribe(subscription_path, callback=callback)
# The subscriber is non-blocking. We must keep the main thread from
# exiting to allow it to process messages asynchronously in the background.
print('Listening for messages on {}'.format(subscription_path))
while True:
    time.sleep(60)  # keep the main thread alive without busy-waiting
# [END pubsub_subscriber_async_pull]
# [END pubsub_quickstart_subscriber]
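# Example invocation (hypothetical project/subscription/key values):
#     python readPubsub.py -p ba-qe-da7e1252 -s my-subscription \
#         -k /path/to/service-account.json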
| 35.272727
| 156
| 0.749356
|
4abec4a4d0566be889c15134e35e5d63796c0581
| 207
|
py
|
Python
|
strategy.py
|
nitishsahni/TexasHoldem
|
4b5e9da0113a871141339fe8e731b5110c5cb9a3
|
[
"MIT"
] | null | null | null |
strategy.py
|
nitishsahni/TexasHoldem
|
4b5e9da0113a871141339fe8e731b5110c5cb9a3
|
[
"MIT"
] | null | null | null |
strategy.py
|
nitishsahni/TexasHoldem
|
4b5e9da0113a871141339fe8e731b5110c5cb9a3
|
[
"MIT"
] | null | null | null |
class Strategy:
"""Template strategy"""
def __init__(self):
pass
def think(self, player):
pass
class AlwaysFold(Strategy):
def think(self, player):
return "fold"
| 13.8
| 28
| 0.584541
|
bed66f337ab42f9fa828496032524d4f3c58e8b7
| 846
|
py
|
Python
|
telegram_ecommerce/tamplates/messages.py
|
Anonylions/telegram_ecommerce
|
f5382886bbebf607c735e2f451774c56df8d6011
|
[
"MIT"
] | 10
|
2020-11-20T20:55:52.000Z
|
2022-02-10T20:25:45.000Z
|
telegram_ecommerce/tamplates/messages.py
|
Anonylions/telegram_ecommerce
|
f5382886bbebf607c735e2f451774c56df8d6011
|
[
"MIT"
] | 1
|
2022-02-16T10:28:18.000Z
|
2022-02-16T10:35:31.000Z
|
telegram_ecommerce/tamplates/messages.py
|
Anonylions/telegram_ecommerce
|
f5382886bbebf607c735e2f451774c56df8d6011
|
[
"MIT"
] | 8
|
2021-05-01T01:13:09.000Z
|
2022-03-13T14:00:01.000Z
|
from ..language import get_text
from .buttons import (
boolean_question,
rating_tamplate)
def reply(update, context, text):
update.message.reply_text(text)
def ask_a_boolean_question(
update,
context,
pattern_identifier="",
question=None):
if question:
text = question
else:
text = get_text("ask_if_its_all_ok", context)
markup = boolean_question(pattern_identifier, context)
update.message.reply_text(text, reply_markup=markup)
def send_a_rating_message(update, context, pattern_identifier=""):
text = get_text("ask_for_the_rating", context)
markup = rating_tamplate(pattern_identifier, context)
if update.message:
update.message.reply_text(text, reply_markup=markup)
else:
        update.callback_query.edit_message_text(text, reply_markup=markup)
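# Usage sketch (assumes python-telegram-bot style wiring; the handler below
# is illustrative, not part of this module):
#
#     from telegram.ext import CommandHandler
#     dispatcher.add_handler(CommandHandler(
#         "rate", lambda update, context: send_a_rating_message(update, context)))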
| 26.4375
| 76
| 0.719858
|
42fa558a76bc8cfaf430a7208f4889ce5e8cf993
| 605
|
py
|
Python
|
tests/amp/test_amp.py
|
lrodrin/transceivers
|
62b8e44c0a459615a0464a9567e28e195c4dabf1
|
[
"MIT"
] | null | null | null |
tests/amp/test_amp.py
|
lrodrin/transceivers
|
62b8e44c0a459615a0464a9567e28e195c4dabf1
|
[
"MIT"
] | null | null | null |
tests/amp/test_amp.py
|
lrodrin/transceivers
|
62b8e44c0a459615a0464a9567e28e195c4dabf1
|
[
"MIT"
] | null | null | null |
import logging
import time
from os import sys, path
sys.path.append(path.dirname(path.dirname(path.dirname(path.abspath(__file__)))))
logging.basicConfig(level=logging.DEBUG)
from lib.amp.amp import Amplifier
if __name__ == '__main__':
ip_oa1 = '10.1.1.16'
ip_oa2 = '10.1.1.15'
addr = '3'
mode_oa1 = "APC"
mode_oa2 = "APC"
power_oa1 = 1.8
power_oa2 = 1.8
start_time = time.time()
# Amplifier.configuration(ip_oa1, addr, mode_oa1, power_oa1)
Amplifier.configuration(ip_oa2, addr, mode_oa2, power_oa2)
print("--- %s seconds ---" % (time.time() - start_time))
| 26.304348
| 81
| 0.67438
|
29faa414806d22c84693b30631ad1bafe140c642
| 4,775
|
py
|
Python
|
tests/test_qoo.py
|
cvoege/qoo
|
3a94ce1c59a50218a0981b44c298192706bf18f1
|
[
"MIT"
] | 2
|
2019-07-22T18:22:38.000Z
|
2019-10-03T23:35:42.000Z
|
tests/test_qoo.py
|
cvoege/qoo
|
3a94ce1c59a50218a0981b44c298192706bf18f1
|
[
"MIT"
] | 2
|
2019-10-25T13:04:20.000Z
|
2021-04-30T20:55:08.000Z
|
tests/test_qoo.py
|
cvoege/qoo
|
3a94ce1c59a50218a0981b44c298192706bf18f1
|
[
"MIT"
] | 1
|
2019-10-25T01:21:06.000Z
|
2019-10-25T01:21:06.000Z
|
"""
@author jacobi petrucciani
@desc pytest the qoo functionality
"""
import json
import os
import pytest
import qoo
import sys
import time
from moto import mock_sqs
def dbg(text) -> None:
"""debug printer for tests"""
if isinstance(text, dict):
text = json.dumps(text, sort_keys=True, indent=2)
caller = sys._getframe(1)
print("")
print("----- {} line {} ------".format(caller.f_code.co_name, caller.f_lineno))
print(text)
print("-----")
print("")
def test_login():
"""
Ensure that login sets the correct environment variables.
The ``login`` fixture sets these automatically.
"""
assert os.environ["AWS_ACCESS_KEY_ID"] == "access_key"
assert os.environ["AWS_SECRET_ACCESS_KEY"] == "secret_key"
assert os.environ["AWS_DEFAULT_REGION"] == "us-east-1"
@mock_sqs
def test_queues_can_be_created():
"""test that we can create a queue"""
queue = qoo.create("test_queue")
assert queue.name == "test_queue"
assert queue.created_at < time.time()
assert queue.maximum_message_size == 262144
assert not queue.fifo
@mock_sqs
def test_fifo_queues_can_be_created():
"""test that we can create a queue"""
queue = qoo.create("test_queue.fifo", fifo=True)
assert queue.name == "test_queue.fifo"
assert queue.created_at < time.time()
assert queue.maximum_message_size == 262144
assert queue.fifo
@mock_sqs
def test_queues_are_not_auto_created():
"""tests that we are not creating queues on init"""
sqs = qoo._client()
with pytest.raises(sqs.exceptions.QueueDoesNotExist):
qoo.get("this_isnt_a_queue")
def test_can_send_job(queue):
"""test that we can send a job into the queue"""
queue.send(info="test_job")
assert len(queue) > 0
def test_can_send_batch_jobs(queue):
"""test that we can send many jobs into the queue"""
responses = queue.send_batch(
[
{"job": 0, "message": "test 0"},
{"job": 1, "message": "test 1"},
{"job": 2, "message": "test 2"},
{"job": 3, "message": "test 3"},
{"job": 4, "message": "test 4"},
{"job": 5, "message": "test 5"},
{"job": 6, "message": "test 6"},
{"job": 7, "message": "test 7"},
{"job": 8, "message": "test 8"},
{"job": 9, "message": "test 9"},
{"job": 10, "message": "test 10"},
{"job": 11, "message": "test 11"},
{"job": 12, "message": "test 12"},
{"job": 13, "message": "test 13"},
{"job": 14, "message": "test 14"},
{"job": 15, "message": "test 15"},
{"job": 16, "message": "test 16"},
{"job": 17, "message": "test 17"},
{"job": 18, "message": "test 18"},
{"job": 19, "message": "test 19"},
{"job": 20, "message": "test 20"},
]
)
assert len(queue) == 21
assert len(responses["Successful"]) == 21
# send as list of strings
responses = queue.send_batch(
[
"this",
"is",
"an",
"example",
"of",
"sending",
"batch",
"messages",
"as",
"a",
"list",
"of",
"strings",
]
)
assert len(queue) == 34
assert len(responses["Successful"]) == 13
jobs = queue.receive_jobs(max_messages=10)
assert len(jobs) == 10
assert "message" in jobs[0]
assert jobs[0].job == 0
jobs = queue.receive_jobs(max_messages=10)
assert len(jobs) == 10
jobs = queue.receive_jobs(max_messages=10)
assert len(jobs) == 10
jobs = queue.receive_jobs(max_messages=10)
assert len(jobs) == 4
def test_can_send_and_receive_job(queue):
"""test that we can send a job into the queue, and pull it back out"""
queue.send(info="test_job")
assert len(queue) > 0
job = queue.receive()
assert job
assert job.md5_matches
assert job.approximate_receive_count == 1
assert job.elapsed > 0.0
def test_can_purge_queue(queue_with_job):
"""test that we can purge a queue"""
queue_with_job.purge()
assert len(queue_with_job) == 0
def test_can_delete_job(queue_with_job):
"""test that we can delete a job from the queue"""
job = queue_with_job.receive()
del job
next_job = queue_with_job.receive(wait_time=1)
assert not next_job
def test_pull_two_jobs(queue_with_jobs):
"""test that we can pull 2 jobs in one call"""
assert len(queue_with_jobs) == 10
job_0 = queue_with_jobs.receive()
job_1 = queue_with_jobs.receive()
assert job_0.test
assert job_1.test
assert job_0.test == "test message 0"
assert job_1.test == "test message 1"
| 28.592814
| 83
| 0.584084
|
0efb633804bc912338e9473807697d8d44af2769
| 1,996
|
py
|
Python
|
samsung/13460.py
|
smartx-jshan/Coding_Practice
|
bc7d485e7992031e55df62483818b721ad7d1d4f
|
[
"Apache-2.0"
] | null | null | null |
samsung/13460.py
|
smartx-jshan/Coding_Practice
|
bc7d485e7992031e55df62483818b721ad7d1d4f
|
[
"Apache-2.0"
] | null | null | null |
samsung/13460.py
|
smartx-jshan/Coding_Practice
|
bc7d485e7992031e55df62483818b721ad7d1d4f
|
[
"Apache-2.0"
] | null | null | null |
import collections
from sys import stdin
n, m = map(int, stdin.readline().split())
graph = [list(stdin.readline()) for _ in range(n)]
red = [0, 0]
blue = [0, 0]
# store the red ball position
# store the blue ball position
# after reading the red and blue coordinates, replace their cells
# with '.' so both squares remain walkable
for i in range(n):
    for j in range(m):
        if graph[i][j] == 'R':
            graph[i][j] = '.'
            # red ball position
            red = [i, j]
        elif graph[i][j] == 'B':
            graph[i][j] = '.'
            # blue ball position
            blue = [i, j]
def movement(x, y, dx, dy):
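    # roll a ball from (x, y) one step at a time in direction (dx, dy)
    # until the next cell is a wall '#'; return (0, 0, 0) if the ball
    # falls into the hole 'O' on the way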
move = 0
while graph[x+dx][y+dy] != '#':
if graph[x+dx][y+dy] == 'O':
return 0, 0, 0
x += dx
y += dy
move += 1
return x, y, move
def bfs():
    visit = {}
    queue = collections.deque([red + blue])
    visit[red[0], red[1], blue[0], blue[1]] = 0
    while queue:
        rx, ry, bx, by = queue.popleft()
        # the answer must be reachable within 10 tilts
        if visit[rx, ry, bx, by] >= 10:
            continue
        for dx, dy in (-1, 0), (1, 0), (0, -1), (0, 1):
            nrx, nry, rmove = movement(rx, ry, dx, dy)
            nbx, nby, bmove = movement(bx, by, dx, dy)
            # the blue ball fell into the hole: this tilt is invalid
            if nbx == 0 and nby == 0:
                continue
            # the blue ball stayed on the board and the red ball escaped
            elif nrx == 0 and nry == 0:
                print(visit[rx, ry, bx, by] + 1)
                return
            # both balls ended on the same cell: push the one that
            # travelled farther back by one step
            elif nrx == nbx and nry == nby:
                if rmove > bmove:
                    nrx -= dx
                    nry -= dy
                else:
                    nbx -= dx
                    nby -= dy
            # enqueue states that have not been visited yet
            if (nrx, nry, nbx, nby) not in visit:
                visit[nrx, nry, nbx, nby] = visit[rx, ry, bx, by] + 1
                queue.append([nrx, nry, nbx, nby])
    print(-1)
bfs()
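# Sample input for Baekjoon 13460 (illustrative; '#' walls, 'O' hole,
# 'R' red ball, 'B' blue ball):
#     5 5
#     #####
#     #..B#
#     #.#.#
#     #RO.#
#     #####
# Expected output: 1 (one tilt to the right drops only the red ball
# into the hole).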
| 23.209302
| 86
| 0.422846
|
f6a7bf1524d5dd4165862a54664f3b669f63f4a8
| 623
|
py
|
Python
|
config.py
|
EDL-Group/klabotermann
|
7ced510348b4ee2c2e5bfb5186d7f6b387f8843c
|
[
"MIT"
] | null | null | null |
config.py
|
EDL-Group/klabotermann
|
7ced510348b4ee2c2e5bfb5186d7f6b387f8843c
|
[
"MIT"
] | null | null | null |
config.py
|
EDL-Group/klabotermann
|
7ced510348b4ee2c2e5bfb5186d7f6b387f8843c
|
[
"MIT"
] | 1
|
2020-11-12T15:21:58.000Z
|
2020-11-12T15:21:58.000Z
|
# -*- Mode: Python; tab-width: 8; python-indent-offset: 4 -*-
# This Source Code Form is subject to the terms of the MIT License.
# If a copy of the MIT License was not distributed with this
# file, You can obtain one at https://opensource.org/licenses/MIT
# author: JackRed <jackred@tuta.io>
import os
class DefaultConfig:
""" Bot Configuration """
HOST = "0.0.0.0"
PORT = 3978
APP_ID = os.environ.get("MicrosoftAppId", "")
APP_PASSWORD = os.environ.get("MicrosoftAppPassword", "")
PREFIX = "!"
class DBConfig:
"""DB Configuration"""
port = 27017
host = "ng_mongo"
db = "klabotermann"
| 24.92
| 67
| 0.654896
|
ca9532ac6a2d35da14075131c54e98d6b2815c26
| 2,578
|
py
|
Python
|
DoubleCommandFactory.py
|
invveritas/PYIEC60870-104
|
68fd8481f55e907209560f9797d7e068f2598284
|
[
"MIT"
] | 5
|
2016-05-25T08:27:01.000Z
|
2019-05-08T19:20:15.000Z
|
DoubleCommandFactory.py
|
invveritas/PYIEC60870-104
|
68fd8481f55e907209560f9797d7e068f2598284
|
[
"MIT"
] | null | null | null |
DoubleCommandFactory.py
|
invveritas/PYIEC60870-104
|
68fd8481f55e907209560f9797d7e068f2598284
|
[
"MIT"
] | 13
|
2016-11-13T15:39:30.000Z
|
2022-01-23T07:25:43.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 08 10:41:44 2015
clsIECDoubleCommandFactory
Class that creates double command telegrams (only On/Off)
@author: Martin Baldinger / martin.baldinger@gmail.com
"""
from CommandFactory import CommandFactory
from myfunctions import int_to_hex_bytes
class CDoubleCommandFactory(CommandFactory):
"""
Extends the CommandFactory class. It generates the double command telegrams that
are needed to switch objects in the FPS controller ON or OFF.
"""
def __init__(self, ASDU, IOA, CONTROL_ACTION):
"""
Builds the telegram as a list of bytes. CONTROL_ACTION specifies whether the target
object given by the IOA should be switched \"ON\" or \"OFF\".
"""
CommandFactory.__init__(self)
# 1) length of APDU
self.command.append(10 + 4)
# 2-5) Control Fields
# leave them all zero for the moment
# we need to care about them later, when trying to check whether
# telegrams arrived and were processed or not
self.command.append(self.ZERO_BYTE)
self.command.append(self.ZERO_BYTE)
self.command.append(self.ZERO_BYTE)
self.command.append(self.ZERO_BYTE)
# 6) Type Identification
self.command.append(self.TYPE_C_DC_NA_1)
# 7) SQ / Number of Objects
self.command.append(1)
# 8) T / P/N / Cause of Transmission
self.command.append(self.COT_ACTIVATION)
# COT is always "activation", even if switching off!!!
# 9) Originator Address
# this is always zero in our case
self.command.append(self.ZERO_BYTE)
# 10-11) ASDU address
asdu_bytes = int_to_hex_bytes(ASDU)
# - low octet
self.command.append(asdu_bytes[1])
# - high octet
self.command.append(asdu_bytes[0])
        # 12-n) IOAs + respective object information
ioa_bytes = int_to_hex_bytes(IOA)
# IOA - low octet
self.command.append(ioa_bytes[1])
# IOA - high octet
self.command.append(ioa_bytes[0])
# IOA - special - always 0 in our case
self.command.append(self.ZERO_BYTE)
# Object Information - always one byte in our case
if CONTROL_ACTION == "ON":
self.command.append(self.EXECUTE_UNSPECIFIED_ON)
else:
self.command.append(self.EXECUTE_UNSPECIFIED_OFF)
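# Usage sketch (ASDU/IOA values are illustrative; assumes CommandFactory
# initializes self.command as the APDU byte list):
#     factory = CDoubleCommandFactory(ASDU=1, IOA=5004, CONTROL_ACTION="ON")
#     telegram = factory.command  # ready to be framed and sent over TCP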
| 34.837838
| 92
| 0.607448
|
6ad772eeb48e3179bfcec3aaf20e4c2036c3a5d5
| 36,495
|
py
|
Python
|
protocol.py
|
Cloud-Lakitu/versacoin-opennodes
|
21745217c47e397ecb58a4a020c9111c71c267ee
|
[
"MIT"
] | 14
|
2019-03-05T15:02:30.000Z
|
2022-01-06T09:33:18.000Z
|
protocol.py
|
Cloud-Lakitu/versacoin-opennodes
|
21745217c47e397ecb58a4a020c9111c71c267ee
|
[
"MIT"
] | 2
|
2020-05-19T14:25:03.000Z
|
2021-04-21T17:31:24.000Z
|
protocol.py
|
Cloud-Lakitu/versacoin-opennodes
|
21745217c47e397ecb58a4a020c9111c71c267ee
|
[
"MIT"
] | 12
|
2019-03-05T15:02:33.000Z
|
2022-01-07T02:10:36.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# protocol.py - Bitcoin protocol access for Bitnodes.
#
# Copyright (c) Addy Yeow Chin Heng <ayeowch@gmail.com>
#
# Modified by open-nodes project for python3 compatibility
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
Bitcoin protocol access for Bitnodes.
Reference: https://en.bitcoin.it/wiki/Protocol_specification
-------------------------------------------------------------------------------
PACKET STRUCTURE FOR BITCOIN PROTOCOL
protocol version >= 70001
-------------------------------------------------------------------------------
[---MESSAGE---]
[ 4] MAGIC_NUMBER (\xF9\xBE\xB4\xD9) uint32_t
[12] COMMAND char[12]
[ 4] LENGTH <I (len(payload)) uint32_t
[ 4] CHECKSUM (sha256(sha256(payload))[:4]) uint32_t
[..] PAYLOAD see below
[---VERSION_PAYLOAD---]
[ 4] VERSION <i int32_t
[ 8] SERVICES <Q uint64_t
[ 8] TIMESTAMP <q int64_t
[26] ADDR_RECV
[ 8] SERVICES <Q uint64_t
[16] IP_ADDR
[12] IPV6 (\x00 * 10 + \xFF * 2) char[12]
[ 4] IPV4 char[4]
[ 2] PORT >H uint16_t
[26] ADDR_FROM
[ 8] SERVICES <Q uint64_t
[16] IP_ADDR
[12] IPV6 (\x00 * 10 + \xFF * 2) char[12]
[ 4] IPV4 char[4]
[ 2] PORT >H uint16_t
[ 8] NONCE <Q (random.getrandbits(64)) uint64_t
[..] USER_AGENT variable string
[ 4] HEIGHT <i int32_t
[ 1] RELAY <? (since version >= 70001) bool
[---ADDR_PAYLOAD---]
[..] COUNT variable integer
[..] ADDR_LIST multiple of COUNT (max 1000)
[ 4] TIMESTAMP <I uint32_t
[ 8] SERVICES <Q uint64_t
[16] IP_ADDR
[12] IPV6 (\x00 * 10 + \xFF * 2) char[12]
[ 4] IPV4 char[4]
[ 2] PORT >H uint16_t
[---PING_PAYLOAD---]
[ 8] NONCE <Q (random.getrandbits(64)) uint64_t
[---PONG_PAYLOAD---]
[ 8] NONCE <Q (nonce from ping) uint64_t
[---INV_PAYLOAD---]
[..] COUNT variable integer
[..] INVENTORY multiple of COUNT (max 50000)
[ 4] TYPE <I (0=error, 1=tx, 2=block) uint32_t
[32] HASH char[32]
[---TX_PAYLOAD---]
[ 4] VERSION <I uint32_t
[..] TX_IN_COUNT variable integer
[..] TX_IN multiple of TX_IN_COUNT
[32] PREV_OUT_HASH char[32]
[ 4] PREV_OUT_INDEX <I (zero-based) uint32_t
[..] SCRIPT_LENGTH variable integer
[..] SCRIPT variable string
[ 4] SEQUENCE <I uint32_t
[..] TX_OUT_COUNT variable integer
[..] TX_OUT multiple of TX_OUT_COUNT
[ 8] VALUE <q int64_t
[..] SCRIPT_LENGTH variable integer
[..] SCRIPT variable string
[ 4] LOCK_TIME <I uint32_t
[---BLOCK_PAYLOAD---]
[ 4] VERSION <I uint32_t
[32] PREV_BLOCK_HASH char[32]
[32] MERKLE_ROOT char[32]
[ 4] TIMESTAMP <I uint32_t
[ 4] BITS <I uint32_t
[ 4] NONCE <I uint32_t
[..] TX_COUNT variable integer
[..] TX multiple of TX_COUNT
[..] TX see TX_PAYLOAD
[---GETBLOCKS_PAYLOAD---]
[ 4] VERSION <I uint32_t
[..] COUNT variable integer
[..] BLOCK_HASHES multiple of COUNT
[32] BLOCK_HASH char[32]
[32] LAST_BLOCK_HASH char[32]
[---GETHEADERS_PAYLOAD---]
[ 4] VERSION <I uint32_t
[..] COUNT variable integer
[..] BLOCK_HASHES multiple of COUNT
[32] BLOCK_HASH char[32]
[32] LAST_BLOCK_HASH char[32]
[---HEADERS_PAYLOAD---]
[..] COUNT variable integer (max 2000)
[..] HEADERS multiple of COUNT
[ 4] VERSION <I uint32_t
[32] PREV_BLOCK_HASH char[32]
[32] MERKLE_ROOT char[32]
[ 4] TIMESTAMP <I uint32_t
[ 4] BITS <I uint32_t
[ 4] NONCE <I uint32_t
[..] TX_COUNT variable integer (always 0)
-------------------------------------------------------------------------------
"""
import hashlib
import random
import socket
import socks
import struct
import sys
import time
from base64 import b32decode, b32encode
from binascii import hexlify, unhexlify
from collections import deque
from io import SEEK_CUR, BytesIO
from operator import itemgetter
# MAGIC_NUMBER = "\xF9\xBE\xB4\xD9"
# PORT = 8333
# MIN_PROTOCOL_VERSION = 70001
# PROTOCOL_VERSION = 70015
# FROM_SERVICES = 0
# TO_SERVICES = 1 # NODE_NETWORK
# USER_AGENT = "/bitnodes.earn.com:0.1/"
# HEIGHT = 478000
# RELAY = 0 # set to 1 to receive all txs
import logging
SOCKET_BUFSIZE = 8192
SOCKET_TIMEOUT = 30
HEADER_LEN = 24
ONION_PREFIX = b"\xFD\x87\xD8\x7E\xEB\x43" # ipv6 prefix for .onion address
class ProtocolError(Exception):
pass
class ConnectionError(Exception):
pass
class HeaderTooShortError(ProtocolError):
pass
class InvalidMagicNumberError(ProtocolError):
pass
class PayloadTooShortError(ProtocolError):
pass
class InvalidPayloadChecksum(ProtocolError):
pass
class IncompatibleClientError(ProtocolError):
pass
class ReadError(ProtocolError):
pass
class ProxyRequired(ConnectionError):
pass
class RemoteHostClosedConnection(ConnectionError):
pass
def sha256(data):
return hashlib.sha256(data).digest()
def unpack(fmt, string):
# Wraps problematic struct.unpack() in a try statement
try:
return struct.unpack(fmt, string)[0]
except struct.error as err:
raise ReadError(err)
def create_connection(address, timeout=SOCKET_TIMEOUT, source_address=None,
proxy=None):
if address[0].endswith(".onion") and proxy is None:
raise ProxyRequired(
"tor proxy is required to connect to .onion address")
if proxy:
socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5, proxy[0], proxy[1])
sock = socks.socksocket()
sock.settimeout(timeout)
try:
sock.connect(address)
except socks.ProxyError as err:
raise ConnectionError(err)
return sock
if ":" in address[0] and source_address and ":" not in source_address[0]:
source_address = None
return socket.create_connection(address, timeout=timeout,
source_address=source_address)
class Serializer(object):
def __init__(self, **conf):
self.magic_number = conf.get('magic_number')
self.protocol_version = conf.get('protocol_version')
self.to_services = conf.get('to_services')
self.from_services = conf.get('from_services')
self.user_agent = conf.get('user_agent')
self.height = conf.get('height')
self.min_protocol_version = conf.get("min_protocol_version")
if self.height is None:
self.height = 0
self.relay = conf.get('relay')
# This is set prior to throwing PayloadTooShortError exception to
# allow caller to fetch more data over the network.
self.required_len = 0
def serialize_msg(self, **kwargs):
command = kwargs['command']
msg = [
self.magic_number,
command + b"\x00" * (12 - len(command)),
]
payload = b""
if command == b"version":
to_addr = (self.to_services,) + kwargs['to_addr']
from_addr = (self.from_services,) + kwargs['from_addr']
payload = self.serialize_version_payload(to_addr, from_addr)
elif command == b"ping" or command == b"pong":
nonce = kwargs['nonce']
payload = self.serialize_ping_payload(nonce)
elif command == b"addr":
addr_list = kwargs['addr_list']
payload = self.serialize_addr_payload(addr_list)
elif command == b"inv" or command == b"getdata":
inventory = kwargs['inventory']
payload = self.serialize_inv_payload(inventory)
elif command == b"getblocks" or command == b"getheaders":
block_hashes = kwargs['block_hashes']
last_block_hash = kwargs['last_block_hash']
payload = self.serialize_getblocks_payload(block_hashes,
last_block_hash)
elif command == b"headers":
headers = kwargs['headers']
payload = self.serialize_block_headers_payload(headers)
msg.extend([
struct.pack("<I", len(payload)),
sha256(sha256(payload))[:4],
payload,
])
return b''.join(msg)
def deserialize_msg(self, data):
msg = {}
data_len = len(data)
if data_len < HEADER_LEN:
raise HeaderTooShortError("got {} of {} bytes".format(
data_len, HEADER_LEN))
data = BytesIO(data)
header = data.read(HEADER_LEN)
msg.update(self.deserialize_header(header))
if (data_len - HEADER_LEN) < msg['length']:
self.required_len = HEADER_LEN + msg['length']
raise PayloadTooShortError("got {} of {} bytes".format(
data_len, HEADER_LEN + msg['length']))
payload = data.read(msg['length'])
computed_checksum = sha256(sha256(payload))[:4]
if computed_checksum != msg['checksum']:
raise InvalidPayloadChecksum("{} != {}".format(
hexlify(computed_checksum), hexlify(msg['checksum'])))
if msg['command'] == b"version":
msg.update(self.deserialize_version_payload(payload))
elif msg['command'] == b"ping" or msg['command'] == b"pong":
msg.update(self.deserialize_ping_payload(payload))
elif msg['command'] == b"addr":
msg.update(self.deserialize_addr_payload(payload))
elif msg['command'] == b"inv":
msg.update(self.deserialize_inv_payload(payload))
elif msg['command'] == b"tx":
msg.update(self.deserialize_tx_payload(payload))
elif msg['command'] == b"block":
msg.update(self.deserialize_block_payload(payload))
elif msg['command'] == b"headers":
msg.update(self.deserialize_block_headers_payload(payload))
return (msg, data.read())
def deserialize_header(self, data):
msg = {}
data = BytesIO(data)
msg['magic_number'] = data.read(4)
if msg['magic_number'] != self.magic_number:
raise InvalidMagicNumberError("{} != {}".format(hexlify(msg['magic_number']), hexlify(self.magic_number)))
msg['command'] = data.read(12).strip(b"\x00")
msg['length'] = struct.unpack("<I", data.read(4))[0]
msg['checksum'] = data.read(4)
return msg
def serialize_version_payload(self, to_addr, from_addr):
payload = [
struct.pack("<i", self.protocol_version),
struct.pack("<Q", self.from_services),
struct.pack("<q", int(time.time())),
self.serialize_network_address(to_addr),
self.serialize_network_address(from_addr),
struct.pack("<Q", random.getrandbits(64)),
self.serialize_string(self.user_agent),
struct.pack("<i", self.height),
struct.pack("<?", self.relay),
]
return b''.join(payload)
def deserialize_version_payload(self, data):
msg = {}
data = BytesIO(data)
msg['version'] = unpack("<i", data.read(4))
if msg['version'] < self.min_protocol_version:
raise IncompatibleClientError("{} < {}".format(
msg['version'], self.min_protocol_version))
msg['services'] = unpack("<Q", data.read(8))
msg['timestamp'] = unpack("<q", data.read(8))
msg['to_addr'] = self.deserialize_network_address(data)
msg['from_addr'] = self.deserialize_network_address(data)
msg['nonce'] = unpack("<Q", data.read(8))
msg['user_agent'] = self.deserialize_string(data)
msg['height'] = unpack("<i", data.read(4))
try:
msg['relay'] = struct.unpack("<?", data.read(1))[0]
except struct.error:
msg['relay'] = False
return msg
def serialize_ping_payload(self, nonce):
payload = [
struct.pack("<Q", nonce),
]
return b''.join(payload)
def deserialize_ping_payload(self, data):
data = BytesIO(data)
nonce = unpack("<Q", data.read(8))
msg = {
'nonce': nonce,
}
return msg
def serialize_addr_payload(self, addr_list):
payload = [
self.serialize_int(len(addr_list)),
]
payload.extend(
[self.serialize_network_address(addr) for addr in addr_list])
return b''.join(payload)
def deserialize_addr_payload(self, data):
msg = {}
data = BytesIO(data)
msg['count'] = self.deserialize_int(data)
msg['addr_list'] = []
for _ in range(msg['count']):
network_address = self.deserialize_network_address(
data, has_timestamp=True)
msg['addr_list'].append(network_address)
return msg
def serialize_inv_payload(self, inventory):
payload = [
self.serialize_int(len(inventory)),
]
payload.extend(
[self.serialize_inventory(item) for item in inventory])
return b''.join(payload)
def deserialize_inv_payload(self, data):
msg = {
'timestamp': int(time.time() * 1000), # milliseconds
}
data = BytesIO(data)
msg['count'] = self.deserialize_int(data)
msg['inventory'] = []
for _ in range(msg['count']):
inventory = self.deserialize_inventory(data)
msg['inventory'].append(inventory)
return msg
def serialize_tx_payload(self, tx):
payload = [
struct.pack("<I", tx['version']),
self.serialize_int(tx['tx_in_count']),
b''.join([
self.serialize_tx_in(tx_in) for tx_in in tx['tx_in']
]),
self.serialize_int(tx['tx_out_count']),
b''.join([
self.serialize_tx_out(tx_out) for tx_out in tx['tx_out']
]),
struct.pack("<I", tx['lock_time']),
]
return b''.join(payload)
def deserialize_tx_payload(self, data):
msg = {}
if isinstance(data, bytes):
data = BytesIO(data)
msg['version'] = unpack("<I", data.read(4))
# Check for BIP144 marker
marker = data.read(1)
        if marker == b'\x00':  # BIP144 marker is set
            flags = data.read(1)
        else:
            flags = b'\x00'
            data.seek(-1, SEEK_CUR)
msg['tx_in_count'] = self.deserialize_int(data)
msg['tx_in'] = []
for _ in range(msg['tx_in_count']):
tx_in = self.deserialize_tx_in(data)
msg['tx_in'].append(tx_in)
msg['tx_out_count'] = self.deserialize_int(data)
msg['tx_out'] = []
for _ in range(msg['tx_out_count']):
tx_out = self.deserialize_tx_out(data)
msg['tx_out'].append(tx_out)
        if flags != b'\x00':
for in_num in range(msg['tx_in_count']):
msg['tx_in'][in_num].update({
'wits': self.deserialize_string_vector(data),
})
msg['lock_time'] = unpack("<I", data.read(4))
# Calculate hash from the entire payload
payload = self.serialize_tx_payload(msg)
msg['tx_hash'] = hexlify(sha256(sha256(payload))[::-1])
return msg
def deserialize_block_payload(self, data):
msg = {}
# Calculate hash from: version (4 bytes) + prev_block_hash (32 bytes) +
# merkle_root (32 bytes) + timestamp (4 bytes) + bits (4 bytes) +
# nonce (4 bytes) = 80 bytes
msg['block_hash'] = hexlify(sha256(sha256(data[:80]))[::-1])
data = BytesIO(data)
msg['version'] = struct.unpack("<I", data.read(4))[0]
# BE (big-endian) -> LE (little-endian)
msg['prev_block_hash'] = hexlify(data.read(32)[::-1])
# BE -> LE
msg['merkle_root'] = hexlify(data.read(32)[::-1])
msg['timestamp'] = struct.unpack("<I", data.read(4))[0]
msg['bits'] = struct.unpack("<I", data.read(4))[0]
msg['nonce'] = struct.unpack("<I", data.read(4))[0]
msg['tx_count'] = self.deserialize_int(data)
msg['tx'] = []
for _ in range(msg['tx_count']):
tx_payload = self.deserialize_tx_payload(data)
msg['tx'].append(tx_payload)
return msg
def serialize_getblocks_payload(self, block_hashes, last_block_hash):
payload = [
struct.pack("<i", self.protocol_version),
self.serialize_int(len(block_hashes)),
            b''.join(
[unhexlify(block_hash)[::-1] for block_hash in block_hashes]),
unhexlify(last_block_hash)[::-1], # LE -> BE
]
return b''.join(payload)
def serialize_block_headers_payload(self, headers):
payload = [
self.serialize_int(len(headers)),
]
payload.extend(
[self.serialize_block_header(header) for header in headers])
return b''.join(payload)
def deserialize_block_headers_payload(self, data):
msg = {}
data = BytesIO(data)
msg['count'] = self.deserialize_int(data)
msg['headers'] = []
for _ in range(msg['count']):
header = self.deserialize_block_header(data)
msg['headers'].append(header)
return msg
def serialize_network_address(self, addr):
network_address = []
if len(addr) == 4:
(timestamp, services, ip_address, port) = addr
network_address.append(struct.pack("<I", timestamp))
else:
(services, ip_address, port) = addr
network_address.append(struct.pack("<Q", services))
if ip_address.endswith(".onion"):
# convert .onion address to its ipv6 equivalent (6 + 10 bytes)
network_address.append(
ONION_PREFIX + b32decode(ip_address[:-6].encode(), True))
elif "." in ip_address:
# unused (12 bytes) + ipv4 (4 bytes) = ipv4-mapped ipv6 address
unused = b"\x00" * 10 + b"\xFF" * 2
network_address.append(
unused + socket.inet_pton(socket.AF_INET, ip_address))
else:
# ipv6 (16 bytes)
network_address.append(
socket.inet_pton(socket.AF_INET6, ip_address))
network_address.append(struct.pack(">H", port))
return b''.join(network_address)
def deserialize_network_address(self, data, has_timestamp=False):
timestamp = None
if has_timestamp:
timestamp = unpack("<I", data.read(4))
services = unpack("<Q", data.read(8))
_ipv6 = data.read(12)
_ipv4 = data.read(4)
port = unpack(">H", data.read(2))
_ipv6 += _ipv4
ipv4 = ""
ipv6 = ""
onion = ""
if _ipv6[:6] == ONION_PREFIX:
onion = b32encode(_ipv6[6:]).lower().decode("utf8") + ".onion" # use .onion
else:
ipv6 = socket.inet_ntop(socket.AF_INET6, _ipv6)
ipv4 = socket.inet_ntop(socket.AF_INET, _ipv4)
if ipv4 in ipv6:
ipv6 = "" # use ipv4
else:
ipv4 = "" # use ipv6
return {
'timestamp': timestamp,
'services': services,
'ipv4': ipv4,
'ipv6': ipv6,
'onion': onion,
'port': port,
}
def serialize_inventory(self, item):
(inv_type, inv_hash) = item
payload = [
struct.pack("<I", inv_type),
unhexlify(inv_hash)[::-1], # LE -> BE
]
return b''.join(payload)
def deserialize_inventory(self, data):
inv_type = unpack("<I", data.read(4))
inv_hash = data.read(32)[::-1] # BE -> LE
return {
'type': inv_type,
'hash': hexlify(inv_hash),
}
def serialize_tx_in(self, tx_in):
payload = [
unhexlify(tx_in['prev_out_hash'])[::-1], # LE -> BE
struct.pack("<I", tx_in['prev_out_index']),
self.serialize_int(tx_in['script_length']),
tx_in['script'],
struct.pack("<I", tx_in['sequence']),
]
return b''.join(payload)
def deserialize_tx_in(self, data):
prev_out_hash = data.read(32)[::-1] # BE -> LE
prev_out_index = struct.unpack("<I", data.read(4))[0]
script_length = self.deserialize_int(data)
script = data.read(script_length)
sequence = unpack("<I", data.read(4))
return {
'prev_out_hash': hexlify(prev_out_hash),
'prev_out_index': prev_out_index,
'script_length': script_length,
'script': script,
'sequence': sequence,
}
def serialize_tx_out(self, tx_out):
payload = [
struct.pack("<q", tx_out['value']),
self.serialize_int(tx_out['script_length']),
tx_out['script'],
]
return b''.join(payload)
def deserialize_tx_out(self, data):
value = struct.unpack("<q", data.read(8))[0]
script_length = self.deserialize_int(data)
script = data.read(script_length)
return {
'value': value,
'script_length': script_length,
'script': script,
}
def serialize_block_header(self, header):
payload = [
struct.pack("<I", header['version']),
unhexlify(header['prev_block_hash'])[::-1], # LE -> BE
unhexlify(header['merkle_root'])[::-1], # LE -> BE
struct.pack("<I", header['timestamp']),
struct.pack("<I", header['bits']),
struct.pack("<I", header['nonce']),
self.serialize_int(0),
]
        return b''.join(payload)
def deserialize_block_header(self, data):
header = data.read(80)
block_hash = sha256(sha256(header))[::-1] # BE -> LE
header = BytesIO(header)
version = struct.unpack("<i", header.read(4))[0]
prev_block_hash = header.read(32)[::-1] # BE -> LE
merkle_root = header.read(32)[::-1] # BE -> LE
timestamp = struct.unpack("<I", header.read(4))[0]
bits = struct.unpack("<I", header.read(4))[0]
nonce = struct.unpack("<I", header.read(4))[0]
tx_count = self.deserialize_int(data)
return {
'block_hash': hexlify(block_hash),
'version': version,
'prev_block_hash': hexlify(prev_block_hash),
'merkle_root': hexlify(merkle_root),
'timestamp': timestamp,
'bits': bits,
'nonce': nonce,
'tx_count': tx_count,
}
def serialize_string_vector(self, data):
payload = [
self.serialize_int(len(data)),
] + [self.serialize_string(item) for item in data]
        return b''.join(payload)
def deserialize_string_vector(self, data):
items = []
count = self.deserialize_int(data)
for _ in range(count):
items.append(self.deserialize_string(data))
return items
    def serialize_string(self, data):
        if not isinstance(data, bytes):
            data = data.encode()
        length = len(data)
        # bytes([n]) rather than chr(n).encode(): the latter UTF-8 encodes
        # code points above 0x7F into two bytes and corrupts the prefix
        if length < 0xFD:
            return bytes([length]) + data
        elif length <= 0xFFFF:
            return bytes([0xFD]) + struct.pack("<H", length) + data
        elif length <= 0xFFFFFFFF:
            return bytes([0xFE]) + struct.pack("<I", length) + data
        return bytes([0xFF]) + struct.pack("<Q", length) + data
def deserialize_string(self, data):
length = self.deserialize_int(data)
return data.read(length)
    def serialize_int(self, length):
        if length < 0xFD:
            return bytes([length])
        elif length <= 0xFFFF:
            return bytes([0xFD]) + struct.pack("<H", length)
        elif length <= 0xFFFFFFFF:
            return bytes([0xFE]) + struct.pack("<I", length)
        return bytes([0xFF]) + struct.pack("<Q", length)
def deserialize_int(self, data):
length = unpack("<B", data.read(1))
if length == 0xFD:
length = unpack("<H", data.read(2))
elif length == 0xFE:
length = unpack("<I", data.read(4))
elif length == 0xFF:
length = unpack("<Q", data.read(8))
return length
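# Variable-length integer encoding examples (matching serialize_int above):
#     serialize_int(0x20)       -> b'\x20'                  (single byte)
#     serialize_int(0x0123)     -> b'\xfd\x23\x01'          (0xFD prefix + <H)
#     serialize_int(0x00012345) -> b'\xfe\x45\x23\x01\x00'  (0xFE prefix + <I)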
class Connection(object):
def __init__(self, to_addr, from_addr=("0.0.0.0", 0), **conf):
self.to_addr = to_addr
self.from_addr = from_addr
self.serializer = Serializer(**conf)
self.socket_timeout = conf.get('socket_timeout', SOCKET_TIMEOUT)
self.proxy = conf.get('proxy', None)
self.socket = None
self.bps = deque([], maxlen=128) # bps samples for this connection
def open(self):
self.socket = create_connection(self.to_addr,
timeout=self.socket_timeout,
source_address=self.from_addr,
proxy=self.proxy)
def close(self):
if self.socket:
try:
self.socket.shutdown(socket.SHUT_RDWR)
except socket.error:
pass
finally:
self.socket.close()
def send(self, data):
self.socket.sendall(data)
def recv(self, length=0):
start_t = time.time()
if length > 0:
chunks = []
while length > 0:
chunk = self.socket.recv(SOCKET_BUFSIZE)
if not chunk:
raise RemoteHostClosedConnection("{} closed connection".format(self.to_addr))
chunks.append(chunk)
length -= len(chunk)
data = b''.join(chunks)
else:
data = self.socket.recv(SOCKET_BUFSIZE)
if not data:
raise RemoteHostClosedConnection("{} closed connection".format(self.to_addr))
if len(data) > SOCKET_BUFSIZE:
end_t = time.time()
self.bps.append((len(data) * 8) / (end_t - start_t))
return data
def get_messages(self, length=0, commands=None):
msgs = []
data = self.recv(length=length)
while len(data) > 0:
time.sleep(0.0001)
try:
(msg, data) = self.serializer.deserialize_msg(data)
except PayloadTooShortError:
data += self.recv(
length=self.serializer.required_len - len(data))
(msg, data) = self.serializer.deserialize_msg(data)
if msg.get('command') == b"ping":
self.pong(msg['nonce']) # respond to ping immediately
elif msg.get('command') == b"version":
self.verack() # respond to version immediately
msgs.append(msg)
if len(msgs) > 0 and commands:
msgs[:] = [m for m in msgs if m.get('command') in commands]
return msgs
def set_min_version(self, version):
self.serializer.protocol_version = min(
self.serializer.protocol_version,
            version.get('version', self.serializer.protocol_version))
def handshake(self):
# [version] >>>
msg = self.serializer.serialize_msg(
command=b"version", to_addr=self.to_addr, from_addr=self.from_addr)
self.send(msg)
# <<< [version 124 bytes] [verack 24 bytes]
time.sleep(1)
msgs = self.get_messages(length=148, commands=[b"version", b"verack"])
if len(msgs) > 0:
msgs[:] = sorted(msgs, key=itemgetter('command'), reverse=True)
self.set_min_version(msgs[0])
return msgs
def verack(self):
# [verack] >>>
msg = self.serializer.serialize_msg(command=b"verack")
self.send(msg)
def getaddr(self, block=True):
# [getaddr] >>>
msg = self.serializer.serialize_msg(command=b"getaddr")
self.send(msg)
# Caller should call get_messages separately.
if not block:
return None
# <<< [addr]..
time.sleep(3)
msgs = self.get_messages(commands=[b"addr"])
return msgs
def getpeerinfo(self, block=True):
# [getaddr] >>>
msg = self.serializer.serialize_msg(command=b"getpeerinfo")
self.send(msg)
# Caller should call get_messages separately.
if not block:
return None
# <<< [addr]..
msgs = self.get_messages(commands=[b"getpeerinfo"])
return msgs
def addr(self, addr_list):
# addr_list = [(TIMESTAMP, SERVICES, "IP_ADDRESS", PORT),]
# [addr] >>>
msg = self.serializer.serialize_msg(
command=b"addr", addr_list=addr_list)
self.send(msg)
def ping(self, nonce=None):
if nonce is None:
nonce = random.getrandbits(64)
# [ping] >>>
msg = self.serializer.serialize_msg(command=b"ping", nonce=nonce)
self.send(msg)
def pong(self, nonce):
# [pong] >>>
msg = self.serializer.serialize_msg(command=b"pong", nonce=nonce)
self.send(msg)
def inv(self, inventory):
# inventory = [(INV_TYPE, "INV_HASH"),]
# [inv] >>>
msg = self.serializer.serialize_msg(
command=b"inv", inventory=inventory)
self.send(msg)
def getdata(self, inventory):
# inventory = [(INV_TYPE, "INV_HASH"),]
# [getdata] >>>
msg = self.serializer.serialize_msg(
command=b"getdata", inventory=inventory)
self.send(msg)
# <<< [tx] [block]..
time.sleep(1)
msgs = self.get_messages(commands=[b"tx", b"block"])
return msgs
def getblocks(self, block_hashes, last_block_hash=None):
if last_block_hash is None:
last_block_hash = "0" * 64
# block_hashes = ["BLOCK_HASH",]
# [getblocks] >>>
msg = self.serializer.serialize_msg(command=b"getblocks",
block_hashes=block_hashes,
last_block_hash=last_block_hash)
self.send(msg)
# <<< [inv]..
time.sleep(1)
msgs = self.get_messages(commands=[b"inv"])
return msgs
def getheaders(self, block_hashes, last_block_hash=None):
if last_block_hash is None:
last_block_hash = "0" * 64
# block_hashes = ["BLOCK_HASH",]
# [getheaders] >>>
msg = self.serializer.serialize_msg(command=b"getheaders",
block_hashes=block_hashes,
last_block_hash=last_block_hash)
self.send(msg)
# <<< [headers]..
time.sleep(1)
msgs = self.get_messages(commands=[b"headers"])
return msgs
def headers(self, headers):
# headers = [{
# 'version': VERSION,
# 'prev_block_hash': PREV_BLOCK_HASH,
# 'merkle_root': MERKLE_ROOT,
# 'timestamp': TIMESTAMP,
# 'bits': BITS,
# 'nonce': NONCE
# },]
# [headers] >>>
msg = self.serializer.serialize_msg(command=b"headers", headers=headers)
self.send(msg)
class Keepalive(object):
"""
Implements keepalive mechanic to keep the specified connection with a node.
"""
def __init__(self, conn, keepalive_time):
self.conn = conn
self.keepalive_time = keepalive_time
def keepalive(self, addr=False):
st = time.time()
last_ping = time.time() - 10
addrs = []
while time.time() - st < self.keepalive_time:
if time.time() - last_ping > 9:
try:
self.ping()
last_ping = time.time()
except socket.error as err:
logging.debug("keepalive failed %s", err)
break
time.sleep(0.3)
try:
if addr:
new = self.conn.get_messages(commands=[b'addr'])
addrs += new
else:
self.conn.get_messages()
except socket.timeout:
pass
except (ProtocolError, ConnectionError, socket.error) as err:
logging.debug("getmsg failed %s", err)
break
return addrs
def ping(self):
"""
Sends a ping message. Ping time is stored in Redis for round-trip time
(RTT) calculation.
"""
nonce = random.getrandbits(64)
try:
self.conn.ping(nonce=nonce)
except socket.error:
raise
self.last_ping = time.time()
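# Usage sketch (host/port and conf values are illustrative; the conf keys
# match the Serializer fields above):
#
#     conf = dict(magic_number=b"\xF9\xBE\xB4\xD9", protocol_version=70015,
#                 min_protocol_version=70001, from_services=0, to_services=1,
#                 user_agent="/bitnodes.earn.com:0.1/", height=478000, relay=0)
#     conn = Connection(("203.0.113.5", 8333), **conf)
#     conn.open()
#     version_msgs = conn.handshake()
#     addr_msgs = conn.getaddr()
#     conn.close()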
| 35.674487
| 118
| 0.524921
|
867e504eac500a6a052b99fa2e425a3b41728698
| 3,039
|
py
|
Python
|
model-optimizer/extensions/front/onnx/upsample_ext.py
|
apexxs/dldt
|
17e66dc5a6631d630da454506902bd7c25d4170b
|
[
"Apache-2.0"
] | 3
|
2019-07-08T09:03:03.000Z
|
2020-09-09T10:34:17.000Z
|
model-optimizer/extensions/front/onnx/upsample_ext.py
|
apexxs/dldt
|
17e66dc5a6631d630da454506902bd7c25d4170b
|
[
"Apache-2.0"
] | 6
|
2022-01-11T18:56:22.000Z
|
2022-02-21T13:20:20.000Z
|
model-optimizer/extensions/front/onnx/upsample_ext.py
|
apexxs/dldt
|
17e66dc5a6631d630da454506902bd7c25d4170b
|
[
"Apache-2.0"
] | 3
|
2021-02-05T17:11:17.000Z
|
2021-04-19T08:33:31.000Z
|
"""
Copyright (c) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging as log
from extensions.ops.resample import ResampleOp
from mo.front.extractor import FrontExtractorOp
from mo.front.onnx.extractors.utils import onnx_attr
from mo.utils.error import Error
import numpy as np
class UpsampleFrontExtractor(FrontExtractorOp):
op = 'Upsample'
enabled = True
@staticmethod
def extract(node):
mode = onnx_attr(node, 'mode', 's', default='nearest', dst_type=lambda x: x.decode())
scales = onnx_attr(node, 'scales', 'floats', dst_type=lambda x: np.array(x, dtype=np.float32))
width_scale = onnx_attr(node, 'width_scale', 'f')
height_scale = onnx_attr(node, 'height_scale', 'f')
supported_modes = ['nearest', 'linear']
if mode not in supported_modes:
raise Error(
'Error decoding Upsample node {}, mode = {} is not in the list of supported modes {}.',
node.name,
mode,
supported_modes
)
# TODO: this is a temporary limitation
if mode != 'nearest':
raise Error(
'Upsample mode {} for node {} is not supported. Only nearest is supported.',
mode,
node.name
)
# TODO: this is a temporary limitation
if scales is not None:
raise Error(
'Upsample scales attribute is defined for node {}. Only scale_width and scale_height are supported.',
node.name
)
if width_scale is None or height_scale is None:
raise Error(
                'One or both of width_scale = {} and height_scale = {} is not defined for Upsample node {}.',
width_scale,
height_scale,
node.name
)
if width_scale != height_scale:
raise Error(
                'Upsample node {} has different width_scale = {} and height_scale = {}. This is not supported; they must match.',
node.name,
width_scale,
height_scale
)
mode_to_resample_type = {'nearest': 'caffe.ResampleParameter.NEAREST'}
assert mode in mode_to_resample_type
assert width_scale == height_scale
assert width_scale is not None
ResampleOp.update_node_stat(node, {'resample_type': mode_to_resample_type[mode], 'factor': width_scale, 'antialias': 0})
return __class__.enabled
| 36.178571
| 131
| 0.622902
|
84bcef798ef2bd2ed1401b12e5744cdfd1494f0b
| 841
|
py
|
Python
|
src/spaceone/notification/manager/repository_manager.py
|
xellos00/notification
|
e091c1eaeaf54d2669ac204c027aacddabad382a
|
[
"Apache-2.0"
] | null | null | null |
src/spaceone/notification/manager/repository_manager.py
|
xellos00/notification
|
e091c1eaeaf54d2669ac204c027aacddabad382a
|
[
"Apache-2.0"
] | null | null | null |
src/spaceone/notification/manager/repository_manager.py
|
xellos00/notification
|
e091c1eaeaf54d2669ac204c027aacddabad382a
|
[
"Apache-2.0"
] | null | null | null |
import logging
from spaceone.core.manager import BaseManager
from spaceone.notification.error import *
from spaceone.notification.connector.repository_connector import RepositoryConnector
_LOGGER = logging.getLogger(__name__)
class RepositoryManager(BaseManager):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.repo_connector: RepositoryConnector = self.locator.get_connector('RepositoryConnector')
def get_plugin(self, plugin_id, domain_id):
return self.repo_connector.get_plugin(plugin_id, domain_id)
def check_plugin_version(self, plugin_id, version, domain_id):
versions = self.repo_connector.get_plugin_versions(plugin_id, domain_id)
if version not in versions:
raise ERROR_INVALID_PLUGIN_VERSION(plugin_id=plugin_id, version=version)
| 35.041667
| 100
| 0.769322
|
c35d40d4eb7d7deb6a32f5a4ab03111a563f3234
| 3,513
|
py
|
Python
|
gmond/python_modules/example/example.py
|
clouTrix/monitor-core
|
70cf26243fcf7159bbeb6a5471841392c417de0a
|
[
"BSD-3-Clause"
] | 346
|
2015-01-06T14:19:48.000Z
|
2022-03-27T07:15:09.000Z
|
gmond/python_modules/example/example.py
|
clouTrix/monitor-core
|
70cf26243fcf7159bbeb6a5471841392c417de0a
|
[
"BSD-3-Clause"
] | 98
|
2015-01-14T18:17:53.000Z
|
2022-03-20T00:22:46.000Z
|
gmond/python_modules/example/example.py
|
clouTrix/monitor-core
|
70cf26243fcf7159bbeb6a5471841392c417de0a
|
[
"BSD-3-Clause"
] | 157
|
2015-01-20T16:26:30.000Z
|
2022-03-10T06:02:09.000Z
|
#/******************************************************************************
#* Portions Copyright (C) 2007 Novell, Inc. All rights reserved.
#*
#* Redistribution and use in source and binary forms, with or without
#* modification, are permitted provided that the following conditions are met:
#*
#* - Redistributions of source code must retain the above copyright notice,
#* this list of conditions and the following disclaimer.
#*
#* - Redistributions in binary form must reproduce the above copyright notice,
#* this list of conditions and the following disclaimer in the documentation
#* and/or other materials provided with the distribution.
#*
#* - Neither the name of Novell, Inc. nor the names of its
#* contributors may be used to endorse or promote products derived from this
#* software without specific prior written permission.
#*
#* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
#* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
#* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
#* ARE DISCLAIMED. IN NO EVENT SHALL Novell, Inc. OR THE CONTRIBUTORS
#* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
#* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
#* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
#* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
#* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
#* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
#* POSSIBILITY OF SUCH DAMAGE.
#*
#* Author: Brad Nicholes (bnicholes novell.com)
#******************************************************************************/
import random
descriptors = list()
Random_Max = 50
Constant_Value = 50
def Random_Numbers(name):
'''Return a random number.'''
return int(random.uniform(0, Random_Max))
def Constant_Number(name):
'''Return a constant number.'''
return int(Constant_Value)
def metric_init(params):
'''Initialize the random number generator and create the
metric definition dictionary object for each metric.'''
global descriptors
global Random_Max
global Constant_Value
random.seed()
    print('[pyexample] Received the following parameters')
    print(params)
if 'RandomMax' in params:
Random_Max = int(params['RandomMax'])
if 'ConstantValue' in params:
Constant_Value = int(params['ConstantValue'])
d1 = {'name': 'PyRandom_Numbers',
'call_back': Random_Numbers,
'time_max': 90,
'value_type': 'uint',
'units': 'N',
'slope': 'both',
'format': '%u',
'description': 'Example module metric (random numbers)',
'groups': 'example,random'}
d2 = {'name': 'PyConstant_Number',
'call_back': Constant_Number,
'time_max': 90,
'value_type': 'uint',
'units': 'N',
'slope': 'zero',
'format': '%hu',
'description': 'Example module metric (constant number)'}
descriptors = [d1, d2]
return descriptors
def metric_cleanup():
'''Clean up the metric module.'''
pass
#This code is for debugging and unit testing
if __name__ == '__main__':
params = {'RandomMax': '500',
'ConstantValue': '322'}
metric_init(params)
for d in descriptors:
v = d['call_back'](d['name'])
        print('value for %s is %u' % (d['name'], v))
| 34.782178
| 80
| 0.652434
|
bb6a811dd615d543eb6ce85900959d726c2319e8
| 10,362
|
py
|
Python
|
tests/sentry/incidents/endpoints/test_organization_alert_rule_index.py
|
vaniot-s/sentry
|
5c1accadebfaf8baf6863251c05b38ea979ee1c7
|
[
"BSD-3-Clause"
] | null | null | null |
tests/sentry/incidents/endpoints/test_organization_alert_rule_index.py
|
vaniot-s/sentry
|
5c1accadebfaf8baf6863251c05b38ea979ee1c7
|
[
"BSD-3-Clause"
] | null | null | null |
tests/sentry/incidents/endpoints/test_organization_alert_rule_index.py
|
vaniot-s/sentry
|
5c1accadebfaf8baf6863251c05b38ea979ee1c7
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import absolute_import
from exam import fixture
from freezegun import freeze_time
from sentry.api.serializers import serialize
from sentry.incidents.logic import create_alert_rule
from sentry.incidents.models import AlertRule
from sentry.testutils import APITestCase
class AlertRuleListEndpointTest(APITestCase):
endpoint = "sentry-api-0-organization-alert-rules"
@fixture
def organization(self):
return self.create_organization()
@fixture
def project(self):
return self.create_project(organization=self.organization)
@fixture
def user(self):
return self.create_user()
def test_simple(self):
self.create_team(organization=self.organization, members=[self.user])
alert_rule = create_alert_rule(
self.organization, [self.project], "hello", "level:error", "count()", 10, 1
)
self.login_as(self.user)
with self.feature("organizations:incidents"):
resp = self.get_valid_response(self.organization.slug)
assert resp.data == serialize([alert_rule])
def test_no_feature(self):
self.create_team(organization=self.organization, members=[self.user])
self.login_as(self.user)
resp = self.get_response(self.organization.slug)
assert resp.status_code == 404
@freeze_time()
class AlertRuleCreateEndpointTest(APITestCase):
endpoint = "sentry-api-0-organization-alert-rules"
method = "post"
@fixture
def organization(self):
return self.create_organization()
@fixture
def project(self):
return self.create_project(organization=self.organization)
@fixture
def user(self):
return self.create_user()
def test_simple(self):
self.create_member(
user=self.user, organization=self.organization, role="owner", teams=[self.team]
)
self.login_as(self.user)
valid_alert_rule = {
"aggregate": "count()",
"query": "",
"timeWindow": "300",
"triggers": [
{
"label": "critical",
"alertThreshold": 200,
"resolveThreshold": 100,
"thresholdType": 0,
"actions": [
{"type": "email", "targetType": "team", "targetIdentifier": self.team.id}
],
},
{
"label": "warning",
"alertThreshold": 150,
"resolveThreshold": 100,
"thresholdType": 0,
"actions": [
{"type": "email", "targetType": "team", "targetIdentifier": self.team.id},
{"type": "email", "targetType": "user", "targetIdentifier": self.user.id},
],
},
],
"projects": [self.project.slug],
"name": "JustAValidTestRule",
}
with self.feature("organizations:incidents"):
resp = self.get_valid_response(
self.organization.slug, status_code=201, **valid_alert_rule
)
assert "id" in resp.data
alert_rule = AlertRule.objects.get(id=resp.data["id"])
assert resp.data == serialize(alert_rule, self.user)
def test_no_label(self):
self.create_member(
user=self.user, organization=self.organization, role="owner", teams=[self.team]
)
self.login_as(self.user)
rule_one_trigger_no_label = {
"aggregate": "count()",
"query": "",
"timeWindow": "300",
"projects": [self.project.slug],
"name": "OneTriggerOnlyCritical",
"triggers": [
{
"alertThreshold": 200,
"resolveThreshold": 100,
"thresholdType": 1,
"actions": [
{"type": "email", "targetType": "team", "targetIdentifier": self.team.id}
],
}
],
}
with self.feature("organizations:incidents"):
self.get_valid_response(
self.organization.slug, status_code=400, **rule_one_trigger_no_label
)
def test_only_critical_trigger(self):
self.create_member(
user=self.user, organization=self.organization, role="owner", teams=[self.team]
)
self.login_as(self.user)
rule_one_trigger_only_critical = {
"aggregate": "count()",
"query": "",
"timeWindow": "300",
"projects": [self.project.slug],
"name": "OneTriggerOnlyCritical",
"triggers": [
{
"label": "critical",
"alertThreshold": 100,
"resolveThreshold": 200,
"thresholdType": 1,
"actions": [
{"type": "email", "targetType": "team", "targetIdentifier": self.team.id}
],
}
],
}
with self.feature("organizations:incidents"):
resp = self.get_valid_response(
self.organization.slug, status_code=201, **rule_one_trigger_only_critical
)
assert "id" in resp.data
alert_rule = AlertRule.objects.get(id=resp.data["id"])
assert resp.data == serialize(alert_rule, self.user)
def test_no_triggers(self):
self.create_member(
user=self.user, organization=self.organization, role="owner", teams=[self.team]
)
self.login_as(self.user)
rule_no_triggers = {
"aggregate": "count()",
"query": "",
"timeWindow": "300",
"projects": [self.project.slug],
"name": "JustATestRuleWithNoTriggers",
}
with self.feature("organizations:incidents"):
resp = self.get_valid_response(
self.organization.slug, status_code=400, **rule_no_triggers
)
assert resp.data == {"triggers": [u"This field is required."]}
def test_no_critical_trigger(self):
self.create_member(
user=self.user, organization=self.organization, role="owner", teams=[self.team]
)
self.login_as(self.user)
rule_one_trigger_only_warning = {
"aggregate": "count()",
"query": "",
"timeWindow": "300",
"projects": [self.project.slug],
"name": "JustATestRule",
"triggers": [
{
"label": "warning",
"alertThreshold": 200,
"resolveThreshold": 100,
"thresholdType": 1,
"actions": [
{"type": "email", "targetType": "team", "targetIdentifier": self.team.id}
],
}
],
}
with self.feature("organizations:incidents"):
resp = self.get_valid_response(
self.organization.slug, status_code=400, **rule_one_trigger_only_warning
)
assert resp.data == {"nonFieldErrors": [u'Trigger 1 must be labeled "critical"']}
def test_critical_trigger_no_action(self):
self.create_member(
user=self.user, organization=self.organization, role="owner", teams=[self.team]
)
self.login_as(self.user)
rule_one_trigger_only_critical_no_action = {
"aggregate": "count()",
"query": "",
"timeWindow": "300",
"projects": [self.project.slug],
"name": "JustATestRule",
"triggers": [
{
"label": "critical",
"alertThreshold": 75,
"resolveThreshold": 100,
"thresholdType": 1,
}
],
}
with self.feature("organizations:incidents"):
resp = self.get_valid_response(
self.organization.slug, status_code=400, **rule_one_trigger_only_critical_no_action
)
assert resp.data == {u"nonFieldErrors": [u'"critical" trigger must have an action.']}
def test_invalid_projects(self):
self.create_member(
user=self.user, organization=self.organization, role="owner", teams=[self.team]
)
self.login_as(self.user)
with self.feature("organizations:incidents"):
resp = self.get_valid_response(
self.organization.slug,
status_code=400,
projects=[
self.project.slug,
self.create_project(organization=self.create_organization()).slug,
],
name="an alert",
thresholdType=1,
query="hi",
aggregate="count()",
timeWindow=10,
alertThreshold=1000,
resolveThreshold=100,
triggers=[
{
"label": "critical",
"alertThreshold": 200,
"resolveThreshold": 100,
"thresholdType": 1,
"actions": [
{
"type": "email",
"targetType": "team",
"targetIdentifier": self.team.id,
}
],
}
],
)
assert resp.data == {"projects": [u"Invalid project"]}
def test_no_feature(self):
self.create_member(
user=self.user, organization=self.organization, role="owner", teams=[self.team]
)
self.login_as(self.user)
resp = self.get_response(self.organization.slug)
assert resp.status_code == 404
def test_no_perms(self):
self.create_member(
user=self.user, organization=self.organization, role="member", teams=[self.team]
)
self.login_as(self.user)
resp = self.get_response(self.organization.slug)
assert resp.status_code == 403
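The rejection tests above pin down the trigger validation rules: triggers are required, trigger 1 must be labeled "critical", and the critical trigger must carry at least one action. A minimal standalone validator expressing the same rules (illustrative only, not Sentry's actual serializer):
def validate_triggers(triggers):
    # Mirrors the rules asserted by the tests above; not Sentry's implementation.
    if not triggers:
        return ["This field is required."]
    if triggers[0].get("label") != "critical":
        return ['Trigger 1 must be labeled "critical"']
    if not triggers[0].get("actions"):
        return ['"critical" trigger must have an action.']
    return []

print(validate_triggers([]))
print(validate_triggers([{"label": "warning", "actions": [{"type": "email"}]}]))
print(validate_triggers([{"label": "critical", "actions": []}]))
print(validate_triggers([{"label": "critical", "actions": [{"type": "email"}]}]))  # -> []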
| 35.125424
| 99
| 0.517275
|
65b98ecfbbd86a122b35b106da1b4a97c5f3630f
| 8,138
|
py
|
Python
|
ros/src/tl_detector/tl_detector.py
|
JoaoGranja/CarND-Capstone
|
839e0d1f9ee5e470f6645feda5b14a0796fbe062
|
[
"MIT"
] | null | null | null |
ros/src/tl_detector/tl_detector.py
|
JoaoGranja/CarND-Capstone
|
839e0d1f9ee5e470f6645feda5b14a0796fbe062
|
[
"MIT"
] | null | null | null |
ros/src/tl_detector/tl_detector.py
|
JoaoGranja/CarND-Capstone
|
839e0d1f9ee5e470f6645feda5b14a0796fbe062
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import rospy
from std_msgs.msg import Int32
from geometry_msgs.msg import PoseStamped, Pose
from styx_msgs.msg import TrafficLightArray, TrafficLight
from styx_msgs.msg import Lane
from sensor_msgs.msg import Image
from cv_bridge import CvBridge
from light_classification.tl_classifier import TLClassifier
import tf
import cv2
import yaml
from scipy.spatial import cKDTree
import numpy as np
STATE_COUNT_THRESHOLD = 3
class TLDetector(object):
def __init__(self):
rospy.init_node('tl_detector')
self.pose = None
self.waypoints = None
self.camera_image = None
self.tree = None
self.waypoints_2D = None
self.lights = []
sub1 = rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
sub2 = rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)
'''
/vehicle/traffic_lights provides you with the location of the traffic light in 3D map space and
helps you acquire an accurate ground truth data source for the traffic light
classifier by sending the current color state of all traffic lights in the
simulator. When testing on the vehicle, the color state will not be available. You'll need to
rely on the position of the light and the camera image to predict it.
'''
sub3 = rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray, self.traffic_cb)
sub6 = rospy.Subscriber('/image_color', Image, self.image_cb)
config_string = rospy.get_param("/traffic_light_config")
self.use_classifier = rospy.get_param('~use_classifier')
        self.config = yaml.safe_load(config_string)
self.upcoming_red_light_pub = rospy.Publisher('/traffic_waypoint', Int32, queue_size=1)
self.bridge = CvBridge()
self.state = TrafficLight.UNKNOWN
self.last_state = TrafficLight.UNKNOWN
self.last_wp = -1
self.state_count = 0
if self.use_classifier:
self.light_classifier = TLClassifier()
#self.listener = tf.TransformListener()
#rate = rospy.Rate(3000) # sleep 100 ms
#rate.sleep()
rospy.spin()
def pose_cb(self, msg):
self.pose = msg
def waypoints_cb(self, waypoints):
self.waypoints = waypoints
self.waypoints_2D = [[waypoint.pose.pose.position.x, waypoint.pose.pose.position.y] for waypoint in waypoints.waypoints]
self.tree = cKDTree(self.waypoints_2D)
rospy.logdebug("TL Detector - Base waypoints callback")
def traffic_cb(self, msg):
self.lights = msg.lights
def image_cb(self, msg):
"""Identifies red lights in the incoming camera image and publishes the index
of the waypoint closest to the red light's stop line to /traffic_waypoint
Args:
msg (Image): image from car-mounted camera
"""
self.has_image = True
self.camera_image = msg
light_wp, state = self.process_traffic_lights()
#rospy.logwarn("TL Detector - Closest light waypoint {} and state {}".format(light_wp, state))
'''
Publish upcoming red lights at camera frequency.
Each predicted state has to occur `STATE_COUNT_THRESHOLD` number
of times till we start using it. Otherwise the previous stable state is
used.
'''
if self.state != state:
self.state_count = 0
self.state = state
#rospy.logwarn("TL Detector - light state changes")
elif self.state_count >= STATE_COUNT_THRESHOLD:
self.last_state = self.state
light_wp = light_wp if state == TrafficLight.RED else -1
self.last_wp = light_wp
self.upcoming_red_light_pub.publish(Int32(light_wp))
#rospy.logwarn("TL Detector - Publish next Red traffic waypoint {}".format(light_wp))
else:
self.upcoming_red_light_pub.publish(Int32(self.last_wp))
#rospy.logwarn("TL Detector - Publish lst Red traffic waypoint {}".format(self.last_wp))
self.state_count += 1
def get_closest_waypoint(self, x, y):
"""Identifies the closest path waypoint to the given position
https://en.wikipedia.org/wiki/Closest_pair_of_points_problem
Args:
pose (Pose): position to match a waypoint to
Returns:
int: index of the closest waypoint in self.waypoints
"""
#find the closest waypoint of the current car position
closest_idx = self.tree.query([x,y])[1]
#Check if this point is behind or after the current car position
closest_wp = np.array(self.waypoints_2D[closest_idx])
if closest_idx == 0:
ahead_closest_wp = np.array(self.waypoints_2D[closest_idx+1])
dot = np.dot((ahead_closest_wp - closest_wp), (closest_wp - np.array([x,y])))
else:
behind_closest_wp = np.array(self.waypoints_2D[closest_idx-1])
dot = np.dot((closest_wp - behind_closest_wp), (closest_wp - np.array([x,y])))
if dot > 0:
closest_idx = (closest_idx + 1) % len(self.waypoints_2D)
return closest_idx
def get_light_state(self, light):
"""Determines the current color of the traffic light
Args:
light (TrafficLight): light to classify
Returns:
int: ID of traffic light color (specified in styx_msgs/TrafficLight)
"""
if(not self.has_image):
#rospy.logwarn("TL Detector - no camera image")
return False
cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, "bgr8")
#Get classification
return self.light_classifier.get_classification(cv_image)
def process_traffic_lights(self):
"""Finds closest visible traffic light, if one exists, and determines its
location and color
Returns:
            int: index of waypoint closest to the upcoming stop line for a traffic light (-1 if none exists)
int: ID of traffic light color (specified in styx_msgs/TrafficLight)
"""
closest_stop_line_idx = None
closest_light = None
# List of positions that correspond to the line to stop in front of for a given intersection
stop_line_positions = self.config['stop_line_positions']
if(self.pose and self.tree):
car_waypoint_idx = self.get_closest_waypoint(self.pose.pose.position.x, self.pose.pose.position.y)
rospy.logdebug("TL Detector - x: {} y: {}".format(self.pose.pose.position.x, self.pose.pose.position.y))
rospy.logdebug("TL Detector - car_waypoint_idx {}".format(car_waypoint_idx))
# find the closest visible traffic light (if one exists)
closest_stop_line_idx = None
for i, light in enumerate(self.lights):
stop_line_pos = stop_line_positions[i]
rospy.logdebug("TL Detector - stop_line_pos {}".format(stop_line_pos))
stop_line_idx = self.get_closest_waypoint(stop_line_pos[0], stop_line_pos[1])
rospy.logdebug("TL Detector - stop_line_idx {}".format(stop_line_idx))
if stop_line_idx >= car_waypoint_idx and ( closest_stop_line_idx is None or stop_line_idx < closest_stop_line_idx):
closest_stop_line_idx = stop_line_idx
closest_light = light
if closest_light:
if self.use_classifier:
state = self.get_light_state(closest_light)
rospy.logwarn("TL Detector - state classified is {} and traffic light is {}".format(state, light.state))
else:
state = light.state
return closest_stop_line_idx, state
return -1, TrafficLight.UNKNOWN
if __name__ == '__main__':
try:
TLDetector()
except rospy.ROSInterruptException:
rospy.logerr('Could not start traffic node.')
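The block comment in image_cb describes a debounce: a newly predicted state must repeat STATE_COUNT_THRESHOLD times before it replaces the last stable state. The same logic, extracted into a self-contained sketch (class and method names are illustrative, not part of the ROS node):
STATE_COUNT_THRESHOLD = 3

class StateDebouncer:
    # Adopt a new state only after it has been seen enough consecutive times.
    def __init__(self, threshold=STATE_COUNT_THRESHOLD):
        self.threshold = threshold
        self.candidate = None
        self.count = 0
        self.stable = None

    def update(self, state):
        if state != self.candidate:
            self.candidate = state
            self.count = 0
        elif self.count >= self.threshold:
            self.stable = state
        self.count += 1
        return self.stable  # previous stable state until the threshold is met

deb = StateDebouncer()
for s in ["RED", "RED", "RED", "RED", "GREEN", "RED"]:
    print(s, "->", deb.update(s))  # "RED" becomes stable only on the 4th frame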
| 39.125
| 131
| 0.638363
|
24291d1d24d61becd354aae0d9c6d1d4783c447e
| 6,135
|
py
|
Python
|
Assignment3/q2.py
|
zilunzhang/Introduction-to-Image-Understanding
|
6504323ee97e7dff8ebdb98cc4d9e79b3c5092f9
|
[
"Apache-2.0"
] | null | null | null |
Assignment3/q2.py
|
zilunzhang/Introduction-to-Image-Understanding
|
6504323ee97e7dff8ebdb98cc4d9e79b3c5092f9
|
[
"Apache-2.0"
] | null | null | null |
Assignment3/q2.py
|
zilunzhang/Introduction-to-Image-Understanding
|
6504323ee97e7dff8ebdb98cc4d9e79b3c5092f9
|
[
"Apache-2.0"
] | null | null | null |
from PIL import Image
from Homography import *
import scipy.spatial.distance as sp
import os
from sympy.utilities.iterables import multiset_permutations
color_target = cv2.imread("findBook.jpg")
RGB_color_target = cv2.cvtColor(color_target, cv2.COLOR_BGR2RGB)
row, col, height = RGB_color_target.shape
print(RGB_color_target.shape)
old_im = Image.open("findBook.jpg")
old_size = old_im.size
new_size = (2000, 1500)
new_im = Image.new("RGB", new_size)
new_im.paste(old_im, (int((new_size[0]-old_size[0])/2),
int((new_size[1]-old_size[1])/2)))
new_im.save('findbook_border.jpg')
border_img = cv2.imread('findbook_border.jpg')
border_img = cv2.cvtColor(border_img, cv2.COLOR_BGR2RGB)
print("border image shape's is: {}".format(border_img.shape))
def ransac(template_img, target_img):
# define some hyperparameter
k = 3
P = 0.99
p = 0.2
ratio_threshold = 0.8
threshold = 20
max_num_inliner = 0
mssd = 0
best_transformation = None
    min_trials = int(np.divide(np.log(1 - P), np.log(1 - np.power(p, k))))
match_template_kpts, match_target_kpts = extract_match_transformation(template_img, target_img, ratio_threshold)
num_match = len(match_template_kpts)
print("number of match is: {}".format(num_match))
index = 0
print("min trails is: {}".format(min_trails))
while index in range(min_trails):
# print("outter iteration {}".format(index))
# print(k)
# print(num_match)
random_indices = np.random.choice(num_match, k)
match_template_kpts_rand = np.take(match_template_kpts, random_indices)
match_target_kpts_rand = np.take(match_target_kpts, random_indices)
M, match_template_kpts_rand, match_target_kpts_rand = affine_transformation(match_template_kpts_rand, match_target_kpts_rand)
M_new = np.zeros((2, 3))
M_new[0, 0] = M[0, 0]
M_new[0, 1] = M[1, 0]
M_new[0, 2] = M[4, 0]
M_new[1, 0] = M[2, 0]
M_new[1, 1] = M[3, 0]
M_new[1, 2] = M[5, 0]
num_inliner = 0
distance = 0
for i in range(num_match):
# print("inner iteration is: {}".format(i))
template_pt_x, template_pt_y= match_template_kpts[i].pt
target_pt_x, target_pt_y = match_target_kpts[i].pt
before = np.dot(M_new, np.array((template_pt_x, template_pt_y, 1)).T)
after = np.array((target_pt_x, target_pt_y)).reshape(2, 1)
temp_distance = sp.euclidean(before, after)
distance += np.power(temp_distance, 2)
if temp_distance < threshold:
num_inliner += 1
if num_inliner > max_num_inliner:
max_num_inliner = num_inliner
best_transformation = M_new
mssd = distance/num_match
index += 1
return best_transformation, mssd, match_template_kpts, match_target_kpts
def reconstruct(img_template, img_set):
best_mssd = float("inf")
best_trans = np.zeros((3, 3))
index_list = []
best_index = []
for p in multiset_permutations(np.arange(6)):
index_list.append(p)
np.random.shuffle(index_list)
# index_list = index_list[0]
index = 0
for permutation in index_list:
print("permutation number is:{} ".format(index + 1))
print(permutation)
# permutation = [1, 3, 2, 5, 0, 4]
img_reconstruct = merge_image(img_set, permutation)
reconstruct_downsample = cv2.resize(img_reconstruct, (int(row/4), int(col/4)))
template_downsample = cv2.resize(img_template, (int(row/4), int(col/4)))
best_transformation, mssd, match_template_kpts, match_target_kpts = \
ransac(template_downsample, reconstruct_downsample)
if mssd < best_mssd:
best_mssd = mssd
best_trans = best_transformation
best_index = permutation
if best_mssd == 0:
return best_mssd, best_trans, best_index
print("best current mssd is: {}".format(best_mssd))
print("best current index is: {}".format(best_index))
index += 1
return best_mssd, best_trans, best_index
# cite: https://stackoverflow.com/questions/30230592/loading-all-images-using-imread-from-a-given-folder
def load_images_from_folder(folder):
images = []
for filename in os.listdir(folder):
img = cv2.imread(os.path.join(folder,filename))
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
if img is not None:
images.append(img)
return images
def merge_image(list_images, permutation):
temp = []
# print(len(permutation))
for i in range(len(permutation)):
temp.append(list_images[permutation[i]])
result = np.concatenate(temp, axis=1)
return result
if __name__ == '__main__':
start = datetime.now()
color_template = cv2.imread("book.jpg")
RGB_color_template = cv2.cvtColor(color_template, cv2.COLOR_BGR2RGB)
transformation, mssd, match_template_kpts, match_target_kpts = ransac(border_img, RGB_color_template)
print("M is {}".format(transformation.shape))
print("MSSD is: {}".format(mssd))
RGB_color_target = cv2.warpAffine(border_img, transformation, (row, col))
plt.imshow(RGB_color_target)
plt.show()
BGR_color_target = cv2.cvtColor(RGB_color_target, cv2.COLOR_RGB2BGR)
cv2.imwrite("q2a.jpg", BGR_color_target)
images = load_images_from_folder("shredded")
color_template = cv2.imread("mugShot.jpg")
RGB_color_template = cv2.cvtColor(color_template, cv2.COLOR_BGR2RGB)
best_mssd, best_tran, best_idx = reconstruct(RGB_color_template, images)
loaded_images = load_images_from_folder("shredded")
best_img_reconstruct = merge_image(loaded_images, best_idx)
plt.imshow(best_img_reconstruct)
plt.show()
    best_img_reconstruct = cv2.cvtColor(best_img_reconstruct, cv2.COLOR_RGB2BGR)
cv2.imwrite("q2b.jpg", best_img_reconstruct)
print("best MSSD is: {}".format(best_mssd))
print("best transformation is: {}".format(best_tran))
print("best index is: {}".format(best_idx))
print(datetime.now() - start)
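The min_trials formula in ransac is the standard RANSAC iteration bound: to be P-confident that at least one k-point sample is all inliers when the inlier ratio is p, you need N >= log(1 - P) / log(1 - p^k) trials. A quick check with the hyperparameters used above (P=0.99, p=0.2, k=3):
import numpy as np

def ransac_min_trials(P=0.99, p=0.2, k=3):
    # Smallest N with 1 - (1 - p**k)**N >= P; ceil is the conservative
    # rounding, while the script above truncates with int(), giving 573.
    return int(np.ceil(np.log(1 - P) / np.log(1 - p ** k)))

print(ransac_min_trials())  # 574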
| 38.829114
| 133
| 0.671231
|
2ded8183dc04d48607c6fa07deb40df7841f9d45
| 3,851
|
py
|
Python
|
src/commercetools/platform/client/orders/by_project_key_orders_by_id_request_builder.py
|
labd/commercetools-python-sdk
|
d8ec285f08d56ede2e4cad45c74833f5b609ab5c
|
[
"MIT"
] | 15
|
2018-11-02T14:35:52.000Z
|
2022-03-16T07:51:44.000Z
|
src/commercetools/platform/client/orders/by_project_key_orders_by_id_request_builder.py
|
labd/commercetools-python-sdk
|
d8ec285f08d56ede2e4cad45c74833f5b609ab5c
|
[
"MIT"
] | 84
|
2018-11-02T12:50:32.000Z
|
2022-03-22T01:25:54.000Z
|
src/commercetools/platform/client/orders/by_project_key_orders_by_id_request_builder.py
|
labd/commercetools-python-sdk
|
d8ec285f08d56ede2e4cad45c74833f5b609ab5c
|
[
"MIT"
] | 13
|
2019-01-03T09:16:50.000Z
|
2022-02-15T18:37:19.000Z
|
# This file is automatically generated by the rmf-codegen project.
#
# The Python code generator is maintained by Lab Digital. If you want to
# contribute to this project then please do not edit this file directly
# but send a pull request to the Lab Digital fork of rmf-codegen at
# https://github.com/labd/rmf-codegen
import typing
import warnings
from ...models.error import ErrorResponse
from ...models.order import Order, OrderUpdate
if typing.TYPE_CHECKING:
from ...base_client import BaseClient
class ByProjectKeyOrdersByIDRequestBuilder:
_client: "BaseClient"
_project_key: str
_id: str
def __init__(
self,
project_key: str,
id: str,
client: "BaseClient",
):
self._project_key = project_key
self._id = id
self._client = client
def get(
self,
*,
expand: typing.List["str"] = None,
headers: typing.Dict[str, str] = None,
options: typing.Dict[str, typing.Any] = None,
) -> typing.Optional["Order"]:
"""Get Order by ID"""
headers = {} if headers is None else headers
response = self._client._get(
endpoint=f"/{self._project_key}/orders/{self._id}",
params={"expand": expand},
headers=headers,
options=options,
)
if response.status_code == 200:
return Order.deserialize(response.json())
elif response.status_code in (400, 401, 403, 500, 503):
obj = ErrorResponse.deserialize(response.json())
raise self._client._create_exception(obj, response)
elif response.status_code == 404:
return None
warnings.warn("Unhandled status code %d" % response.status_code)
def post(
self,
body: "OrderUpdate",
*,
expand: typing.List["str"] = None,
headers: typing.Dict[str, str] = None,
options: typing.Dict[str, typing.Any] = None,
) -> typing.Optional["Order"]:
"""Update Order by ID"""
headers = {} if headers is None else headers
response = self._client._post(
endpoint=f"/{self._project_key}/orders/{self._id}",
params={"expand": expand},
json=body.serialize(),
headers={"Content-Type": "application/json", **headers},
options=options,
)
if response.status_code == 200:
return Order.deserialize(response.json())
elif response.status_code in (409, 400, 401, 403, 500, 503):
obj = ErrorResponse.deserialize(response.json())
raise self._client._create_exception(obj, response)
elif response.status_code == 404:
return None
warnings.warn("Unhandled status code %d" % response.status_code)
def delete(
self,
*,
data_erasure: bool = None,
version: int,
expand: typing.List["str"] = None,
headers: typing.Dict[str, str] = None,
options: typing.Dict[str, typing.Any] = None,
) -> typing.Optional["Order"]:
"""Delete Order by ID"""
headers = {} if headers is None else headers
response = self._client._delete(
endpoint=f"/{self._project_key}/orders/{self._id}",
params={"dataErasure": data_erasure, "version": version, "expand": expand},
headers=headers,
options=options,
)
if response.status_code == 200:
return Order.deserialize(response.json())
elif response.status_code in (409, 400, 401, 403, 500, 503):
obj = ErrorResponse.deserialize(response.json())
raise self._client._create_exception(obj, response)
elif response.status_code == 404:
return None
warnings.warn("Unhandled status code %d" % response.status_code)
| 35.657407
| 87
| 0.602701
|
60859b9e6f8245ccabca6fcdaf4ab3fc130042a5
| 624
|
py
|
Python
|
brablog/posts/models.py
|
BruhMano/BraBlog
|
bf1173d281893fe65fd96e5e5f07bdc4d65095b1
|
[
"BSD-3-Clause"
] | null | null | null |
brablog/posts/models.py
|
BruhMano/BraBlog
|
bf1173d281893fe65fd96e5e5f07bdc4d65095b1
|
[
"BSD-3-Clause"
] | null | null | null |
brablog/posts/models.py
|
BruhMano/BraBlog
|
bf1173d281893fe65fd96e5e5f07bdc4d65095b1
|
[
"BSD-3-Clause"
] | null | null | null |
from django.db import models
from django.contrib.auth import get_user_model
from groups.models import Group
User = get_user_model()
group = Group()
class Post(models.Model):
title = models.TextField(max_length=150)
text = models.TextField()
pub_date = models.DateField("date published", auto_now_add=True)
author = models.ForeignKey(User, on_delete=models.CASCADE, related_name="posts")
    image = models.ImageField(blank=True, null=True, upload_to="media")
is_liked_by_current_user = models.BooleanField()
likes = models.IntegerField(default=0)
def __str__(self):
return self.title
| 36.705882
| 84
| 0.741987
|