column                                      dtype           range
------                                      -----           -----
hexsha                                      stringlengths   40..40
size                                        int64           5..2.06M
ext                                         stringclasses   10 values
lang                                        stringclasses   1 value
max_stars_repo_path                         stringlengths   3..248
max_stars_repo_name                         stringlengths   5..125
max_stars_repo_head_hexsha                  stringlengths   40..78
max_stars_repo_licenses                     listlengths     1..10
max_stars_count                             int64           1..191k
max_stars_repo_stars_event_min_datetime     stringlengths   24..24
max_stars_repo_stars_event_max_datetime     stringlengths   24..24
max_issues_repo_path                        stringlengths   3..248
max_issues_repo_name                        stringlengths   5..125
max_issues_repo_head_hexsha                 stringlengths   40..78
max_issues_repo_licenses                    listlengths     1..10
max_issues_count                            int64           1..67k
max_issues_repo_issues_event_min_datetime   stringlengths   24..24
max_issues_repo_issues_event_max_datetime   stringlengths   24..24
max_forks_repo_path                         stringlengths   3..248
max_forks_repo_name                         stringlengths   5..125
max_forks_repo_head_hexsha                  stringlengths   40..78
max_forks_repo_licenses                     listlengths     1..10
max_forks_count                             int64           1..105k
max_forks_repo_forks_event_min_datetime     stringlengths   24..24
max_forks_repo_forks_event_max_datetime     stringlengths   24..24
content                                     stringlengths   5..2.06M
avg_line_length                             float64         1..1.02M
max_line_length                             int64           3..1.03M
alphanum_fraction                           float64         0..1
count_classes                               int64           0..1.6M
score_classes                               float64         0..1
count_generators                            int64           0..651k
score_generators                            float64         0..1
count_decorators                            int64           0..990k
score_decorators                            float64         0..1
count_async_functions                       int64           0..235k
score_async_functions                       float64         0..1
count_documentation                         int64           0..1.04M
score_documentation                         float64         0..1
91c545eedebfe63072291f5498dba2aca85beda1
8,738
py
Python
basic_code/networks.py
J-asy/Emotion-FAN
30c1e24a31b2a05c0810a17eb533096a7baaeeef
[ "MIT" ]
275
2019-09-11T10:22:06.000Z
2022-03-29T07:14:31.000Z
basic_code/networks.py
J-asy/Emotion-FAN
30c1e24a31b2a05c0810a17eb533096a7baaeeef
[ "MIT" ]
34
2019-09-11T11:32:32.000Z
2022-03-18T09:32:42.000Z
basic_code/networks.py
J-asy/Emotion-FAN
30c1e24a31b2a05c0810a17eb533096a7baaeeef
[ "MIT" ]
69
2019-09-18T19:00:17.000Z
2022-03-08T11:43:49.000Z
import torch.nn as nn import math import torch.utils.model_zoo as model_zoo import torch.nn.functional as F import torch import numpy as np import cv2 import pdb def sigmoid(x): return 1 / (1 + math.exp(-x)) def norm_angle(angle): norm_angle = sigmoid(10 * (abs(angle) / 0.7853975 - 1)) return norm_angle def conv3x3(in_planes, out_planes, stride=1): "3x3 convolution with padding" return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False) class BasicBlock(nn.Module): expansion = 1 def __init__(self, inplanes, planes, stride=1, downsample=None): super(BasicBlock, self).__init__() self.conv1 = conv3x3(inplanes, planes, stride) self.bn1 = nn.BatchNorm2d(planes) self.relu = nn.ReLU() self.conv2 = conv3x3(planes, planes) self.bn2 = nn.BatchNorm2d(planes) self.downsample = downsample self.stride = stride def forward(self, x): residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) if self.downsample is not None: residual = self.downsample(x) out += residual out = self.relu(out) return out class Bottleneck(nn.Module): expansion = 4 def __init__(self, inplanes, planes, stride=1, downsample=None): super(Bottleneck, self).__init__() self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) self.bn1 = nn.BatchNorm2d(planes) self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False) self.bn2 = nn.BatchNorm2d(planes) self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False) self.bn3 = nn.BatchNorm2d(planes * 4) self.relu = nn.ReLU() self.downsample = downsample self.stride = stride def forward(self, x): residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) out = self.relu(out) out = self.conv3(out) out = self.bn3(out) if self.downsample is not None: residual = self.downsample(x) out = out + residual out = self.relu(out) return out ###''' self-attention; relation-attention ''' class ResNet_AT(nn.Module): def __init__(self, block, layers, num_classes=1000, end2end=True, at_type=''): self.inplanes = 64 self.end2end = end2end super(ResNet_AT, self).__init__() self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False) self.bn1 = nn.BatchNorm2d(64) self.relu = nn.ReLU() self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) self.layer1 = self._make_layer(block, 64, layers[0]) self.layer2 = self._make_layer(block, 128, layers[1], stride=2) self.layer3 = self._make_layer(block, 256, layers[2], stride=2) self.layer4 = self._make_layer(block, 512, layers[3], stride=2) self.avgpool = nn.AdaptiveAvgPool2d(1) self.dropout = nn.Dropout(0.5) self.dropout2 = nn.Dropout(0.6) self.alpha = nn.Sequential(nn.Linear(512, 1), nn.Sigmoid()) self.beta = nn.Sequential(nn.Linear(1024, 1), nn.Sigmoid()) self.pred_fc1 = nn.Linear(512, 7) self.pred_fc2 = nn.Linear(1024, 7) self.at_type = at_type for m in self.modules(): if isinstance(m, nn.Conv2d): n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels m.weight.data.normal_(0, math.sqrt(2. 
/ n)) elif isinstance(m, nn.BatchNorm2d): m.weight.data.fill_(1) m.bias.data.zero_() def _make_layer(self, block, planes, blocks, stride=1): downsample = None if stride != 1 or self.inplanes != planes * block.expansion: downsample = nn.Sequential( nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(planes * block.expansion), ) layers = [] layers.append(block(self.inplanes, planes, stride, downsample)) self.inplanes = planes * block.expansion for i in range(1, blocks): layers.append(block(self.inplanes, planes)) return nn.Sequential(*layers) def forward(self, x='', phrase='train', AT_level='first_level',vectors='',vm='',alphas_from1='',index_matrix=''): vs = [] alphas = [] assert phrase == 'train' or phrase == 'eval' assert AT_level == 'first_level' or AT_level == 'second_level' or AT_level == 'pred' if phrase == 'train': num_pair = 3 for i in range(num_pair): f = x[:, :, :, :, i] # x[128,3,224,224] f = self.conv1(f) f = self.bn1(f) f = self.relu(f) f = self.maxpool(f) f = self.layer1(f) f = self.layer2(f) f = self.layer3(f) f = self.layer4(f) f = self.avgpool(f) f = f.squeeze(3).squeeze(2) # f[1, 512, 1, 1] ---> f[1, 512] # MN_MODEL(first Level) vs.append(f) alphas.append(self.alpha(self.dropout(f))) vs_stack = torch.stack(vs, dim=2) alphas_stack = torch.stack(alphas, dim=2) if self.at_type == 'self-attention': vm1 = vs_stack.mul(alphas_stack).sum(2).div(alphas_stack.sum(2)) if self.at_type == 'self_relation-attention': vm1 = vs_stack.mul(alphas_stack).sum(2).div(alphas_stack.sum(2)) betas = [] for i in range(len(vs)): vs[i] = torch.cat([vs[i], vm1], dim=1) betas.append(self.beta(self.dropout(vs[i]))) cascadeVs_stack = torch.stack(vs, dim=2) betas_stack = torch.stack(betas, dim=2) output = cascadeVs_stack.mul(betas_stack * alphas_stack).sum(2).div((betas_stack * alphas_stack).sum(2)) if self.at_type == 'self-attention': vm1 = self.dropout(vm1) pred_score = self.pred_fc1(vm1) if self.at_type == 'self_relation-attention': output = self.dropout2(output) pred_score = self.pred_fc2(output) return pred_score if phrase == 'eval': if AT_level == 'first_level': f = self.conv1(x) f = self.bn1(f) f = self.relu(f) f = self.maxpool(f) f = self.layer1(f) f = self.layer2(f) f = self.layer3(f) f = self.layer4(f) f = self.avgpool(f) f = f.squeeze(3).squeeze(2) # f[1, 512, 1, 1] ---> f[1, 512] # MN_MODEL(first Level) alphas = self.alpha(self.dropout(f)) return f, alphas if AT_level == 'second_level': assert self.at_type == 'self_relation-attention' vms = index_matrix.permute(1, 0).mm(vm) # [381, 21783] -> [21783,381] * [381,512] --> [21783, 512] vs_cate = torch.cat([vectors, vms], dim=1) betas = self.beta(self.dropout(vs_cate)) ''' keywords: mean_fc ; weight_sourcefc; sum_alpha; weightmean_sourcefc ''' ''' alpha * beta ''' weight_catefc = vs_cate.mul(alphas_from1) # [21570,512] * [21570,1] --->[21570,512] alpha_beta = alphas_from1.mul(betas) sum_alphabetas = index_matrix.mm(alpha_beta) # [380,21570] * [21570,1] -> [380,1] weightmean_catefc = index_matrix.mm(weight_catefc).div(sum_alphabetas) weightmean_catefc = self.dropout2(weightmean_catefc) pred_score = self.pred_fc2(weightmean_catefc) return pred_score if AT_level == 'pred': if self.at_type == 'self-attention': pred_score = self.pred_fc1(self.dropout(vm)) return pred_score ''' self-attention; relation-attention ''' def resnet18_at(pretrained=False, **kwargs): # Constructs base a ResNet-18 model. model = ResNet_AT(BasicBlock, [2, 2, 2, 2], **kwargs) return model
34.674603
120
0.54509
7,958
0.910735
0
0
0
0
0
0
758
0.086748
91c59190736d04c98947f42fd90af017204111ac
505
py
Python
ndscheduler/server/handlers/index.py
symphonyrm/ndscheduler
e9a56ef345b25916a2b53d1ea3349efb532d63ce
[ "BSD-2-Clause" ]
null
null
null
ndscheduler/server/handlers/index.py
symphonyrm/ndscheduler
e9a56ef345b25916a2b53d1ea3349efb532d63ce
[ "BSD-2-Clause" ]
null
null
null
ndscheduler/server/handlers/index.py
symphonyrm/ndscheduler
e9a56ef345b25916a2b53d1ea3349efb532d63ce
[ "BSD-2-Clause" ]
null
null
null
"""Serves the single page app web ui.""" import json import tornado.gen from ndscheduler import settings from ndscheduler import utils from ndscheduler.server.handlers import base class Handler(base.BaseHandler): """Index page request handler.""" @tornado.gen.coroutine def get(self): """Serve up the single page app for scheduler dashboard.""" meta_info = utils.get_all_available_jobs() self.render(settings.APP_INDEX_PAGE, jobs_meta_info=json.dumps(meta_info))
25.25
82
0.732673
320
0.633663
0
0
244
0.483168
0
0
132
0.261386
91c6b0a778c821558e257de0d52e71c5f953c2bf
801
py
Python
Scripts/xbbtools/xbb_io.py
eoc21/biopython
c0f8db8f55a506837c320459957a0ce99b0618b6
[ "PostgreSQL" ]
3
2017-10-23T21:53:57.000Z
2019-09-23T05:14:12.000Z
Scripts/xbbtools/xbb_io.py
eoc21/biopython
c0f8db8f55a506837c320459957a0ce99b0618b6
[ "PostgreSQL" ]
null
null
null
Scripts/xbbtools/xbb_io.py
eoc21/biopython
c0f8db8f55a506837c320459957a0ce99b0618b6
[ "PostgreSQL" ]
6
2020-02-26T16:34:20.000Z
2020-03-04T15:34:00.000Z
#!/usr/bin/env python
# Created: Wed Jun 21 13:46:35 2000
# Last changed: Time-stamp: <00/12/02 14:18:23 thomas>
# Thomas.Sicheritz@molbio.uu.se, http://evolution.bmc.uu.se/~thomas
# File: xbb_io.py

import os, sys  # os.system, sys.argv
sys.path.insert(0, '.')
sys.path.insert(0, os.path.expanduser('~thomas/cbs/python/biopython'))

from Bio.ParserSupport import *
from Bio import Fasta

class xbb_io:
    def __init__(self):
        ""

    def error(self, str):
        print str

    def read_fasta_file(self, file):
        genes = []
        iter = Fasta.Iterator(handle = open(file), parser = Fasta.RecordParser())
        while 1:
            rec = iter.next()
            if not rec: break
            genes.append((rec.sequence, rec.title))
        return genes
23.558824
81
0.604245
391
0.48814
0
0
0
0
0
0
250
0.31211
91c7653e4b544fa3638814ac8e321e91f01ca6d4
288
py
Python
HW6/Andrii_Haponov/cw_4.py
kolyasalubov/Lv-677.PythonCore
c9f9107c734a61e398154a90b8a3e249276c2704
[ "MIT" ]
null
null
null
HW6/Andrii_Haponov/cw_4.py
kolyasalubov/Lv-677.PythonCore
c9f9107c734a61e398154a90b8a3e249276c2704
[ "MIT" ]
null
null
null
HW6/Andrii_Haponov/cw_4.py
kolyasalubov/Lv-677.PythonCore
c9f9107c734a61e398154a90b8a3e249276c2704
[ "MIT" ]
6
2022-02-22T22:30:49.000Z
2022-03-28T12:51:19.000Z
# Convert a Number to a String!
# We need a function that can transform a number into a string.
# What ways of achieving this do you know?


def number_to_string(num: int) -> str:
    str_num = str(num)
    return str_num


print(number_to_string(123))
print(type(number_to_string(123)))
24
63
0.729167
0
0
0
0
0
0
0
0
136
0.472222
91c7cf66ad6751a13ba5162d5a7e62b526efecd6
2,693
py
Python
project/scripts/clausecat/evaluate_clausecat.py
explosion/healthsea
4481488ed9fc85b89844ee872d0a8412a33f0b15
[ "MIT" ]
60
2021-12-15T17:14:37.000Z
2022-03-26T18:25:15.000Z
project/scripts/clausecat/evaluate_clausecat.py
zhinoos-adibi/healthsea
4481488ed9fc85b89844ee872d0a8412a33f0b15
[ "MIT" ]
3
2021-12-16T19:50:15.000Z
2022-03-28T06:10:48.000Z
project/scripts/clausecat/evaluate_clausecat.py
zhinoos-adibi/healthsea
4481488ed9fc85b89844ee872d0a8412a33f0b15
[ "MIT" ]
9
2021-12-15T21:00:05.000Z
2022-03-17T09:20:51.000Z
import spacy
from spacy.scorer import PRFScore
import typer
from pathlib import Path
from wasabi import Printer, table
import operator
import benepar

import clausecat_component
import clausecat_model
import clausecat_reader
import clause_segmentation
import clause_aggregation

msg = Printer()


def main(model_path: Path, eval_path: Path):
    """This script is used to evaluate the clausecat component"""

    nlp = spacy.load(model_path)
    reader = clausecat_reader.ClausecatCorpus(eval_path)
    examples = reader(nlp)

    clausecat = nlp.get_pipe("clausecat")

    scorer = {
        "POSITIVE": PRFScore(),
        "NEGATIVE": PRFScore(),
        "NEUTRAL": PRFScore(),
        "ANAMNESIS": PRFScore(),
    }

    for i, example in enumerate(examples):
        prediction = example.predicted
        reference = example.reference

        # Prediction
        prediction = clausecat(prediction)

        # Iterate through prediction and references
        for pred_clause, ref_clause in zip(prediction._.clauses, reference._.clauses):
            prediction_cats = pred_clause["cats"]
            reference_cats = ref_clause["cats"]
            prediction_class = max(prediction_cats.items(), key=operator.itemgetter(1))[
                0
            ]

            # Add to matrix
            for label in prediction_cats:
                if label != prediction_class:
                    prediction = 0
                else:
                    prediction = 1

                if prediction == 0 and reference_cats[label] != 0:
                    scorer[label].fn += 1
                elif prediction == 1 and reference_cats[label] != 1:
                    scorer[label].fp += 1
                elif prediction == 1 and reference_cats[label] == 1:
                    scorer[label].tp += 1

    # Printing
    textcat_data = []
    avg_fscore = 0
    avg_recall = 0
    avg_precision = 0

    for label in scorer:
        textcat_data.append(
            (
                label,
                round(scorer[label].fscore, 2),
                round(scorer[label].recall, 2),
                round(scorer[label].precision, 2),
            )
        )
        avg_fscore += scorer[label].fscore
        avg_recall += scorer[label].recall
        avg_precision += scorer[label].precision

    textcat_data.append(
        (
            "AVERAGE",
            round(avg_fscore / len(scorer), 2),
            round(avg_recall / len(scorer), 2),
            round(avg_precision / len(scorer), 2),
        )
    )

    header = ("Label", "F-Score", "Recall", "Precision")
    print(table(textcat_data, header=header, divider=True))


if __name__ == "__main__":
    typer.run(main)
26.93
88
0.580394
0
0
0
0
0
0
0
0
258
0.095804
91c7ef0594439547e88e45169d2cad470d31a591
130
py
Python
utils/test.py
david-waugh/network-automation
c85ab092cd9b76753c4d35f113126cfb663c1933
[ "MIT" ]
null
null
null
utils/test.py
david-waugh/network-automation
c85ab092cd9b76753c4d35f113126cfb663c1933
[ "MIT" ]
null
null
null
utils/test.py
david-waugh/network-automation
c85ab092cd9b76753c4d35f113126cfb663c1933
[ "MIT" ]
null
null
null
import pathlib

print(pathlib.Path(__file__).parent.resolve())

while True:
    next_cmd = input("> ")
    print(eval(next_cmd))
14.444444
46
0.684615
0
0
0
0
0
0
0
0
4
0.030769
91c92b40c4f1e26399a0ff522ec30f406f0ff98d
934
py
Python
nlp_annotator_api/server/app.py
IBM/deepsearch-nlp-annotator-api-example
76c2c8fd83c1e6d51c51c7b581a8c3f273b23c40
[ "Apache-2.0" ]
3
2022-01-04T12:15:22.000Z
2022-03-25T21:19:20.000Z
nlp_annotator_api/server/app.py
IBM/deepsearch-nlp-annotator-api-example
76c2c8fd83c1e6d51c51c7b581a8c3f273b23c40
[ "Apache-2.0" ]
null
null
null
nlp_annotator_api/server/app.py
IBM/deepsearch-nlp-annotator-api-example
76c2c8fd83c1e6d51c51c7b581a8c3f273b23c40
[ "Apache-2.0" ]
5
2021-09-27T08:26:09.000Z
2022-03-10T11:41:35.000Z
import logging
import os

import aiohttp.web
from connexion import AioHttpApp

from nlp_annotator_api.config.config import conf
from nlp_annotator_api.config.logging import setup_logging
from nlp_annotator_api.server.middleware.statsd_middleware import StatsdMiddleware
from nlp_annotator_api.server.signals.statsd_client import statsd_client_factory

setup_logging()

access_log = logging.getLogger("nlp_annotator_api.access")

_file_dir = os.path.dirname(__file__)

app = AioHttpApp(
    __name__, specification_dir=os.path.join(_file_dir, "..", "resources", "schemas"),
    server_args=dict(
        client_max_size=8 * 1024**2
    )
)

app.add_api("openapi.yaml", pass_context_arg_name="request")

aiohttp_app: aiohttp.web.Application = app.app

aiohttp_app.cleanup_ctx.append(statsd_client_factory(conf.statsd))
aiohttp_app.middlewares.append(StatsdMiddleware())

if __name__ == "__main__":
    app.run(access_log=access_log)
26.685714
86
0.799786
0
0
0
0
0
0
0
0
83
0.088865
91c97df0fae07bca6b5ed203a6e4102faddf3f12
4,534
py
Python
keras_cv_attention_models/resnest/resnest.py
dcleres/keras_cv_attention_models
264876673e369f23eff49b3b589b72f908a9625b
[ "MIT" ]
140
2021-08-04T06:51:41.000Z
2022-03-30T08:08:32.000Z
keras_cv_attention_models/resnest/resnest.py
dcleres/keras_cv_attention_models
264876673e369f23eff49b3b589b72f908a9625b
[ "MIT" ]
12
2021-09-29T00:43:58.000Z
2022-03-28T07:50:35.000Z
keras_cv_attention_models/resnest/resnest.py
dcleres/keras_cv_attention_models
264876673e369f23eff49b3b589b72f908a9625b
[ "MIT" ]
20
2021-09-28T20:07:35.000Z
2022-03-31T14:06:40.000Z
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import backend as K
from keras_cv_attention_models.aotnet import AotNet
from keras_cv_attention_models.download_and_load import reload_model_weights
from keras_cv_attention_models.attention_layers import batchnorm_with_activation, conv2d_no_bias

PRETRAINED_DICT = {
    "resnest101": {"imagenet": "63f9ebdcd32529cbc4b4fbbec3d1bb2f"},
    "resnest200": {"imagenet": "8e211dcb089b588e18d36ba7cdf92ef0"},
    "resnest269": {"imagenet": "4309ed1b0a8ae92f2b1143dc3512c5c7"},
    "resnest50": {"imagenet": "eee7b20a229821f730ab205b6afeb369"},
}


def rsoftmax(inputs, groups):
    if groups > 1:
        nn = tf.reshape(inputs, [-1, 1, groups, inputs.shape[-1] // groups])
        # nn = tf.transpose(nn, [0, 2, 1, 3])
        nn = tf.nn.softmax(nn, axis=2)
        nn = tf.reshape(nn, [-1, 1, 1, inputs.shape[-1]])
    else:
        nn = keras.layers.Activation("sigmoid")(inputs)
    return nn


def split_attention_conv2d(inputs, filters, kernel_size=3, strides=1, downsample_first=False, groups=2, activation="relu", name=""):
    h_axis, w_axis = [2, 3] if K.image_data_format() == "channels_first" else [1, 2]
    in_channels = inputs.shape[-1]
    conv_strides = strides if downsample_first else 1
    if groups == 1:
        logits = conv2d_no_bias(inputs, filters, kernel_size, strides=conv_strides, padding="same", name=name and name + "1_")
    else:
        # Using groups=2 is slow in `mixed_float16` policy
        # logits = conv2d_no_bias(inputs, filters * groups, kernel_size, padding="same", groups=groups, name=name and name + "1_")
        logits = []
        splitted_inputs = tf.split(inputs, groups, axis=-1)
        for ii in range(groups):
            conv_name = name and name + "1_g{}_".format(ii + 1)
            logits.append(conv2d_no_bias(splitted_inputs[ii], filters, kernel_size, strides=conv_strides, padding="same", name=conv_name))
        logits = tf.concat(logits, axis=-1)
    logits = batchnorm_with_activation(logits, activation=activation, name=name and name + "1_")

    if groups > 1:
        splited = tf.split(logits, groups, axis=-1)
        gap = tf.reduce_sum(splited, axis=0)
    else:
        gap = logits
    gap = tf.reduce_mean(gap, [h_axis, w_axis], keepdims=True)

    reduction_factor = 4
    inter_channels = max(in_channels * groups // reduction_factor, 32)
    atten = keras.layers.Conv2D(inter_channels, kernel_size=1, name=name and name + "2_conv")(gap)
    atten = batchnorm_with_activation(atten, activation=activation, name=name and name + "2_")
    atten = keras.layers.Conv2D(filters * groups, kernel_size=1, name=name and name + "3_conv")(atten)
    atten = rsoftmax(atten, groups)
    out = keras.layers.Multiply()([atten, logits])

    if groups > 1:
        out = tf.split(out, groups, axis=-1)
        out = tf.reduce_sum(out, axis=0)

    if not downsample_first and strides > 1:
        out = keras.layers.ZeroPadding2D(padding=1, name=name and name + "pool_pad")(out)
        out = keras.layers.AveragePooling2D(3, strides=2, name=name and name + "pool")(out)
    return out


def ResNest(input_shape=(224, 224, 3), stem_type="deep", attn_types="sa", bn_after_attn=False, shortcut_type="avg", pretrained="imagenet", **kwargs):
    kwargs.pop("kwargs", None)
    model = AotNet(**locals(), **kwargs)
    reload_model_weights(model, pretrained_dict=PRETRAINED_DICT, sub_release="resnest", pretrained=pretrained)
    return model


def ResNest50(input_shape=(224, 224, 3), num_classes=1000, activation="relu", classifier_activation="softmax", pretrained="imagenet", groups=2, **kwargs):
    return ResNest(num_blocks=[3, 4, 6, 3], stem_width=64, model_name="resnest50", **locals(), **kwargs)


def ResNest101(input_shape=(256, 256, 3), num_classes=1000, activation="relu", classifier_activation="softmax", pretrained="imagenet", groups=2, **kwargs):
    return ResNest(num_blocks=[3, 4, 23, 3], stem_width=128, model_name="resnest101", **locals(), **kwargs)


def ResNest200(input_shape=(320, 320, 3), num_classes=1000, activation="relu", classifier_activation="softmax", pretrained="imagenet", groups=2, **kwargs):
    return ResNest(num_blocks=[3, 24, 36, 3], stem_width=128, model_name="resnest200", **locals(), **kwargs)


def ResNest269(input_shape=(416, 416, 3), num_classes=1000, activation="relu", classifier_activation="softmax", pretrained="imagenet", groups=2, **kwargs):
    return ResNest(num_blocks=[3, 30, 48, 8], stem_width=128, model_name="resnest269", **locals(), **kwargs)
50.377778
155
0.696074
0
0
0
0
0
0
0
0
718
0.158359
91c9ae32ffd6100ceb2a8fceee2c2c30ae4e7dc4
3,518
py
Python
dataactcore/migrations/versions/8692ab1298e1_replace_filerequest_with_filegeneration.py
brianherman/data-act-broker-backend
80eb055b9d245046192f7ad4fd0be7d0e11d2dec
[ "CC0-1.0" ]
1
2019-06-22T21:53:16.000Z
2019-06-22T21:53:16.000Z
dataactcore/migrations/versions/8692ab1298e1_replace_filerequest_with_filegeneration.py
brianherman/data-act-broker-backend
80eb055b9d245046192f7ad4fd0be7d0e11d2dec
[ "CC0-1.0" ]
3
2021-08-22T11:47:45.000Z
2022-03-29T22:06:49.000Z
dataactcore/migrations/versions/8692ab1298e1_replace_filerequest_with_filegeneration.py
brianherman/data-act-broker-backend
80eb055b9d245046192f7ad4fd0be7d0e11d2dec
[ "CC0-1.0" ]
1
2020-07-17T23:50:56.000Z
2020-07-17T23:50:56.000Z
"""replace FileRequest with FileGeneration Revision ID: 8692ab1298e1 Revises: 4bbc47f2b48d Create Date: 2018-10-24 14:54:39.278159 """ # revision identifiers, used by Alembic. revision = '8692ab1298e1' down_revision = '4bbc47f2b48d' branch_labels = None depends_on = None from alembic import op import sqlalchemy as sa def upgrade(engine_name): globals()["upgrade_%s" % engine_name]() def downgrade(engine_name): globals()["downgrade_%s" % engine_name]() def upgrade_data_broker(): # ### commands auto generated by Alembic - please adjust! ### op.create_table('file_generation', sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('file_generation_id', sa.Integer(), nullable=False), sa.Column('request_date', sa.Date(), nullable=False), sa.Column('start_date', sa.Date(), nullable=False), sa.Column('end_date', sa.Date(), nullable=False), sa.Column('agency_code', sa.Text(), nullable=False), sa.Column('agency_type', sa.Enum('awarding', 'funding', name='generation_agency_types'), server_default='awarding', nullable=False), sa.Column('file_type', sa.Enum('D1', 'D2', name='generation_file_types'), server_default='D1', nullable=False), sa.Column('file_path', sa.Text(), nullable=True), sa.Column('is_cached_file', sa.Boolean(), nullable=False), sa.PrimaryKeyConstraint('file_generation_id') ) op.create_index(op.f('ix_file_generation_agency_code'), 'file_generation', ['agency_code'], unique=False) op.create_index(op.f('ix_file_generation_agency_type'), 'file_generation', ['agency_type'], unique=False) op.create_index(op.f('ix_file_generation_end_date'), 'file_generation', ['end_date'], unique=False) op.create_index(op.f('ix_file_generation_file_type'), 'file_generation', ['file_type'], unique=False) op.create_index(op.f('ix_file_generation_request_date'), 'file_generation', ['request_date'], unique=False) op.create_index(op.f('ix_file_generation_start_date'), 'file_generation', ['start_date'], unique=False) op.add_column('job', sa.Column('file_generation_id', sa.Integer(), nullable=True)) op.create_foreign_key('fk_file_request_file_generation_id', 'job', 'file_generation', ['file_generation_id'], ['file_generation_id'], ondelete='SET NULL') op.drop_column('job', 'from_cached') # ### end Alembic commands ### def downgrade_data_broker(): # ### commands auto generated by Alembic - please adjust! ### op.add_column('job', sa.Column('from_cached', sa.BOOLEAN(), server_default=sa.text('false'), autoincrement=False, nullable=False)) op.drop_constraint('fk_file_request_file_generation_id', 'job', type_='foreignkey') op.drop_column('job', 'file_generation_id') op.drop_index(op.f('ix_file_generation_start_date'), table_name='file_generation') op.drop_index(op.f('ix_file_generation_request_date'), table_name='file_generation') op.drop_index(op.f('ix_file_generation_file_type'), table_name='file_generation') op.drop_index(op.f('ix_file_generation_end_date'), table_name='file_generation') op.drop_index(op.f('ix_file_generation_agency_type'), table_name='file_generation') op.drop_index(op.f('ix_file_generation_agency_code'), table_name='file_generation') op.drop_table('file_generation') op.execute(""" DROP TYPE generation_agency_types """) op.execute(""" DROP TYPE generation_file_types """) # ### end Alembic commands ###
45.102564
158
0.726549
0
0
0
0
0
0
0
0
1,708
0.485503
91cb094ac7602563246a111f9c1326b917365ed1
10,652
py
Python
cluster.py
Birfy/Endlinking
cc87a5528498e1733111d302437aeb1142b0a47f
[ "MIT" ]
1
2020-02-20T03:46:10.000Z
2020-02-20T03:46:10.000Z
cluster.py
Birfy/Endlinking
cc87a5528498e1733111d302437aeb1142b0a47f
[ "MIT" ]
null
null
null
cluster.py
Birfy/Endlinking
cc87a5528498e1733111d302437aeb1142b0a47f
[ "MIT" ]
null
null
null
import numpy as np import random import sys chainlength = int(sys.argv[1]) dfname = sys.argv[2] outfl = 'result.data' cluster_size = int(sys.argv[3]) def readsize(dfname): with open(dfname, 'r') as df: lines = df.readlines() for line in lines: content = line.split() if content and content[-1] == 'xhi': return 2*float(content[1]) def readdata(dfname, chainlen): X=[] Xi=[] with open(dfname, 'r') as df: lines = df.readlines() for line in lines: content = line.split() if len(content) == 9: # print(content) if (int(content[0]) % chainlen == 0 or int(content[0]) % chainlen == 1) and int(content[2]) != 3 and int(content[2]) != 4 : X.append([float(content[i]) for i in range(3,6)]) Xi.append(int(content[0])) return np.array(X), np.array(Xi) def initmeans(n): M=[] for i in range(n): M.append([size*(random.random()-0.5),size*(random.random()-0.5),size*(random.random()-0.5)]) return np.array(M) def SetDistMat(X, means): distmat_dtype = [('key',int), ('dist',float)] distmat = np.empty((n,k),dtype=distmat_dtype) for i in range(n): distmat[i,:] = [(c[0], GetDist(X[i], c[1])) for c in enumerate(means)] distmat[i,:] = np.sort(distmat[i,:], order='dist') return distmat def GetDist(x, c): dist = np.linalg.norm(x-c-boxl*np.around((x-c)/boxl)) return dist def Get_plst(assigned, distmat, full): plst = [] for i in range(n): if (i not in assigned): j = 0 while j<k: if (not full[distmat[i,j][0]]): bestkey = distmat[i,j][0] mindist = distmat[i,j][1] break else: j += 1 for j in range(k-1,-1,-1): if (not full[distmat[i,j][0]]): maxdist = distmat[i,j][1] break plst.append((i, bestkey, maxdist-mindist)) plst.sort(key=lambda t:t[2]) return plst def InitialAssignment(distmat): clusters = {} full = np.zeros(k,dtype=bool) # a boolean array that records which clusters are full assigned = [] # a list of objects who has been assigned to a cluster plst = Get_plst(assigned, distmat, full) while (len(plst)): temp = plst.pop() try: if (len(clusters[temp[1]])<cluster_size): clusters[temp[1]].append(temp[0]) assigned.append(temp[0]) else: full[temp[1]] = True plst = Get_plst(assigned, distmat, full) except KeyError: clusters[temp[1]] = [temp[0]] assigned.append(temp[0]) return clusters def CalcMeans(X, oldmeans, clusters): means = np.zeros((k,3)) keys = sorted(clusters.keys()) for key in keys: for i in clusters[key]: means[key] += X[i]-boxl*np.around((X[i]-oldmeans[key])/boxl) means[key] /= len(clusters[key]) means[key] -= boxl*np.around(means[key]/boxl) return means def SortObj(X, clusters, means, distmat): objlst = [] # list of objects ordered in asceding delta of the current # assignment and the best possible alternate assignment keys = sorted(clusters.keys()) for key in keys: for i in clusters[key]: currdist = GetDist(X[i],means[key]) mindist = distmat[i,0][1] objlst.append((i, key, currdist-mindist)) objlst.sort(key=lambda t:t[2], reverse=True) return objlst def Transfer(obj, clufrom, cluto, clusters): clusters[clufrom].remove(obj) clusters[cluto].append(obj) return clusters def WriteResult(file, X, means, clusters): with open(file, 'w') as fl: # keys = sorted(clusters.keys()) # i = 1 # for key in keys: # for obj in clusters[key]: # fl.write("%d\t%d\t%f\t%f\t%f\t%d\n"\ # %(obj,Xi[obj], X[obj][0], X[obj][1], X[obj][2], key)) # i = i + 1 for c in enumerate(means): fl.write("%d\t%f\t%f\t%f"%(c[0], c[1][0], c[1][1], c[1][2])) for obj in clusters[c[0]]: fl.write("\t%d"%(Xi[obj])) fl.write('\n') # i = i + 1 return # This function will perform statistical analysis to the clustering results def ClusterStat(X, means, clusters): # 
Average distance between means means_avg = 0. for i in range(k-1): for j in range(i+1,k): means_avg += GetDist(means[i], means[j]) means_avg /= (k*(k-1)/2.) # Average distance between obj and mean in a cluster obj2mean_avg = np.zeros(k) # Variance of the distances between obj and mean in a cluster obj2mean_var = np.zeros(k) keys = sorted(clusters.keys()) for key in keys: for i in clusters[key]: obj2mean = GetDist(X[i], means[key]) obj2mean_avg[key] += obj2mean obj2mean_var[key] += obj2mean*obj2mean obj2mean_avg[key] /= len(clusters[key]) obj2mean_var[key] /= len(clusters[key]) obj2mean_var[key] = np.sqrt(obj2mean_var[key]) # Average within cluster distances between objects winclu_avg = np.zeros(k) # Average of within cluster distances of all clusters winclu_grandavg = 0. for key in keys: for i in clusters[key]: x = X[i] for j in clusters[key]: if j>i: winclu_avg[key] += GetDist(x, X[j]) s = len(clusters[key]) winclu_avg[key] /= (s*(s-1)/2) winclu_grandavg += winclu_avg[key] winclu_grandavg /= k # write the summary print("average distance among means: %f"%means_avg) #print("average distance from objects to the mean of a cluster:") #for i in range(k): # print("cluster %i: %f"%(i, obj2mean_avg[i])) #print("variance of distances from objects to the mean of a cluster:") #for i in range(k): # print("cluster %i: %f"%(i, obj2mean_var[i])) #print("within-cluster average distances:") #for i in range(k): # print("cluster %i: %f"%(i, winclu_avg[i])) print("grand average of within-cluster average distances: %f"%winclu_grandavg) return X, Xi = readdata(dfname, chainlength) size = readsize(dfname) boxl = np.array([size, size, size]) n = len(X) k = int(len(X)/cluster_size) # Set up the database of objects # X = readdata(dfname, chainlength) # Choose initial means with K-means means = initmeans(k) # Set up initial clusters distmat = SetDistMat(X, means) clusters = InitialAssignment(distmat) ## debug code #keys = sorted(clusters.keys()) #for key in keys: # print("cluster %i:"%key) # print(clusters[key]) ## end of debug # Iteration step for iter in range(100): active = 0 # indicate the number of transfers in the current iteration tranlst = (-1)*np.ones(k, dtype='int') # set up transfer list for each cluster # Compute the cluster means oldmeans = means.copy() means = CalcMeans(X, oldmeans, clusters) # Get statistics about the clustering #ClusterStat(X, means, clusters) ## debug code #print("old means:") #print(oldmeans) #print("new means:") #print(means) ## end of debug # For each object, compute the distances to the cluster means distmat = SetDistMat(X, means) # Sort objects based on the delta of the current assignment and the best # possible alternate assignment objlst = SortObj(X, clusters, means, distmat) ##debug code #print(objlst) ##return #end of debug # For each element by prioty: while (len(objlst)): (i, key, temp) = objlst.pop() obj2key = GetDist(X[i], means[key]) transferred = False #record if any transfering has occured to i if (key == distmat[i,0][0]): ##debug #print("%i is already the opt cluster for obj %i. no transfer"%(clu, i)) ##end of debug continue # For each other clusters by element gain: else: for j in range(k): clu = distmat[i,j][0] # the key of another cluster objgain = obj2key - distmat[i,j][1] # gain by transfering i from cluster key to clu if (clu==key): # already in the cluster continue if (len(clusters[clu]) < cluster_size): active += 1 transferred = True clusters = Transfer(i, key, clu, clusters) ##debug #print("cluster %i not full. 
transfer obj %i from cluster %i to it."%(clu, i, key)) ##end of debug break elif (tranlst[clu] != -1): # if the tranlst of another cluster is not empty # distance between the obj in the tranlst and the current cluster tran2key = GetDist(X[tranlst[clu]], means[key]) tran2clu = GetDist(X[tranlst[clu]], means[clu]) # gain by transfering the obj in tranlst from cluster clu to key trangain = tran2clu - tran2key if (objgain + trangain > 0): # transfer if the sum of gains are positive, ie net gain active += 2 transferred = True clusters = Transfer(i, key, clu, clusters) clusters = Transfer(tranlst[clu], clu, key, clusters) ##debug #print("obj %i is transfered from cluster %i to %i"%(i, key, clu)) #print("obj %i is transfered from cluster %i to %i"%(tranlst[clu], clu, key)) #print("objgain: %f, trangain: %f"%(objgain, trangain)) ##end of debug tranlst[clu] = -1 # reset the tranlst to empty break if (not transferred): tranlst[key] = i ##debug #print("add obj %i in cluster %i to the transfer list"%(i, key)) ##end of debug # nothing is transferred during this iteration, return the clustering result if (not active): break #debug code print("number of transfers in iter %i: %i\n"%(iter+1, active)) #end of debug print("K-means clustering converged in %d iterations!\n"%(iter+1)) # Output the clustering results WriteResult(outfl, X, means, clusters) ClusterStat(X, means, clusters) # print(X)
36.986111
135
0.557548
0
0
0
0
0
0
0
0
3,334
0.312993
91cb09a3e92988e65a39aed7bb0bc23d1f6a9538
20,537
py
Python
util/hierarchical_primitive/cube_inclusion.py
isunchy/cuboid_abstraction
afda6ca8516c2f5e5e7292b3b22a059a4f6c84ec
[ "MIT" ]
43
2019-09-20T07:45:08.000Z
2022-03-23T04:07:21.000Z
util/hierarchical_primitive/cube_inclusion.py
SilenKZYoung/cuboid_abstraction
afda6ca8516c2f5e5e7292b3b22a059a4f6c84ec
[ "MIT" ]
4
2019-11-25T00:57:10.000Z
2021-09-02T10:59:05.000Z
util/hierarchical_primitive/cube_inclusion.py
SilenKZYoung/cuboid_abstraction
afda6ca8516c2f5e5e7292b3b22a059a4f6c84ec
[ "MIT" ]
10
2019-09-10T02:19:47.000Z
2021-06-16T05:23:43.000Z
import numpy as np import quaternion sample_points = np.array([[-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.
0,0.0,0.0,0.0,0.0,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0], 
[-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0
.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0], 
[-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0
.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0] ], dtype=np.float32) # [3, n] sample_points = np.transpose(sample_points) # [n, 3] def cube_inclusion(cube_param_1, cube_param_2): n_cube_1 = cube_param_1['z'].shape[0] # child n_cube_2 = cube_param_2['z'].shape[0] # parent assert(n_cube_1 > n_cube_2) assert(cube_param_1['q'].shape[0] == cube_param_1['t'].shape[0] == n_cube_1) assert(cube_param_2['q'].shape[0] == cube_param_2['t'].shape[0] == n_cube_2) n_point = sample_points.shape[0] cube_cube_distance = np.zeros([n_cube_1, n_cube_2]) for i in range(n_cube_1): z1, q1, t1 = [cube_param_1[v][i] for v in ['z', 'q', 't']] for j in range(n_cube_2): z2, q2, t2 = [cube_param_2[v][j] for v in ['z', 'q', 't']] points = sample_points * z1 rot1 = np.quaternion(q1[0], q1[1], q1[2], q1[3]) rot1 = quaternion.as_rotation_matrix(rot1) points = np.transpose(np.matmul(rot1, np.transpose(points))) points += t1 points -= t2 rot2 = np.quaternion(q2[0], q2[1], q2[2], q2[3]).conjugate() rot2 = quaternion.as_rotation_matrix(rot2) points = np.transpose(np.matmul(rot2, np.transpose(points))) distance = np.mean(np.sum(np.maximum(abs(points) - z2, 0)**2, axis=1)) cube_cube_distance[i, j] = distance index = np.argmin(cube_cube_distance, 
axis=1) return index def generate_sample_cube_points(resulution=11): sample_points = np.zeros([resulution, resulution, resulution, 3], dtype=np.float32) location_template = np.linspace(-1.0, 1.0, num=11) for i in range(resulution): for j in range(resulution): for k in range(resulution): sample_points[i, j, k, 0] = location_template[i] sample_points[i, j, k, 1] = location_template[j] sample_points[i, j, k, 2] = location_template[k] np.savetxt('sample_points.txt', np.transpose(np.reshape(sample_points, [-1, 3])), fmt='%1.1f', delimiter=',') if __name__ == '__main__': # generate_sample_cube_points() z1 = np.array([[0.1, 0.1, 0.1], [0.1, 0.1, 0.1], [0.1, 0.1, 0.1]]) q1 = np.array([[1.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0]]) t1 = np.array([[0.1, 0.1, 0.1], [0.1, 0.1, 0.1], [0.4, 0.4, 0.4]]) cube_param_1 = {'z': z1, 'q': q1, 't': t1} z2 = np.array([[0.1, 0.1, 0.1], [0.2, 0.2, 0.2]]) q2 = np.array([[1.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0]]) t2 = np.array([[0.2, 0.2, 0.2], [0.3, 0.3, 0.3]]) cube_param_2 = {'z': z2, 'q': q2, 't': t2} index = cube_inclusion(cube_param_1, cube_param_2) print(index) assert((index == np.array([0, 0, 1])).all())
315.953846
5,958
0.466378
0
0
0
0
0
0
0
0
160
0.007791
91cc3e617eabbbaa426a11dc2dc6c376ad5cab95
740
py
Python
ituro/accounts/tests.py
kayduemre/ituro
eb5bb0655c2d85eed212d28c1d154006c57a4f03
[ "MIT" ]
9
2015-03-18T01:59:24.000Z
2022-03-09T06:36:21.000Z
ituro/accounts/tests.py
kayduemre/ituro
eb5bb0655c2d85eed212d28c1d154006c57a4f03
[ "MIT" ]
29
2015-03-18T01:59:49.000Z
2021-06-10T20:39:03.000Z
ituro/accounts/tests.py
kayduemre/ituro
eb5bb0655c2d85eed212d28c1d154006c57a4f03
[ "MIT" ]
10
2016-01-31T05:44:46.000Z
2019-10-15T06:12:27.000Z
from django.test import TestCase
from django.utils import timezone
from accounts.models import CustomUser, CustomUserManager


class UserCreateTestCase(TestCase):

    def test_create_user_correctly(self):
        "Creating users correctly"
        new_user = CustomUser.objects.create(
            email="participant@gmail.com",
            name="Participant Name",
            phone="09876543210",
            school="Some University",
            is_staff="False",
            is_active="True",
            date_joined=timezone.now())
        self.assertTrue(isinstance(new_user, CustomUser))
        self.assertEqual(new_user.get_full_name(), "Participant Name")
        self.assertEqual(new_user.get_short_name(), "Participant Name")
33.636364
71
0.671622
612
0.827027
0
0
0
0
0
0
146
0.197297
91ce005123b48bec43dd6a96411c6f2b6ba102be
2,284
py
Python
continuum/datasets/dtd.py
oleksost/continuum
682d66540bfbfa171ac73281ed2989f9338e88bf
[ "MIT" ]
282
2020-05-09T21:35:22.000Z
2022-03-20T11:29:41.000Z
continuum/datasets/dtd.py
oleksost/continuum
682d66540bfbfa171ac73281ed2989f9338e88bf
[ "MIT" ]
180
2020-05-03T09:31:48.000Z
2022-03-30T12:12:48.000Z
continuum/datasets/dtd.py
oleksost/continuum
682d66540bfbfa171ac73281ed2989f9338e88bf
[ "MIT" ]
34
2020-06-13T14:09:29.000Z
2022-03-14T14:05:07.000Z
import os
from typing import List

import numpy as np
from torchvision import datasets as torchdata

from continuum.datasets import ImageFolderDataset
from continuum import download
from continuum.tasks import TaskType


class DTD(ImageFolderDataset):
    """Describable Textures Dataset (DTD)

    Reference:
        * Describing Textures in the Wild
          M. Cimpoi and S. Maji and I. Kokkinos and S. Mohamed and and A. Vedaldi
          CVPR 2014
    """

    url = "https://www.robots.ox.ac.uk/~vgg/data/dtd/download/dtd-r1.0.1.tar.gz"

    def __init__(self, data_path: str, train: bool = True, download: bool = True, split: int = 1):
        super().__init__(data_path=data_path, train=train, download=download, data_type=TaskType.IMAGE_PATH)

        if not (1 <= int(split) <= 10):
            raise ValueError(f"Available splits are [1, ..., 10], not {split}")
        self.split = split

    def _download(self):
        archive_path = os.path.join(self.data_path, "dtd-r1.0.1.tar.gz")

        if not os.path.exists(archive_path):
            print("Downloading DTD dataset...")
            download.download(self.url, self.data_path)

        if not os.path.exists(os.path.join(self.data_path, "dtd")):
            print("Uncompressing images...")
            download.untar(archive_path)

    def get_data(self):
        x, y, t = self._format(torchdata.ImageFolder(os.path.join(self.data_path, "dtd", "images")).imgs)

        if self.train:
            index_files = [
                os.path.join(self.data_path, "dtd", "labels", f"train{str(self.split)}.txt"),
                os.path.join(self.data_path, "dtd", "labels", f"val{str(self.split)}.txt")
            ]
        else:
            index_files = [
                os.path.join(self.data_path, "dtd", "labels", f"test{str(self.split)}.txt")
            ]

        valid_paths = set()
        for index_file in index_files:
            with open(index_file) as f:
                valid_paths.update(
                    map(lambda p: os.path.join(self.data_path, "dtd", "images", p.strip()),
                        f.readlines()
                    )
                )

        valid_paths = np.array(list(valid_paths))
        indexes = np.isin(x, valid_paths)

        return x[indexes], y[indexes], None
35.6875
108
0.595009
2,061
0.902364
0
0
0
0
0
0
550
0.240806
91ce047cf63bd3235780b724cb14faa1d2a5cf51
1,732
py
Python
src/tests/testdata.py
Doometnick/MaxiMin-2048
f1d795ec07fffe1aa239c105cf522d2c3bc9b011
[ "MIT" ]
null
null
null
src/tests/testdata.py
Doometnick/MaxiMin-2048
f1d795ec07fffe1aa239c105cf522d2c3bc9b011
[ "MIT" ]
null
null
null
src/tests/testdata.py
Doometnick/MaxiMin-2048
f1d795ec07fffe1aa239c105cf522d2c3bc9b011
[ "MIT" ]
null
null
null
from board import Direction

# Tuples of input, action, expected output.
moving_tests = [
    (
        [[0,0,0,0], [4,0,0,0], [0,0,0,0], [4,0,2,0]],
        Direction.UP,
        [[8,0,2,0], [0,0,0,0], [0,0,0,0], [0,0,0,0]]
    ),
    (
        [[0,0,0,0], [4,0,0,0], [0,0,0,0], [4,0,2,0]],
        Direction.DOWN,
        [[0,0,0,0], [0,0,0,0], [0,0,0,0], [8,0,2,0]]
    ),
    (
        [[0,0,0,0], [4,0,0,0], [0,0,0,0], [4,0,2,0]],
        Direction.LEFT,
        [[0,0,0,0], [4,0,0,0], [0,0,0,0], [4,2,0,0]]
    ),
    (
        [[0,0,0,0], [4,0,0,0], [0,0,0,0], [4,0,2,0]],
        Direction.RIGHT,
        [[0,0,0,0], [0,0,0,4], [0,0,0,0], [0,0,4,2]]
    ),
    (
        [[4,4,4,4], [8,0,8,4], [32,16,0,16], [16,8,2,4]],
        Direction.RIGHT,
        [[0,0,8,8], [0,0,16,4], [0,0,32,32], [16,8,2,4]]
    ),
    (
        [[4,4,4,4], [8,0,8,4], [32,16,0,16], [16,8,2,4]],
        Direction.LEFT,
        [[8,8,0,0], [16,4,0,0], [32,32,0,0], [16,8,2,4]]
    ),
    (
        [[4,4,4,4], [8,0,8,4], [32,16,0,16], [16,8,2,4]],
        Direction.UP,
        [[4,4,4,8], [8,16,8,16], [32,8,2,4], [16,0,0,0]]
    ),
    (
        [[4,4,4,4], [8,0,8,4], [32,16,0,16], [16,8,2,4]],
        Direction.DOWN,
        [[4,0,0,0], [8,4,4,8], [32,16,8,16], [16,8,2,4]]
    )
]
18.623656
43
0.265012
0
0
0
0
0
0
0
0
43
0.024827
91ce29247f546090ea7272eb8cba1493be43a9a9
449
py
Python
test/utils/test_value.py
HansBug/pji
449d171cea0c03f4c302da886988f36f70e34ee6
[ "Apache-2.0" ]
null
null
null
test/utils/test_value.py
HansBug/pji
449d171cea0c03f4c302da886988f36f70e34ee6
[ "Apache-2.0" ]
null
null
null
test/utils/test_value.py
HansBug/pji
449d171cea0c03f4c302da886988f36f70e34ee6
[ "Apache-2.0" ]
null
null
null
import pytest

from pji.utils import ValueProxy


@pytest.mark.unittest
class TestUtilsValue:
    def test_value_proxy_init(self):
        value = ValueProxy()
        assert value.value is None

        value = ValueProxy(233)
        assert value.value == 233

    def test_value_proxy_set(self):
        value = ValueProxy()
        value.value = 233
        assert value.value == 233

        value.value = -27
        assert value.value == -27
20.409091
36
0.63029
376
0.837416
0
0
398
0.886414
0
0
0
0
91d00c668e9c3c29e1e078f088b136cfebc103ca
1,727
py
Python
intro.py
Ebenazer-2002/library-management
8c1ededc7167d2221a3947abfeec4773da39dca9
[ "Apache-2.0" ]
null
null
null
intro.py
Ebenazer-2002/library-management
8c1ededc7167d2221a3947abfeec4773da39dca9
[ "Apache-2.0" ]
null
null
null
intro.py
Ebenazer-2002/library-management
8c1ededc7167d2221a3947abfeec4773da39dca9
[ "Apache-2.0" ]
1
2021-09-22T22:08:15.000Z
2021-09-22T22:08:15.000Z
# Intro Page

from tkinter import *
from PIL import Image, ImageTk
import cv2


# ----------------------------Start Function--------------------------#
def start(event):
    label1.destroy()
    import log
    win.destroy()
    log.main()


# ------------------------Main Window---------------------------------#
def main_window():
    global win
    global label1

    win = Tk()
    win.title('Library Management System')
    win.iconbitmap("images/main_icon.ico")
    win.bind('<Key>', start)  # start function on pressing any key
    win.state('zoomed')

    # opens video
    cap = cv2.VideoCapture("images/vid.MP4")
    global n
    n = 0

    # -----------------------------------------------------------------
    # defining show function
    def show():
        global n
        # frame count
        n = n + 1
        if n <= 30:
            rest, frame = cap.read()
            cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)
            img = Image.fromarray(cv2image).resize((1600, 850))
            imgtk = ImageTk.PhotoImage(image=img)
            label1.imgtk = imgtk
            label1.configure(image=imgtk)
            win.after(10, show)
        else:
            label1.destroy()
            frm = Frame(win, bg='black')
            frm.place(relx=0, rely=0, relwidth=1, relheight=1)
            label = Label(frm, text='Press any Key to continue', bg='black', fg='white')
            label.place(relx=0.45, rely=0.5)
    # -----------------------------------------------------------------

    label1 = Label(win)
    label1.place(relx=0, rely=0, relheight=1, relwidth=1)
    show()
    win.mainloop()


# -----------------------------------------------------------------
main_window()
28.311475
72
0.466126
0
0
0
0
0
0
0
0
565
0.327157
91d02ed15b88e5d9e5da4c1c6b0a923344ec181d
16,740
py
Python
notebooks/week4_help.py
hugh9876/04-multivariate-analysis
0541962842df8844aa323c368f8a4e44999c2d7f
[ "MIT" ]
null
null
null
notebooks/week4_help.py
hugh9876/04-multivariate-analysis
0541962842df8844aa323c368f8a4e44999c2d7f
[ "MIT" ]
null
null
null
notebooks/week4_help.py
hugh9876/04-multivariate-analysis
0541962842df8844aa323c368f8a4e44999c2d7f
[ "MIT" ]
null
null
null
""" This module provides helper functions to support exercises during AM1 with outliers, robust regression and template regression in the CORE data analytics workshop series, week 4. """ import numpy as np import pandas as pd import math from collections import namedtuple def recovery_sulphur_dataframe_with_outliers(outlier_probability): """Return dataframe representing recovery as a function of sulphur. Parameters: ---------- outlier_probability: This floating point parameter should range between 0 and 1 and is probability of an observation being an outlier. Returns: ------- Pandas dataframe: A dataframe is returned with two series, the first being observed recovery, and the second being sulphur %. The data may be sampled from the true underlying relationship, plus gaussian noise, or may be an outlier value taken from a non-gaussian distribution. The proportion of outliers to non-outliers will depend on the outlier_probability parameter. """ # Check that the outlier_probability is an ordinary number. assert isinstance(outlier_probability, (float, int)) # As it's a probability, ensure that it ranges between 0 and 1. assert outlier_probability >= 0.0 assert outlier_probability <= 1.0 # If no exceptions have been thrown then we likely have a valid input. # Get 50 pairs of sulphur features and recovery labels sulphur_percent = _draw_sulphur_observations(50) recovery_percent = _observe_recovery(sulphur_percent, outlier_probability) return pd.DataFrame({'metal_recovery_percent': recovery_percent, 'feed_sulphur_percent': sulphur_percent}) def _initialise_randomstate(seed): """ Use RandomState object with seed set.""" return np.random.RandomState(seed) def _draw_sulphur_observations(count): rs = _initialise_randomstate(7) # draw "count" sulphur observations from a uniform distribution of # sulphur percentages between 0.15% and 1.35% sulphur_percent = rs.uniform(0.15, 1.35, count) return sulphur_percent def _draw_dilithium_observations(count): rs = _initialise_randomstate(8) return rs.uniform(25, 35, count) def _draw_kryptonite_observations(count): rs = _initialise_randomstate(9) return rs.uniform(20, 25, count) def _draw_unobtainium_observations(count): rs = _initialise_randomstate(10) return rs.uniform(0, 7, count) def _draw_quartz_observations(count): rs = _initialise_randomstate(11) return rs.uniform(25, 35, count) def _observe_recovery(sulphur_percent, outlier_probability): """Returns an array of metal recoveries. This method returns an array of metal recoveries given both an array of sulphur percentages and the probability of an outlier being observed. 
""" recovery_percent = np.zeros_like(sulphur_percent) is_outlier = _is_outlier(outlier_probability, len(sulphur_percent)) for index in range(0, len(recovery_percent)): if is_outlier[index]: recovery_percent [index]= _return_outlier_model_of_recovery(sulphur_percent[index]) else: recovery_percent [index]=_noise_free_model_of_recovery(sulphur_percent[index]) return recovery_percent def _noise_free_model_of_recovery(sulphur): """This method returns a metal recovery for a given sulphur %.""" return 74.81 - 6.81/sulphur def _return_outlier_model_of_recovery(sulphur): return (74.81 - 6.81/sulphur)/3 def _is_outlier(outlier_probability, how_many): """Return true/false numpy array """ rs = _initialise_randomstate(5) uniformly_distributed = rs.uniform(0, 1, how_many) is_outlier = np.zeros_like(uniformly_distributed) for index in range(0, len(is_outlier)): is_outlier[index]=uniformly_distributed[index]>(1-outlier_probability) return is_outlier def add_gaussian_noise(noise_free_input, mean, sigma): """Adds gaussian noise to vector, given mean and sigma """ bins = len(noise_free_input) noise = np.random.normal(mean, sigma, bins) return noise_free_input + noise def gaussian_fwhm_pdf(X, height, x_position, fwhm): """Returns guassian probability distribution function, given FWHM This computes a gaussian probability density function (pdf) given a Full Width at Half Maximum (FWHM) instead of standard deviation, and scales it by the height parameters. If the height is one, then the area of the guassian will also be unity, as required for a pdf, and for preserving area when used as an impulse response function in convolution operations. Note, this returns the function, it does not sample from the distribution. """ return gaussian_pdf(X, height, x_position, fwhm / (2 * math.sqrt(2 * math.log(2)))) def gaussian_pdf(X, area, x_position, standard_deviation): """Returns gaussian probability distribution function multiplied by area. This computes a gaussian with unit area and multiplies it by the area parameter. It is translated to be centered on x_position and has the width specified by standard_deviation. Unit area gaussians are used as probability distributions functions, and are also important in convolutions, as area of the convolution of two functions is the product of their areas. If it is important for the convolution to preserve area of a function when convolved with a gaussian then that gaussian needs to have unit area. Preserving area also implies conservation of energy in many physical models. It can be shown that the integral of the gaussian function is unity when the guassian's height is scaled as a function of standard_deviation as: height_scaling = 1/(standard_deviation*sqrt(2*pi)) So this function multiplies the height of the guassian by this factor and then multiplies this result by the area parameter that is passed in. If area parameter is 1, then the height of this gaussian with also be 1 for all standard deviations, otherwise the area will be set by the area parameter. The relationship between height and area, and the scaling of height by the second parameter below, will be made clearer by also studying the guassian function. 
""" return gaussian(X, area / (standard_deviation * math.sqrt(2 * math.pi)), x_position, standard_deviation) def gaussian(X, height, x_position, standard_deviation): """Return standard gaussian function This is the unnormalised gaussian function f(x)=height*exp(-(x-x_position)^2/(2*standard_deviation^2)) Parameters ---------- height: This is the maximum of the gaussian peak. This function does not normalise to constant area, the caller must do this if this is what they want. x_position: This is the x position of the centre of the gaussian. If the guassian is being used to apply the impulse response of an instrument applied to an XRD reflection, then this will be the two-theta position of the peak. standard_deviation: The standard deviation of the guassian curve. If this function is being applied in spectroscopy, optics or electrical engineering, it is common for gaussians to be defined in terms of Full Width at Half Maximum (FWHM), which is the width of the peak when the height drops to half of the peak height, specified by the height parameter. If the x-axis represents frequency, and the function height is proportional to energy or power, then this will be the gaussian's bandwidth, that is, the width between the -3db points. To convert from FWHM to standard deviation use the relationship: FWHM = 2*sqrt(2*log(2)) * standard_deviation Returns ------- double: Evaluated gaussian function. """ return height * math.e**(-(X - x_position)**2 / 2 / standard_deviation**2) class MultichannelXAxis: """Set up an X axis for isntrument This object is set up with three inputs, min_x is the minimum value on the axis. In the example I've chosen 5. The max_x value is the highest value on the x axis, and spacing is the x spacing between channels. In the example I've chosen a max_x of 90 and spacing of 0.2. The unit is two-theta degrees, and this unit (and the axis values) come from the world of x-ray diffraction (XRD). We're describing the x-axis of a low resolution XRD instrument. The object's as_vector method can return the x_axis as an array of numbers using numpy's linspace method, which we've already used for plotting and other purposes. 
""" def __init__(self, min_x, max_x, spacing): self._min = min_x self._max = max_x self._spacing = spacing self._channel_count = \ round((self.max - self.min) / self.spacing + 1) self._label = "r'$2\theta$ (degrees)" @property def min(self): """Return minimum two-theta for diffractogram x-axis.""" return self._min @property def max(self): """Return maximum two-theta for diffractogram x-axis.""" return self._max @property def spacing(self): """Return channel spacing in two-theta for diffractogram x-axis.""" return self._spacing @property def channel_count(self): """Return the count of channels in this diffractogram.""" return self._channel_count @property def label(self): """Return the x-axis label, for use with plot and report generation.""" return self._label @property def as_vector(self): """Return a numpy vector containing two-theta values for each channel.""" x_axis_vector = np.linspace(self.min, self.max, self.channel_count) return x_axis_vector def _apply_convolution_kernals(x_axis_vector, intensity, two_theta_angle, instrument_broadening_fwhm, reflection_broadening_fwhm): """Apply gaussian kernel for instrument broadening only.""" def _add_gaussian_fwhms(fwhm1, fwhm2): sigma_fwhm_conversion_constant = 2*math.sqrt(2*math.log(2)) sigma_1 = fwhm1/sigma_fwhm_conversion_constant sigma_2 = fwhm2/sigma_fwhm_conversion_constant #squares of std_dev (ie sigma^2 which is variance) are additive sigma_summed = math.sqrt(sigma_1*sigma_1 + sigma_2*sigma_2) return sigma_summed*sigma_fwhm_conversion_constant fwhm = _add_gaussian_fwhms (instrument_broadening_fwhm, reflection_broadening_fwhm) return gaussian_fwhm_pdf(x_axis_vector, intensity, two_theta_angle, fwhm) def create_templates_matrix(): """Create templates for four test pure components. This creates templates for quartz, dilithium, kryptonite and unobtainium, in that order. The templates are returned in an array where the first column is quartz, and the last is unobtainium. If you plot them, you'll see gently varying squiggly lines. """ # Create a templates matrix containing space for four templates, plus # a column of ones. x_axis = MultichannelXAxis(5, 90, 0.2) template_count = 4 templates_matrix = np.zeros((x_axis.channel_count, template_count+1)) # set 4 two-theta units of instrument broadening instrument_broadening = 4 # create a tuple for each reflection, and add it to a list. The loop # then grabs each reflection from the list and then adds it to the # template. The first value in the tuple is intensity, the second # two-theta angle and the third is how much broadening to apply. 
Reflection = namedtuple('Reflection', ('intensity', 'two_theta', 'broadening')) quartz_reflections = [] quartz_reflections.append (Reflection(intensity=10.0, two_theta=25.0, broadening=3.0)) quartz_reflections.append (Reflection(13.0, 38.0, 6.0)) quartz_reflections.append (Reflection(10.0, 43.0, 2.0)) quartz_reflections.append (Reflection(25.0, 60, 2.0)) dilithium_reflections = [] dilithium_reflections.append (Reflection(25.0, 80, 1.0)) kryptonite_reflections = [] #kryptonite_reflections.append (Reflection(intensity=12.0, two_theta=25.0, broadening=9.0)) kryptonite_reflections.append (Reflection(17.0, 12.0, 1.0)) kryptonite_reflections.append (Reflection(19.0, 43.0, 12.0)) #kryptonite_reflections.append (Reflection(4.0, 70, 2.0)) #kryptonite_reflections.append (Reflection(32.0, 74, 2.0)) unobtainium_reflections = [] #unobtainium_reflections.append (Reflection(intensity=4.0, two_theta=25.0, broadening=12.0)) unobtainium_reflections.append (Reflection(5.0, 18.0, 6.0)) unobtainium_reflections.append (Reflection(1.0, 23.0, 1.0)) unobtainium_reflections.append (Reflection(5.0, 31.0, 2.0)) unobtainium_reflections.append (Reflection(3.0, 55.0, 6.0)) unobtainium_reflections.append (Reflection(7.0, 58.0, 1.0)) #unobtainium_reflections.append (Reflection(5.0, 80, 2.0)) phases=[] # create four phases phases.append(quartz_reflections) phases.append(dilithium_reflections) phases.append(kryptonite_reflections) phases.append(unobtainium_reflections) for phase_idx in range(0, template_count): for a_reflection in phases[phase_idx]: contribution_of_this_reflection = \ _apply_convolution_kernals( x_axis.as_vector, a_reflection.intensity, a_reflection.two_theta, instrument_broadening, a_reflection.broadening) templates_matrix[:, phase_idx] += \ contribution_of_this_reflection # set the last column to be all ones templates_matrix[:, template_count] = \ np.ones(x_axis.channel_count) return templates_matrix def create_composition_dataframe(observations_count): """Create a dataframe of observations of drilling samples Returns: Pandas DataFrame with observations_count observations. The dataframe has four columns representing the amount of quartz, dilithium, kryptonite and unobtainium present. These values are drawn from uniform distributions.""" unobtainium = _draw_unobtainium_observations (observations_count) dilithium = _draw_dilithium_observations(observations_count) kryptonite = _draw_kryptonite_observations(observations_count) quartz = _draw_quartz_observations(observations_count) # Create clusters by imposing a relationship between quartz # and dilithium. 
for observation_idx in range(0, observations_count): if quartz[observation_idx] > 30: dilithium[observation_idx] = 5 if dilithium[observation_idx] > 30: quartz[observation_idx] = 5 return pd.DataFrame({'Quartz': quartz, 'Dilithium': dilithium, 'Kryptonite': kryptonite, 'Unobtainium': unobtainium}) def create_observations(compositions_dataframe, templates): """Create a new array containing synthetic observations""" observations_count = len(compositions_dataframe) channels_count = len(templates[:,0]) observations_matrix = np.zeros((channels_count, observations_count)) for observation_idx in range (0, observations_count): observations_matrix[:, observation_idx] = \ templates[:,0]*compositions_dataframe['Quartz'][observation_idx] + \ templates[:,1]*compositions_dataframe['Dilithium'][observation_idx] + \ templates[:,2]*compositions_dataframe['Kryptonite'][observation_idx] + \ templates[:,3]*compositions_dataframe['Unobtainium'][observation_idx] # add gaussian noise. If you have time, try increasing this and watch # prediction performance fall over. observations_matrix[:, observation_idx] = \ add_gaussian_noise(observations_matrix[:, observation_idx], 10, 3) return observations_matrix
41.435644
97
0.683871
1,968
0.117563
0
0
891
0.053226
0
0
8,385
0.500896
91d0d1d94cdf45c4bcbd44fd68f0bba0ecae92c7
2,671
py
Python
tests/actions/test_mutable_token_action.py
0xOmarA/RadixLib
85d75a47d4c4df4c1a319b74857ae2c513933623
[ "MIT" ]
32
2022-01-12T16:52:28.000Z
2022-03-24T18:05:47.000Z
tests/actions/test_mutable_token_action.py
0xOmarA/RadixLib
85d75a47d4c4df4c1a319b74857ae2c513933623
[ "MIT" ]
3
2022-01-12T17:01:55.000Z
2022-02-12T15:14:16.000Z
tests/actions/test_mutable_token_action.py
0xOmarA/RadixLib
85d75a47d4c4df4c1a319b74857ae2c513933623
[ "MIT" ]
1
2022-01-21T04:28:07.000Z
2022-01-21T04:28:07.000Z
from radixlib.actions import CreateTokenDefinition
from typing import Dict, Any
import unittest


class TestMutableTokenAction(unittest.TestCase):
    """ Unit tests for the CreateTokenDefinition action of mutable tokens """

    ActionDict: Dict[str, Any] = {
        "token_properties": {
            "name": "MutableTest",
            "description": "An amazing new token with great utility!",
            "icon_url": "https://www.google.com/",
            "url": "https://www.google.com/",
            "symbol": "mutable",
            "is_supply_mutable": True,
            "granularity": "1",
            "owner": {
                "address": "tdx1qspqqecwh3tgsgz92l4d4f0e4egmfe86049dj75pgq347fkkfmg84pgx9um0v"
            }
        },
        "token_supply": {
            "value": "0",
            "token_identifier": {
                "rri": "mutable_tr1q06dd0ut3qmyp4pqkvmeu2dvkwg5f7vm8yeslwvpkt9qcl5vqu"
            }
        },
        "type": "CreateTokenDefinition"
    }

    def test_from_dict(self):
        """ Tests the derivation of the mainnet wallet addresses from the public key """

        # The action loaded from the dictionary
        creation: CreateTokenDefinition = CreateTokenDefinition.from_dict(self.ActionDict)

        # Asserting that the CreateTokenDefinition object understood the content of the dictionary
        self.assertEqual(creation.name, self.ActionDict['token_properties']['name'])
        self.assertEqual(creation.description, self.ActionDict['token_properties']['description'])
        self.assertEqual(creation.icon_url, self.ActionDict['token_properties']['icon_url'])
        self.assertEqual(creation.url, self.ActionDict['token_properties']['url'])
        self.assertEqual(creation.symbol, self.ActionDict['token_properties']['symbol'])
        self.assertEqual(creation.is_supply_mutable, self.ActionDict['token_properties']['is_supply_mutable'])
        self.assertEqual(creation.granularity, int(self.ActionDict['token_properties']['granularity']))
        self.assertEqual(creation.owner.address, self.ActionDict['token_properties']['owner']['address'])
        self.assertEqual(creation.token_supply, int(self.ActionDict['token_supply']['value']))
        self.assertEqual(creation.token_rri, self.ActionDict['token_supply']['token_identifier']['rri'])
        self.assertEqual(creation.to_account, None)

    def test_to_dict(self):
        """ Tests the conversion of the token account to a dictionary """

        # The account loaded from the dictionary
        account: CreateTokenDefinition = CreateTokenDefinition.from_dict(self.ActionDict)

        self.assertEqual(account.to_dict(), self.ActionDict)
47.696429
110
0.675028
2,574
0.963684
0
0
0
0
0
0
1,110
0.415575
91d124efa1b2bf7e4e72928accf387408c43adc6
113
py
Python
src/tests/testModules/loadCfg_typeCasting/allowsCastFailKeeping/primativeTypes.py
Trimatix/carica
074be16bdf50541eb3ba92ca42d0ad901cc51bd0
[ "Apache-2.0" ]
5
2021-09-08T07:29:23.000Z
2021-11-24T00:18:22.000Z
src/tests/testModules/loadCfg_typeCasting/allowsCastFailKeeping/primativeTypes.py
Trimatix/Carica
074be16bdf50541eb3ba92ca42d0ad901cc51bd0
[ "Apache-2.0" ]
42
2021-09-08T07:31:25.000Z
2022-01-16T17:39:34.000Z
src/tests/testModules/loadCfg_typeCasting/allowsCastFailKeeping/primativeTypes.py
Trimatix/carica
074be16bdf50541eb3ba92ca42d0ad901cc51bd0
[ "Apache-2.0" ]
null
null
null
floatVar = 1.0
listVar = [3, "hello"]
dictVar = {
    "myField": "value"
}
aotVar = [dictVar, dictVar]
intVar = 1
16.142857
27
0.610619
0
0
0
0
0
0
0
0
23
0.20354
91d348a1fe2260f1d59725d0f07d7baf69518dae
22
py
Python
quacc/recipes/xtb/__init__.py
arosen93/HT-ASE
a76542e7a2bc5bf6e7382d8f1387374eb2abc713
[ "BSD-3-Clause-LBNL" ]
9
2022-02-08T08:31:30.000Z
2022-03-30T21:37:35.000Z
quacc/recipes/xtb/__init__.py
arosen93/HT-ASE
a76542e7a2bc5bf6e7382d8f1387374eb2abc713
[ "BSD-3-Clause-LBNL" ]
5
2022-02-02T21:47:59.000Z
2022-03-18T21:28:52.000Z
quacc/recipes/xtb/__init__.py
arosen93/HT-ASE
a76542e7a2bc5bf6e7382d8f1387374eb2abc713
[ "BSD-3-Clause-LBNL" ]
3
2022-02-23T12:00:57.000Z
2022-03-24T23:54:22.000Z
"""Recipes for xTB"""
11
21
0.590909
0
0
0
0
0
0
0
0
21
0.954545
91d380ce2b1e14c5b063e9056626bb2c1ea92f55
6,869
py
Python
src/python/pants/backend/native/subsystems/xcode_cli_tools.py
StephanErb/pants
a368267b6b4cf50138ba567f582409ed31bf5db9
[ "Apache-2.0" ]
null
null
null
src/python/pants/backend/native/subsystems/xcode_cli_tools.py
StephanErb/pants
a368267b6b4cf50138ba567f582409ed31bf5db9
[ "Apache-2.0" ]
null
null
null
src/python/pants/backend/native/subsystems/xcode_cli_tools.py
StephanErb/pants
a368267b6b4cf50138ba567f582409ed31bf5db9
[ "Apache-2.0" ]
null
null
null
# coding=utf-8 # Copyright 2018 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). from __future__ import absolute_import, division, print_function, unicode_literals import os from pants.backend.native.config.environment import Assembler, CCompiler, CppCompiler, Linker from pants.engine.rules import rule from pants.engine.selectors import Select from pants.subsystem.subsystem import Subsystem from pants.util.dirutil import is_readable_dir from pants.util.memo import memoized_method, memoized_property MIN_OSX_SUPPORTED_VERSION = '10.11' MIN_OSX_VERSION_ARG = '-mmacosx-version-min={}'.format(MIN_OSX_SUPPORTED_VERSION) class XCodeCLITools(Subsystem): """Subsystem to detect and provide the XCode command line developer tools. This subsystem exists to give a useful error message if the tools aren't installed, and because the install location may not be on the PATH when Pants is invoked. """ options_scope = 'xcode-cli-tools' _REQUIRED_FILES = { 'bin': [ 'as', 'cc', 'c++', 'clang', 'clang++', 'ld', 'lipo', ], # Any of the entries that would be here are not directly below the 'include' or 'lib' dirs, and # we haven't yet encountered an invalid XCode/CLI tools installation which has the include dirs, # but incorrect files. These would need to be updated if such an issue arises. 'include': [], 'lib': [], } INSTALL_PREFIXES_DEFAULT = [ # Prefer files from this installation directory, if available. This doesn't appear to be # populated with e.g. header files on travis. '/usr', # Populated by the XCode CLI tools. '/Library/Developer/CommandLineTools/SDKs/MacOSX.sdk/usr', # Populated by the XCode app. These are derived from using the -v or -H switches invoking the # osx clang compiler. '/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr', '/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/lib/clang/9.1.0', '/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr', ] class XCodeToolsUnavailable(Exception): """Thrown if the XCode CLI tools could not be located.""" class XCodeToolsInvalid(Exception): """Thrown if a method within this subsystem requests a nonexistent tool.""" @classmethod def register_options(cls, register): super(XCodeCLITools, cls).register_options(register) register('--install-prefixes', type=list, default=cls.INSTALL_PREFIXES_DEFAULT, fingerprint=True, advanced=True, help='Locations to search for resources from the XCode CLI tools, including a ' 'compiler, linker, header files, and some libraries. ' 'Under this directory should be some selection of these subdirectories: {}.' .format(cls._REQUIRED_FILES.keys())) @memoized_property def _all_existing_install_prefixes(self): return [pfx for pfx in self.get_options().install_prefixes if is_readable_dir(pfx)] # NB: We use @memoized_method in this file for methods which may raise. 
@memoized_method def _get_existing_subdirs(self, subdir_name): maybe_subdirs = [os.path.join(pfx, subdir_name) for pfx in self._all_existing_install_prefixes] existing_dirs = [existing_dir for existing_dir in maybe_subdirs if is_readable_dir(existing_dir)] required_files_for_dir = self._REQUIRED_FILES.get(subdir_name) if required_files_for_dir: for fname in required_files_for_dir: found = False for subdir in existing_dirs: full_path = os.path.join(subdir, fname) if os.path.isfile(full_path): found = True continue if not found: raise self.XCodeToolsUnavailable( "File '{fname}' in subdirectory '{subdir_name}' does not exist at any of the specified " "prefixes. This file is required to build native code on this platform. You may need " "to install the XCode command line developer tools from the Mac App Store.\n\n" "If the XCode tools are installed and you are still seeing this message, please file " "an issue at https://github.com/pantsbuild/pants/issues/new describing your " "OSX environment and which file could not be found.\n" "The existing install prefixes were: {pfxs}. These can be extended with " "--{scope}-install-prefixes." .format(fname=fname, subdir_name=subdir_name, pfxs=self._all_existing_install_prefixes, scope=self.get_options_scope_equivalent_flag_component())) return existing_dirs @memoized_method def path_entries(self): return self._get_existing_subdirs('bin') @memoized_method def lib_dirs(self): return self._get_existing_subdirs('lib') @memoized_method def include_dirs(self): base_inc_dirs = self._get_existing_subdirs('include') all_inc_dirs = base_inc_dirs for d in base_inc_dirs: # TODO: figure out what this directory does and why it's not already found by this compiler. secure_inc_dir = os.path.join(d, 'secure') if is_readable_dir(secure_inc_dir): all_inc_dirs.append(secure_inc_dir) return all_inc_dirs @memoized_method def assembler(self): return Assembler( path_entries=self.path_entries(), exe_filename='as', library_dirs=[]) @memoized_method def linker(self): return Linker( path_entries=self.path_entries(), exe_filename='ld', library_dirs=[], linking_library_dirs=[], extra_args=[MIN_OSX_VERSION_ARG]) @memoized_method def c_compiler(self): return CCompiler( path_entries=self.path_entries(), exe_filename='clang', library_dirs=self.lib_dirs(), include_dirs=self.include_dirs(), extra_args=[MIN_OSX_VERSION_ARG]) @memoized_method def cpp_compiler(self): return CppCompiler( path_entries=self.path_entries(), exe_filename='clang++', library_dirs=self.lib_dirs(), include_dirs=self.include_dirs(), extra_args=[MIN_OSX_VERSION_ARG]) @rule(Assembler, [Select(XCodeCLITools)]) def get_assembler(xcode_cli_tools): return xcode_cli_tools.assembler() @rule(Linker, [Select(XCodeCLITools)]) def get_ld(xcode_cli_tools): return xcode_cli_tools.linker() @rule(CCompiler, [Select(XCodeCLITools)]) def get_clang(xcode_cli_tools): return xcode_cli_tools.c_compiler() @rule(CppCompiler, [Select(XCodeCLITools)]) def get_clang_plusplus(xcode_cli_tools): return xcode_cli_tools.cpp_compiler() def create_xcode_cli_tools_rules(): return [ get_assembler, get_ld, get_clang, get_clang_plusplus, ]
34.345
105
0.7084
5,587
0.813364
0
0
4,198
0.611152
0
0
2,540
0.369777
91d43878e8db19b2ac8a4228dcc70b222e3033cf
11,998
py
Python
improver_tests/regrid/test_RegridWithLandSeaMask.py
yzhaobom/improver
47f9e103c63f890bfbb24d5e08d9d01d041514f7
[ "BSD-3-Clause" ]
77
2017-04-26T07:47:40.000Z
2022-03-31T09:40:49.000Z
improver_tests/regrid/test_RegridWithLandSeaMask.py
yzhaobom/improver
47f9e103c63f890bfbb24d5e08d9d01d041514f7
[ "BSD-3-Clause" ]
1,440
2017-03-29T10:04:15.000Z
2022-03-28T10:11:29.000Z
improver_tests/regrid/test_RegridWithLandSeaMask.py
MoseleyS/improver
ca028e3a1c842e3ff00b188c8ea6eaedd0a07149
[ "BSD-3-Clause" ]
72
2017-03-17T16:53:45.000Z
2022-02-16T09:41:37.000Z
# -*- coding: utf-8 -*- # ----------------------------------------------------------------------------- # (C) British Crown Copyright 2017-2021 Met Office. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. """Unit tests for the RegridWithLandSeaMask class""" # set up a special data set and corresponding land-sea mask info # set up target grid and its land-sea mask info # it is designed to cover different scenarios for regridding with land-sea # the regridding reference results are manually checked for different methods # not using "set_up_variable_cube" because of different spacing at lat/lon import numpy as np from improver.regrid.bilinear import basic_indexes from improver.regrid.grid import calculate_input_grid_spacing, latlon_from_cube from improver.regrid.landsea import RegridLandSea from improver.synthetic_data.set_up_test_cubes import set_up_variable_cube def modify_cube_coordinate_value(cube, coord_x, coord_y): """modify x(longitude) & y(latitude) andcoordinates for a cube""" cube.coord(axis="x").points = coord_x cube.coord(axis="x").bounds = None cube.coord(axis="x").guess_bounds() cube.coord(axis="y").points = coord_y cube.coord(axis="y").bounds = None cube.coord(axis="y").guess_bounds() return cube def define_source_target_grid_data(): """ define cube_in, cube_in_mask,cube_out_mask using assumed data """ # source (input) grid in_lats = np.linspace(0, 15, 4) in_lons = np.linspace(0, 40, 5) # target (output) grid out_lats = np.linspace(0, 14, 8) out_lons = np.linspace(5, 35, 11) # assume a set of nwp data data = np.arange(20).reshape(4, 5).astype(np.float32) # input grid mask info in_mask = np.empty((4, 5), dtype=np.int) in_mask[:, :] = 1 in_mask[0, 2] = 0 in_mask[2, 2:4] = 0 in_mask[3, 2:4] = 0 # output grid mask info out_mask = np.empty((8, 11), dtype=np.int) out_mask[:, :] = 1 out_mask[0, 4:7] = 0 out_mask[1, 5] = 0 out_mask[5:9, 4:10] = 0 out_mask[6, 6] = 1 out_mask[7, 6] = 1 out_mask[1, 0] = 0 # create cube with default spacing cube_in = set_up_variable_cube(data, "air_temperature", "Celsius") cube_in_mask = set_up_variable_cube(in_mask, "Land_Binary_Mask", 
"1") cube_out_mask = set_up_variable_cube(out_mask, "Land_Binary_Mask", "1") # modify cube coordinates to the designed value cube_in = modify_cube_coordinate_value(cube_in, in_lons, in_lats) cube_in_mask = modify_cube_coordinate_value(cube_in_mask, in_lons, in_lats) cube_out_mask = modify_cube_coordinate_value(cube_out_mask, out_lons, out_lats) return cube_in, cube_out_mask, cube_in_mask def define_source_target_grid_data_same_domain(): """ define cube_in, cube_in_mask,cube_out_mask, assume the same domain """ # source (input) grid in_lats = np.linspace(0, 15, 4) in_lons = np.linspace(0, 40, 5) # target (output) grid out_lats = np.linspace(0, 15, 7) out_lons = np.linspace(5, 40, 9) # assume a set of nwp data data = np.arange(20).reshape(4, 5).astype(np.float32) # input grid mask info in_mask = np.empty((4, 5), dtype=np.int) in_mask[:, :] = 1 in_mask[0, 2] = 0 in_mask[2, 2:4] = 0 in_mask[3, 2:4] = 0 # output grid mask info out_mask = np.empty((7, 9), dtype=np.int) out_mask[:, :] = 1 out_mask[0, 3:6] = 0 out_mask[1, 4] = 0 out_mask[4:9, 4:8] = 0 out_mask[6, 6] = 1 out_mask[1, 0] = 0 # create cube with default spacing cube_in = set_up_variable_cube(data, "air_temperature", "Celsius") cube_in_mask = set_up_variable_cube(in_mask, "Land_Binary_Mask", "1") cube_out_mask = set_up_variable_cube(out_mask, "Land_Binary_Mask", "1") # modify cube coordinates to the designed value cube_in = modify_cube_coordinate_value(cube_in, in_lons, in_lats) cube_in_mask = modify_cube_coordinate_value(cube_in_mask, in_lons, in_lats) cube_out_mask = modify_cube_coordinate_value(cube_out_mask, out_lons, out_lats) return cube_in, cube_out_mask, cube_in_mask def test_basic_indexes(): """Test basic_indexes for identical source and target domain case """ cube_in, cube_out_mask, _ = define_source_target_grid_data_same_domain() in_latlons = latlon_from_cube(cube_in) out_latlons = latlon_from_cube(cube_out_mask) in_lons_size = cube_in.coord(axis="x").shape[0] lat_spacing, lon_spacing = calculate_input_grid_spacing(cube_in) indexes = basic_indexes( out_latlons, in_latlons, in_lons_size, lat_spacing, lon_spacing ) test_results = indexes[58:63, :] expected_results = np.array( [ [12, 17, 18, 13], [12, 17, 18, 13], [13, 18, 19, 14], [13, 18, 19, 14], [13, 18, 19, 14], ] ) np.testing.assert_array_equal(test_results, expected_results) def test_regrid_nearest_2(): """Test nearest neighbour regridding option 'nearest-2'""" cube_in, cube_out_mask, _ = define_source_target_grid_data() regrid_nearest = RegridLandSea(regrid_mode="nearest-2",)(cube_in, cube_out_mask) expected_results = np.array( [ [0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3], [0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3], [5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 8], [5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 8], [10, 11, 11, 11, 12, 12, 12, 13, 13, 13, 13], [10, 11, 11, 11, 12, 12, 12, 13, 13, 13, 13], [10, 11, 11, 11, 12, 12, 12, 13, 13, 13, 13], [15, 16, 16, 16, 17, 17, 17, 18, 18, 18, 18], ] ) np.testing.assert_allclose(regrid_nearest.data, expected_results, atol=1e-3) def test_regrid_bilinear_2(): """Test bilinear regridding option 'bilinear-2'""" cube_in, cube_out_mask, _ = define_source_target_grid_data() regrid_bilinear = RegridLandSea(regrid_mode="bilinear-2",)(cube_in, cube_out_mask) expected_results = np.array( [ [0.5, 0.8, 1.1, 1.4, 1.7, 2.0, 2.3, 2.6, 2.9, 3.2, 3.5], [2.5, 2.8, 3.1, 3.4, 3.7, 4.0, 4.3, 4.6, 4.9, 5.2, 5.5], [4.5, 4.8, 5.1, 5.4, 5.7, 6.0, 6.3, 6.6, 6.9, 7.2, 7.5], [6.5, 6.8, 7.1, 7.4, 7.7, 8.0, 8.3, 8.6, 8.9, 9.2, 9.5], [8.5, 8.8, 9.1, 9.4, 9.7, 10.0, 10.3, 10.6, 10.9, 11.2, 11.5], [10.5, 
10.8, 11.1, 11.4, 11.7, 12.0, 12.3, 12.6, 12.9, 13.2, 13.5], [12.5, 12.8, 13.1, 13.4, 13.7, 14.0, 14.3, 14.6, 14.9, 15.2, 15.5], [14.5, 14.8, 15.1, 15.4, 15.7, 16.0, 16.3, 16.6, 16.9, 17.2, 17.5], ] ) np.testing.assert_allclose(regrid_bilinear.data, expected_results, atol=1e-3) def test_regrid_nearest_with_mask_2(): """Test nearest-with-mask-2 regridding""" cube_in, cube_out_mask, cube_in_mask = define_source_target_grid_data() regrid_nearest_with_mask = RegridLandSea( regrid_mode="nearest-with-mask-2", landmask=cube_in_mask, landmask_vicinity=250000000, )(cube_in, cube_out_mask) expected_results = np.array( [ [0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3], [0, 1, 1, 1, 7, 2, 7, 3, 3, 3, 3], [5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 8], [5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9], [10, 11, 11, 11, 7, 7, 7, 8, 8, 8, 14], [10, 11, 11, 11, 12, 12, 12, 13, 13, 13, 14], [10, 11, 11, 11, 12, 12, 7, 13, 13, 13, 14], [15, 16, 16, 16, 17, 17, 7, 18, 18, 18, 19], ] ) np.testing.assert_allclose( regrid_nearest_with_mask.data, expected_results, atol=1e-3 ) # consider constant field cube_in.data = np.repeat(1.0, 20).reshape(4, 5).astype(np.float32) regrid_nearest_with_mask = RegridLandSea( regrid_mode="nearest-with-mask-2", landmask=cube_in_mask, landmask_vicinity=250000000, )(cube_in, cube_out_mask) expected_results = np.repeat(1.0, 88).reshape(8, 11).astype(np.float32) np.testing.assert_allclose( regrid_nearest_with_mask.data, expected_results, atol=1e-3 ) def test_regrid_bilinear_with_mask_2(): """Test bilinear-with-mask-2 regridding """ cube_in, cube_out_mask, cube_in_mask = define_source_target_grid_data() regrid_bilinear_with_mask = RegridLandSea( regrid_mode="bilinear-with-mask-2", landmask=cube_in_mask, landmask_vicinity=250000000, )(cube_in, cube_out_mask) expected_results = np.array( [ [0.5, 0.8, 1.40096, 3.2916, 2.0, 2.0, 2.0, 4.94333, 3.25586, 3.2, 3.5], [2.5, 2.8, 3.1, 3.4, 5.48911, 2.76267, 6.32926, 4.6, 4.9, 5.2, 5.5], [4.5, 4.8, 5.1, 5.4, 5.7, 7.0154, 6.3, 6.6, 6.9, 7.2, 7.5], [6.5, 6.8, 7.1, 7.4, 7.7, 7.0, 7.19033, 7.6681, 7.6618, 9.2, 9.5], [ 8.5, 8.8, 9.1, 9.4, 8.10633, 7.0, 7.0, 7.62915, 7.21672, 9.11434, 10.52363, ], [ 10.5, 10.8, 11.00012, 11.01183, 13.15439, 12.0, 12.3, 12.6, 12.9, 13.71286, 15.74504, ], [ 12.5, 12.8, 12.23411, 13.25881, 14.14155, 14.0, 8.07328, 14.6, 14.9, 14.96332, 16.3334, ], [ 14.5, 14.8, 15.0997, 14.22659, 15.50905, 16.0, 9.8733, 16.6, 16.9, 16.91114, 17.03773, ], ] ) np.testing.assert_allclose( regrid_bilinear_with_mask.data, expected_results, atol=1e-3 ) # consider constant field cube_in.data = np.repeat(1.0, 20).reshape(4, 5).astype(np.float32) regrid_bilinear_with_mask = RegridLandSea( regrid_mode="bilinear-with-mask-2", landmask=cube_in_mask, landmask_vicinity=250000000, )(cube_in, cube_out_mask) expected_results = np.repeat(1.0, 88).reshape(8, 11).astype(np.float32) np.testing.assert_allclose( regrid_bilinear_with_mask.data, expected_results, atol=1e-3 )
35.602374
86
0.596516
0
0
0
0
0
0
0
0
3,196
0.266378
91d4aad729e6a3ae80ef7ec7692d7daf662bb479
1,127
py
Python
setup.py
garnaat/details
07f2fc7f27b29a6ddcda918abf6ae0882450319e
[ "Apache-2.0" ]
27
2015-03-01T10:54:32.000Z
2021-09-08T14:52:30.000Z
setup.py
garnaat/details
07f2fc7f27b29a6ddcda918abf6ae0882450319e
[ "Apache-2.0" ]
3
2015-01-29T08:26:13.000Z
2017-02-14T09:35:06.000Z
setup.py
garnaat/details
07f2fc7f27b29a6ddcda918abf6ae0882450319e
[ "Apache-2.0" ]
7
2015-03-26T13:53:34.000Z
2017-05-23T20:58:28.000Z
#!/usr/bin/env python

from setuptools import setup, find_packages

import os

requires = [
]

setup(
    name='details',
    version=open(os.path.join('details', '_version')).read(),
    description='Tools for processing AWS detailed billing reports',
    long_description=open('README.md').read(),
    author='Mitch Garnaat',
    author_email='mitch@scopely.com',
    url='https://github.com/scopely-devops/details',
    packages=find_packages(exclude=['tests*']),
    package_dir={'details': 'details'},
    install_requires=requires,
    license=open("LICENSE").read(),
    classifiers=(
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'Natural Language :: English',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4'
    ),
)
30.459459
68
0.632653
0
0
0
0
0
0
0
0
639
0.566992
91d5346576f73b2550ed5c3a87e027cb58449870
4,686
py
Python
beam_telescope_analysis/testing/test_kalman.py
YannickDieter/beam_telescope_analysis
0c678ad991a9ef42178b2eeaf58059d387362f2a
[ "MIT" ]
3
2019-03-14T09:28:43.000Z
2020-02-24T13:04:12.000Z
beam_telescope_analysis/testing/test_kalman.py
YannickDieter/beam_telescope_analysis
0c678ad991a9ef42178b2eeaf58059d387362f2a
[ "MIT" ]
14
2019-05-09T10:01:06.000Z
2021-05-20T12:52:46.000Z
beam_telescope_analysis/testing/test_kalman.py
YannickDieter/beam_telescope_analysis
0c678ad991a9ef42178b2eeaf58059d387362f2a
[ "MIT" ]
1
2019-09-07T12:06:35.000Z
2019-09-07T12:06:35.000Z
''' Script to check the correctness of the analysis. The analysis is done on raw data and all results are compared to a recorded analysis. ''' import os import unittest import numpy as np from beam_telescope_analysis import track_analysis from beam_telescope_analysis.tools import test_tools class TestTrackAnalysis(unittest.TestCase): @classmethod def setUpClass(cls): # virtual X server for plots under headless LINUX travis testing is needed if os.getenv('TRAVIS', False) and os.getenv('TRAVIS_OS_NAME', False) == 'linux': from xvfbwrapper import Xvfb # virtual X server for plots under headless LINUX travis testing is needed cls.vdisplay = Xvfb() cls.vdisplay.start() @classmethod def tearDownClass(cls): # Remove created files pass # os.remove(os.path.join(cls.output_folder, 'Tracks_merged.pdf')) def test_kalman(self): # pixel size of sensor pixel_size = np.array([(18.5, 18.5), (18.5, 18.5), (18.5, 18.5), (18.5, 18.5), (18.5, 18.5), (18.5, 18.5), (250., 50.)]) pixel_resolution = pixel_size / np.sqrt(12) material_budget = np.array([100., 100., 100., 100., 100., 100., 250.]) / np.array([125390., 125390., 125390., 125390., 125390., 125390., 93700.]) prealignment = {'z': [0., 29900., 60300., 82100., 118700., 160700., 197800.]} kwargs = {'track_hits': np.array([[[-1229.22372954, 2828.19616302, 0., pixel_resolution[0][0], pixel_resolution[0][1], 0.], [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan], # [-1254.51224282, 2827.4291421, 29900.], [-1285.6117892, 2822.34536687, 60300., pixel_resolution[2][0], pixel_resolution[2][1], 0.], [-1311.31083616, 2823.56121414, 82100., pixel_resolution[3][0], pixel_resolution[3][1], 0.], [-1335.8529645, 2828.43359043, 118700., pixel_resolution[4][0], pixel_resolution[4][1], 0.], [-1357.81872222, 2840.86947964, 160700., pixel_resolution[5][0], pixel_resolution[5][1], 0.], [-1396.35698339, 2843.76799577, 197800., pixel_resolution[6][0], pixel_resolution[6][1], 0.]]]), 'dut_fit_selection': 61, 'z_positions': [[0., 29900, 60300, 82100, 118700, 160700, 197800]], 'alignment': [prealignment], 'use_prealignment': True, 'pixel_size': pixel_size, 'n_pixels': ((576, 1152), (576, 1152), (576, 1152), (576, 1152), (576, 1152), (576, 1152), (80, 336)), 'beam_energy': 2500., 'material_budget': material_budget, 'add_scattering_plane': False} # expected result array: (state estimates, chi, x error, y errors) result = [[[-1.23045812e+03, 2.82684464e+03, 0.00000000e+00, -9.54188957e-04, -5.78722777e-05, 9.99999543e-01], [-1.25900270e+03, 2.82511339e+03, 2.99000000e+04, -9.54667558e-04, -5.79013065e-05, 9.99999543e-01], [-1.28705254e+03, 2.82443254e+03, 6.03000000e+04, -9.22691847e-04, -2.23966180e-05, 9.99999574e-01], [-1.30575083e+03, 2.82550588e+03, 8.21000000e+04, -8.57719095e-04, 4.92360053e-05, 9.99999631e-01], [-1.33339390e+03, 2.83014572e+03, 1.18700000e+05, -7.55274948e-04, 1.26771487e-04, 9.99999707e-01], [-1.36192826e+03, 2.83782855e+03, 1.60700000e+05, -6.79389377e-04, 1.82924497e-04, 9.99999752e-01], [-1.38713361e+03, 2.84461505e+03, 1.97800000e+05, -6.79389377e-04, 1.82924497e-04, 9.99999752e-01]], [79.59176738400244], [3.62429044, 3.2884327, 3.21655702, 3.1539946, 3.23671172, 4.66501707, 8.62909928], [3.62429044, 3.2884327, 3.21655702, 3.1539946, 3.23671172, 4.66501707, 8.62909928]] for i in range(4): # test each return (state estimates, chi, x error, y errors) seperatly test = test_tools._call_function_with_args(function=track_analysis._fit_tracks_kalman_loop, **kwargs)[0][i] data_equal = np.allclose(test, result[i]) self.assertTrue(data_equal) if __name__ == 
'__main__': import logging logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(name)s - [%(levelname)-8s] (%(threadName)-10s) %(message)s") suite = unittest.TestLoader().loadTestsFromTestCase(TestTrackAnalysis) unittest.TextTestRunner(verbosity=2).run(suite)
62.48
153
0.588135
4,084
0.871532
0
0
544
0.11609
0
0
843
0.179898
91d54e85fa9e683a691056ba3de4c8a49958c847
3,723
py
Python
test/test_workflow.py
asnramos/asv
8a0979b532d06c7c352826e2acf0dd872922260e
[ "BSD-3-Clause" ]
null
null
null
test/test_workflow.py
asnramos/asv
8a0979b532d06c7c352826e2acf0dd872922260e
[ "BSD-3-Clause" ]
null
null
null
test/test_workflow.py
asnramos/asv
8a0979b532d06c7c352826e2acf0dd872922260e
[ "BSD-3-Clause" ]
null
null
null
# Licensed under a 3-clause BSD style license - see LICENSE.rst import glob import os import sys import json from os.path import join, isfile import pytest from asv import util from . import tools def test_run_publish(capfd, basic_conf_2): tmpdir, local, conf, machine_file = basic_conf_2 tmpdir = util.long_path(tmpdir) conf.matrix = { "req": dict(conf.matrix), "env": {"SOME_TEST_VAR": ["1"]}, } # Tests a typical complete run/publish workflow ret = tools.run_asv_with_conf(conf, 'run', "master", '--steps=2', '--quick', '--show-stderr', '--profile', '-a', 'warmup_time=0', '--durations=5', _machine_file=machine_file) assert ret is None text, err = capfd.readouterr() assert len(os.listdir(join(tmpdir, 'results_workflow', 'orangutan'))) == 5 assert len(os.listdir(join(tmpdir, 'results_workflow'))) == 2 assert 'asv: benchmark timed out (timeout 0.1s)' in text assert 'total duration' in text tools.run_asv_with_conf(conf, 'publish') assert isfile(join(tmpdir, 'html', 'index.html')) assert isfile(join(tmpdir, 'html', 'index.json')) assert isfile(join(tmpdir, 'html', 'asv.js')) assert isfile(join(tmpdir, 'html', 'asv.css')) # Check parameterized test json data format filename = glob.glob(join(tmpdir, 'html', 'graphs', 'arch-x86_64', 'asv_dummy_test_package_1', 'asv_dummy_test_package_2-' + tools.DUMMY2_VERSIONS[1], 'branch-master', 'cpu-Blazingly fast', 'env-SOME_TEST_VAR-1', 'machine-orangutan', 'os-GNU_Linux', 'python-*', 'ram-128GB', 'params_examples.time_skip.json'))[0] with open(filename, 'r') as fp: data = json.load(fp) assert len(data) == 2 assert isinstance(data[0][0], int) # revision assert len(data[0][1]) == 3 assert len(data[1][1]) == 3 assert isinstance(data[0][1][0], float) assert isinstance(data[0][1][1], float) assert data[0][1][2] is None # Check that the skip options work capfd.readouterr() tools.run_asv_with_conf(conf, 'run', "master", '--steps=2', '--quick', '--skip-existing-successful', '--bench=time_secondary.track_value', '--skip-existing-failed', _machine_file=join(tmpdir, 'asv-machine.json')) tools.run_asv_with_conf(conf, 'run', "master", '--steps=2', '--bench=time_secondary.track_value', '--quick', '--skip-existing-commits', _machine_file=join(tmpdir, 'asv-machine.json')) text, err = capfd.readouterr() assert 'Running benchmarks.' not in text # Check EXISTING and --environment work python = "{0[0]}.{0[1]}".format(sys.version_info) env_type = tools.get_default_environment_type(conf, python) env_spec = ("-E", env_type + ":" + python) tools.run_asv_with_conf(conf, 'run', "EXISTING", '--quick', '--bench=time_secondary.track_value', *env_spec, _machine_file=machine_file) # Remove the benchmarks.json file and check publish fails os.remove(join(tmpdir, "results_workflow", "benchmarks.json")) with pytest.raises(util.UserError): tools.run_asv_with_conf(conf, 'publish')
39.189474
85
0.552243
0
0
0
0
0
0
0
0
1,215
0.32635
91d673a77f43b00da4523b7edc231f25e64c3f72
5,750
py
Python
trainer.py
Metro1998/P-DQN
6ab2ac6991d2685f10887c16f854ebba6144b306
[ "MIT" ]
5
2021-12-13T15:25:07.000Z
2022-03-29T12:42:37.000Z
trainer.py
Metro1998/P-DQN
6ab2ac6991d2685f10887c16f854ebba6144b306
[ "MIT" ]
null
null
null
trainer.py
Metro1998/P-DQN
6ab2ac6991d2685f10887c16f854ebba6144b306
[ "MIT" ]
null
null
null
# @author Metro # @time 2021/11/24 import os.path import gym from agents.pdqn import P_DQN from utilities.memory import ReplayBuffer from utilities.utilities import * from utilities.route_generator import generate_routefile class Train_and_Evaluate(object): def __init__(self, config): # Environment generate_routefile(seed=config.seed, demand=config.demand) self.env = gym.make(config.environment) # Agent self.agent = P_DQN(config, self.env) # Memory self.replay_memory_size = config.hyperparameters['replay_memory_size'] self.batch_size = config.hyperparameters['batch_size'] self.updates_per_step = config.hyperparameters['updates_per_step'] self.memory = ReplayBuffer(self.replay_memory_size) self.total_steps = 0 self.total_updates = 0 self.save_freq = config.save_freq self.file_to_save = config.file_to_save self.maximum_episodes = config.hyperparameters['maximum_episodes'] self.train = config.train self.evaluate = config.evaluate self.evaluate_internal = config.evaluate_internal self.agent_to_color_dictionary = config.agent_to_color_dictionary self.standard_deviation_results = config.standard_deviation_results self.colors = ['red', 'blue', 'green', 'orange', 'yellow', 'purple'] self.color_idx = 0 self.rolling_score_window = config.rolling_score_window self.runs_per_agent = config.runs_per_agent self.agent_name = config.agent_name self.ceil = config.ceil # Training Loop def train_agent(self): """ :return: """ rolling_scores_for_diff_runs = [] file_to_save_actor = os.path.join(self.file_to_save, 'actor/') file_to_save_actor_param = os.path.join(self.file_to_save, 'actor_param/') file_to_save_runs = os.path.join(self.file_to_save, 'runs_1/') file_to_save_rolling_scores = os.path.join(self.file_to_save, 'rolling_scores/') os.makedirs(file_to_save_actor, exist_ok=True) os.makedirs(file_to_save_actor_param, exist_ok=True) os.makedirs(file_to_save_runs, exist_ok=True) os.makedirs(file_to_save_rolling_scores, exist_ok=True) for run in range(self.runs_per_agent): game_full_episodes_scores = [] game_full_episodes_rolling_scores = [] for i_episode in range(self.maximum_episodes): if self.save_freq > 0 and i_episode % self.save_freq == 0: actor_path = os.path.join(file_to_save_actor, 'episode{}'.format(i_episode)) actor_param_path = os.path.join(file_to_save_actor_param, 'episode{}'.format(i_episode)) self.agent.save_models(actor_path, actor_param_path) episode_score = [] episode_steps = 0 done = 0 state = self.env.reset() # n_steps while not done: if len(self.memory) > self.batch_size: action, action_params = self.agent.select_action(state, self.train) if self.ceil: action_params = np.ceil(action_params).squeeze(0) action_for_env = [action, int(action_params[action])] for i in range(self.updates_per_step): self.agent.update(self.memory) self.total_updates += 1 else: action_params = np.random.randint(low=10, high=31, size=8) action = np.random.randint(7, size=1)[0] action_for_env = [action, action_params[action]] next_state, reward, done, info = self.env.step(action_for_env) print(reward) episode_steps += 1 episode_score.append(info) self.total_steps += 1 self.memory.push(state, action, action_params, reward, next_state, done) state = next_state episode_score_so_far = np.mean(episode_score) game_full_episodes_scores.append(episode_score_so_far) game_full_episodes_rolling_scores.append( np.mean(game_full_episodes_scores[-1 * self.rolling_score_window:])) print("Episode: {}, total steps:{}, episode steps:{}, scores:{}".format( i_episode, self.total_steps, episode_steps, episode_score_so_far)) 
self.env.close() file_path_for_pic = os.path.join(file_to_save_runs, 'episode{}_run{}.jpg'.format(i_episode, run)) visualize_results_per_run(agent_results=game_full_episodes_scores, agent_name=self.agent_name, save_freq=1, file_path_for_pic=file_path_for_pic) rolling_scores_for_diff_runs.append(game_full_episodes_rolling_scores) file_path_for_pic = os.path.join(file_to_save_rolling_scores, 'rolling_scores.jpg') visualize_overall_agent_results(agent_results=rolling_scores_for_diff_runs, agent_name=self.agent_name, show_mean_and_std_range=True, agent_to_color_dictionary=self.agent_to_color_dictionary, standard_deviation_results=1, file_path_for_pic=file_path_for_pic )
42.592593
113
0.598609
5,522
0.960348
0
0
0
0
0
0
397
0.069043
91d7cad5b4e7e6fe780b392c22b198941b8e6380
10,434
py
Python
server/splunkdj/views.py
splunk/splunk-webframework
a4179558616f5f4fcbfa2b54e9179f30e6395264
[ "Apache-2.0" ]
31
2015-01-20T12:49:17.000Z
2022-02-21T05:21:44.000Z
server/splunkdj/views.py
splunk/splunk-webframework
a4179558616f5f4fcbfa2b54e9179f30e6395264
[ "Apache-2.0" ]
2
2015-07-08T19:40:41.000Z
2018-04-26T21:34:35.000Z
server/splunkdj/views.py
splunk/splunk-webframework
a4179558616f5f4fcbfa2b54e9179f30e6395264
[ "Apache-2.0" ]
8
2015-02-26T13:19:45.000Z
2022-03-27T08:34:20.000Z
import sys import pprint import json import datetime import uuid import urllib import types import traceback from django.core.urlresolvers import reverse, resolve from django.http import HttpResponseRedirect, Http404, HttpResponseServerError, HttpResponseNotFound from django.conf import settings from django.contrib.auth.decorators import login_required from django.views.decorators.cache import never_cache from django.views.debug import ExceptionReporter, get_safe_settings from django.template import TemplateDoesNotExist, Context from django.template.loader import render_to_string from django.utils.encoding import force_bytes from django.shortcuts import render from splunkdj.decorators.render import render_to from splunkdj.utility import make_splunkweb_url from urlparse import urlparse import logging logger = logging.getLogger('spl.django.service') error_logger = logging.getLogger('spl.django.request_error') def format(value): """ Format values appropriately for json.dumps: - Basic types will remain the same - Unicode will be converted to str - Everything else will be formatted using pprint """ if value is None: return value if isinstance(value, (int, long, str, float, list, dict, tuple, bool, unicode)): return value return str(pprint.pformat(value)) def get_exception_info(request): # We use Django's debug reporter, even though we are doing our own template. # This is because it has a great way of collecting all the useful info we # need, so no reason not to leverage it exc_info = sys.exc_info() reporter = ExceptionReporter(request, *exc_info) ctx = reporter.get_traceback_data() # This is a refactor of what the technical_500_template contains, just # doing the logic in Python rather than in a template. We collect all this # information so that we can log it. 
exception_type = ctx['exception_type'] if 'exception_type' in ctx else "No exception supplied" exception_value = ctx['exception_value'] if 'exception_value' in ctx else "No exception supplied" django_version = ctx["django_version_info"] python_executable = ctx['sys_executable'] python_version = ctx['sys_version_info'] python_path = ctx['sys_path'] server_time = str(ctx['server_time']) unicode_hint = None if 'unicode_hint' in ctx: unicdoe_hint = ctx['unicode_hint'] last_frame = None if 'lastframe' in ctx: frame_info = ctx['lastframe'] last_frame = "%s in %s, line %s" % (frame_info['filename'], frame_info['function'], frame_info['lineno']) loaders = [] if 'template_does_not_exist' in ctx and 'loader_debug_info' in ctx and ctx['loader_debug_info']: for loader in ctx['loader_debug_info']: loader_info = {"name": loader['loader'], "templates": []} for tmpl in loader['templates']: loader_info['templates'].append({"file": tmpl['name'], "exists": tmpl['exists']}) loaders.append(loader_info) template_errors = None if 'template_info' in ctx and ctx['template_info']: template_info = ctx['template_info'] template_errors = { "name": template_info['name'], "line": template_info['line'], "message": template_info['message'] } exception_info = [] if 'frames' in ctx: frames = ctx['frames'] for frame in frames: frame_info = { "filename": frame['filename'], "function": frame['function'], "line": frame['lineno'], "context_line": frame['context_line'], "vars": [] } if 'vars' in frame: for var in frame['vars']: frame_info['vars'].append({ "variable": str(var[0]), "value": format(var[1]) }) exception_info.append(frame_info) request_info = { "path_info": request.path_info, "method": request.META['REQUEST_METHOD'], "url": request.build_absolute_uri(), "GET": {}, "POST": {}, "FILES": {}, "COOKIES": {}, "META": {} } if hasattr(request, "GET"): for key, value in request.GET.iteritems(): request_info['GET'][key] = format(value) if "filtered_POST" in ctx: for key, value in ctx['filtered_POST'].iteritems(): request_info['POST'][key] = format(value) if hasattr(request, "FILES"): for key, value in request.FILES.iteritems(): request_info['FILES'][key] = format(value) if hasattr(request, "COOKIES"): for key, value in request.COOKIES.iteritems(): request_info['COOKIES'][key] = format(value) if hasattr(request, "META"): for key, value in request.META.iteritems(): request_info['META'][key] = format(value) settings_info = {} for key, value in ctx['settings'].iteritems(): settings_info[key] = format(value) ctx['errorid'] = errorid = uuid.uuid4().hex full_info = dict( __time=datetime.datetime.now().isoformat(), __uuid=errorid, settings=settings_info, request=request_info, traceback=exception_info, stack=traceback.format_exc(exc_info[2]), last_frame=last_frame, template_loaders=loaders, template_errors=template_errors, unicode_hint=unicdoe_hint, exception_type=exception_type, exception_value=exception_value, django_version=django_version, python_version=python_version, python_executable=python_executable, python_path=python_path, server_time=server_time ) return (errorid, ctx, full_info) def redirector(request, app, view): params = {} for (key, val) in request.GET.iteritems(): params[key] = val full_name = "%s:%s" % (app, view) if not view or not app: logger.error("Redirector requires both 'app' and 'view' to be set, received: app='%s' view='%s'" % (app, view)) raise Error("Redirector requires both 'app' and 'view' to be set, received: app='%s' view='%s'" % (app, view)) return HttpResponseRedirect(reverse(full_name, kwargs=params)) 
def default_search(request): app = request.app_name lang_code = request.LANGUAGE_CODE return HttpResponseRedirect(make_splunkweb_url("/%s/app/%s/search" % (lang_code, app))) def default_flashtimeline(request): app = request.app_name lang_code = request.LANGUAGE_CODE return HttpResponseRedirect(make_splunkweb_url("/%s/app/%s/flashtimeline" % (lang_code, app))) @render_to() @login_required def default_template_render(request, template_name): app = request.app_name template_path = "%s:%s.html" % (app, template_name) return { "TEMPLATE": template_path } @never_cache def handle404(request): # This code is modified from views/debug.py in Django, as we want to display # a debug style view, just modified slightly. exc_info = sys.exc_info() exception = exc_info[1] try: tried = exception.args[0]['tried'] except (IndexError, TypeError, KeyError): tried = [] urlconf = getattr(request, 'urlconf', settings.ROOT_URLCONF) if isinstance(urlconf, types.ModuleType): urlconf = urlconf.__name__ c = Context({ 'urlconf': urlconf, 'root_urlconf': settings.ROOT_URLCONF, 'request_path': request.path_info[1:], # Trim leading slash 'urlpatterns': tried, 'reason': force_bytes(exception, errors='replace'), 'request': request, 'settings': get_safe_settings(), }) return HttpResponseNotFound(render_to_string('splunkdj:404.html', context_instance=c)) @never_cache def handle500(request): # Let's attempt to render a more useful error message errorid, ctx, exception = get_exception_info(request) # We log the raw error to the log file, so that splunk can pick it up as # JSON. error_logger.error(json.dumps(exception, sort_keys=True)) # Build up the URL for making the query lang_code = request.LANGUAGE_CODE query_args = { "q": 'search index=_internal sourcetype=django_error "%s" | head 1 | spath' % errorid, "display.events.maxlines": 0, "display.general.type": "events", "earliest": 0, "latest": "" } query_string = urllib.urlencode(query_args) ctx['search_url'] = make_splunkweb_url("/%s/app/search/search?%s" % (lang_code, query_string)) return HttpResponseServerError(render_to_string('splunkdj:500.html', context_instance=Context(ctx))) @never_cache @render_to('splunkdj:page_config.html', mimetype="application/javascript") @login_required def get_page_config(request): referer = request.META.get("HTTP_REFERER", "") app = "" app_label = "" if referer: try: parsed = urlparse(referer) parsed_path = parsed.path.replace("/%s/" % settings.MOUNT, "/") resolved = resolve(parsed_path) app = resolved.app_name if app: app_label = request.service.apps[app]["label"] except Exception, e: # If there was an error here, don't kill the entire page # just return some default info app = app or "" app_label = app_label or app zone_info = request.service.get('/services/search/timeparser/tz').body.read() return { "autoload": "1" == request.GET.get("autoload", "0"), "config": json.dumps({ "SPLUNKD_FREE_LICENSE": request.user.is_free, "MRSPARKLE_ROOT_PATH": "/%s" % str(settings.SPLUNK_WEB_MOUNT).strip("/"), "DJANGO_ROOT_PATH": "/%s" % str(settings.RAW_MOUNT), "MRSPARKLE_PORT_NUMBER": str(settings.SPLUNK_WEB_PORT), "DJANGO_PORT_NUMBER": str(settings.DJANGO_PORT), "LOCALE": str(request.LANGUAGE_CODE), "JS_LOGGER_MODE": "None", "USERNAME": str(request.user.username), "USER_DISPLAYNAME": str(request.user.realname), "APP": str(app), "APP_DISPLAYNAME": str(app_label), "SERVER_ZONEINFO": str(zone_info), }) }
37.804348
119
0.637531
0
0
0
0
3,661
0.350872
0
0
2,804
0.268737
91d867e70ec797fb77cf3fedd501ea6a1aca218d
8,301
py
Python
wbia/plottool/interact_keypoints.py
mmulich/wildbook-ia
81b405e2bfaa3f6c30a546fb6dc6e6488e9b2663
[ "Apache-2.0" ]
null
null
null
wbia/plottool/interact_keypoints.py
mmulich/wildbook-ia
81b405e2bfaa3f6c30a546fb6dc6e6488e9b2663
[ "Apache-2.0" ]
null
null
null
wbia/plottool/interact_keypoints.py
mmulich/wildbook-ia
81b405e2bfaa3f6c30a546fb6dc6e6488e9b2663
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*- import logging import utool as ut import six from . import draw_func2 as df2 from wbia.plottool import plot_helpers as ph from wbia.plottool import interact_helpers as ih from wbia.plottool.viz_featrow import draw_feat_row from wbia.plottool.viz_keypoints import show_keypoints from wbia.plottool import abstract_interaction (print, rrr, profile) = ut.inject2(__name__) logger = logging.getLogger('wbia') class KeypointInteraction(abstract_interaction.AbstractInteraction): r""" CommandLine: python -m wbia.plottool.interact_keypoints --exec-KeypointInteraction --show python -m wbia.plottool.interact_keypoints --exec-KeypointInteraction --show --fname=lena.png Example: >>> # DISABLE_DOCTEST >>> from wbia.plottool.interact_keypoints import * # NOQA >>> import numpy as np >>> import wbia.plottool as pt >>> import utool as ut >>> import pyhesaff >>> import vtool as vt >>> kpts, vecs, imgBGR = pt.viz_keypoints.testdata_kpts() >>> ut.quit_if_noshow() >>> #pt.interact_keypoints.ishow_keypoints(imgBGR, kpts, vecs, ori=True, ell_alpha=.4, color='distinct') >>> pt.interact_keypoints.KeypointInteraction(imgBGR, kpts, vecs, ori=True, ell_alpha=.4, autostart=True) >>> pt.show_if_requested() """ def __init__(self, chip, kpts, vecs, fnum=0, figtitle=None, **kwargs): self.chip = chip self.kpts = kpts self.vecs = vecs self.figtitle = figtitle self.mode = 0 super(KeypointInteraction, self).__init__(**kwargs) def plot(self, fnum=None, pnum=(1, 1, 1), **kwargs): import wbia.plottool as pt fnum = pt.ensure_fnum(fnum) pt.figure(fnum=fnum, docla=True, doclf=True) show_keypoints(self.chip, self.kpts, fnum=fnum, pnum=pnum, **kwargs) if self.figtitle is not None: pt.set_figtitle(self.figtitle) def _select_ith_kpt(self, fx): logger.info('[interact] viewing ith=%r keypoint' % fx) # Get the fx-th keypiont kp, sift = self.kpts[fx], self.vecs[fx] # Draw the image with keypoint fx highlighted self.plot(self.fnum, (2, 1, 1), sel_fx=fx) # Draw the selected feature nRows, nCols, px = (2, 3, 3) draw_feat_row(self.chip, fx, kp, sift, self.fnum, nRows, nCols, px, None) def on_click_outside(self, event): self.mode = (self.mode + 1) % 3 ell = self.mode == 1 pts = self.mode == 2 logger.info('... 
default kpts view mode=%r' % self.mode) self.plot(self.fnum, ell=ell, pts=pts) self.draw() def on_click_inside(self, event, ax): import wbia.plottool as pt viztype = ph.get_plotdat(ax, 'viztype', None) logger.info('[ik] viztype=%r' % viztype) if viztype is None: pass elif viztype == 'keypoints': kpts = ph.get_plotdat(ax, 'kpts', []) if len(kpts) == 0: logger.info('...nokpts') else: logger.info('...nearest') x, y = event.xdata, event.ydata import vtool as vt fx = vt.nearest_point(x, y, kpts)[0] self._select_ith_kpt(fx) elif viztype == 'warped': hs_fx = ph.get_plotdat(ax, 'fx', None) if hs_fx is not None: kp = self.kpts[hs_fx] # FIXME sift = self.vecs[hs_fx] df2.draw_keypoint_gradient_orientations( self.chip, kp, sift=sift, mode='vec', fnum=pt.next_fnum() ) pt.draw() elif viztype.startswith('colorbar'): pass else: logger.info('...unhandled') self.draw() def ishow_keypoints(chip, kpts, desc, fnum=0, figtitle=None, nodraw=False, **kwargs): """ TODO: Depricate in favor of the class CommandLine: python -m wbia.plottool.interact_keypoints --test-ishow_keypoints --show python -m wbia.plottool.interact_keypoints --test-ishow_keypoints --show --fname zebra.png Example: >>> # DISABLE_DOCTEST >>> from wbia.plottool.interact_keypoints import * # NOQA >>> import numpy as np >>> import wbia.plottool as pt >>> import utool as ut >>> import pyhesaff >>> import vtool as vt >>> kpts, vecs, imgBGR = pt.viz_keypoints.testdata_kpts() >>> ut.quit_if_noshow() >>> #pt.interact_keypoints.ishow_keypoints(imgBGR, kpts, vecs, ori=True, ell_alpha=.4, color='distinct') >>> pt.interact_keypoints.ishow_keypoints(imgBGR, kpts, vecs, ori=True, ell_alpha=.4) >>> pt.show_if_requested() """ if isinstance(chip, six.string_types): import vtool as vt chip = vt.imread(chip) fig = ih.begin_interaction('keypoint', fnum) annote_ptr = [1] self = ut.DynStruct() # MOVE TO A CLASS INTERACTION self.kpts = kpts vecs = desc self.vecs = vecs def _select_ith_kpt(fx): logger.info('[interact] viewing ith=%r keypoint' % fx) # Get the fx-th keypiont kp, sift = kpts[fx], vecs[fx] # Draw the image with keypoint fx highlighted _viz_keypoints(fnum, (2, 1, 1), sel_fx=fx, **kwargs) # MAYBE: remove kwargs # Draw the selected feature nRows, nCols, px = (2, 3, 3) draw_feat_row(chip, fx, kp, sift, fnum, nRows, nCols, px, None) def _viz_keypoints(fnum, pnum=(1, 1, 1), **kwargs): df2.figure(fnum=fnum, docla=True, doclf=True) show_keypoints(chip, kpts, fnum=fnum, pnum=pnum, **kwargs) if figtitle is not None: df2.set_figtitle(figtitle) def _on_keypoints_click(event): logger.info('[viz] clicked keypoint view') if event is None or event.xdata is None or event.inaxes is None: annote_ptr[0] = (annote_ptr[0] + 1) % 3 mode = annote_ptr[0] ell = mode == 1 pts = mode == 2 logger.info('... default kpts view mode=%r' % mode) _viz_keypoints(fnum, ell=ell, pts=pts, **kwargs) # MAYBE: remove kwargs else: ax = event.inaxes viztype = ph.get_plotdat(ax, 'viztype', None) logger.info('[ik] viztype=%r' % viztype) if viztype == 'keypoints': kpts = ph.get_plotdat(ax, 'kpts', []) if len(kpts) == 0: logger.info('...nokpts') else: logger.info('...nearest') x, y = event.xdata, event.ydata import vtool as vt fx = vt.nearest_point(x, y, kpts)[0] _select_ith_kpt(fx) elif viztype == 'warped': hs_fx = ph.get_plotdat(ax, 'fx', None) # kpts = ph.get_plotdat(ax, 'kpts', []) if hs_fx is not None: # Ugly. Interactions should be changed to classes. 
kp = self.kpts[hs_fx] # FIXME sift = self.vecs[hs_fx] df2.draw_keypoint_gradient_orientations( chip, kp, sift=sift, mode='vec', fnum=df2.next_fnum() ) elif viztype.startswith('colorbar'): pass # Hack to get a specific scoring feature # sortx = self.fs.argsort() # idx = np.clip(int(np.round(y * len(sortx))), 0, len(sortx) - 1) # mx = sortx[idx] # (fx1, fx2) = self.fm[mx] # (fx1, fx2) = self.fm[mx] # logger.info('... selected score at rank idx=%r' % (idx,)) # logger.info('... selected score with fs=%r' % (self.fs[mx],)) # logger.info('... resolved to mx=%r' % mx) # logger.info('... fx1, fx2 = %r, %r' % (fx1, fx2,)) # self.select_ith_match(mx) else: logger.info('...unhandled') ph.draw() # Draw without keypoints the first time _viz_keypoints(fnum, **kwargs) # MAYBE: remove kwargs ih.connect_callback(fig, 'button_press_event', _on_keypoints_click) if not nodraw: ph.draw()
39.15566
113
0.563787
3,382
0.407421
0
0
0
0
0
0
3,019
0.363691
91d921988391847f171d7f816701e122ce388582
143
py
Python
tb/storage/__init__.py
DronMDF/manabot
b412e8cb9b5247f05487bed4cbf4967f7b58327f
[ "MIT" ]
1
2017-11-29T11:51:12.000Z
2017-11-29T11:51:12.000Z
tb/storage/__init__.py
DronMDF/manabot
b412e8cb9b5247f05487bed4cbf4967f7b58327f
[ "MIT" ]
109
2017-11-28T20:51:59.000Z
2018-02-02T13:15:29.000Z
tb/storage/__init__.py
DronMDF/manabot
b412e8cb9b5247f05487bed4cbf4967f7b58327f
[ "MIT" ]
null
null
null
from .database import StDatabase
from .telegram import StTelegram
from .tinydb import TinyDataBase, TinySelect
from .utility import StDispatch
28.6
44
0.846154
0
0
0
0
0
0
0
0
0
0
91d9d1d9ae07a637595f6f1be3521d0ea393c068
1,468
py
Python
algorithms/maths/chinese_remainder_theorem.py
hbqdev/algorithms
65cc8551d86d7e065069d165dd8bf9baf10345a0
[ "MIT" ]
22,426
2017-01-17T04:01:44.000Z
2022-03-31T12:06:16.000Z
algorithms/maths/chinese_remainder_theorem.py
Shubhanshu156/algorithms
d8f1428cee7f66376929f72c524b6e0325bf3492
[ "MIT" ]
523
2017-04-18T12:05:11.000Z
2022-03-20T11:10:41.000Z
algorithms/maths/chinese_remainder_theorem.py
AmandaStromdahl/algorithms
1652835c3aef9aa670b67a5459e51dd3a8e6a71c
[ "MIT" ]
4,900
2017-01-19T23:47:05.000Z
2022-03-31T10:00:47.000Z
from algorithms.maths.gcd import gcd
from typing import List


def solve_chinese_remainder(num: List[int], rem: List[int]):
    """
    Computes the smallest x that satisfies the Chinese remainder theorem
    for a system of equations.
    The system of equations has the form:
    x % num[0] = rem[0]
    x % num[1] = rem[1]
    ...
    x % num[k - 1] = rem[k - 1]
    Where k is the number of elements in num and rem, k > 0.
    All numbers in num need to be pairwise coprime, otherwise an exception is raised.
    returns x: the smallest value for x that satisfies the system of equations
    """
    if not len(num) == len(rem):
        raise Exception("num and rem should have equal length")
    if not len(num) > 0:
        raise Exception("Lists num and rem need to contain at least one element")
    for n in num:
        if not n > 1:
            raise Exception("All numbers in num need to be > 1")
    if not _check_coprime(num):
        raise Exception("All pairs of numbers in num are not coprime")
    k = len(num)
    x = 1
    while True:
        i = 0
        while i < k:
            if x % num[i] != rem[i]:
                break
            i += 1
        if i == k:
            return x
        else:
            x += 1


def _check_coprime(l: List[int]):
    for i in range(len(l)):
        for j in range(len(l)):
            if i == j:
                continue
            if gcd(l[i], l[j]) != 1:
                return False
    return True
31.234043
84
0.559264
0
0
0
0
0
0
0
0
645
0.439373
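The solve_chinese_remainder function in the algorithms/maths/chinese_remainder_theorem.py record above brute-forces the smallest x satisfying the listed congruences. A minimal usage sketch follows, assuming the algorithms package from that record is installed; the concrete moduli and remainders are illustrative only.

# Usage sketch (assumption: the `algorithms` package above is importable).
# Moduli must be pairwise coprime and greater than 1.
from algorithms.maths.chinese_remainder_theorem import solve_chinese_remainder

# Smallest x with x % 3 == 2, x % 5 == 3 and x % 7 == 2
x = solve_chinese_remainder(num=[3, 5, 7], rem=[2, 3, 2])
print(x)  # 23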
91da549f96f9ccca48e20a796a48546be83febae
206
py
Python
exercises/ja/exc_03_16_01.py
Jette16/spacy-course
32df0c8f6192de6c9daba89740a28c0537e4d6a0
[ "MIT" ]
2,085
2019-04-17T13:10:40.000Z
2022-03-30T21:51:46.000Z
exercises/ja/exc_03_16_01.py
Jette16/spacy-course
32df0c8f6192de6c9daba89740a28c0537e4d6a0
[ "MIT" ]
79
2019-04-18T14:42:55.000Z
2022-03-07T08:15:43.000Z
exercises/ja/exc_03_16_01.py
Jette16/spacy-course
32df0c8f6192de6c9daba89740a28c0537e4d6a0
[ "MIT" ]
361
2019-04-17T13:34:32.000Z
2022-03-28T04:42:45.000Z
import spacy

nlp = spacy.load("ja_core_news_sm")

text = (
    "チックフィレイはジョージア州カレッジパークに本社を置く、"
    "チキンサンドを専門とするアメリカのファストフードレストランチェーンです。"
)

# Tokenize only
doc = nlp(text)
print([token.text for token in doc])
17.166667
42
0.73301
0
0
0
0
0
0
0
0
245
0.69209
91dad0ab0f33fc6693bf8cc4e9a065c0be985607
19,086
py
Python
apphelper/image.py
caiyueliang/chineseocr
4495598f938936c6bcb2222fa44f840a7919212c
[ "MIT" ]
null
null
null
apphelper/image.py
caiyueliang/chineseocr
4495598f938936c6bcb2222fa44f840a7919212c
[ "MIT" ]
null
null
null
apphelper/image.py
caiyueliang/chineseocr
4495598f938936c6bcb2222fa44f840a7919212c
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- """ ##图像相关函数 @author: lywen """ import sys import six import os import base64 import requests import numpy as np import cv2 from PIL import Image import traceback import uuid from glob import glob from bs4 import BeautifulSoup def sort_box_(box): x1,y1,x2,y2,x3,y3,x4,y4 = box[:8] pts = (x1,y1),(x2,y2),(x3,y3),(x4,y4) pts = np.array(pts, dtype="float32") (x1,y1),(x2,y2),(x3,y3),(x4,y4) = _order_points(pts) """ newBox = [[x1,y1],[x2,y2],[x3,y3],[x4,y4]] ## sort x newBox = sorted(newBox,key=lambda x:x[0]) x1,y1 = sorted(newBox[:2],key=lambda x:x[1])[0] index = newBox.index([x1,y1]) newBox.pop(index) newBox = sorted(newBox,key=lambda x:-x[1]) x4,y4 = sorted(newBox[:2],key=lambda x:x[0])[0] index = newBox.index([x4,y4]) newBox.pop(index) newBox = sorted(newBox,key=lambda x:-x[0]) x2,y2 = sorted(newBox[:2],key=lambda x:x[1])[0] index = newBox.index([x2,y2]) newBox.pop(index) newBox = sorted(newBox,key=lambda x:-x[1]) x3,y3 = sorted(newBox[:2],key=lambda x:x[0])[0] """ return x1,y1,x2,y2,x3,y3,x4,y4 import numpy as np from scipy.spatial import distance as dist def _order_points(pts): # 根据x坐标对点进行排序 """ --------------------- 作者:Tong_T 来源:CSDN 原文:https://blog.csdn.net/Tong_T/article/details/81907132 版权声明:本文为博主原创文章,转载请附上博文链接! """ x_sorted = pts[np.argsort(pts[:, 0]), :] # 从排序中获取最左侧和最右侧的点 # x坐标点 left_most = x_sorted[:2, :] right_most = x_sorted[2:, :] # 现在,根据它们的y坐标对最左边的坐标进行排序,这样我们就可以分别抓住左上角和左下角 left_most = left_most[np.argsort(left_most[:, 1]), :] (tl, bl) = left_most # 现在我们有了左上角坐标,用它作为锚来计算左上角和右上角之间的欧氏距离; # 根据毕达哥拉斯定理,距离最大的点将是我们的右下角 distance = dist.cdist(tl[np.newaxis], right_most, "euclidean")[0] (br, tr) = right_most[np.argsort(distance)[::-1], :] # 返回左上角,右上角,右下角和左下角的坐标 return np.array([tl, tr, br, bl], dtype="float32") def solve(box): """ 绕 cx,cy点 w,h 旋转 angle 的坐标 x = cx-w/2 y = cy-h/2 x1-cx = -w/2*cos(angle) +h/2*sin(angle) y1 -cy= -w/2*sin(angle) -h/2*cos(angle) h(x1-cx) = -wh/2*cos(angle) +hh/2*sin(angle) w(y1 -cy)= -ww/2*sin(angle) -hw/2*cos(angle) (hh+ww)/2sin(angle) = h(x1-cx)-w(y1 -cy) """ x1,y1,x2,y2,x3,y3,x4,y4= box[:8] cx = (x1+x3+x2+x4)/4.0 cy = (y1+y3+y4+y2)/4.0 w = (np.sqrt((x2-x1)**2+(y2-y1)**2)+np.sqrt((x3-x4)**2+(y3-y4)**2))/2 h = (np.sqrt((x2-x3)**2+(y2-y3)**2)+np.sqrt((x1-x4)**2+(y1-y4)**2))/2 #x = cx-w/2 #y = cy-h/2 sinA = (h*(x1-cx)-w*(y1 -cy))*1.0/(h*h+w*w)*2 if abs(sinA)>1: angle = None else: angle = np.arcsin(sinA) return angle,w,h,cx,cy def read_singLine_for_yolo(p): """ 单行文本 """ im = Image.open(p).convert('RGB') w,h = im.size boxes = [{'cx':w/2,'cy':h/2,'w':w,'h':h,'angle':0.0}] return im,boxes def read_voc_xml(p): ##读取voc xml 文件 boxes = [] if os.path.exists(p): with open(p) as f: xmlString = f.read() xmlString = BeautifulSoup(xmlString,'lxml') objList = xmlString.findAll('object') for obj in objList: robndbox = obj.find('robndbox') bndbox = obj.find('bndbox') if robndbox is not None and bndbox is None: cx = np.float(robndbox.find('cx').text) cy = np.float(robndbox.find('cy').text) w = np.float(robndbox.find('w').text) h = np.float(robndbox.find('h').text) angle = robndbox.find('angle').text if angle=='nan' or h==0 or w==0: #boxes = [] continue angle = np.float(angle) if abs(angle)>np.pi/2: w,h = h,w angle = abs(angle)%(np.pi/2)*np.sign(angle) x1,y1,x2,y2,x3,y3,x4,y4 = xy_rotate_box(cx,cy,w,h,angle) x1,y1,x2,y2,x3,y3,x4,y4 = sort_box_([x1,y1,x2,y2,x3,y3,x4,y4]) """ if abs(angle)>np.pi/2: ##lableImg bug x1,y1,x2,y2,x3,y3,x4,y4 = sort_box_([x1,y1,x2,y2,x3,y3,x4,y4]) """ angle,w,h,cx,cy = solve([x1,y1,x2,y2,x3,y3,x4,y4]) else: xmin = 
np.float(bndbox.find('xmin').text) xmax = np.float(bndbox.find('xmax').text) ymin = np.float(bndbox.find('ymin').text) ymax = np.float(bndbox.find('ymax').text) cx = (xmin+xmax)/2.0 cy = (ymin+ymax)/2.0 w = (-xmin+xmax)#/2.0 h = (-ymin+ymax)#/2.0 angle =0.0 boxes.append({'cx':cx,'cy':cy,'w':w,'h':h,'angle':angle}) return boxes def xy_rotate_box(cx,cy,w,h,angle): """ 绕 cx,cy点 w,h 旋转 angle 的坐标 x_new = (x-cx)*cos(angle) - (y-cy)*sin(angle)+cx y_new = (x-cx)*sin(angle) + (y-cy)*sin(angle)+cy """ cx = float(cx) cy = float(cy) w = float(w) h = float(h) angle = float(angle) x1,y1 = rotate(cx-w/2,cy-h/2,angle,cx,cy) x2,y2 = rotate(cx+w/2,cy-h/2,angle,cx,cy) x3,y3 = rotate(cx+w/2,cy+h/2,angle,cx,cy) x4,y4 = rotate(cx-w/2,cy+h/2,angle,cx,cy) return x1,y1,x2,y2,x3,y3,x4,y4 from numpy import cos,sin,pi,tan def rotate(x,y,angle,cx,cy): """ 点(x,y) 绕(cx,cy)点旋转 """ #angle = angle*pi/180 x_new = (x-cx)*cos(angle) - (y-cy)*sin(angle)+cx y_new = (x-cx)*sin(angle) + (y-cy)*cos(angle)+cy return x_new,y_new def resize_box(boxes,scale): newBoxes = [] for box in boxes: cx = box['cx']*scale cy = box['cy']*scale w = box['w']*scale h = box['h']*scale angle = box['angle'] newBoxes.append({'cx':cx,'cy':cy,'w':w,'h':h,'angle':angle}) return newBoxes def resize_im(w,h, scale=416, max_scale=608): f=float(scale)/min(h, w) if max_scale is not None: if f*max(h, w)>max_scale: f=float(max_scale)/max(h, w) newW,newH = int(w*f),int(h*f) return newW-(newW%32),newH-(newH%32) def get_rorate(boxes,im,degree=0): """ 获取旋转角度后的box及im """ imgW,imgH = im.size newBoxes = [] for line in boxes: cx0,cy0 = imgW/2.0,imgH/2.0 x1,y1,x2,y2,x3,y3,x4,y4 = xy_rotate_box(**line) x1,y1 = rotate(x1,y1,-degree/180*np.pi,cx0,cy0) x2,y2 = rotate(x2,y2,-degree/180*np.pi,cx0,cy0) x3,y3 = rotate(x3,y3,-degree/180*np.pi,cx0,cy0) x4,y4 = rotate(x4,y4,-degree/180*np.pi,cx0,cy0) box = (x1,y1,x2,y2,x3,y3,x4,y4) degree_,w_,h_,cx_,cy_ = solve(box) newLine = {'angle':degree_,'w':w_,'h':h_,'cx':cx_,'cy':cy_} newBoxes.append(newLine) return im.rotate(degree,center=(imgW/2.0,imgH/2.0 )),newBoxes def letterbox_image(image, size,fillValue=[128,128,128]): ''' resize image with unchanged aspect ratio using padding ''' image_w, image_h = image.size w, h = size new_w = int(image_w * min(w*1.0/image_w, h*1.0/image_h)) new_h = int(image_h * min(w*1.0/image_w, h*1.0/image_h)) resized_image = image.resize((new_w,new_h), Image.BICUBIC) if fillValue is None: fillValue = [int(x.mean()) for x in cv2.split(np.array(im))] boxed_image = Image.new('RGB', size, tuple(fillValue)) boxed_image.paste(resized_image,) return boxed_image,new_w/image_w def box_split(boxes,splitW = 15): newBoxes = [] for box in boxes: w = box['w'] h = box['h'] cx = box['cx'] cy=box['cy'] angle = box['angle'] x1,y1,x2,y2,x3,y3,x4,y4 = xy_rotate_box(cx,cy,w,h,angle) splitBoxes =[] i = 1 tanAngle = tan(-angle) while True: flag = 0 if i==1 else 1 xmin = x1+(i-1)*splitW ymin = y1-tanAngle*splitW*i xmax = x1+i*splitW ymax = y4-(i-1)*tanAngle*splitW +flag*tanAngle*(x4-x1) if xmax>max(x2,x3) and xmin>max(x2,x3): break splitBoxes.append([int(xmin),int(ymin),int(xmax),int(ymax)]) i+=1 newBoxes.append(splitBoxes) return newBoxes def get_box_spilt(boxes,im,sizeW,SizeH,splitW=8,isRoate=False,rorateDegree=0): """ isRoate:是否旋转box """ size = sizeW,SizeH if isRoate: ##旋转box im,boxes = get_rorate(boxes,im,degree=rorateDegree) newIm,f = letterbox_image(im, size) newBoxes = resize_box(boxes,f) newBoxes = sum(box_split(newBoxes,splitW),[]) newBoxes = [box+[1] for box in newBoxes] return newBoxes,newIm def 
box_rotate(box,angle=0,imgH=0,imgW=0): """ 对坐标进行旋转 逆时针方向 0\90\180\270, """ x1,y1,x2,y2,x3,y3,x4,y4 = box[:8] if angle==90: x1_,y1_ = y2,imgW-x2 x2_,y2_ = y3,imgW-x3 x3_,y3_ = y4,imgW-x4 x4_,y4_ = y1,imgW-x1 elif angle==180: x1_,y1_ = imgW-x3,imgH-y3 x2_,y2_ = imgW-x4,imgH-y4 x3_,y3_ = imgW-x1,imgH-y1 x4_,y4_ = imgW-x2,imgH-y2 elif angle==270: x1_,y1_ = imgH-y4,x4 x2_,y2_ = imgH-y1,x1 x3_,y3_ = imgH-y2,x2 x4_,y4_ = imgH-y3,x3 else: x1_,y1_,x2_,y2_,x3_,y3_,x4_,y4_ = x1,y1,x2,y2,x3,y3,x4,y4 return (x1_,y1_,x2_,y2_,x3_,y3_,x4_,y4_) def solve(box): """ 绕 cx,cy点 w,h 旋转 angle 的坐标 x = cx-w/2 y = cy-h/2 x1-cx = -w/2*cos(angle) +h/2*sin(angle) y1 -cy= -w/2*sin(angle) -h/2*cos(angle) h(x1-cx) = -wh/2*cos(angle) +hh/2*sin(angle) w(y1 -cy)= -ww/2*sin(angle) -hw/2*cos(angle) (hh+ww)/2sin(angle) = h(x1-cx)-w(y1 -cy) """ x1,y1,x2,y2,x3,y3,x4,y4= box[:8] cx = (x1+x3+x2+x4)/4.0 cy = (y1+y3+y4+y2)/4.0 w = (np.sqrt((x2-x1)**2+(y2-y1)**2)+np.sqrt((x3-x4)**2+(y3-y4)**2))/2 h = (np.sqrt((x2-x3)**2+(y2-y3)**2)+np.sqrt((x1-x4)**2+(y1-y4)**2))/2 sinA = (h*(x1-cx)-w*(y1 -cy))*1.0/(h*h+w*w)*2 angle = np.arcsin(sinA) return angle,w,h,cx,cy from numpy import cos,sin,pi def rotate(x,y,angle,cx,cy): angle = angle#*pi/180 x_new = (x-cx)*cos(angle) - (y-cy)*sin(angle)+cx y_new = (x-cx)*sin(angle) + (y-cy)*cos(angle)+cy return x_new,y_new def xy_rotate_box(cx,cy,w,h,angle): """ 绕 cx,cy点 w,h 旋转 angle 的坐标 x_new = (x-cx)*cos(angle) - (y-cy)*sin(angle)+cx y_new = (x-cx)*sin(angle) + (y-cy)*sin(angle)+cy """ cx = float(cx) cy = float(cy) w = float(w) h = float(h) angle = float(angle) x1,y1 = rotate(cx-w/2,cy-h/2,angle,cx,cy) x2,y2 = rotate(cx+w/2,cy-h/2,angle,cx,cy) x3,y3 = rotate(cx+w/2,cy+h/2,angle,cx,cy) x4,y4 = rotate(cx-w/2,cy+h/2,angle,cx,cy) return x1,y1,x2,y2,x3,y3,x4,y4 # def rotate_cut_img(im, degree, box, w, h, leftAdjust=False, rightAdjust=False, alph=0.2): # x1, y1, x2, y2, x3, y3, x4, y4 = box[:8] # # print('rotate_cut_img', x1, y1, x2, y2, x3, y3, x4, y4) # # x_center, y_center = np.mean([x1, x2, x3, x4]), np.mean([y1, y2, y3, y4]) # right = 0 # left = 0 # if rightAdjust: # right = 1 # if leftAdjust: # left = 1 # # # print(im.shape) # box = (max(1, x_center - w / 2 - left * alph * (w / 2)), # xmin # y_center - h / 2, # ymin # min(x_center + w / 2 + right * alph * (w / 2), im.shape[1] - 1), # xmax # y_center + h / 2) # ymax # # print('box', box) # # newW = int(box[2] - box[0]) # newH = int(box[3] - box[1]) # # # ===================================================== # # remap_points = np.array([[0, 0], [164, 0], [164, 48], [0, 48]], dtype=np.float32) # remap_points = np.array([[0, 0], [newW, 0], [newW, newH], [0, newH]], dtype=np.float32) # old_points = np.array([[x1, y1], [x2, y2], [x3, y3], [x4, y4]], dtype=np.float32) # # 透视变换:用到opencv函数 # M = cv2.getPerspectiveTransform(old_points, remap_points) # tmpImg = cv2.warpPerspective(im, M, (newW, newH)) # # cv2.imshow('rotate_cut_img', tmpImg) # # cv2.waitKey(0) # # return tmpImg, newW, newH def rotate_cut_img(im, degree, box, w, h, leftAdjust=False, rightAdjust=False, alph=0.2): x1, y1, x2, y2, x3, y3, x4, y4 = box[:8] x_center, y_center = np.mean([x1, x2, x3, x4]), np.mean([y1, y2, y3, y4]) degree_ = degree * 180.0 / np.pi right = 0 left = 0 if rightAdjust: right = 1 if leftAdjust: left = 1 box = (max(1, x_center - w / 2 - left * alph * (w / 2)), # xmin y_center - h / 2, # ymin min(x_center + w / 2 + right * alph * (w / 2), im.size[0] - 1), # xmax y_center + h / 2) # ymax newW = box[2] - box[0] newH = box[3] - box[1] tmpImg = im.rotate(degree_, center=(x_center, 
y_center)).crop(box) return tmpImg, newW, newH def letterbox_image(image, size, fillValue=[128, 128, 128]): '''resize image with unchanged aspect ratio using padding''' image_w, image_h = image.size w, h = size new_w = int(image_w * min(w*1.0/image_w, h*1.0/image_h)) new_h = int(image_h * min(w*1.0/image_w, h*1.0/image_h)) resized_image = image.resize((new_w,new_h), Image.BICUBIC) if fillValue is None: fillValue = [int(x.mean()) for x in cv2.split(np.array(im))] boxed_image = Image.new('RGB', size, tuple(fillValue)) boxed_image.paste(resized_image, (0,0)) return boxed_image,new_w/image_w from scipy.ndimage import filters,interpolation,morphology,measurements,minimum #from pylab import amin, amax from numpy import amin, amax def estimate_skew_angle(raw): """ 估计图像文字角度 """ def resize_im(im, scale, max_scale=None): f=float(scale)/min(im.shape[0], im.shape[1]) if max_scale!=None and f*max(im.shape[0], im.shape[1])>max_scale: f=float(max_scale)/max(im.shape[0], im.shape[1]) return cv2.resize(im, (0, 0), fx=f, fy=f) raw = resize_im(raw, scale=600, max_scale=900) image = raw-amin(raw) image = image/amax(image) m = interpolation.zoom(image,0.5) m = filters.percentile_filter(m,80,size=(20,2)) m = filters.percentile_filter(m,80,size=(2,20)) m = interpolation.zoom(m,1.0/0.5) w,h = min(image.shape[1],m.shape[1]),min(image.shape[0],m.shape[0]) flat = np.clip(image[:h,:w]-m[:h,:w]+1,0,1) d0,d1 = flat.shape o0,o1 = int(0.1*d0),int(0.1*d1) flat = amax(flat)-flat flat -= amin(flat) est = flat[o0:d0-o0,o1:d1-o1] angles = range(-15,15) estimates = [] for a in angles: roest =interpolation.rotate(est,a,order=0,mode='constant') v = np.mean(roest,axis=1) v = np.var(v) estimates.append((v,a)) _,a = max(estimates) return a def sort_box(box): """ 对box排序,及页面进行排版 box[index, 0] = x1 box[index, 1] = y1 box[index, 2] = x2 box[index, 3] = y2 box[index, 4] = x3 box[index, 5] = y3 box[index, 6] = x4 box[index, 7] = y4 """ box = sorted(box,key=lambda x:sum([x[1],x[3],x[5],x[7]])) return list(box) def get_boxes( bboxes): """ boxes: bounding boxes """ text_recs=np.zeros((len(bboxes), 8), np.int) index = 0 for box in bboxes: b1 = box[6] - box[7] / 2 b2 = box[6] + box[7] / 2 x1 = box[0] y1 = box[5] * box[0] + b1 x2 = box[2] y2 = box[5] * box[2] + b1 x3 = box[0] y3 = box[5] * box[0] + b2 x4 = box[2] y4 = box[5] * box[2] + b2 disX = x2 - x1 disY = y2 - y1 width = np.sqrt(disX*disX + disY*disY) fTmp0 = y3 - y1 fTmp1 = fTmp0 * disY / width x = np.fabs(fTmp1*disX / width) y = np.fabs(fTmp1*disY / width) if box[5] < 0: x1 -= x y1 += y x4 += x y4 -= y else: x2 += x y2 += y x3 -= x y3 -= y text_recs[index, 0] = x1 text_recs[index, 1] = y1 text_recs[index, 2] = x2 text_recs[index, 3] = y2 text_recs[index, 4] = x3 text_recs[index, 5] = y3 text_recs[index, 6] = x4 text_recs[index, 7] = y4 index = index + 1 return text_recs def union_rbox(result,alpha=0.1): """ 按行合并box """ def diff(box1,box2): """ 计算box1,box2之间的距离 """ cy1 = box1['cy'] cy2 = box2['cy'] h1 = box1['h'] h2 = box2['h'] return abs(cy1-cy2)/max(0.01,min(h1/2,h2/2)) def sort_group_box(boxes): """ 对box进行排序, 并合并box """ N = len(boxes) boxes = sorted(boxes,key=lambda x:x['cx']) text = ' '.join([bx['text'] for bx in boxes]) box4 = np.zeros((N,8)) for i in range(N): cx =boxes[i]['cx'] cy = boxes[i]['cy'] degree =boxes[i]['degree'] w = boxes[i]['w'] h = boxes[i]['h'] x1,y1,x2,y2,x3,y3,x4,y4 = xy_rotate_box(cx, cy, w, h, degree/180*np.pi) box4[i] = [x1,y1,x2,y2,x3,y3,x4,y4] x1 = box4[:,0].min() y1 = box4[:,1].min() x2 = box4[:,2].max() y2 = box4[:,3].min() x3 = box4[:,4].max() y3 = 
box4[:,5].max() x4 = box4[:,6].min() y4 = box4[:,7].max() angle,w,h,cx,cy = solve([x1,y1,x2,y2,x3,y3,x4,y4]) return {'text':text,'cx':cx,'cy':cy,'w':w,'h':h,'degree':angle/np.pi*180} newBox = [] for line in result: if len(newBox)==0: newBox.append([line]) else: check=False for box in newBox[-1]: if diff(line,box)>alpha: check = True if not check: newBox[-1].append(line) else: newBox.append([line]) newBox = [sort_group_box(bx) for bx in newBox] return newBox def adjust_box_to_origin(img,angle, result): """ 调整box到原图坐标 """ h,w = img.shape[:2] if angle in [90,270]: imgW,imgH = img.shape[:2] else: imgH,imgW= img.shape[:2] newresult = [] for line in result: cx =line['box']['cx'] cy = line['box']['cy'] degree =line['box']['angle'] w = line['box']['w'] h = line['box']['h'] x1,y1,x2,y2,x3,y3,x4,y4 = xy_rotate_box(cx, cy, w, h, degree/180*np.pi) x1,y1,x2,y2,x3,y3,x4,y4 = box_rotate([x1,y1,x2,y2,x3,y3,x4,y4],angle=(360-angle)%360,imgH=imgH,imgW=imgW) box = x1,y1,x2,y2,x3,y3,x4,y4 newresult.append({'name':line['name'],'text':line['text'],'box':box}) return newresult
29.40832
113
0.515561
0
0
0
0
0
0
0
0
5,332
0.27066
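The rotate helper in the apphelper/image.py record above implements a standard counter-clockwise rotation of a point about a centre (cx, cy), and the box utilities are built on top of it. A small numeric check, assuming the module and its dependencies (cv2, PIL, scipy, bs4) are importable from the repo layout shown above:

# Sketch only: verify rotate() by turning (1, 0) a quarter turn about the origin.
import numpy as np
from apphelper.image import rotate  # assumption: package importable as shown in the record

x_new, y_new = rotate(1.0, 0.0, np.pi / 2, 0.0, 0.0)
print(round(x_new, 6), round(y_new, 6))  # -> 0.0 1.0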
91db99963a9d2cafd0fa8e863ed2ec3e7df55f3e
1,320
py
Python
opendatatools/common/ui_util.py
harveywwu/OpenData
cf421465dd9b11fdbb2fbf4d00512e3aaf09d070
[ "Apache-2.0" ]
null
null
null
opendatatools/common/ui_util.py
harveywwu/OpenData
cf421465dd9b11fdbb2fbf4d00512e3aaf09d070
[ "Apache-2.0" ]
null
null
null
opendatatools/common/ui_util.py
harveywwu/OpenData
cf421465dd9b11fdbb2fbf4d00512e3aaf09d070
[ "Apache-2.0" ]
1
2020-05-29T00:26:59.000Z
2020-05-29T00:26:59.000Z
# -*- coding: UTF-8 -*-
import sys, time


class ShowProcess():
    """
    Class for displaying processing progress.
    Call the related methods of this class to display a progress bar.
    """
    i = 0  # current progress
    max_steps = 0  # total number of steps to process
    max_arrow = 50  # length of the progress bar
    infoDone = 'done'

    # Initializer; needs to know the total number of steps
    def __init__(self, max_steps, infoDone = 'Done'):
        self.max_steps = max_steps
        self.i = 0
        self.infoDone = infoDone

    # Display function; shows the bar according to the current progress i
    # Output looks like [>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>]100.00%
    def show_process(self, i=None):
        if i is not None:
            self.i = i
        else:
            self.i += 1
        num_arrow = int(self.i * self.max_arrow / self.max_steps)  # how many '>' to show
        num_line = self.max_arrow - num_arrow  # how many '-' to show
        percent = self.i * 100.0 / self.max_steps  # completion percentage, formatted as xx.xx%
        process_bar = '[' + '>' * num_arrow + '-' * num_line + ']'\
            + '%.2f' % percent + '%' + '\r'  # output string; '\r' returns to the line start without a newline
        sys.stdout.write(process_bar)  # print the bar to the terminal
        sys.stdout.flush()
        if self.i >= self.max_steps:
            self.close()

    def close(self):
        print('')
        print(self.infoDone)
        self.i = 0


if __name__=='__main__':
    max_steps = 100
    process_bar = ShowProcess(max_steps, 'OK')
    for i in range(max_steps):
        process_bar.show_process()
        time.sleep(0.1)
26.938776
77
0.526515
1,364
0.856784
0
0
0
0
0
0
597
0.375
91dbd76ebb4a6ee074d9e41d9b7337c54be487ec
1,748
py
Python
data_structure/stack_and_queue/494. Target Sum_ Medium.py
JunzhongLin/leetcode_practice
47b2f5cc3c87de004ae21a94024e751b40b8f559
[ "MIT" ]
null
null
null
data_structure/stack_and_queue/494. Target Sum_ Medium.py
JunzhongLin/leetcode_practice
47b2f5cc3c87de004ae21a94024e751b40b8f559
[ "MIT" ]
null
null
null
data_structure/stack_and_queue/494. Target Sum_ Medium.py
JunzhongLin/leetcode_practice
47b2f5cc3c87de004ae21a94024e751b40b8f559
[ "MIT" ]
null
null
null
''' You are given an integer array nums and an integer target. You want to build an expression out of nums by adding one of the symbols '+' and '-' before each integer in nums and then concatenate all the integers. For example, if nums = [2, 1], you can add a '+' before 2 and a '-' before 1 and concatenate them to build the expression "+2-1". Return the number of different expressions that you can build, which evaluates to target. ''' from collections import defaultdict class Solution: def findTargetSumWays(self, nums, target) -> int: count = 0 target_depth = len(nums) - 1 stack = [(0, -1, 0)] cache = defaultdict(int) while stack: # print(stack) # count += 1 # if count == 10: # break curr_sum, depth, visited = stack.pop() if visited: if depth == target_depth: if curr_sum == target: cache[(curr_sum, depth, visited)] = 1 else: l = cache[(curr_sum + nums[depth + 1], depth + 1, 1)] r = cache[(curr_sum - nums[depth + 1], depth + 1, 1)] cache[(curr_sum, depth, visited)] = l + r continue else: if (curr_sum, depth, 1) in cache: continue stack.append((curr_sum, depth, 1)) if depth < target_depth: stack.append((curr_sum + nums[depth + 1], depth + 1, 0)) stack.append((curr_sum - nums[depth + 1], depth + 1, 0)) return cache[(0, -1, 1)] input_val, target = [1,1,1,1,1], 3 res = Solution().findTargetSumWays(input_val, target)
34.96
151
0.529748
1,177
0.673341
0
0
0
0
0
0
495
0.283181
91dd1a3b5de5801e9e8baf1d02a035b6853b1ad3
3,733
py
Python
fixtrack/frontend/pickable_markers.py
os-gabe/fixtrack
a0af4dfa9342acc0ba05c0249a32806c825b74b2
[ "MIT" ]
null
null
null
fixtrack/frontend/pickable_markers.py
os-gabe/fixtrack
a0af4dfa9342acc0ba05c0249a32806c825b74b2
[ "MIT" ]
null
null
null
fixtrack/frontend/pickable_markers.py
os-gabe/fixtrack
a0af4dfa9342acc0ba05c0249a32806c825b74b2
[ "MIT" ]
1
2022-03-25T04:26:36.000Z
2022-03-25T04:26:36.000Z
import numpy as np from fixtrack.frontend.pickable_base import PickableBase from vispy import scene class PickableMarkers(PickableBase): """ Markers that can highlight on hover and be selected """ class State(PickableBase.State): def __init__(self, **kwargs): super(PickableMarkers.State, self).__init__(**kwargs) self.sizes_raw = None self.sizes = None class Config(PickableBase.Config): def __init__(self, select_scale=1.0, hover_scale=1.0, **kwargs): super(PickableMarkers.Config, self).__init__(**kwargs) self.select_scale = select_scale self.hover_scale = hover_scale _kwargs_ignore = ["size", "color_select", "color_hover"] def __init__(self, parent=None, data=np.zeros((0, 3)), select_scale=2.0, **kwargs): super(PickableMarkers, self).__init__( scene.visuals.Markers(pos=data, parent=parent), data=data, parent=parent, **kwargs ) self.visual.set_gl_state("translucent", depth_test=False, blend=True) self._cfg.select_scale = select_scale self._cfg.hover_scale = select_scale * 1.15 self.multi_sel = None @property def marker_size(self): return self._cfg.vis_args["size"] @marker_size.setter def marker_size(self, s): self._cfg.vis_args["size"] = max(1, s) self._init_data() self.set_data() def _selected_idxs(self): sel = [] if self.multi_sel is None: if self._state.idx_selected >= 0: sel = [self._state.idx_selected] else: sel = self.multi_sel return sel def _init_data(self): super(PickableMarkers, self)._init_data() n = len(self._state.data) self._state.sizes_raw = np.full((n, ), self._cfg.vis_args["size"]) self._state.sizes = self._state.sizes_raw.copy() def _highlight(self): self._state.sizes = self._state.sizes_raw.copy() super(PickableMarkers, self)._highlight() def _highlight_selected(self): super(PickableMarkers, self)._highlight_selected() cfg = self._cfg state = self._state if (state.idx_selected >= 0) and cfg.pickable: state.sizes[self._selected_idxs()] = cfg.vis_args["size"] * cfg.select_scale def _highlight_hovered(self): super(PickableMarkers, self)._highlight_hovered() cfg = self._cfg state = self._state if (state.idx_hover >= 0) and cfg.hoverable: state.sizes[self._hover_idxs()] = cfg.vis_args["size"] * cfg.hover_scale def _set_data(self): if len(self._state.data) > 0: kwargs = { k: v for k, v in self._cfg.vis_args.items() if k not in self._kwargs_ignore } self._state.edge_colors[:, 3] = self._state.colors[:, 3] self.visual.set_data( pos=self._state.data, size=self._state.sizes, face_color=self._state.colors, edge_color=self._state.edge_colors, edge_width=3, **kwargs ) else: self.visual.set_data(np.zeros((0, 3))) def _set_data_false(self): if len(self._state.data) > 0: colors = self._pa.unique_colors(id(self)) / 255.0 colors[self._state.colors[:, 3] < 1.0e-3] = 0.0 self.visual.set_data( pos=self._state.data, size=self._state.sizes, face_color=colors, edge_color=colors, edge_width=0, ) else: self.visual.set_data(np.zeros((0, 3)))
33.936364
94
0.587731
3,628
0.971872
0
0
224
0.060005
0
0
143
0.038307
91dedad5ac38b05af586adadc029baeb5dbdb36c
2,242
py
Python
examples/blocking_subscribe.py
FFY00/jeepney
293241a54fbb73581755e97191720ed1603aed34
[ "MIT" ]
null
null
null
examples/blocking_subscribe.py
FFY00/jeepney
293241a54fbb73581755e97191720ed1603aed34
[ "MIT" ]
null
null
null
examples/blocking_subscribe.py
FFY00/jeepney
293241a54fbb73581755e97191720ed1603aed34
[ "MIT" ]
null
null
null
""" Example of subscribing to a D-Bus signal using blocking I/O. This subscribes to the signal for a desktop notification being closed. To try it, start this script, then trigger a desktop notification, and close it somehow to trigger the signal. Use Ctrl-C to stop the script. This example relies on the ``org.freedesktop.Notifications.NotificationClosed`` signal; some desktops may not support it. See the notification spec for more details: https://people.gnome.org/~mccann/docs/notification-spec/notification-spec-latest.html Match rules are defined in the D-Bus specification: https://dbus.freedesktop.org/doc/dbus-specification.html#message-bus-routing-match-rules """ from jeepney.bus_messages import MatchRule, message_bus from jeepney.integrate.blocking import connect_and_authenticate, Proxy from jeepney.wrappers import DBusAddress noti = DBusAddress('/org/freedesktop/Notifications', bus_name='org.freedesktop.Notifications', interface='org.freedesktop.Notifications') connection = connect_and_authenticate(bus="SESSION") match_rule = MatchRule( type="signal", sender=noti.bus_name, interface=noti.interface, member="NotificationClosed", path=noti.object_path, ) # This defines messages for talking to the D-Bus bus daemon itself: session_bus = Proxy(message_bus, connection) # Tell the session bus to pass us matching signal messages: print("Match added?", session_bus.AddMatch(match_rule) == ()) reasons = {1: 'expiry', 2: 'dismissal', 3: 'dbus', '4': 'undefined'} def notification_closed(data): """Callback for when we receive a notification closed signal""" nid, reason_no = data reason = reasons.get(reason_no, 'unknown') print('Notification {} closed by: {}'.format(nid, reason)) # Connect the callback to the relevant signal connection.router.subscribe_signal( callback=notification_closed, path=noti.object_path, interface=noti.interface, member="NotificationClosed" ) # Using dbus-send or d-feet or blocking_notify.py, send a notification and # manually close it or call ``.CloseNotification`` after a beat. try: while True: connection.recv_messages() except KeyboardInterrupt: pass connection.close()
34.492308
88
0.752007
0
0
0
0
0
0
0
0
1,293
0.576717
91dfcb96d3cfa72fba7a82aeea1a69a09b3627d9
126
py
Python
test.py
league3236/shholiday
54d0fcfd393d09183cd77cab697f5bc60864b314
[ "MIT" ]
null
null
null
test.py
league3236/shholiday
54d0fcfd393d09183cd77cab697f5bc60864b314
[ "MIT" ]
null
null
null
test.py
league3236/shholiday
54d0fcfd393d09183cd77cab697f5bc60864b314
[ "MIT" ]
null
null
null
from shholiday import holiday2020 as hd

daytuple = (1,1)
nowholiday = hd.holiday2020()
print(nowholiday.is_holiday(daytuple))
25.2
39
0.793651
0
0
0
0
0
0
0
0
0
0
91e036fe4dd0d56410bf8828136484d3650838c6
740
py
Python
setup.py
dalejung/pandas-composition
e73e5295b2d2f44f09805dcf06db12108c555197
[ "MIT" ]
5
2015-04-08T20:58:25.000Z
2018-04-22T00:10:44.000Z
setup.py
dalejung/pandas-composition
e73e5295b2d2f44f09805dcf06db12108c555197
[ "MIT" ]
null
null
null
setup.py
dalejung/pandas-composition
e73e5295b2d2f44f09805dcf06db12108c555197
[ "MIT" ]
null
null
null
from distutils.core import setup DISTNAME='pandas_composition' FULLVERSION='0.1' setup(name=DISTNAME, version=FULLVERSION, packages=['pandas_composition', ] )
67.272727
94
0.172973
0
0
0
0
0
0
0
0
45
0.060811
91e12d660ef7f4298457f9dc8c2b1a07e4f99285
404
py
Python
blog/migrations/0005_title_null.py
encukou/Zpetnovazebnik
0d058fd67049a3d42814b04486bde93bc406fa3b
[ "MIT" ]
1
2019-12-04T10:10:53.000Z
2019-12-04T10:10:53.000Z
blog/migrations/0005_title_null.py
encukou/Zpetnovazebnik
0d058fd67049a3d42814b04486bde93bc406fa3b
[ "MIT" ]
14
2019-04-07T07:46:07.000Z
2022-03-11T23:44:31.000Z
blog/migrations/0005_title_null.py
encukou/Zpetnovazebnik
0d058fd67049a3d42814b04486bde93bc406fa3b
[ "MIT" ]
1
2019-02-16T09:25:51.000Z
2019-02-16T09:25:51.000Z
# Generated by Django 2.1.7 on 2019-02-27 14:23 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('blog', '0004_longer_password'), ] operations = [ migrations.AlterField( model_name='session', name='title', field=models.CharField(blank=True, max_length=200, null=True), ), ]
21.263158
74
0.601485
311
0.769802
0
0
0
0
0
0
91
0.225248
91e1834b8771a7ae37346ead4e29d9b3101da09b
917
py
Python
setup.py
Kuba77/Xian-DB
2f15ef1b9b7a96c21bd46e9fb8481de6feb713b7
[ "MIT" ]
1
2016-10-22T21:04:09.000Z
2016-10-22T21:04:09.000Z
setup.py
Kuba77/Xian-DB
2f15ef1b9b7a96c21bd46e9fb8481de6feb713b7
[ "MIT" ]
null
null
null
setup.py
Kuba77/Xian-DB
2f15ef1b9b7a96c21bd46e9fb8481de6feb713b7
[ "MIT" ]
null
null
null
from setuptools import setup from codecs import open from os import path here = path.abspath(path.dirname(__file__)) with open(path.join(here, 'README.rst'), encoding='utf-8') as f: long_description = f.read() setup( name='xiandb', version='0.2.0', description='A database model for Xian', long_description=long_description, url='https://github.com/Kuba77/Xian-DB', author='Jakub Chronowski', author_email='jakub@chronow.ski', license='MIT', classifiers=[ 'Development Status :: 3 - Alpha', 'Intended Audience :: XIAN Collaborators', 'Topic :: Software Development :: Database', 'License :: OSI Approved :: MIT License', 'Programming Language :: Python :: 2.7' ], keywords='xian database db', packages=['xiandb', 'xiandb.models'], install_requires=['mongokat', 'pyyaml', 'bcrypt'], extras_require={} )
21.325581
64
0.641221
0
0
0
0
0
0
0
0
401
0.437296
91e197a6aad024d05c47c68f4923bef335ff491f
4,993
py
Python
yolo3/focal_loss.py
ashishpatel26/tf2-yolo3
38814178643eb8e1f8b5e4fe8d448faed44ad574
[ "Apache-2.0" ]
43
2019-12-08T15:05:53.000Z
2022-03-20T13:38:07.000Z
yolo3/focal_loss.py
1911590204/tf2-yolo3
38814178643eb8e1f8b5e4fe8d448faed44ad574
[ "Apache-2.0" ]
3
2020-05-18T11:20:15.000Z
2021-02-26T01:11:04.000Z
yolo3/focal_loss.py
1911590204/tf2-yolo3
38814178643eb8e1f8b5e4fe8d448faed44ad574
[ "Apache-2.0" ]
15
2019-12-25T01:44:29.000Z
2022-01-18T08:45:49.000Z
from functools import partial import tensorflow as tf _EPSILON = tf.keras.backend.epsilon() def register_keras_custom_object(cls): tf.keras.utils.get_custom_objects()[cls.__name__] = cls return cls def binary_focal_loss(y_true, y_pred, gamma, *, pos_weight=None, from_logits=False, label_smoothing=None): y_pred = tf.convert_to_tensor(y_pred) if not y_pred.dtype.is_floating: y_pred = tf.dtypes.cast(y_pred, dtype=tf.float32) if from_logits: return _binary_focal_loss_from_logits(labels=y_true, logits=y_pred, gamma=gamma, pos_weight=pos_weight, label_smoothing=label_smoothing) else: return _binary_focal_loss_from_probs(labels=y_true, p=y_pred, gamma=gamma, pos_weight=pos_weight, label_smoothing=label_smoothing) @register_keras_custom_object class BinaryFocalLoss(tf.keras.losses.Loss): def __init__(self, gamma, *, pos_weight=None, from_logits=False, label_smoothing=None, **kwargs): super().__init__(**kwargs) self.gamma = gamma self.pos_weight = pos_weight self.from_logits = from_logits self.label_smoothing = label_smoothing def get_config(self): config = super().get_config() config.update(gamma=self.gamma, pos_weight=self.pos_weight, from_logits=self.from_logits, label_smoothing=self.label_smoothing) return config def call(self, y_true, y_pred): return binary_focal_loss(y_true=y_true, y_pred=y_pred, gamma=self.gamma, pos_weight=self.pos_weight, from_logits=self.from_logits, label_smoothing=self.label_smoothing) # Helper functions below def _process_labels(labels, label_smoothing, dtype): labels = tf.dtypes.cast(labels, dtype=dtype) if label_smoothing is not None: labels = (1 - label_smoothing) * labels + label_smoothing * 0.5 return labels def _binary_focal_loss_from_logits(labels, logits, gamma, pos_weight, label_smoothing): labels = _process_labels(labels=labels, label_smoothing=label_smoothing, dtype=logits.dtype) # Compute probabilities for the positive class p = tf.math.sigmoid(logits) if label_smoothing is None: labels_shape = labels.shape logits_shape = logits.shape if not labels_shape.is_fully_defined() or labels_shape != logits_shape: labels_shape = tf.shape(labels) logits_shape = tf.shape(logits) shape = tf.broadcast_dynamic_shape(labels_shape, logits_shape) labels = tf.broadcast_to(labels, shape) logits = tf.broadcast_to(logits, shape) if pos_weight is None: loss_func = tf.nn.sigmoid_cross_entropy_with_logits else: loss_func = partial(tf.nn.weighted_cross_entropy_with_logits, pos_weight=pos_weight) loss = loss_func(labels=labels, logits=logits) modulation_pos = (1 - p)**gamma modulation_neg = p**gamma mask = tf.dtypes.cast(labels, dtype=tf.bool) modulation = tf.where(mask, modulation_pos, modulation_neg) return modulation * loss # Terms for the positive and negative class components of the loss pos_term = labels * ((1 - p)**gamma) neg_term = (1 - labels) * (p**gamma) # Term involving the log and ReLU log_weight = pos_term if pos_weight is not None: log_weight *= pos_weight log_weight += neg_term log_term = tf.math.log1p(tf.math.exp(-tf.math.abs(logits))) log_term += tf.nn.relu(-logits) log_term *= log_weight # Combine all the terms into the loss loss = neg_term * logits + log_term return loss def _binary_focal_loss_from_probs(labels, p, gamma, pos_weight, label_smoothing): q = 1 - p # For numerical stability (so we don't inadvertently take the log of 0) p = tf.math.maximum(p, _EPSILON) q = tf.math.maximum(q, _EPSILON) # Loss for the positive examples pos_loss = -(q**gamma) * tf.math.log(p) if pos_weight is not None: pos_loss *= pos_weight # Loss for the negative examples neg_loss = 
-(p**gamma) * tf.math.log(q) # Combine loss terms if label_smoothing is None: labels = tf.dtypes.cast(labels, dtype=tf.bool) loss = tf.where(labels, pos_loss, neg_loss) else: labels = _process_labels(labels=labels, label_smoothing=label_smoothing, dtype=p.dtype) loss = labels * pos_loss + (1 - labels) * neg_loss return loss
36.985185
106
0.616263
1,000
0.20028
0
0
1,030
0.206289
0
0
361
0.072301
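Because BinaryFocalLoss in the yolo3/focal_loss.py record above subclasses tf.keras.losses.Loss, it can be handed straight to model.compile. A minimal sketch, assuming the module is importable as yolo3.focal_loss; the tiny model below is purely illustrative and not part of the repo.

# Illustrative only: wiring BinaryFocalLoss into a Keras model.
import tensorflow as tf
from yolo3.focal_loss import BinaryFocalLoss  # assumption: repo layout as in the record

model = tf.keras.Sequential([
    tf.keras.layers.Dense(16, activation="relu", input_shape=(10,)),
    tf.keras.layers.Dense(1),  # raw logits, hence from_logits=True below
])
model.compile(
    optimizer="adam",
    loss=BinaryFocalLoss(gamma=2.0, from_logits=True),
)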
91e197bc72174a007b45ebf73223d69beb79eca0
13,808
py
Python
characters/models/characters.py
Sult/evetool
155db9f3b0ecc273fe3c75daf8f9c6f37cb3e47f
[ "MIT" ]
null
null
null
characters/models/characters.py
Sult/evetool
155db9f3b0ecc273fe3c75daf8f9c6f37cb3e47f
[ "MIT" ]
null
null
null
characters/models/characters.py
Sult/evetool
155db9f3b0ecc273fe3c75daf8f9c6f37cb3e47f
[ "MIT" ]
null
null
null
import time from collections import OrderedDict from datetime import datetime, timedelta from django.db import models from django.conf import settings from django.utils.timezone import utc from .skills import Skill, SkillGroup from metrics.models import Corporation from tasks.models import EveApiCache, Task from evetool.storage import OverwriteStorage import utils class CharacterApi(models.Model): """ charactertype apis """ api = models.ForeignKey("apis.Api") characterid = models.BigIntegerField() charactername = models.CharField(max_length=254) corporationid = models.BigIntegerField() corporationname = models.CharField(max_length=254) def __unicode__(self): return self.charactername #get right icon for characters view def view_icon(self): try: icon = self.characterapiicon_set.get(size=128, relation=self) return icon.icon except CharacterApiIcon.DoesNotExist: return None #def character sheet image def sheet_icon(self): try: icon = self.characterapiicon_set.get(size=200, relation=self) return icon.icon except CharacterApiIcon.DoesNotExist: return None def current_balance(self): if self.api.access_to("CharacterInfo"): sheet = utils.connection.api_request( "CharacterInfoAuth", obj=self ) if sheet.accountBalance: return round(float(sheet.accountBalance), 2) return 0 def sheet_cache_key(self): key = "CharacterInfo" category = EveApiCache.EVE kwargs = {"characterID": self.characterid} if self.api.access_to("CharacterInfo"): return utils.connection.generate_cache_key( category, key, api=self.api, **kwargs ) else: return utils.connection.generate_cache_key(category, key) def sheet_set_cache_job(self): key = "CharacterInfo" category = EveApiCache.EVE kwargs = {"characterID": self.characterid} if self.api.access_to("CharacterInfo"): api = self.api else: api = None EveApiCache.objects.create( priority=Task.VERY_HIGH, api=api, category=category, key=key, kwargs=kwargs, ) #get the data for landing page after character selection def character_sheet(self): sheet = utils.connection.get_cache(self.sheet_cache_key()) employment = self.employment_history(sheet) return sheet, employment #employment history of a player @staticmethod def employment_history(sheet): cache_key = "employment_history_%d" % int(sheet.characterID) #result = utils.connection.get_cache(cache_key) result = None if not result: cache_timer = 60 * 60 result = [] for corp_data in sheet.employmentHistory: result.append({ "corporation": Corporation.find_corporation( corp_data.corporationID ), "startdate": utils.common.convert_timestamp( corp_data.startDate ) }) utils.connection.set_cache(cache_key, result, cache_timer) return result #get skill in training def skill_in_training(self): training_skill = None if self.api.access_to("SkillInTraining"): in_training = utils.connection.api_request( "SkillInTraining", obj=self ) try: training_skill = { "skill": Skill.objects.get( typeid=int(in_training.trainingTypeID) ).typename, "to_level": int(in_training.trainingToLevel), "finnished": utils.common.convert_timestamp( in_training.trainingEndTime ) } except AttributeError: training_skill = {"skill": "No skill in training"} return training_skill #characters trained skills def trained_skills(self): cache_key = "trained_skills_%d" % self.pk result = utils.connection.get_cache(cache_key) if not result: cache_timer = 60 * 5 sheet = utils.connection.api_request("CharacterSheet", obj=self) groups = SkillGroup.objects.exclude( groupname="Fake Skills" ).order_by("groupname") skills = Skill.objects.order_by("typename") all_skills = OrderedDict() skillpoints = {} 
for group in groups: all_skills[group.groupname] = list() skillpoints[group.groupname] = 0 for skill in skills: trained = sheet.skills.Get(skill.typeid, False) if trained: all_skills[skill.skillgroup.groupname].append( { "skill": skill, "level": int(trained.level) } ) skillpoints[skill.skillgroup.groupname] += \ trained.skillpoints result = { "all_skills": all_skills, "skillpoints": skillpoints, } utils.connection.set_cache(cache_key, result, cache_timer) return result #get skillqueue def skill_queue(self): queue = None if self.api.access_to("SkillQueue"): queue = {} skills = utils.connection.api_request( "SkillQueue", obj=self ).skillqueue queue["skills"] = skills queue["total"] = self.total_skillpoints(skills) now = datetime.now().replace(tzinfo=utc) try: trainingtime = utils.common.convert_timestamp( skills[-1].endTime ) - now trainingtime -= timedelta( microseconds=trainingtime.microseconds ) queue["trainingtime"] = trainingtime except TypeError: pass return queue #get total skillpoints for skills in queue @staticmethod def total_skillpoints(skills): total = 0 for skill in skills: total += int(skill.endSP - skill.startSP) return total #walletjournal def wallet_journal(self): cache_key = "walletjournal_character_%d" % self.pk result = utils.connection.get_cache(cache_key) if not result: self.update_journal() cache_timer = 60 * 10 utils.connection.set_cache(cache_key, True, cache_timer) return CharacterJournal.objects.filter(characterapi=self) #updates journal to current moment def update_journal(self): fromid = 0 transactions = utils.connection.api_request( "WalletJournal", obj=self, rowcount=2500 ).transactions while True: for trans in transactions: date = utils.common.convert_timestamp(trans.date) #check for duplicate if CharacterJournal.objects.filter( characterapi=self, balance=trans.balance, date=date, ).exists(): continue else: CharacterJournal.create_entry(self, trans) if int(trans.refID) < fromid or fromid == 0: fromid = int(trans.refID) if len(transactions) < 2500: break else: time.sleep(1) transactions = utils.connection.api_request( "WalletJournal", obj=self, rowcount=2500, fromid=fromid ).transactions class CharacterApiIcon(models.Model): """ images related to characters """ relation = models.ForeignKey("characters.CharacterApi") size = models.IntegerField(choices=settings.IMAGE_SIZES) typeid = models.IntegerField() icon = models.ImageField( upload_to="images/characters/", storage=OverwriteStorage(), blank=True, null=True ) class Meta: unique_together = ["size", "relation"] def __unicode__(self): return "Character Image %s" % self.relation.charactername # def save(self, *args, **kwargs): # try: # temp = CharacterApiIcon.objects.get(pk=self.pk) # if temp.icon != self.icon: # temp.icon.delete() # except ObjectDoesNotExist: # pass # super(CharacterApiIcon, self).save(*args, **kwargs) #get list of wanted character icon sizes @staticmethod def icon_sizes(): return [128, 200] class Transaction(models.Model): reftypeid = models.SmallIntegerField() ownername1 = models.CharField(max_length=254) ownerid1 = models.IntegerField() ownername2 = models.CharField(max_length=254) ownerid2 = models.IntegerField() argname1 = models.CharField(max_length=254) argid1 = models.IntegerField() amount = models.FloatField(null=True) reason = models.TextField(blank=True) taxreceiverid = models.IntegerField(null=True) taxamount = models.FloatField(null=True) class Meta: abstract = True class CharacterJournal(Transaction): """ Wallet transcations of a player. 
Saved to database so data can be filtered, and metadata can be created. Like balance graphs, see how much you paid in taxes and more. """ characterapi = models.ForeignKey(CharacterApi) date = models.DateTimeField() balance = models.FloatField() class Meta: unique_together = ["characterapi", "date", "balance"] ordering = ["-date", "-reftypeid"] def __unicode__(self): return "%s's transaction" % self.characterapi.charactername @staticmethod def create_entry(characterapi, transaction): if transaction.taxReceiverID == "": taxreceiverid = None else: taxreceiverid = int(transaction.taxReceiverID) if transaction.taxAmount == "": taxamount = None else: taxamount = round(float(transaction.taxAmount), 2) date = utils.common.convert_timestamp(transaction.date) CharacterJournal.objects.create( characterapi=characterapi, date=date, balance=round(float(transaction.balance), 2), reftypeid=int(transaction.refTypeID), ownername1=str(transaction.ownerName1), ownerid1=int(transaction.ownerID1), ownername2=str(transaction.ownerName2), ownerid2=int(transaction.ownerID2), argname1=str(transaction.argName1), argid1=int(transaction.argID1), amount=round(float(transaction.amount), 2), reason=str(transaction.reason), taxreceiverid=taxreceiverid, taxamount=taxamount, ) @staticmethod def monthly_balance(characterapi): last_restart = utils.common.last_server_restart() days = last_restart - timedelta(days=31) entries = CharacterJournal.objects.filter( characterapi=characterapi, date__range=[days, last_restart] ) balance = [] for days in range(31): first = entries.first() date = (last_restart - timedelta(days=days)) #make timestamp in miliseconds timestamp = int(time.mktime(date.timetuple()) * 1000) if first: isk = first.balance else: try: isk = balance[-1][1] except IndexError: isk = characterapi.current_balance() balance.append([timestamp, isk]) entries = entries.filter(date__lt=(date - timedelta(days=1))) #return reversed list return balance[::-1] @staticmethod def weekly_balance(characterapi): now = datetime.now().replace(tzinfo=utc) entries = CharacterJournal.objects.filter( characterapi=characterapi, date__range=[ now.replace(hour=23, minute=59, second=0) - timedelta(days=9), now ] ) balance = [] for days in range(8): date = now.replace( hour=0, minute=0, second=0 ) - timedelta(days=days) day_entries = entries.filter( date__lt=now.replace( hour=23, minute=59, second=59 ) - timedelta(days=days), date__gt=date ) if not day_entries.count() > 0: try: isk = balance[-1][1] except IndexError: isk = characterapi.current_balance() timestamp = int(time.mktime(date.timetuple()) * 1000) balance.append([timestamp, isk]) else: for entry in day_entries: timestamp = int(time.mktime(entry.date.timetuple()) * 1000) balance.append([timestamp, entry.balance]) #add last value for date on xaxis date = now.replace(hour=23, minute=59, second=59) - timedelta(days=8) isk = balance[-1][1] timestamp = int(time.mktime(date.timetuple()) * 1000) balance.append([timestamp, isk]) return balance[::-1]
33.596107
79
0.571915
13,426
0.972335
0
0
4,574
0.331257
0
0
1,654
0.119786
91e3f480fdcbf40fb3a62c946e3ea8b2a208638d
92
py
Python
webex_assistant_sdk/templates/mindmeld_template/{{cookiecutter.skill_name}}/{{cookiecutter.skill_name}}/__init__.py
sachanacar/webex-assistant-sdk
bb0f1ad16973cfa5784d7d887381229fab01effa
[ "Apache-2.0" ]
null
null
null
webex_assistant_sdk/templates/mindmeld_template/{{cookiecutter.skill_name}}/{{cookiecutter.skill_name}}/__init__.py
sachanacar/webex-assistant-sdk
bb0f1ad16973cfa5784d7d887381229fab01effa
[ "Apache-2.0" ]
null
null
null
webex_assistant_sdk/templates/mindmeld_template/{{cookiecutter.skill_name}}/{{cookiecutter.skill_name}}/__init__.py
sachanacar/webex-assistant-sdk
bb0f1ad16973cfa5784d7d887381229fab01effa
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*-
from {{cookiecutter.skill_name}}.root import app

__all__ = ['app']
18.4
48
0.641304
0
0
0
0
0
0
0
0
28
0.304348
91e4401665d568cd4d6102a4a69c6d2f7668744f
602
py
Python
backend/api/v1/dialogs/urls.py
donicrazy/ChatApp
ab129a9c0706bbb972cbce43283ba6e06d144635
[ "MIT" ]
null
null
null
backend/api/v1/dialogs/urls.py
donicrazy/ChatApp
ab129a9c0706bbb972cbce43283ba6e06d144635
[ "MIT" ]
7
2021-03-19T04:47:13.000Z
2022-01-13T02:02:46.000Z
backend/api/v1/dialogs/urls.py
donicrazy/ChatApp
ab129a9c0706bbb972cbce43283ba6e06d144635
[ "MIT" ]
null
null
null
from django.urls import path

from backend.api.v1.dialogs.views import (
    DialogListCreateView,
    DialogRetrieveUpdateDestroyAPIView,
    DialogMembershipListCreateView,
    DialogMessageListCreateView,
    DialogMessageRetrieveUpdateDestroyAPIView,
)

urlpatterns = [
    path('', DialogListCreateView.as_view()),
    path('<int:pk>', DialogRetrieveUpdateDestroyAPIView.as_view()),
    path('membership/', DialogMembershipListCreateView.as_view()),
    path('messages/', DialogMessageListCreateView.as_view()),
    path('messages/<int:pk>', DialogMessageRetrieveUpdateDestroyAPIView.as_view()),
]
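A hedged sketch of how these routes might be mounted in a project-level URLconf; the `api/v1/dialogs/` prefix is an assumption for illustration, only the module path comes from this record.

from django.urls import include, path

urlpatterns = [
    # Hypothetical mount point; adjust the prefix to the actual project layout.
    path('api/v1/dialogs/', include('backend.api.v1.dialogs.urls')),
]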
35.411765
83
0.770764
0
0
0
0
0
0
0
0
55
0.091362
91e4a887944adf9f3a04214dd378ac72dc05e86a
2,100
py
Python
biomaj2galaxy/commands/init.py
genouest/biomaj2galaxy
8c76f3cc96902d9401a03e7b1a6cd8f4a7ba17bd
[ "MIT" ]
1
2015-05-11T00:08:24.000Z
2015-05-11T00:08:24.000Z
biomaj2galaxy/commands/init.py
genouest/biomaj2galaxy
8c76f3cc96902d9401a03e7b1a6cd8f4a7ba17bd
[ "MIT" ]
5
2019-04-15T16:09:50.000Z
2020-11-24T10:35:21.000Z
biomaj2galaxy/commands/init.py
genouest/biomaj2galaxy
8c76f3cc96902d9401a03e7b1a6cd8f4a7ba17bd
[ "MIT" ]
3
2015-06-14T08:33:49.000Z
2020-10-16T09:07:21.000Z
# coding: utf-8

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os

from bioblend import galaxy

from biomaj2galaxy import config, pass_context
from biomaj2galaxy.io import info, warn

import click


CONFIG_TEMPLATE = """## BioMAJ2Galaxy: Global Configuration File.
# Each stanza should contain a single Galaxy server to interact with.
#
# You can set the key __default to the name of a default instance
__default: local

local:
    url: "%(url)s"
    apikey: "%(apikey)s"
"""

SUCCESS_MESSAGE = (
    "Ready to go! Type `biomaj2galaxy` to get a list of commands you can execute."
)


@click.command()
@pass_context
def init(ctx, url=None, api_key=None, admin=False, **kwds):
    """Help initialize global configuration (in home directory)
    """

    click.echo("""Welcome to BioMAJ2Galaxy""")
    if os.path.exists(config.global_config_path()):
        info("Your biomaj2galaxy configuration already exists. Please edit it instead: %s" % config.global_config_path())
        return 0

    while True:
        # Check environment
        url = click.prompt("url")
        apikey = click.prompt("apikey")
        info("Testing connection...")
        try:
            instance = galaxy.GalaxyInstance(url=url, key=apikey)
            instance.libraries.get_libraries()  # We do a connection test during startup.
            info("Ok! Everything looks good.")
            break
        except Exception as e:
            warn("Error, we could not access the configuration data for your instance: %s", e)
            should_break = click.prompt("Continue despite inability to contact this instance? [y/n]")
            if should_break in ('Y', 'y'):
                break

    config_path = config.global_config_path()
    if os.path.exists(config_path):
        warn("File %s already exists, refusing to overwrite." % config_path)
        return -1

    with open(config_path, "w") as f:
        f.write(CONFIG_TEMPLATE % {
            'url': url,
            'apikey': apikey,
        })
        info(SUCCESS_MESSAGE)
29.166667
121
0.65381
0
0
0
0
1,445
0.688095
0
0
852
0.405714
91e4f118680c4b4128c740a76beaad48599ab626
848
py
Python
datamart/tests/test_Dimension.py
josemrsantos/zoopla_datamart
f3a3af8071199deeb712d1814aecb6cc3cd88d57
[ "MIT" ]
1
2016-02-01T20:27:25.000Z
2016-02-01T20:27:25.000Z
datamart/tests/test_Dimension.py
josemrsantos/zoopla_datamart
f3a3af8071199deeb712d1814aecb6cc3cd88d57
[ "MIT" ]
null
null
null
datamart/tests/test_Dimension.py
josemrsantos/zoopla_datamart
f3a3af8071199deeb712d1814aecb6cc3cd88d57
[ "MIT" ]
null
null
null
from ..datamart import *


def test_create_dimension():
    dimension = Dimension("test_dimension")
    assert dimension.is_degenerate == False


def test_create_dimension_insert_2_identical_lines():
    ''' with 2 identical lines, only one gets stored '''
    dimension = Dimension("test_dimension")
    dimension.addDimensionLine('test')
    dimension.addDimensionLine('test')
    assert dimension.id_value == 1
    assert len(list(dimension.values)) == 1


def test_create_dimension_insert_2_identical_lines_and_1_different():
    ''' with 2 identical lines and one different, only 2 get stored '''
    dimension = Dimension("test_dimension")
    dimension.addDimensionLine('test')
    dimension.addDimensionLine('test2')
    dimension.addDimensionLine('test')
    assert dimension.id_value == 2
    assert len(list(dimension.values)) == 2
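A small usage sketch of the behaviour these tests pin down (duplicate dimension lines are stored once); the absolute import path and the printed values are assumptions inferred from the tests above.

from datamart import Dimension  # assumed import path; the tests use a relative import

d = Dimension("country")
d.addDimensionLine("PT")
d.addDimensionLine("PT")      # duplicate, should not create a new id
d.addDimensionLine("UK")
print(d.id_value)             # expected 2, as in the tests above
print(len(list(d.values)))    # expected 2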
33.92
69
0.732311
0
0
0
0
0
0
0
0
206
0.242925
91e5db16db7c305afa819a65e2ba7480fc9d4276
4,700
py
Python
preprocessing/convert_formats/msmarco_doc_create_train_input.py
PranjaliJain/matchmaker
b7e22eb8b70cccabf0729076df7cbab3f4ba4a1f
[ "Apache-2.0" ]
97
2021-07-11T14:34:40.000Z
2022-03-31T14:17:25.000Z
preprocessing/convert_formats/msmarco_doc_create_train_input.py
PranjaliJain/matchmaker
b7e22eb8b70cccabf0729076df7cbab3f4ba4a1f
[ "Apache-2.0" ]
12
2021-07-11T13:03:23.000Z
2022-03-02T16:07:11.000Z
preprocessing/convert_formats/msmarco_doc_create_train_input.py
PranjaliJain/matchmaker
b7e22eb8b70cccabf0729076df7cbab3f4ba4a1f
[ "Apache-2.0" ]
16
2019-12-23T01:22:35.000Z
2021-06-23T12:54:36.000Z
# # msmarco doc: create the train.tsv triples # ------------------------------- import random random.seed(42) import argparse import os import sys from tqdm import tqdm sys.path.append(os.getcwd()) from matchmaker.evaluation.msmarco_eval import * from collections import defaultdict from matchmaker.dataloaders.bling_fire_tokenizer import BlingFireTokenizer # # config # parser = argparse.ArgumentParser() parser.add_argument('--out-file', action='store', dest='out_file', help='training output text file location', required=True) parser.add_argument('--out-file-ids', action='store', dest='out_file_ids', help='training output ids file location', required=True) parser.add_argument('--candidate-file', action='store', dest='candidate_file', help='trec ranking file location (lucene output)', required=True) parser.add_argument('--collection-file', action='store', dest='collection_file', help='collection.tsv location', required=True) parser.add_argument('--query-file', action='store', dest='query_file', help='query.tsv location', required=True) parser.add_argument('--qrel', action='store', dest='qrel_file', help='qrel location', required=True) args = parser.parse_args() max_triples = 10_000_000 max_doc_char_length = 150_000 max_doc_token_length = 10000 # # load data # ------------------------------- # collection = {} #collection_length = {} tokenizer = BlingFireTokenizer() with open(args.collection_file,"r",encoding="utf8") as collection_file: for line in tqdm(collection_file): ls = line.split("\t") # id<\t>text .... _id = ls[0] max_char_doc = ls[1].rstrip()[:max_doc_char_length] collection[_id] = max_char_doc #collection_length[_id] = len(tokenizer.tokenize(max_char_doc)) queries = {} with open(args.query_file,"r",encoding="utf8") as query_file: for line in tqdm(query_file): ls = line.split("\t") # id<\t>text .... _id = ls[0] queries[_id] = ls[1].rstrip() qrels = load_reference(args.qrel_file) # # produce output # ------------------------------- # triples = [] stats = defaultdict(int) with open(args.candidate_file,"r",encoding="utf8") as candidate_file: for line in tqdm(candidate_file): #if random.random() <= 0.5: continue #skip some entries for faster processing [topicid, _ , unjudged_docid, rank, _ , _ ] = line.split() #if int(rank) <= 100: # #if random.random() < 0.7: continue # skip 70% of candidates to speed up things... 
# #else: # stats['< 100 sampling count'] += 1 #else: # if random.random() <= 0.9: continue # skip 90% of candidates assumong top1k -> same number of samples from 0-100 as 101 - 1000 # else: # stats['> 100 sampling count'] += 1 if topicid not in queries or topicid not in qrels: # added: because we carved out the validation qrels from the train -> so there are some missing stats['skipped'] += 1 continue #assert topicid in qrels assert unjudged_docid in collection # Use topicid to get our positive_docid positive_docid = random.choice(qrels[topicid]) assert positive_docid in collection if unjudged_docid in qrels[topicid]: stats['docid_collision'] += 1 continue stats['kept'] += 1 #if collection_length[positive_docid] > max_doc_token_length and collection_length[unjudged_docid] > max_doc_token_length: # stats['both_to_long'] += 1 # continue #if collection_length[positive_docid] > max_doc_token_length: # stats['pos_to_long'] += 1 # continue #if collection_length[unjudged_docid] > max_doc_token_length: # stats['unjuged_to_long'] += 1 # continue triples.append((topicid,positive_docid,unjudged_docid)) # important: shuffle the train data random.shuffle(triples) with open(args.out_file,"w",encoding="utf8") as out_file_text ,\ open(args.out_file_ids,"w",encoding="utf8") as out_file_ids: for i,(topicid, positive_docid, unjudged_docid) in tqdm(enumerate(triples)): if i == max_triples: break if collection[positive_docid].strip() != "" and collection[unjudged_docid].strip() != "": out_file_ids.write(str(topicid)+"\t"+positive_docid+"\t"+unjudged_docid+"\n") out_file_text.write(queries[topicid]+"\t"+collection[positive_docid]+"\t"+collection[unjudged_docid]+"\n") for key, val in stats.items(): print(f"{key}\t{val}")
33.098592
154
0.636809
0
0
0
0
0
0
0
0
1,819
0.387021
91e6679035e0b02c68e5fa8e7ebbce0f267caee0
13,748
py
Python
tests/communities/test_reply.py
powerblossom/workcloud
fd943220366ebeadfa90c59fc395f84a734b5686
[ "MIT" ]
1
2019-10-18T05:57:13.000Z
2019-10-18T05:57:13.000Z
tests/communities/test_reply.py
powerblossom/workcloud
fd943220366ebeadfa90c59fc395f84a734b5686
[ "MIT" ]
11
2019-12-02T13:59:22.000Z
2021-04-24T08:52:19.000Z
tests/communities/test_reply.py
powerblossom/workcloud
fd943220366ebeadfa90c59fc395f84a734b5686
[ "MIT" ]
null
null
null
from core.response import Response from communities.tests import TestCase class ReplyPermissionTest(TestCase): def setUp(self): self.create_user(is_staff=True) def test_permission_reply_all(self): self.create_forum() self.create_thread() thread_id = self.thread.id response = self.post( '/api/communities/f/%d/reply/' % thread_id, { 'name': 'tester', 'content': 'test' } ) assert ( response.status_code == Response.HTTP_201 and self.data.get('thread').get('id') == thread_id and self.data.get('reply_id') == 0 and not self.data.get('user') and self.data.get('name') == 'tester' and self.data.get('content') == 'test' and not self.data.get('is_deleted') ) reply_id = self.data.get('id') response = self.get( '/api/communities/f/%d/replies/' % thread_id ) assert ( response.status_code == Response.HTTP_200 and len(self.data) == 1 and self.data[0].get('name') == 'tester' and self.data[0].get('content') == 'test' ) response = self.patch( '/api/communities/r/%d/' % reply_id, { 'content': 'edit' }, ) assert response.status_code == Response.HTTP_401 response = self.delete( '/api/communities/r/%d/' % reply_id ) assert response.status_code == Response.HTTP_401 response = self.patch( '/api/communities/r/%d/' % reply_id, { 'content': 'edit', }, auth=True ) assert response.status_code == Response.HTTP_200 response = self.delete( '/api/communities/r/%d/' % reply_id, auth=True ) assert response.status_code == Response.HTTP_200 self.create_user(username='2@a.com') response = self.patch( '/api/communities/r/%d/' % reply_id, { 'content': 'edit', }, auth=True ) assert response.status_code == Response.HTTP_404 response = self.delete( '/api/communities/r/%d/' % reply_id, auth=True ) assert response.status_code == Response.HTTP_404 response = self.post( '/api/communities/f/%d/reply/' % thread_id, { 'name': 'tester', 'content': 'test' }, auth=True ) assert ( response.status_code == Response.HTTP_201 and self.data.get('thread').get('id') == thread_id and self.data.get('reply_id') == 0 and self.data.get('user').get('id') == self.user.id and self.data.get('content') == 'test' and not self.data.get('is_deleted') ) response = self.get( '/api/communities/f/%d/replies/' % thread_id ) assert ( response.status_code == Response.HTTP_200 and len(self.data) == 2 ) def test_permission_reply_member(self): option = self.create_option( permission_reply='member' ) self.create_forum(option=option) self.create_thread() thread_id = self.thread.id response = self.post( '/api/communities/f/%d/reply/' % thread_id, { 'name': 'tester', 'content': 'test' } ) assert response.status_code == Response.HTTP_401 response = self.get( '/api/communities/f/%d/replies/' % thread_id ) assert response.status_code == Response.HTTP_200 self.create_user(username='4@a.com') response = self.post( '/api/communities/f/%d/reply/' % thread_id, { 'content': 'test' }, auth=True ) reply_id = self.data.get('id') assert ( response.status_code == Response.HTTP_201 and self.data.get('content') == 'test' and self.data.get('user').get('username') == self.user.username ) response = self.patch( '/api/communities/r/%d/' % reply_id, { 'content': 'edit', }, auth=True ) assert ( response.status_code == Response.HTTP_200 and self.data.get('content') == 'edit' ) response = self.delete( '/api/communities/r/%d/' % reply_id, auth=True ) assert response.status_code == Response.HTTP_200 def test_permission_reply_staff(self): option = self.create_option( permission_reply='staff' ) self.create_forum(option=option) self.create_thread() thread_id = self.thread.id response = self.post( 
'/api/communities/f/%d/reply/' % thread_id, { 'name': 'tester', 'content': 'test' } ) assert response.status_code == Response.HTTP_401 response = self.get( '/api/communities/f/%d/replies/' % thread_id ) assert response.status_code == Response.HTTP_200 response = self.post( '/api/communities/f/%d/reply/' % thread_id, { 'content': 'test' }, auth=True ) assert response.status_code == Response.HTTP_201 reply_id = self.data.get('id') response = self.patch( '/api/communities/r/%d/' % reply_id, { 'content': 'edit', }, auth=True ) assert ( response.status_code == Response.HTTP_200 and self.data.get('content') == 'edit' ) response = self.delete( '/api/communities/r/%d/' % reply_id, auth=True ) assert response.status_code == Response.HTTP_200 self.create_user(username='4@a.com') response = self.post( '/api/communities/f/%d/reply/' % thread_id, { 'content': 'test' }, auth=True ) assert response.status_code == Response.HTTP_403 response = self.patch( '/api/communities/r/%d/' % reply_id, { 'content': 'edit', }, auth=True ) assert response.status_code == Response.HTTP_404 response = self.delete( '/api/communities/r/%d/' % reply_id, auth=True ) assert response.status_code == Response.HTTP_404 def test_permission_thread_read_member(self): option = self.create_option( permission_read='member', permission_reply='member' ) self.create_forum(option=option) self.create_thread() thread_id = self.thread.id response = self.get( '/api/communities/f/%d/replies/' % thread_id ) assert response.status_code == Response.HTTP_401 response = self.get( '/api/communities/f/%d/replies/' % thread_id, auth=True ) assert response.status_code == Response.HTTP_200 self.create_user(username='2@a.com') response = self.get( '/api/communities/f/%d/replies/' % thread_id, auth=True ) assert response.status_code == Response.HTTP_200 def test_permission_thread_read_staff(self): option = self.create_option( permission_read='staff', permission_reply='staff' ) self.create_forum(option=option) self.create_thread() thread_id = self.thread.id response = self.get( '/api/communities/f/%d/replies/' % thread_id ) assert response.status_code == Response.HTTP_401 response = self.get( '/api/communities/f/%d/replies/' % thread_id, auth=True ) assert response.status_code == Response.HTTP_200 self.create_user(username='2@a.com') response = self.get( '/api/communities/f/%d/replies/' % thread_id, auth=True ) assert response.status_code == Response.HTTP_403 class ReplyModelTest(TestCase): def setUp(self): self.create_user(is_staff=True) self.create_forum() self.create_thread() self.create_reply() def test_nested_reply(self): response = self.post( '/api/communities/f/%d/reply/' % self.thread.id, { 'content': 'test' }, auth=True ) assert ( response.status_code == Response.HTTP_201 and self.data.get('reply_id') == 0 ) reply_id = self.data.get('id') response = self.post( '/api/communities/f/%d/reply/' % self.thread.id, { 'reply_id': reply_id, 'content': 'test' }, auth=True ) assert ( response.status_code == Response.HTTP_201 and self.data.get('reply_id') == reply_id ) response = self.post( '/api/communities/f/%d/reply/' % self.thread.id, { 'reply_id': self.data.get('id'), 'content': 'test' }, auth=True ) assert ( response.status_code == Response.HTTP_201 and self.data.get('reply_id') == reply_id ) def test_reply_edit_delete(self): response = self.patch( '/api/communities/r/%d/' % self.reply.id, { 'content': 'bow wow' }, auth=True ) assert ( response.status_code == Response.HTTP_200 and self.data.get('content') == 'bow wow' and self.data.get('reply_id') == 0 
and not self.data.get('name') ) response = self.patch( '/api/communities/r/%d/' % self.reply.id, { 'reply_id': self.reply.id, 'name': 'dog', 'content': 'meow' }, auth=True ) assert ( response.status_code == Response.HTTP_200 and self.data.get('content') == 'meow' and self.data.get('reply_id') == 0 and not self.data.get('name') ) response = self.delete( '/api/communities/r/%d/' % self.reply.id, auth=True ) assert response.status_code == Response.HTTP_200 self.get( '/api/communities/f/%d/replies/' % self.thread.id, auth=True ) assert ( len(self.data) == 1 and self.data[0].get('is_deleted') ) def test_reply_to_invalid_id(self): thread_id = int(self.thread.id) + 1 response = self.post( '/api/communities/f/%d/reply/' % thread_id, { 'content': 'test' }, auth=True ) assert response.status_code == Response.HTTP_404 reply_id = int(self.reply.id) + 1 response = self.post( '/api/communities/f/%d/reply/' % thread_id, { 'reply_id': reply_id, 'content': 'test' }, auth=True ) assert response.status_code == Response.HTTP_404 class ReplyListTest(TestCase): def setUp(self): self.create_user(is_staff=True) self.create_forum() self.create_thread() def test_reply_list(self): self.post( '/api/communities/f/%d/reply/' % self.thread.id, { 'content': '1' }, auth=True ) reply_id = self.data.get('id') self.post( '/api/communities/f/%d/reply/' % self.thread.id, { 'content': '4' }, auth=True ) self.post( '/api/communities/f/%d/reply/' % self.thread.id, { 'reply_id': reply_id, 'content': '2' }, auth=True ) nested_reply_id = self.data.get('id') self.post( '/api/communities/f/%d/reply/' % self.thread.id, { 'content': '5' }, auth=True ) self.post( '/api/communities/f/%d/reply/' % self.thread.id, { 'reply_id': nested_reply_id, 'content': '3' }, auth=True ) self.get( '/api/communities/f/%d/replies/' % self.thread.id, auth=True ) assert ( len(self.data) == 5 and self.data[0].get('content') == '1' and self.data[0].get('reply_id') == 0 and self.data[1].get('content') == '2' and self.data[1].get('reply_id') == reply_id and self.data[2].get('content') == '3' and self.data[2].get('reply_id') == reply_id and self.data[3].get('content') == '4' and self.data[3].get('reply_id') == 0 and self.data[4].get('content') == '5' and self.data[4].get('reply_id') == 0 )
28.404959
71
0.485598
13,665
0.993963
0
0
0
0
0
0
2,297
0.167079
91e82476dc55d0591c20d0a5e9975a53641bca72
6,711
py
Python
examples/Word2Vec_AverageVectorsTuto.py
noiseux1523/Deep-Belief-Network
6eb364a85fb128a33c539e5e414ef451f24e499d
[ "MIT" ]
1
2019-08-20T12:13:34.000Z
2019-08-20T12:13:34.000Z
examples/Word2Vec_AverageVectorsTuto.py
noiseux1523/Deep-Belief-Network
6eb364a85fb128a33c539e5e414ef451f24e499d
[ "MIT" ]
null
null
null
examples/Word2Vec_AverageVectorsTuto.py
noiseux1523/Deep-Belief-Network
6eb364a85fb128a33c539e5e414ef451f24e499d
[ "MIT" ]
null
null
null
# Author: Angela Chapman # Date: 8/6/2014 # # This file contains code to accompany the Kaggle tutorial # "Deep learning goes to the movies". The code in this file # is for Parts 2 and 3 of the tutorial, which cover how to # train a model using Word2Vec. # # *************************************** # # ****** Read the two training sets and the test set # import pandas as pd import os from nltk.corpus import stopwords import nltk.data import logging import numpy as np # Make sure that numpy is imported from gensim.models import Word2Vec from sklearn.ensemble import RandomForestClassifier from KaggleWord2VecUtility import KaggleWord2VecUtility # ****** Define functions to create average word vectors # def makeFeatureVec(words, model, num_features): # Function to average all of the word vectors in a given # paragraph # # Pre-initialize an empty numpy array (for speed) featureVec = np.zeros((num_features,), dtype="float32") # nwords = 0. # # Index2word is a list that contains the names of the words in # the model's vocabulary. Convert it to a set, for speed index2word_set = set(model.wv.index2word) # # Loop over each word in the review and, if it is in the model's # vocaublary, add its feature vector to the total for word in words: if word in index2word_set: nwords = nwords + 1. featureVec = np.add(featureVec, model[word]) # # Divide the result by the number of words to get the average featureVec = np.divide(featureVec, nwords) return featureVec def getAvgFeatureVecs(reviews, model, num_features): # Given a set of reviews (each one a list of words), calculate # the average feature vector for each one and return a 2D numpy array # # Initialize a counter counter = 0. # # Preallocate a 2D numpy array, for speed reviewFeatureVecs = np.zeros((len(reviews), num_features), dtype="float32") # # Loop through the reviews for review in reviews: # # Print a status message every 1000th review if counter % 1000. == 0.: print "Review %d of %d" % (counter, len(reviews)) # # Call the function (defined above) that makes average feature vectors reviewFeatureVecs[int(counter)] = makeFeatureVec(review, model, \ num_features) # # Increment the counter counter = counter + 1. 
return reviewFeatureVecs def getCleanReviews(reviews): clean_reviews = [] for review in reviews["review"]: clean_reviews.append(KaggleWord2VecUtility.review_to_wordlist(review, remove_stopwords=True)) return clean_reviews if __name__ == '__main__': # Read data from files train = pd.read_csv(os.path.join(os.path.dirname(__file__), 'data', 'labeledTrainData.tsv'), header=0, delimiter="\t", quoting=3) test = pd.read_csv(os.path.join(os.path.dirname(__file__), 'data', 'testData.tsv'), header=0, delimiter="\t", quoting=3) unlabeled_train = pd.read_csv(os.path.join(os.path.dirname(__file__), 'data', "unlabeledTrainData.tsv"), header=0, delimiter="\t", quoting=3) # Verify the number of reviews that were read (100,000 in total) print "Read %d labeled train reviews, %d labeled test reviews, " \ "and %d unlabeled reviews\n" % (train["review"].size, test["review"].size, unlabeled_train["review"].size) # Load the punkt tokenizer tokenizer = nltk.data.load('tokenizers/punkt/english.pickle') # ****** Split the labeled and unlabeled training sets into clean sentences # sentences = [] # Initialize an empty list of sentences print "Parsing sentences from training set" for review in train["review"]: sentences += KaggleWord2VecUtility.review_to_sentences(review, tokenizer) print "Parsing sentences from unlabeled set" for review in unlabeled_train["review"]: sentences += KaggleWord2VecUtility.review_to_sentences(review, tokenizer) # ****** Set parameters and train the word2vec model # # Import the built-in logging module and configure it so that Word2Vec # creates nice output messages logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', \ level=logging.INFO) # Set values for various parameters num_features = 300 # Word vector dimensionality min_word_count = 40 # Minimum word count num_workers = 4 # Number of threads to run in parallel context = 10 # Context window size downsampling = 1e-3 # Downsample setting for frequent words # Initialize and train the model (this will take some time) print "Training Word2Vec model..." model = Word2Vec(sentences, workers=num_workers, \ size=num_features, min_count=min_word_count, \ window=context, sample=downsampling, seed=1) # If you don't plan to train the model any further, calling # init_sims will make the model much more memory-efficient. model.init_sims(replace=True) # It can be helpful to create a meaningful model name and # save the model for later use. You can load it later using Word2Vec.load() model_name = "300features_40minwords_10context" model.save(model_name) model.doesnt_match("man woman child kitchen".split()) model.doesnt_match("france england germany berlin".split()) model.doesnt_match("paris berlin london austria".split()) model.most_similar("man") model.most_similar("queen") model.most_similar("awful") # ****** Create average vectors for the training and test sets # print "Creating average feature vecs for training reviews" trainDataVecs = getAvgFeatureVecs(getCleanReviews(train), model, num_features) print "Creating average feature vecs for test reviews" testDataVecs = getAvgFeatureVecs(getCleanReviews(test), model, num_features) # ****** Fit a random forest to the training set, then make predictions # # Fit a random forest to the training data, using 100 trees forest = RandomForestClassifier(n_estimators=100) print "Fitting a random forest to labeled training data..." 
forest = forest.fit(trainDataVecs, train["sentiment"]) # Test & extract results result = forest.predict(testDataVecs) # Write the test results output = pd.DataFrame(data={"id": test["id"], "sentiment": result}) output.to_csv("Word2Vec_AverageVectors.csv", index=False, quoting=3) print "Wrote Word2Vec_AverageVectors.csv"
37.915254
118
0.670094
0
0
0
0
0
0
0
0
3,163
0.471316
91e8cdd7e37d12c63565c41b5269a325281584b2
36
py
Python
src/phl_budget_data/etl/qcmr/positions/__init__.py
PhiladelphiaController/phl-budget-data
438999017b8659de5bfb223a038f49fe6fd4a83a
[ "MIT" ]
null
null
null
src/phl_budget_data/etl/qcmr/positions/__init__.py
PhiladelphiaController/phl-budget-data
438999017b8659de5bfb223a038f49fe6fd4a83a
[ "MIT" ]
null
null
null
src/phl_budget_data/etl/qcmr/positions/__init__.py
PhiladelphiaController/phl-budget-data
438999017b8659de5bfb223a038f49fe6fd4a83a
[ "MIT" ]
null
null
null
from .core import FullTimePositions
18
35
0.861111
0
0
0
0
0
0
0
0
0
0
91e914734fc05c34e408967e2c372a75de766234
1,207
py
Python
sdk/loganalytics/azure-mgmt-loganalytics/azure/mgmt/loganalytics/models/search_get_schema_response.py
tzhanl/azure-sdk-for-python
18cd03f4ab8fd76cc0498f03e80fbc99f217c96e
[ "MIT" ]
1
2021-09-07T18:36:04.000Z
2021-09-07T18:36:04.000Z
sdk/loganalytics/azure-mgmt-loganalytics/azure/mgmt/loganalytics/models/search_get_schema_response.py
tzhanl/azure-sdk-for-python
18cd03f4ab8fd76cc0498f03e80fbc99f217c96e
[ "MIT" ]
2
2019-10-02T23:37:38.000Z
2020-10-02T01:17:31.000Z
sdk/loganalytics/azure-mgmt-loganalytics/azure/mgmt/loganalytics/models/search_get_schema_response.py
tzhanl/azure-sdk-for-python
18cd03f4ab8fd76cc0498f03e80fbc99f217c96e
[ "MIT" ]
1
2019-06-17T22:18:23.000Z
2019-06-17T22:18:23.000Z
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from msrest.serialization import Model


class SearchGetSchemaResponse(Model):
    """The get schema operation response.

    :param metadata: The metadata from search results.
    :type metadata: ~azure.mgmt.loganalytics.models.SearchMetadata
    :param value: The array of result values.
    :type value: list[~azure.mgmt.loganalytics.models.SearchSchemaValue]
    """

    _attribute_map = {
        'metadata': {'key': 'metadata', 'type': 'SearchMetadata'},
        'value': {'key': 'value', 'type': '[SearchSchemaValue]'},
    }

    def __init__(self, **kwargs):
        super(SearchGetSchemaResponse, self).__init__(**kwargs)
        self.metadata = kwargs.get('metadata', None)
        self.value = kwargs.get('value', None)
36.575758
76
0.610605
691
0.572494
0
0
0
0
0
0
860
0.71251
91ebeac4c8302d86c1514c58ecbae0f104ee5904
1,332
py
Python
python/ds/spiralprint.py
unhingedporter/DataStructureMustKnow
3c5b3225afa2775d37a2ff90121f73208717640a
[ "MIT" ]
3
2019-11-23T08:43:58.000Z
2019-11-23T08:52:53.000Z
python/ds/spiralprint.py
unhingedpotter/DSMustKnow
64958cbbbb3f4cdb1104c2255e555233554503f9
[ "MIT" ]
null
null
null
python/ds/spiralprint.py
unhingedpotter/DSMustKnow
64958cbbbb3f4cdb1104c2255e555233554503f9
[ "MIT" ]
null
null
null
# Python3 program to print
# given matrix in spiral form


def spiralPrint(m, n, a):
    start_row_index = 0
    start_col_index = 0
    l = 0

    '''
    start_row_index - starting row index
    m - ending row index
    start_col_index - starting column index
    n - ending column index
    i - iterator
    '''

    while (start_row_index < m and start_col_index < n):

        # Print the first row from
        # the remaining rows
        for i in range(start_col_index, n):
            print(a[start_row_index][i], end=" ")

        start_row_index += 1

        # Print the last column from
        # the remaining columns
        for i in range(start_row_index, m):
            print(a[i][n - 1], end=" ")

        n -= 1

        # Print the last row from
        # the remaining rows
        if (start_row_index < m):
            for i in range(n - 1, (start_col_index - 1), -1):
                print(a[m - 1][i], end=" ")

            m -= 1

        # Print the first column from
        # the remaining columns
        if (start_col_index < n):
            for i in range(m - 1, start_row_index - 1, -1):
                print(a[i][start_col_index], end=" ")

            start_col_index += 1


# Driver Code
a = [[1, 2, 3, 4, 5, 6],
     [7, 8, 9, 10, 11, 12],
     [13, 14, 15, 16, 17, 18]]

R = 3
C = 6

spiralPrint(R, C, a)
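A minimal self-check, assuming it is appended to the file above; the expected string is my own trace of the spiral order for this 3 x 6 matrix, not part of the original file.

import io
from contextlib import redirect_stdout

buf = io.StringIO()
with redirect_stdout(buf):
    spiralPrint(R, C, a)

# Spiral order traced by hand for the driver matrix above.
expected = "1 2 3 4 5 6 12 18 17 16 15 14 13 7 8 9 10 11"
assert buf.getvalue().strip() == expected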
22.965517
61
0.534535
0
0
0
0
0
0
0
0
428
0.321321
91ed3db43e489e433ff783f8e76e26a52b78a6d5
568
py
Python
rest-api/routers/authorization.py
marintrace/backend
ad34bd50bd5e3f90be1ac16a74d39a0a9342fa33
[ "MIT" ]
2
2021-12-14T03:14:41.000Z
2022-01-17T18:36:31.000Z
rest-api/routers/authorization.py
marintrace/backend
ad34bd50bd5e3f90be1ac16a74d39a0a9342fa33
[ "MIT" ]
1
2021-03-29T08:06:42.000Z
2021-03-29T08:06:42.000Z
rest-api/routers/authorization.py
tracing-app/backend
ad34bd50bd5e3f90be1ac16a74d39a0a9342fa33
[ "MIT" ]
null
null
null
""" Authorization Utilities """ from shared.models.user_entities import User from shared.service.jwt_auth_wrapper import JWTAuthManager manager = JWTAuthManager(oidc_vault_secret="oidc/rest", object_creator=lambda claims, assumed_role, user_roles: User( first_name=claims["given_name"], last_name=claims["family_name"], school=assumed_role, email=claims['email'] )) AUTH_USER = manager.auth_header()
35.5
86
0.572183
0
0
0
0
0
0
0
0
74
0.130282
91ee06a1881d10f22e7c8d7c219f9ef37412d52d
1,365
py
Python
photonpy/tests/psf_g2d_sigma.py
qnano/photonpy
9c03a1c9f4c2177c9c6fb3f2f16dfec2306006d4
[ "MIT" ]
5
2021-04-29T21:06:05.000Z
2022-03-23T03:45:25.000Z
photonpy/tests/psf_g2d_sigma.py
qnano/photonpy
9c03a1c9f4c2177c9c6fb3f2f16dfec2306006d4
[ "MIT" ]
null
null
null
photonpy/tests/psf_g2d_sigma.py
qnano/photonpy
9c03a1c9f4c2177c9c6fb3f2f16dfec2306006d4
[ "MIT" ]
1
2021-06-18T12:39:28.000Z
2021-06-18T12:39:28.000Z
import matplotlib.pyplot as plt
import numpy as np

from photonpy.cpp.context import Context
import photonpy.cpp.gaussian as gaussian
from photonpy.smlm.util import imshow_hstack
from photonpy.cpp.estimator import Estimator


def CheckDeriv(psf: Estimator, theta):
    nderiv, ev = psf.NumDeriv(theta, eps=1e-6)
    deriv, ev = psf.Derivatives(theta)

    maxerr = np.max(np.abs(deriv - nderiv), (-1, -2))
    print(f"PSF {psf.ParamFormat()}, max {np.max(deriv)}, min: {np.min(deriv)}: Deriv-NumDeriv: {maxerr}")

    plt.figure()
    imshow_hstack(deriv[0] - nderiv[0])


with Context() as ctx:
    g = gaussian.Gaussian(ctx)
    for cuda in [False]:
        print(f"CUDA = {cuda}")
        sigma = 2
        roisize = 12

        psf = g.CreatePSF_XYIBg(roisize, sigma, cuda)

        theta = [[4, 4, 1000, 3]]
        img = psf.ExpectedValue(theta)
        plt.figure()
        plt.set_cmap('inferno')

        smp = np.random.poisson(img)
        plt.imshow(smp[0])

        psf_sigma = g.CreatePSF_XYIBgSigma(roisize, sigma, cuda)
        theta_s = [[4, 4, 1000, 3, sigma]]
        img2 = psf_sigma.ExpectedValue(theta_s)

        CheckDeriv(psf, theta)
        # CheckDeriv(psf_sigma)

        print(f"PSF Sigma crlb: {psf_sigma.CRLB(theta_s)}")

        theta = psf_sigma.Estimate(smp)[0]
        print(theta)
26.764706
106
0.606593
0
0
0
0
0
0
0
0
198
0.145055
91ee64a13c556aefe5259d2a930de14c6c79472f
2,018
py
Python
tests/tools_tests/helpers_tests.py
Gautierhyp/tespy
d44ae41874baeff77619e560faea59dd0cb84c7c
[ "MIT" ]
null
null
null
tests/tools_tests/helpers_tests.py
Gautierhyp/tespy
d44ae41874baeff77619e560faea59dd0cb84c7c
[ "MIT" ]
null
null
null
tests/tools_tests/helpers_tests.py
Gautierhyp/tespy
d44ae41874baeff77619e560faea59dd0cb84c7c
[ "MIT" ]
null
null
null
# -*- coding: utf-8 """Module for testing helper functions. This file is part of project TESPy (github.com/oemof/tespy). It's copyrighted by the contributors recorded in the version control history of the file, available from its original location tests/tools_tests/helpers_tests.py SPDX-License-Identifier: MIT """ from nose.tools import eq_ from tespy.tools.helpers import newton def func(params, x): return x ** 2 + x - 20 def deriv(params, x): return 2 * x + 1 def test_newton_bounds(): """ Test newton algorithm value limit handling. Try to calculate a zero crossing of a quadratic function in three tries. - zero crossing within limits, starting value near 4 - zero crossing within limits, starting value near -5 - zero crossing below minimum - zero crossing above maximum The function is x^2 + x - 20, there crossings are -5 and 4. """ result = newton(func, deriv, [], 0, valmin=-10, valmax=10, val0=0) msg = ('The newton algorithm should find the zero crossing at 4.0. ' + str(round(result, 1)) + ' was found instead.') eq_(4.0, result, msg) result = newton(func, deriv, [], 0, valmin=-10, valmax=10, val0=-10) msg = ('The newton algorithm should find the zero crossing at -5.0. ' + str(round(result, 1)) + ' was found instead.') eq_(-5.0, result, msg) result = newton(func, deriv, [], 0, valmin=-4, valmax=-2, val0=-3) msg = ('The newton algorithm should not be able to find a zero crossing. ' 'The value ' + str(round(result, 1)) + ' was found, but the ' 'algorithm should have found the lower boundary of -4.0.') eq_(-4.0, result, msg) result = newton(func, deriv, [], 0, valmin=-20, valmax=-10, val0=-10) msg = ('The newton algorithm should not be able to find a zero crossing. ' 'The value ' + str(round(result, 1)) + ' was found, but the ' 'algorithm should have found the upper boundary of -10.0.') eq_(-10.0, result, msg)
32.548387
78
0.646184
0
0
0
0
0
0
0
0
1,189
0.589197
91eebd9cfe8ecc166ed16501e2c6d724f724535d
4,110
py
Python
theory/model/form.py
ralfonso/theory
41684969313cfc545d74b306e409fd5bf21387b3
[ "MIT" ]
4
2015-07-03T19:53:59.000Z
2016-04-25T03:03:56.000Z
theory/model/form.py
ralfonso/theory
41684969313cfc545d74b306e409fd5bf21387b3
[ "MIT" ]
null
null
null
theory/model/form.py
ralfonso/theory
41684969313cfc545d74b306e409fd5bf21387b3
[ "MIT" ]
2
2020-03-29T22:02:29.000Z
2021-07-13T07:17:19.000Z
import formencode import pylons from pylons import app_globals as g class OutputSchema(formencode.Schema): allow_extra_fields = False enabled = formencode.validators.Int() class ConfigForm(formencode.Schema): allow_extra_fields = True filter_extra_fields = True #pre_validators = [formencode.NestedVariables()] action = formencode.validators.String(not_empty=False,if_missing=None) cancel = formencode.validators.String(not_empty=False,if_missing=None) firsttime = formencode.validators.Int(not_empty=False, if_missing=0) server = formencode.validators.String(strip=True,not_empty=True,messages={'empty':'please enter a server host name'}) port = formencode.validators.Int(strip=True,not_empty=True,messages={'empty':'please enter a port, MPD default is 6600', 'integer':'please enter an integer value for port, MPD default is 6600' }) password = formencode.validators.String(not_empty=False,if_missing=None) webpassword = formencode.validators.String(not_empty=False,if_missing=None) timeout = formencode.validators.Bool() default_search = formencode.validators.String(not_empty=True) awskey = formencode.validators.String(strip=True,not_empty=False,if_missing=None) aws_secret = formencode.validators.String(strip=True,not_empty=False,if_missing=None) outputs = formencode.ForEach(OutputSchema(), if_missing=[]) class StreamNameInUse(formencode.validators.FancyValidator): def validate_python(self, values, state): # if old name is set, don't do this check if values['oldname']: return if values['name'] in [name[0] for name in g.tc.streams]: raise formencode.Invalid({'stream_name_taken':"that stream name has already been used"}, values, state) class StreamForm(formencode.Schema): allow_extra_fields = False name = formencode.validators.String(not_empty=True,strip=True,messages={'empty':'please enter a name for this stream'}) url = formencode.validators.URL(not_empty=True,require_tld=False,strip=True,check_exists=False,messages={'empty':'please enter a URL'}) oldname = formencode.validators.String(not_empty=False) chained_validators = [StreamNameInUse()] class State(object): """Trivial class to be used as State objects to transport information to formencode validators""" def __init__(self, **kw): for key in kw: setattr(self, key, kw[key]) def __repr__(self): atts = [] for key in self.__dict__: atts.append( (key, getattr(self, key)) ) return self.__class__.__name__ + '(' + ', '.join(x[0] + '=' + repr(x[1]) for x in atts) + ')' def validate_custom(schema, **state_kwargs): """Validate a formencode schema. Works similar to the @validate decorator. On success return a dictionary of parameters from request.params. On failure throws a formencode.Invalid exception.""" # Create a state object if requested if state_kwargs: state = State(**state_kwargs) else: state = None # In case of validation errors an exception is thrown. This needs to # be caught elsewhere. if state_kwargs.get('variable_decode', False): params = formencode.variabledecode.variable_decode(pylons.request.params) print pylons.request.params print params else: params = pylons.request.params return schema.to_python(params, state) def htmlfill(html, exception_error=None): """Add formencode error messages to an HTML string. 'html' contains the HTML page with the form (e.g. created with render()). 'exception_error' is the formencode.Invalid-Exception from formencode.""" return formencode.htmlfill.render( form=html, defaults=pylons.request.params, errors=(exception_error and exception_error.unpack_errors()), encoding=pylons.response.determine_charset() )
42.8125
139
0.682968
2,714
0.660341
0
0
0
0
0
0
1,058
0.257421
91eed42dd8cd7828f31d4494c0f4f389955bf685
8,960
py
Python
utils/dynamo.py
OnRails-IN/backend
5f5c9703fcda282ed54f2e6315680fb30fd91a6f
[ "MIT" ]
null
null
null
utils/dynamo.py
OnRails-IN/backend
5f5c9703fcda282ed54f2e6315680fb30fd91a6f
[ "MIT" ]
null
null
null
utils/dynamo.py
OnRails-IN/backend
5f5c9703fcda282ed54f2e6315680fb30fd91a6f
[ "MIT" ]
null
null
null
""" Dynamo Utils ============ All utility functions for interactions with DynamoDB Functions - ensure_json - create_user_table - create_or_update_record - list_tables - list_records - get_record - delete_table - delete_record - check_active """ import boto3 from decimal import Decimal from constants import AWS_ACCESS_KEY, AWS_SECRET_KEY, AWS_REGION, DYNAMO_URL ddb = boto3.resource( 'dynamodb', aws_access_key_id = AWS_ACCESS_KEY, aws_secret_access_key = AWS_SECRET_KEY, endpoint_url = DYNAMO_URL, region_name = AWS_REGION ) client = boto3.client( 'dynamodb', aws_access_key_id = AWS_ACCESS_KEY, aws_secret_access_key = AWS_SECRET_KEY, endpoint_url = DYNAMO_URL, region_name = AWS_REGION ) def ensure_json(obj): """ Function to ensure that a python object is JSON serializable Params: obj::dict|[dict] Object to be JSON serializable Returns: obj::dict|[dict] Returns the JSON serializable object """ if isinstance(obj, list): for i in range(len(obj)): obj[i] = ensure_json(obj[i]) return obj elif isinstance(obj, dict): for k in obj.keys(): obj[k] = ensure_json(obj[k]) return obj elif isinstance(obj, Decimal): if obj % 1 == 0: return int(obj) else: return float(obj) else: return obj def create_user_table(): """ Function to create the "users" table in DynamoDB Returns: bool If the table was created or not """ try: table = ddb.create_table( TableName = "users", KeySchema = [ { "AttributeName": "username", "KeyType": "HASH" # Partition key }, { "AttributeName": "index", "KeyType": "RANGE" # Sort key } ], AttributeDefinitions = [ { "AttributeName": "username", "AttributeType": "S" }, { "AttributeName": "index", "AttributeType": "S" } ], ProvisionedThroughput = { "ReadCapacityUnits": 10, "WriteCapacityUnits": 10 } ) return True except client.exceptions.ResourceNotFoundException: print("Table does not exist") return False except Exception as e: print("Exception @ create_user_table\n{}".format(e)) return None def create_train_table(): """ Function to create the "trains" table in DynamoDB Returns: bool If the table was created or not """ try: table = ddb.create_table( TableName = "trains", KeySchema = [ { "AttributeName": "train_name", "KeyType": "HASH" # Partition key }, { "AttributeName": "train_type", "KeyType": "RANGE" # Sort key } ], AttributeDefinitions = [ { "AttributeName": "train_name", "AttributeType": "N" }, { "AttributeName": "train_type", "AttributeType": "S" } ], ProvisionedThroughput = { "ReadCapacityUnits": 10, "WriteCapacityUnits": 10 } ) return True except client.exceptions.ResourceNotFoundException: print("Table does not exist") return False except Exception as e: print("Exception @ create_user_table\n{}".format(e)) return None def create_or_update_record(tableName, record): """ Function to create or update a record in DynamoDB Params: tableName::str The table name to get the record record::dict The object to store Returns: bool If the record was inserted or not """ if not tableName or not record: return False if not {'username', 'index'}.issubset(record): return False try: res = ddb.Table(tableName).get_item( Key = { "username": record['username'], "index": record['index'] } ) record = { **res['Item'], **record } if 'Item' in res else record ddb.Table(tableName).put_item( Item = record ) return True except client.exceptions.ResourceNotFoundException: print("Table does not exist") return False except Exception as e: print("Exception @ create_or_update_record\n{}".format(e)) return None def list_tables(): """ Function to list all tables in DynamoDB Returns: tables::[str] The list of tables """ 
try: return client.list_tables()['TableNames'] except client.exceptions.ResourceNotFoundException: print("Tables do not exist") return False except Exception as e: print("Exception @ list_tables\n{}".format(e)) return None def list_records(tableName): """ Function to list all records from a DynamoDB table Params: tableName::str The table name to get the records Returns: records::[dict] The list of records stored in the table """ if not tableName: return False try: table = ddb.Table(tableName) res = table.scan() docs = ensure_json(res['Items']) while 'LastEvaluatedKey' in res: res = table.scan(ExclusiveStartKey = res['LastEvaluatedKey']) docs.extend(ensure_json(res['Items'])) return docs except client.exceptions.ResourceNotFoundException: print("Table does not exist") return False except Exception as e: print("Exception @ list_records\n{}".format(e)) return None def get_record(tableName, query): """ Function to retrieve one record from DynamoDB table Params: tableName::str The table name to get the record query::dict The query to fetch the record Returns: doc::dict The record retrieved from the table """ if not tableName or not query or not isinstance(query, dict): return False try: res = ddb.Table(tableName).get_item( Key = query ) doc = ensure_json(res['Item']) if 'Item' in res else None return doc except client.exceptions.ResourceNotFoundException: print("Table does not exist") return False except Exception as e: print("Exception @ get_record\n{}".format(e)) return None def delete_table(tableName): """ Function to delete a DynamoDB table Params: tableName::str The table name to delete Returns: bool If the table was deleted or not """ if not tableName: return False try: ddb.Table(tableName).delete() return True except client.exceptions.ResourceNotFoundException: print("Table does not exist") return False except Exception as e: print("Exception @ delete_table\n{}".format(e)) return None def delete_record(tableName, query): """ Function to delete a DynamoDB table Params: tableName::str The table name to get the record query::dict The query to fetch the record Returns: bool If the record was deleted or not """ if not tableName or not key or not val: return False try: res = ddb.Table(tableName).delete_item( Key = query ) print(res) return True except client.exceptions.ResourceNotFoundException: print("Table does not exist") return False except Exception as e: print("Exception @ delete_record\n{}".format(e)) return None def check_active(tableName): """ Function to check if a table is ACTIVE Params: tableName::str The table name to check Returns: bool If the table is active or not """ if not tableName: return False try: if ddb.Table(tableName).table_status == "ACTIVE": return True return False except client.exceptions.ResourceNotFoundException: print("Table does not exist") return False except Exception as e: print("Exception @ check_status\n{}".format(e)) return None
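A short usage sketch for the helpers above; it assumes valid credentials in constants.py, a reachable DynamoDB endpoint, and that the "users" table does not yet exist. The record contents are made up for illustration.

if create_user_table():
    create_or_update_record("users", {
        "username": "alice",   # partition key, as defined in create_user_table
        "index": "profile",    # sort key
        "karma": 42,
    })
    print(get_record("users", {"username": "alice", "index": "profile"}))
    print(list_records("users"))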
24.888889
76
0.547098
0
0
0
0
0
0
0
0
3,500
0.390625
91eee874c4335dfd997cd3ef3e9c2d23c76e47b1
26
py
Python
cloudcms/branch/__init__.py
gitana/cloudcms-python-driver
8685c634880c1a6af6f359f1a25de42dcf49f319
[ "Apache-2.0" ]
null
null
null
cloudcms/branch/__init__.py
gitana/cloudcms-python-driver
8685c634880c1a6af6f359f1a25de42dcf49f319
[ "Apache-2.0" ]
null
null
null
cloudcms/branch/__init__.py
gitana/cloudcms-python-driver
8685c634880c1a6af6f359f1a25de42dcf49f319
[ "Apache-2.0" ]
null
null
null
from .branch import Branch
26
26
0.846154
0
0
0
0
0
0
0
0
0
0
91ef6e454e8d3a02bbbb8495426f9e53729bb9c8
30
py
Python
test2/test2.py
kubatom/my_nemtiko_repo
842a303ae120d871623c267ea76c2353d70b2fce
[ "Apache-2.0" ]
null
null
null
test2/test2.py
kubatom/my_nemtiko_repo
842a303ae120d871623c267ea76c2353d70b2fce
[ "Apache-2.0" ]
null
null
null
test2/test2.py
kubatom/my_nemtiko_repo
842a303ae120d871623c267ea76c2353d70b2fce
[ "Apache-2.0" ]
null
null
null
print('this is a test2 file')
15
29
0.7
0
0
0
0
0
0
0
0
22
0.733333
37cd4b6be89839faecee7dd52588398ff12411ba
247
py
Python
src/compas_blender/forms/__init__.py
yijiangh/compas
a9e86edf6b602f47ca051fccedcaa88a5e5d3600
[ "MIT" ]
1
2019-03-27T22:32:56.000Z
2019-03-27T22:32:56.000Z
src/compas_blender/forms/__init__.py
yijiangh/compas
a9e86edf6b602f47ca051fccedcaa88a5e5d3600
[ "MIT" ]
null
null
null
src/compas_blender/forms/__init__.py
yijiangh/compas
a9e86edf6b602f47ca051fccedcaa88a5e5d3600
[ "MIT" ]
null
null
null
""" ******************************************************************************** compas_blender.forms ******************************************************************************** .. currentmodule:: compas_blender.forms """ __all__ = []
22.454545
80
0.234818
0
0
0
0
0
0
0
0
232
0.939271
37cd97b1c214ca81d9e46e1e2c07bc9bb82f06f0
340
py
Python
Source/Git/Experiments/git_annotate.py
cadappl/scm-workbench
302cdb8e36bb755f4977062e8977c37e7f4491f9
[ "Apache-2.0" ]
24
2017-03-23T06:24:02.000Z
2022-03-19T13:35:44.000Z
Source/Git/Experiments/git_annotate.py
cadappl/scm-workbench
302cdb8e36bb755f4977062e8977c37e7f4491f9
[ "Apache-2.0" ]
14
2016-06-21T10:06:27.000Z
2020-07-25T11:56:23.000Z
Source/Git/Experiments/git_annotate.py
barry-scott/git-workbench
9f352875ab097ce5e45f85bf255b1fa02a196807
[ "Apache-2.0" ]
11
2016-12-25T12:36:16.000Z
2022-03-23T14:25:25.000Z
#!/usr/bin/python3
import sys
import git

r = git.Repo( sys.argv[1] )

num = 0

for info in r.blame( 'HEAD', sys.argv[2] ):
    num += 1
    commit = info[0]
    all_lines = info[1]
    print( '%s %6d:%s' % (commit, num, all_lines[0]) )
    for line in all_lines[1:]:
        num += 1
        print( '%*s %6d:%s' % (40, '', num, line) )
17
54
0.517647
0
0
0
0
0
0
0
0
49
0.144118
37cf805b2f12051ac4eca05f7ae1c89c1a8dc059
544
py
Python
configs/global_configs.py
HansikaPH/time-series-forecasting
23be319a190489bc1464653a3d672edd70ab110b
[ "MIT" ]
67
2019-09-09T14:53:35.000Z
2022-02-21T08:51:15.000Z
configs/global_configs.py
HansikaPH/time-series-forecasting
23be319a190489bc1464653a3d672edd70ab110b
[ "MIT" ]
6
2019-09-09T06:11:51.000Z
2019-12-16T04:31:11.000Z
configs/global_configs.py
HansikaPH/time-series-forecasting
23be319a190489bc1464653a3d672edd70ab110b
[ "MIT" ]
18
2019-09-12T02:49:58.000Z
2022-02-16T11:15:57.000Z
# configs for the model training
class model_training_configs:
    VALIDATION_ERRORS_DIRECTORY = 'results/validation_errors/'
    INFO_FREQ = 1


# configs for the model testing
class model_testing_configs:
    RNN_FORECASTS_DIRECTORY = 'results/rnn_forecasts/'
    RNN_ERRORS_DIRECTORY = 'results/errors'
    PROCESSED_RNN_FORECASTS_DIRECTORY = '/results/processed_rnn_forecasts/'


# configs for hyperparameter tuning(SMAC3)
class hyperparameter_tuning_configs:
    SMAC_RUNCOUNT_LIMIT = 50


class gpu_configs:
    log_device_placement = False
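A minimal usage sketch; the import path is an assumption based on the file location configs/global_configs.py shown in this record.

from configs.global_configs import model_testing_configs, gpu_configs

print(model_testing_configs.RNN_FORECASTS_DIRECTORY)  # 'results/rnn_forecasts/'
print(gpu_configs.log_device_placement)               # False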
30.222222
75
0.799632
429
0.788603
0
0
0
0
0
0
208
0.382353
37cf939b241a87e359fb447071196040b0ef99e6
26,714
py
Python
openprocurement/blade/tests/auctions.py
imaginal/openprocurement.blade
4ef512e3d0c1287af1faca9caa9e5349a3c5b0fb
[ "Apache-2.0" ]
null
null
null
openprocurement/blade/tests/auctions.py
imaginal/openprocurement.blade
4ef512e3d0c1287af1faca9caa9e5349a3c5b0fb
[ "Apache-2.0" ]
null
null
null
openprocurement/blade/tests/auctions.py
imaginal/openprocurement.blade
4ef512e3d0c1287af1faca9caa9e5349a3c5b0fb
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*- import unittest from uuid import uuid4 from copy import deepcopy from openprocurement.api.models import get_now from openprocurement.edge.tests.base import AuctionBaseWebTest, test_award, test_auction_data, test_document, ROUTE_PREFIX try: import openprocurement.auctions.core as auctions_core except ImportError: auctions_core = None @unittest.skipUnless(auctions_core, "Auctions is not reachable") class AuctionResourceTest(AuctionBaseWebTest): def test_empty_listing(self): response = self.app.get('/auctions') self.assertEqual(response.status, '200 OK') self.assertEqual(response.content_type, 'application/json') self.assertEqual(response.json['data'], []) self.assertNotIn('{\n "', response.body) self.assertNotIn('callback({', response.body) self.assertEqual(response.json['next_page']['offset'], '') self.assertNotIn('prev_page', response.json) response = self.app.get('/auctions?opt_jsonp=callback') self.assertEqual(response.status, '200 OK') self.assertEqual(response.content_type, 'application/javascript') self.assertNotIn('{\n "', response.body) self.assertIn('callback({', response.body) response = self.app.get('/auctions?opt_pretty=1') self.assertEqual(response.status, '200 OK') self.assertEqual(response.content_type, 'application/json') self.assertIn('{\n "', response.body) self.assertNotIn('callback({', response.body) response = self.app.get('/auctions?opt_jsonp=callback&opt_pretty=1') self.assertEqual(response.status, '200 OK') self.assertEqual(response.content_type, 'application/javascript') self.assertIn('{\n "', response.body) self.assertIn('callback({', response.body) response = self.app.get('/auctions?offset=2015-01-01T00:00:00+02:00&descending=1&limit=10') self.assertEqual(response.status, '200 OK') self.assertEqual(response.content_type, 'application/json') self.assertEqual(response.json['data'], []) self.assertIn('descending=1', response.json['next_page']['uri']) self.assertIn('limit=10', response.json['next_page']['uri']) self.assertNotIn('descending=1', response.json['prev_page']['uri']) self.assertIn('limit=10', response.json['prev_page']['uri']) response = self.app.get('/auctions?feed=changes') self.assertEqual(response.status, '200 OK') self.assertEqual(response.content_type, 'application/json') self.assertEqual(response.json['data'], []) self.assertEqual(response.json['next_page']['offset'], '') self.assertNotIn('prev_page', response.json) response = self.app.get('/auctions?feed=changes&offset=0', status=404) self.assertEqual(response.status, '404 Not Found') self.assertEqual(response.content_type, 'application/json') self.assertEqual(response.json['status'], 'error') self.assertEqual(response.json['errors'], [ {u'description': u'Offset expired/invalid', u'location': u'params', u'name': u'offset'} ]) response = self.app.get('/auctions?feed=changes&descending=1&limit=10') self.assertEqual(response.status, '200 OK') self.assertEqual(response.content_type, 'application/json') self.assertEqual(response.json['data'], []) self.assertIn('descending=1', response.json['next_page']['uri']) self.assertIn('limit=10', response.json['next_page']['uri']) self.assertNotIn('descending=1', response.json['prev_page']['uri']) self.assertIn('limit=10', response.json['prev_page']['uri']) def test_listing(self): response = self.app.get('/auctions') self.assertEqual(response.status, '200 OK') self.assertEqual(len(response.json['data']), 0) auctions = [] for i in range(3): offset = get_now().isoformat() auctions.append(self.create_auction()) ids = ','.join([i['id'] for i in 
auctions]) while True: response = self.app.get('/auctions') self.assertTrue(ids.startswith(','.join([i['id'] for i in response.json['data']]))) if len(response.json['data']) == 3: break self.assertEqual(len(response.json['data']), 3) self.assertEqual(set(response.json['data'][0]), set([u'id', u'dateModified'])) self.assertEqual(set([i['id'] for i in response.json['data']]), set([i['id'] for i in auctions])) self.assertEqual(set([i['dateModified'] for i in response.json['data']]), set([i['dateModified'] for i in auctions])) self.assertEqual([i['dateModified'] for i in response.json['data']], sorted([i['dateModified'] for i in auctions])) while True: response = self.app.get('/auctions?offset={}'.format(offset)) self.assertEqual(response.status, '200 OK') if len(response.json['data']) == 1: break self.assertEqual(len(response.json['data']), 1) response = self.app.get('/auctions?limit=2') self.assertEqual(response.status, '200 OK') self.assertNotIn('prev_page', response.json) self.assertEqual(len(response.json['data']), 2) response = self.app.get(response.json['next_page']['path'].replace(ROUTE_PREFIX, '')) self.assertEqual(response.status, '200 OK') self.assertIn('descending=1', response.json['prev_page']['uri']) self.assertEqual(len(response.json['data']), 1) response = self.app.get(response.json['next_page']['path'].replace(ROUTE_PREFIX, '')) self.assertEqual(response.status, '200 OK') self.assertIn('descending=1', response.json['prev_page']['uri']) self.assertEqual(len(response.json['data']), 0) response = self.app.get('/auctions', params=[('opt_fields', 'status')]) self.assertEqual(response.status, '200 OK') self.assertEqual(len(response.json['data']), 3) self.assertEqual(set(response.json['data'][0]), set([u'id', u'dateModified', u'status'])) self.assertIn('opt_fields=status', response.json['next_page']['uri']) response = self.app.get('/auctions', params=[('opt_fields', 'status,enquiryPeriod')]) self.assertEqual(response.status, '200 OK') self.assertEqual(len(response.json['data']), 3) self.assertEqual(set(response.json['data'][0]), set([u'id', u'dateModified', u'status', u'enquiryPeriod'])) self.assertIn('opt_fields=status%2CenquiryPeriod', response.json['next_page']['uri']) response = self.app.get('/auctions?descending=1') self.assertEqual(response.status, '200 OK') self.assertEqual(response.content_type, 'application/json') self.assertEqual(len(response.json['data']), 3) self.assertEqual(set(response.json['data'][0]), set([u'id', u'dateModified'])) self.assertEqual(set([i['id'] for i in response.json['data']]), set([i['id'] for i in auctions])) self.assertEqual([i['dateModified'] for i in response.json['data']], sorted([i['dateModified'] for i in auctions], reverse=True)) response = self.app.get('/auctions?descending=1&limit=2') self.assertEqual(response.status, '200 OK') self.assertNotIn('descending=1', response.json['prev_page']['uri']) self.assertEqual(len(response.json['data']), 2) response = self.app.get(response.json['next_page']['path'].replace(ROUTE_PREFIX, '')) self.assertEqual(response.status, '200 OK') self.assertNotIn('descending=1', response.json['prev_page']['uri']) self.assertEqual(len(response.json['data']), 1) response = self.app.get(response.json['next_page']['path'].replace(ROUTE_PREFIX, '')) self.assertEqual(response.status, '200 OK') self.assertNotIn('descending=1', response.json['prev_page']['uri']) self.assertEqual(len(response.json['data']), 0) test_auction_data2 = test_auction_data.copy() test_auction_data2['mode'] = 'test' 
self.create_auction(test_auction_data2) while True: response = self.app.get('/auctions?mode=test') self.assertEqual(response.status, '200 OK') if len(response.json['data']) == 1: break self.assertEqual(len(response.json['data']), 1) response = self.app.get('/auctions?mode=_all_') self.assertEqual(response.status, '200 OK') self.assertEqual(len(response.json['data']), 4) def test_listing_changes(self): response = self.app.get('/auctions?feed=changes') self.assertEqual(response.status, '200 OK') self.assertEqual(len(response.json['data']), 0) auctions = [] for i in range(3): auctions.append(self.create_auction()) ids = ','.join([i['id'] for i in auctions]) while True: response = self.app.get('/auctions?feed=changes') self.assertTrue(ids.startswith(','.join([i['id'] for i in response.json['data']]))) if len(response.json['data']) == 3: break self.assertEqual(response.status, '200 OK') self.assertEqual(len(response.json['data']), 3) self.assertEqual(set(response.json['data'][0]), set([u'id', u'dateModified'])) self.assertEqual(set([i['id'] for i in response.json['data']]), set([i['id'] for i in auctions])) self.assertEqual(set([i['dateModified'] for i in response.json['data']]), set([i['dateModified'] for i in auctions])) self.assertEqual([i['dateModified'] for i in response.json['data']], sorted([i['dateModified'] for i in auctions])) response = self.app.get('/auctions?feed=changes&limit=2') self.assertEqual(response.status, '200 OK') self.assertNotIn('prev_page', response.json) self.assertEqual(len(response.json['data']), 2) response = self.app.get(response.json['next_page']['path'].replace(ROUTE_PREFIX, '')) self.assertEqual(response.status, '200 OK') self.assertIn('descending=1', response.json['prev_page']['uri']) self.assertEqual(len(response.json['data']), 1) response = self.app.get(response.json['next_page']['path'].replace(ROUTE_PREFIX, '')) self.assertEqual(response.status, '200 OK') self.assertIn('descending=1', response.json['prev_page']['uri']) self.assertEqual(len(response.json['data']), 0) response = self.app.get('/auctions?feed=changes', params=[('opt_fields', 'status')]) self.assertEqual(response.status, '200 OK') self.assertEqual(len(response.json['data']), 3) self.assertEqual(set(response.json['data'][0]), set([u'id', u'dateModified', u'status'])) self.assertIn('opt_fields=status', response.json['next_page']['uri']) response = self.app.get('/auctions?feed=changes', params=[('opt_fields', 'status,enquiryPeriod')]) self.assertEqual(response.status, '200 OK') self.assertEqual(len(response.json['data']), 3) self.assertEqual(set(response.json['data'][0]), set([u'id', u'dateModified', u'status', u'enquiryPeriod'])) self.assertIn('opt_fields=status%2CenquiryPeriod', response.json['next_page']['uri']) response = self.app.get('/auctions?feed=changes&descending=1') self.assertEqual(response.status, '200 OK') self.assertEqual(response.content_type, 'application/json') self.assertEqual(len(response.json['data']), 3) self.assertEqual(set(response.json['data'][0]), set([u'id', u'dateModified'])) self.assertEqual(set([i['id'] for i in response.json['data']]), set([i['id'] for i in auctions])) self.assertEqual([i['dateModified'] for i in response.json['data']], sorted([i['dateModified'] for i in auctions], reverse=True)) response = self.app.get('/auctions?feed=changes&descending=1&limit=2') self.assertEqual(response.status, '200 OK') self.assertNotIn('descending=1', response.json['prev_page']['uri']) self.assertEqual(len(response.json['data']), 2) response = 
self.app.get(response.json['next_page']['path'].replace(ROUTE_PREFIX, '')) self.assertEqual(response.status, '200 OK') self.assertNotIn('descending=1', response.json['prev_page']['uri']) self.assertEqual(len(response.json['data']), 1) response = self.app.get(response.json['next_page']['path'].replace(ROUTE_PREFIX, '')) self.assertEqual(response.status, '200 OK') self.assertNotIn('descending=1', response.json['prev_page']['uri']) self.assertEqual(len(response.json['data']), 0) test_auction_data2 = test_auction_data.copy() test_auction_data2['mode'] = 'test' self.create_auction(test_auction_data2) while True: response = self.app.get('/auctions?feed=changes&mode=test') self.assertEqual(response.status, '200 OK') if len(response.json['data']) == 1: break self.assertEqual(len(response.json['data']), 1) response = self.app.get('/auctions?feed=changes&mode=_all_') self.assertEqual(response.status, '200 OK') self.assertEqual(len(response.json['data']), 4) def test_listing_draft(self): response = self.app.get('/auctions') self.assertEqual(response.status, '200 OK') self.assertEqual(len(response.json['data']), 0) auctions = [] data = test_auction_data.copy() data.update({'status': 'draft'}) for i in range(3): auctions.append(self.create_auction(data)) ids = ','.join([i['id'] for i in auctions]) while True: response = self.app.get('/auctions') self.assertTrue(ids.startswith(','.join([i['id'] for i in response.json['data']]))) if len(response.json['data']) == 3: break self.assertEqual(len(response.json['data']), 3) self.assertEqual(set(response.json['data'][0]), set([u'id', u'dateModified'])) self.assertEqual(set([i['id'] for i in response.json['data']]), set([i['id'] for i in auctions])) self.assertEqual(set([i['dateModified'] for i in response.json['data']]), set([i['dateModified'] for i in auctions])) self.assertEqual([i['dateModified'] for i in response.json['data']], sorted([i['dateModified'] for i in auctions])) def test_get_auction(self): auction = self.create_auction() response = self.app.get('/auctions/{}'.format(auction['id'])) self.assertEqual(response.status, '200 OK') self.assertEqual(response.content_type, 'application/json') self.assertDictEqual(response.json['data'], auction) response = self.app.get('/auctions/{}?opt_jsonp=callback'.format(auction['id'])) self.assertEqual(response.status, '200 OK') self.assertEqual(response.content_type, 'application/javascript') self.assertIn('callback({"data": {"', response.body) response = self.app.get('/auctions/{}?opt_pretty=1'.format(auction['id'])) self.assertEqual(response.status, '200 OK') self.assertEqual(response.content_type, 'application/json') self.assertIn('{\n "data": {\n "', response.body) def test_auction_not_found(self): response = self.app.get('/auctions') self.assertEqual(response.status, '200 OK') self.assertEqual(len(response.json['data']), 0) response = self.app.get('/auctions/some_id', status=404) self.assertEqual(response.status, '404 Not Found') self.assertEqual(response.content_type, 'application/json') self.assertEqual(response.json['status'], 'error') self.assertEqual(response.json['errors'], [ {u'description': u'Not Found', u'location': u'url', u'name': u'auction_id'} ]) response = self.app.patch_json( '/auctions/some_id', {'data': {}}, status=404) self.assertEqual(response.status, '404 Not Found') self.assertEqual(response.content_type, 'application/json') self.assertEqual(response.json['status'], 'error') self.assertEqual(response.json['errors'], [ {u'description': u'Not Found', u'location': u'url', u'name': 
u'auction_id'} ]) # put custom document object into database to check auction construction on non-Auction data data = {'contract': 'test', '_id': uuid4().hex} self.db.save(data) response = self.app.get('/auctions/{}'.format(data['_id']), status=404) self.assertEqual(response.status, '404 Not Found') @unittest.skipUnless(auctions_core, "Auctions is not reachable") class AuctionAwardResourceTest(AuctionBaseWebTest): def test_listing(self): auction = self.create_auction() response = self.app.get('/auctions/{}/awards'.format(auction['id'])) self.assertEqual(response.status, '200 OK') self.assertEqual(response.content_type, 'application/json') self.assertEqual(response.json['data'], auction['awards']) self.assertNotIn('{\n "', response.body) self.assertNotIn('callback({', response.body) response = self.app.get('/auctions/{}/awards?opt_jsonp=callback'.format(auction['id'])) self.assertEqual(response.status, '200 OK') self.assertEqual(response.content_type, 'application/javascript') self.assertNotIn('{\n "', response.body) self.assertIn('callback({', response.body) response = self.app.get('/auctions/{}/awards?opt_pretty=1'.format(auction['id'])) self.assertEqual(response.status, '200 OK') self.assertEqual(response.content_type, 'application/json') self.assertIn('{\n "', response.body) self.assertNotIn('callback({', response.body) response = self.app.get('/auctions/{}/awards?opt_jsonp=callback&opt_pretty=1'.format(auction['id'])) self.assertEqual(response.status, '200 OK') self.assertEqual(response.content_type, 'application/javascript') self.assertIn('{\n "', response.body) self.assertIn('callback({', response.body) def test_listing_changes(self): auction = self.create_auction() data = self.db[auction['id']] awards = data['awards'] for i in range(3): award = deepcopy(test_award) award['date'] = get_now().isoformat() award['id'] = uuid4().hex awards.append(award) self.db.save(data) ids = ','.join([i['id'] for i in awards]) response = self.app.get('/auctions/{}/awards'.format(auction['id'])) self.assertTrue(ids.startswith(','.join([i['id'] for i in response.json['data']]))) self.assertEqual(response.status, '200 OK') self.assertEqual(len(response.json['data']), len(awards)) self.assertEqual(set([i['id'] for i in response.json['data']]), set([i['id'] for i in awards])) self.assertEqual(set([i['date'] for i in response.json['data']]), set([i['date'] for i in awards])) self.assertEqual([i['date'] for i in response.json['data']], sorted([i['date'] for i in awards])) def test_get_award(self): auction = self.create_auction() award = auction['awards'][0] response = self.app.get('/auctions/{}/awards/{}'.format(auction['id'], award['id'])) self.assertEqual(response.status, '200 OK') self.assertEqual(response.content_type, 'application/json') self.assertDictEqual(response.json['data'], award) response = self.app.get('/auctions/{}/awards/{}?opt_jsonp=callback'.format(auction['id'], award['id'])) self.assertEqual(response.status, '200 OK') self.assertEqual(response.content_type, 'application/javascript') self.assertIn('callback({"data": {"', response.body) response = self.app.get('/auctions/{}/awards/{}?opt_pretty=1'.format(auction['id'], award['id'])) self.assertEqual(response.status, '200 OK') self.assertEqual(response.content_type, 'application/json') self.assertIn('{\n "data": {\n "', response.body) def test_award_not_found(self): auction = self.create_auction() response = self.app.get('/auctions/{}/awards/some_id'.format(auction['id']), status=404) self.assertEqual(response.status, '404 Not Found') 
self.assertEqual(response.content_type, 'application/json') self.assertEqual(response.json['status'], 'error') self.assertEqual(response.json['errors'], [ {u'description': u'Not Found', u'location': u'url', u'name': u'award_id'} ]) def test_get_document_with_versions(self): auction = self.create_auction() data = self.db[auction['id']] documents = data['documents'] for i in range(3): document = deepcopy(test_document) document['id'] = data['documents'][0]['id'] document['url'] += str(i) document['dateModified'] = get_now().isoformat() documents.append(document) self.db.save(data) versions = [{'dateModified': i['dateModified'], 'url': i['url']} for i in documents[:-1]] response = self.app.get('/auctions/{}/documents/{}'.format(auction['id'], document['id'])) self.assertEqual(response.status, '200 OK') self.assertEqual(response.content_type, 'application/json') self.assertEqual(len(response.json['data']['previousVersions']), len(versions)) self.assertEqual(response.json['data']['previousVersions'], versions) @unittest.skipUnless(auctions_core, "Auctions is not reachable") class AuctionAwardDocumentResourceTest(AuctionBaseWebTest): def test_listing(self): auction = self.create_auction() award = auction['awards'][0] document = award['documents'][0] response = self.app.get('/auctions/{}/awards/{}/documents'.format(auction['id'], award['id'])) self.assertEqual(response.status, '200 OK') self.assertEqual(response.content_type, 'application/json') self.assertEqual(response.json['data'], award['documents']) self.assertNotIn('{\n "', response.body) self.assertNotIn('callback({', response.body) response = self.app.get('/auctions/{}/awards/{}/documents?opt_jsonp=callback'.format(auction['id'], award['id'])) self.assertEqual(response.status, '200 OK') self.assertEqual(response.content_type, 'application/javascript') self.assertNotIn('{\n "', response.body) self.assertIn('callback({', response.body) response = self.app.get('/auctions/{}/awards/{}/documents?opt_pretty=1'.format(auction['id'], award['id'])) self.assertEqual(response.status, '200 OK') self.assertEqual(response.content_type, 'application/json') self.assertIn('{\n "', response.body) self.assertNotIn('callback({', response.body) response = self.app.get('/auctions/{}/awards/{}/documents?opt_jsonp=callback&opt_pretty=1'.format(auction['id'], award['id'])) self.assertEqual(response.status, '200 OK') self.assertEqual(response.content_type, 'application/javascript') self.assertIn('{\n "', response.body) self.assertIn('callback({', response.body) def test_listing_changes(self): auction = self.create_auction() data = self.db[auction['id']] award = data['awards'][0] award_documents = award['documents'] for i in range(3): document = deepcopy(test_document) document['dateModified'] = get_now().isoformat() document['id'] = uuid4().hex award_documents.append(document) self.db.save(data) ids = ','.join([i['id'] for i in award_documents]) response = self.app.get('/auctions/{}/awards/{}/documents'.format(auction['id'], award['id'])) self.assertTrue(ids.startswith(','.join([i['id'] for i in response.json['data']]))) self.assertEqual(response.status, '200 OK') self.assertEqual(len(response.json['data']), len(award_documents)) self.assertEqual(set([i['id'] for i in response.json['data']]), set([i['id'] for i in award_documents])) self.assertEqual(set([i['dateModified'] for i in response.json['data']]), set([i['dateModified'] for i in award_documents])) self.assertEqual([i['dateModified'] for i in response.json['data']], sorted([i['dateModified'] for i in 
award_documents])) def test_get_award_document(self): auction = self.create_auction() award = auction['awards'][0] award_document = award['documents'][0] response = self.app.get('/auctions/{}/awards/{}/documents/{}'.format(auction['id'], award['id'], award_document['id'])) self.assertEqual(response.status, '200 OK') self.assertEqual(response.content_type, 'application/json') self.assertDictEqual(response.json['data'], award_document) response = self.app.get('/auctions/{}/awards/{}/documents/{}?opt_jsonp=callback'.format(auction['id'], award['id'],award_document['id'])) self.assertEqual(response.status, '200 OK') self.assertEqual(response.content_type, 'application/javascript') self.assertIn('callback({"data": {"', response.body) response = self.app.get('/auctions/{}/awards/{}/documents/{}?opt_pretty=1'.format(auction['id'], award['id'], award_document['id'])) self.assertEqual(response.status, '200 OK') self.assertEqual(response.content_type, 'application/json') self.assertIn('{\n "data": {\n "', response.body) def test_award_document_not_found(self): auction = self.create_auction() response = self.app.get('/auctions/{}/awards/{}/documents/some_id'.format(auction['id'], auction['awards'][0]['id']), status=404) self.assertEqual(response.status, '404 Not Found') self.assertEqual(response.content_type, 'application/json') self.assertEqual(response.json['status'], 'error') self.assertEqual(response.json['errors'], [ {u'description': u'Not Found', u'location': u'url', u'name': u'document_id'} ]) def suite(): suite = unittest.TestSuite() suite.addTest(unittest.makeSuite(AuctionResourceTest)) suite.addTest(unittest.makeSuite(AuctionAwardResourceTest)) suite.addTest(unittest.makeSuite(AuctionAwardDocumentResourceTest)) return suite if __name__ == '__main__': unittest.main(defaultTest='suite')
47.960503
145
0.63521
25,814
0.96631
0
0
26,009
0.973609
0
0
6,575
0.246126
37cfc9903bdf3148211aecc7d83461d403271fff
3,967
py
Python
webium/controls/select.py
kejkz/webium
ccb09876a201e75f5c5810392d4db7a8708b90cb
[ "Apache-2.0" ]
152
2015-01-16T11:26:56.000Z
2022-01-22T12:11:28.000Z
webium/controls/select.py
goblinintree/webium
ccb09876a201e75f5c5810392d4db7a8708b90cb
[ "Apache-2.0" ]
13
2015-03-05T14:36:44.000Z
2018-08-08T09:43:39.000Z
webium/controls/select.py
goblinintree/webium
ccb09876a201e75f5c5810392d4db7a8708b90cb
[ "Apache-2.0" ]
57
2015-01-27T12:53:49.000Z
2022-03-26T23:02:36.000Z
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.remote.webelement import WebElement


class Select(WebElement):
    """
    Implements logic to work with Web List UI elements
    """

    @property
    def is_multiple(self):
        value = self.get_attribute('multiple')
        return value is not None and not value == 'false'

    def select_option(self, option):
        """
        Performs selection of provided item from Web List
        @params option - string item name
        """
        items_list = self.get_options()
        for item in items_list:
            if item.get_attribute("value") == option:
                item.click()
                break

    def get_options(self):
        """
        Performs search for provided item in Web List
        """
        return self.find_elements_by_tag_name('option')

    def get_attribute_selected(self, attribute):
        """
        Performs search of selected item from Web List
        Return attribute of selected item
        @params attribute - string attribute name
        """
        items_list = self.get_options()
        return next(iter([item.get_attribute(attribute) for item in items_list if item.is_selected()]), None)

    def get_value_selected(self):
        """
        Performs search of selected item from Web List
        Return value of selected item
        """
        return self.get_attribute_selected('value')

    def get_text_selected(self):
        """
        Performs search of selected item from Web List
        Return text of selected item
        """
        return self.get_attribute_selected('text')

    def select_by_visible_text(self, text):
        """
        Performs search of selected item from Web List
        @params text - string visible text
        """
        xpath = './/option[normalize-space(.) = {0}]'.format(self._escape_string(text))
        opts = self.find_elements_by_xpath(xpath)
        matched = False
        for opt in opts:
            self._set_selected(opt)
            if not self.is_multiple:
                return
            matched = True
        # in case the target option isn't found by xpath
        # attempt to find it by direct comparison among options which contain at least the longest token from the text
        if len(opts) == 0 and ' ' in text:
            sub_string_without_space = self._get_longest_token(text)
            if sub_string_without_space == "":
                candidates = self.get_options()
            else:
                xpath = ".//option[contains(.,{0})]".format(self._escape_string(sub_string_without_space))
                candidates = self.find_elements_by_xpath(xpath)
            for candidate in candidates:
                if text == candidate.text:
                    self._set_selected(candidate)
                    if not self.is_multiple:
                        return
                    matched = True
        if not matched:
            raise NoSuchElementException("Could not locate element with visible text: " + str(text))

    @staticmethod
    def _escape_string(value):
        if '"' in value and "'" in value:
            substrings = value.split('"')
            result = ['concat(']
            for substring in substrings:
                result.append('"{0}"'.format(substring))
                result.append(', \'"\', ')
            result.pop()
            if value.endswith('"'):
                result.append(', \'"\'')
            return ''.join(result) + ')'
        if '"' in value:
            return "'{0}'".format(value)
        return '"{0}"'.format(value)

    @staticmethod
    def _get_longest_token(value):
        items = value.split(' ')
        longest = ''
        for item in items:
            if len(item) > len(longest):
                longest = item
        return longest

    @staticmethod
    def _set_selected(option):
        if not option.is_selected():
            option.click()
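A minimal usage sketch for the Select control above, assuming it is already bound to a live <select> element (for example through webium's element factory); the helper name and option text are illustrative and not part of the original module.

def choose_option(select_control, visible_text):
    """Pick an option on a bound Select control and return what is now selected."""
    # select_by_visible_text falls back to a longest-token search for multi-word labels
    select_control.select_by_visible_text(visible_text)
    return select_control.get_value_selected(), select_control.get_text_selected()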
33.058333
118
0.57575
3,842
0.96849
0
0
1,023
0.257877
0
0
1,137
0.286615
37d1196ce920fb2354298f73f3de4a4a984c7332
12,564
py
Python
mc/cookies/CookieManager.py
zy-sunshine/falkon-pyqt5
bc2b60aa21c9b136439bd57a11f391d68c736f99
[ "MIT" ]
1
2021-04-29T05:36:44.000Z
2021-04-29T05:36:44.000Z
mc/cookies/CookieManager.py
zy-sunshine/falkon-pyqt5
bc2b60aa21c9b136439bd57a11f391d68c736f99
[ "MIT" ]
1
2020-03-28T17:43:18.000Z
2020-03-28T17:43:18.000Z
mc/cookies/CookieManager.py
zy-sunshine/falkon-pyqt5
bc2b60aa21c9b136439bd57a11f391d68c736f99
[ "MIT" ]
1
2021-01-15T20:09:24.000Z
2021-01-15T20:09:24.000Z
from PyQt5.QtWidgets import QDialog from PyQt5 import uic from PyQt5.Qt import Qt from PyQt5.Qt import QShortcut from PyQt5.Qt import QKeySequence from PyQt5.QtWidgets import QMessageBox from PyQt5.QtWidgets import QInputDialog from PyQt5.Qt import QDateTime from PyQt5.Qt import QStyle from PyQt5.Qt import QNetworkCookie from PyQt5.QtWidgets import QTreeWidgetItem from mc.common.globalvars import gVar from mc.app.Settings import Settings from mc.common import const from mc.tools.TreeWidget import TreeWidget from mc.tools.IconProvider import IconProvider class HashableTreeWidgetItem(QTreeWidgetItem): def __hash__(self): return id(self) class CookieManager(QDialog): def __init__(self, parent=None): ''' @param parent QWidget ''' super().__init__(parent) self._ui = uic.loadUi('mc/cookies/CookieManager.ui', self) self._domainHash = {} # QHash<QString, QTreeWidgetItem> self._itemHash = {} # QHash<QTreeWidgetItem, QNetworkCookie> self.setAttribute(Qt.WA_DeleteOnClose) gVar.appTools.centerWidgetOnScreen(self) if self.isRightToLeft(): self._ui.cookieTree.headerItem().setTextAlignment(0, Qt.AlignRight | Qt.AlignVCenter) self._ui.cookieTree.headerItem().setTextAlignment(1, Qt.AlignRight | Qt.AlignVCenter) self._ui.cookieTree.setLayoutDirection(Qt.LeftToRight) self._ui.whiteList.setLayoutDirection(Qt.LeftToRight) self._ui.blackList.setLayoutDirection(Qt.LeftToRight) # Stored Cookies self._ui.cookieTree.currentItemChanged.connect(self._currentItemChanged) self._ui.removeAll.clicked.connect(self._removeAll) self._ui.removeOne.clicked.connect(self._remove) self._ui.close.clicked.connect(lambda: self._close()) self._ui.close2.clicked.connect(lambda: self._close()) self._ui.close3.clicked.connect(lambda: self._close()) self._ui.search.textChanged.connect(self._filterString) # Cookie Filtering self._ui.whiteAdd.clicked.connect(self._addWhitelist) self._ui.whiteRemove.clicked.connect(self._removeWhitelist) self._ui.blackAdd.clicked.connect(self._addBlacklist) self._ui.blackRemove.clicked.connect(self._removeBlacklist) # Cookie Settings settings = Settings() settings.beginGroup('Cookie-Settings') self._ui.saveCookies.setChecked(settings.value('allCookies', True)) self._ui.filter3rdParty.setChecked(settings.value('filterThirdPartyCookies', False)) self._ui.filterTracking.setChecked(settings.value('filterTrackingCookie', False)) self._ui.deleteCookiesOnClose.setChecked(settings.value('deleteCookiesOnClose', False)) self._ui.whiteList.addItems(settings.value('whitelist', [])) self._ui.blackList.addItems(settings.value('blacklist', [])) settings.endGroup() if const.QTWEBENGINEWIDGETS_VERSION < const.QT_VERSION_CHECK(5, 11, 0): self._ui.filter3rdParty.hide() self._ui.search.setPlaceholderText(_('Search')) self._ui.cookieTree.setDefaultItemShowMode(TreeWidget.ItemsCollapsed) self._ui.cookieTree.sortItems(0, Qt.AscendingOrder) self._ui.cookieTree.header().setDefaultSectionSize(220) self._ui.cookieTree.setFocus() self._ui.whiteList.setSortingEnabled(True) self._ui.blackList.setSortingEnabled(True) self._removeShortcut = QShortcut(QKeySequence('Del'), self) self._removeShortcut.activated.connect(self._deletePressed) self._ui.search.textChanged.connect(self._filterString) cookieJar = gVar.app.cookieJar() cookieJar.cookieAdded.connect(self._addCookie) cookieJar.cookieRemoved.connect(self._removeCookie) # Load cookies for cookie in cookieJar.getAllCookies(): self._addCookie(cookie) gVar.appTools.setWmClass('Cookies', self) def _close(self): super().close() # private Q_SLOTS: def _currentItemChanged(self, current, 
parent): ''' @param: current QTreeWidgetItem @param: parent QTreeWidgetItem ''' if not current: return if not current.text(1): self._ui.name.setText(_('<cookie not selected>')) self._ui.value.setText(_("<cookie not selected>")) self._ui.server.setText(_("<cookie not selected>")) self._ui.path.setText(_("<cookie not selected>")) self._ui.secure.setText(_("<cookie not selected>")) self._ui.expiration.setText(_("<cookie not selected>")) self._ui.removeOne.setText(_("Remove cookies")) return cookie = current.data(0, Qt.UserRole + 10) self._ui.name.setText(cookie.name().data().decode()) self._ui.value.setText(cookie.value().data().decode()) self._ui.server.setText(cookie.domain()) self._ui.path.setText(cookie.path()) if cookie.isSecure(): self._ui.secure.setText(_('Secure only')) else: self._ui.secure.setText(_('All connections')) if cookie.isSessionCookie(): self._ui.expiration.setText(_('Session cookie')) else: self._ui.expiration.setText( QDateTime(cookie.expirationDate()).toString('hh:mm:ss dddd d. MMMM yyyy') ) self._ui.removeOne.setText(_('Remove cookie')) def _remove(self): current = self._ui.cookieTree.currentItem() if not current: return cookies = [] # QList<QNetworkCookie> if current.childCount(): for idx in range(current.childCount()): # QTreeWidgetItem item = current.child(idx) if item and item in self._itemHash: cookies.append(self._itemHash[item]) elif current in self._itemHash: cookies.append(self._itemHash[current]) cookieJar = gVar.app.cookieJar() for cookie in cookies: cookieJar.deleteCookie(cookie) def _removeAll(self): button = QMessageBox.warning(self, _('Confirmation'), _('Are you sure you want to delete all cookies on your computer?'), QMessageBox.Yes | QMessageBox.No) if button != QMessageBox.Yes: return gVar.app.cookieJar().deleteAllCookies() self._itemHash.clear() self._domainHash.clear() self._ui.cookieTree.clear() def _addWhitelist(self): server, ok = QInputDialog.getText(self, _('Add to whitelist'), _('Server:')) if not server: return if self._ui.blackList.findItems(server, Qt.MatchFixedString): QMessageBox.information(self, _('Already blacklisted!'), _("The server \"%s\" is already in blacklist, please remove it first.") % server) return if not self._ui.whiteList.findItems(server, Qt.MatchFixedString): self._ui.whiteList.addItem(server) def _removeWhitelist(self): item = self._ui.whiteList.currentItem() self._removeTreeItem(self._ui.whiteList, item) def _addBlacklist(self): server, ok = QInputDialog.getText(self, _('Add to blacklist'), _('Server:')) self._addBlacklistByServer(server) def _removeBlacklist(self): item = self._ui.blackList.currentItem() self._removeTreeItem(self._ui.blackList, item) def _deletePressed(self): if self._ui.cookieTree.hasFocus(): self._remove() elif self._ui.whiteList.hasFocus(): self._removeWhitelist() elif self._ui.blackList.hasFocus(): self._removeBlacklist() def _filterString(self, string): ''' @param: string QString ''' print('=====>', string) if not string: for idx in range(self._ui.cookieTree.topLevelItemCount()): item = self._ui.cookieTree.topLevelItem(idx) item.setHidden(False) item.setExpanded(self._ui.cookieTree.defaultItemShowMode() == TreeWidget.ItemsExpanded) else: strLower = string.lower() for idx in range(self._ui.cookieTree.topLevelItemCount()): item = self._ui.cookieTree.topLevelItem(idx) text = '.' 
+ item.text(0) item.setHidden(text.lower() not in strLower) item.setExpanded(True) def _addCookie(self, cookie): ''' @param: cookie QNetworkCookie ''' item = None # QTreeWidgetItem domain = self._cookieDomain(cookie) findParent = self._domainHash.get(domain) if findParent: item = HashableTreeWidgetItem(findParent) else: newParent = HashableTreeWidgetItem(self._ui.cookieTree) newParent.setText(0, domain) newParent.setIcon(0, IconProvider.standardIcon(QStyle.SP_DirIcon)) newParent.setData(0, Qt.UserRole + 10, cookie.domain()) self._ui.cookieTree.addTopLevelItem(newParent) self._domainHash[domain] = newParent item = HashableTreeWidgetItem(newParent) cookie = QNetworkCookie(cookie) item.setText(0, '.' + domain) item.setText(1, cookie.name().data().decode()) item.setData(0, Qt.UserRole + 10, cookie) self._ui.cookieTree.addTopLevelItem(item) self._itemHash[item] = cookie def _removeCookie(self, cookie): ''' @param: cookie QNetworkCookie ''' # QTreeWidgetItem item = self._cookieItem(cookie) if not item: return self._itemHash.pop(item, None) itemParent = item.parent() if itemParent and itemParent.childCount() == 1: self._domainHash.pop(self._cookieDomain(cookie), None) self._removeTreeItem(self._ui.cookieTree, itemParent) item = None if item: self._removeTreeItem(self._ui.cookieTree, item) def _removeTreeItem(self, tree, item): if not item: return (item.parent() or tree.invisibleRootItem()).removeChild(item) # private: # override def closeEvent(self, event): ''' @param event QCloseEvent ''' whitelist = [] blacklist = [] for idx in range(self._ui.whiteList.count()): item = self._ui.whiteList.item(idx) whitelist.append(item.text()) for idx in range(self._ui.blackList.count()): item = self._ui.blackList.item(idx) blacklist.append(item.text()) settings = Settings() settings.beginGroup('Cookie-Settings') settings.setValue('allowCookies', self._ui.saveCookies.isChecked()) settings.setValue('filterThirdPartyCookies', self._ui.filter3rdParty.isChecked()) settings.setValue('filterTrackingCookie', self._ui.filterTracking.isChecked()) settings.setValue('deleteCookiesOnClose', self._ui.deleteCookiesOnClose.isChecked()) settings.setValue('whitelist', whitelist) settings.setValue('blacklist', blacklist) settings.endGroup() gVar.app.cookieJar().loadSettings() event.accept() # override def keyPressEvent(self, event): ''' @param event QKeyEvent ''' if event.key() == Qt.Key_Escape: self._close() super().keyPressEvent(event) def _addBlacklistByServer(self, server): ''' @param: server QString ''' if not server: return if self._ui.whiteList.findItems(server, Qt.MatchFixedString): QMessageBox.information(self, _('Already whitelisted!'), _("The server \"%s\" is already in whitelist, please remove it first.") % server) return if not self._ui.blackList.findItems(server, Qt.MatchFixedString): self._ui.blackList.addItem(server) def _cookieDomain(self, cookie): ''' @param: cookie QNetworkCookie @return: QString ''' domain = cookie.domain() domain = domain.lstrip('.') return domain def _cookieItem(self, cookie): ''' @param: cookie QNetworkCookie @return: QTreeWidgetItem ''' for key, val in self._itemHash.items(): if val == cookie: return key return None
35.897143
103
0.630372
12,000
0.95511
0
0
0
0
0
0
1,719
0.136819
37d161d2ab9998ed2955dcc68be64d87474fc1ce
1,803
py
Python
.circleci/process_submitted_data.py
dongbohu/cimr-d
7d8f7f7319cff0092946a28d1416d38c06e085d7
[ "CC-BY-4.0" ]
null
null
null
.circleci/process_submitted_data.py
dongbohu/cimr-d
7d8f7f7319cff0092946a28d1416d38c06e085d7
[ "CC-BY-4.0" ]
null
null
null
.circleci/process_submitted_data.py
dongbohu/cimr-d
7d8f7f7319cff0092946a28d1416d38c06e085d7
[ "CC-BY-4.0" ]
2
2019-05-22T16:05:54.000Z
2019-05-23T14:29:10.000Z
#!/usr/bin/env python3

import os
import sys
import logging
import subprocess

logging.basicConfig(level=logging.INFO)

root_dir = 'submitted_data'

submitted_file_split = set()
for dir_, _, files in os.walk(root_dir):
    for file_name in files:
        rel_dir = os.path.relpath(dir_, root_dir)
        rel_file = os.path.join(root_dir, rel_dir, file_name)
        submitted_file_split.add(rel_file)

for submitted_file in submitted_file_split:
    if submitted_file.startswith('submitted_data'):
        dir_name, data_type, file_name = submitted_file.split('/')
        out_dir_name = 'processed_data'
        if not os.path.isdir(out_dir_name):
            os.makedirs(out_dir_name, exist_ok=True)
        if not os.path.isdir(out_dir_name + '/' + data_type):
            os.makedirs(out_dir_name + '/' + data_type, exist_ok=True)
        outfile = submitted_file.replace(dir_name, out_dir_name)
        if not os.path.isfile(outfile):
            if not data_type == 'tad':
                from cimr.processor.utils import Infiler
                infile = Infiler(
                    data_type,
                    submitted_file,
                    genome_build='b38',
                    update_rsid=False,
                    outfile=str(outfile),
                    chunksize=700000
                )
                infile.read_file()
                if data_type == 'eqtl':
                    from cimr.processor.query import Querier
                    genes = list(infile.list_genes())
                    queried = Querier(genes)
                    queried.form_query()
        else:
            logging.info(f' processed file already exists for {submitted_file}')
            logging.info(f' if reprocessing, delete {outfile} and file a new pull request')
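For context, a sketch of the two-level submitted_data/ layout the walker above assumes; the eqtl and tad directory names appear in the script itself, while the file names are purely illustrative.

# submitted_data/
#   eqtl/
#     some_study.txt.gz        <- illustrative file name
#   tad/
#     some_annotations.txt     <- illustrative file name
#
# This two-level layout is what lets
#     dir_name, data_type, file_name = submitted_file.split('/')
# unpack into exactly three parts.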
31.086207
95
0.585136
0
0
0
0
0
0
0
0
214
0.118691
37d19d97641fbdfe4cfca519ffd963eb1a649c60
469
py
Python
common/enums.py
resourceidea/resourceideaapi
4cc7db98f981d8f2011c1995e23e8a8655e31f75
[ "MIT" ]
1
2020-05-30T22:27:59.000Z
2020-05-30T22:27:59.000Z
common/enums.py
resourceidea/resourceideaapi
4cc7db98f981d8f2011c1995e23e8a8655e31f75
[ "MIT" ]
15
2020-02-11T21:53:08.000Z
2021-11-02T21:20:03.000Z
common/enums.py
resourceidea/resourceideaapi
4cc7db98f981d8f2011c1995e23e8a8655e31f75
[ "MIT" ]
1
2020-08-27T10:57:47.000Z
2020-08-27T10:57:47.000Z
import enum


class Status(enum.Enum):
    """Status enumeration."""
    ACTIVE = 'ACTIVE'
    DISABLED = 'DISABLED'
    ARCHIVED = 'ARCHIVED'
    DELETED = 'DELETED'


class ProgressStatus(enum.Enum):
    """Enumeration indicates the different stages of the progress
    made on an engagement, job or task."""
    NOT_STARTED = 'NOT STARTED'
    RUNNING = 'RUNNING'
    IN_REVIEW = 'IN REVIEW'
    REVIEWED = 'REVIEWED'
    CLOSED = 'CLOSED'
21.318182
50
0.616205
444
0.946695
0
0
0
0
0
0
223
0.47548
37d29492156d47c44672b00f04cedb7fbbdcf78e
5,880
py
Python
networks/mobilenet.py
softsys4ai/FlexiBO
1406d67e5bd14d6b7210e724e6b239889f210db6
[ "MIT" ]
8
2020-06-23T07:05:18.000Z
2021-10-24T02:38:14.000Z
networks/mobilenet.py
softsys4ai/FlexiBO
1406d67e5bd14d6b7210e724e6b239889f210db6
[ "MIT" ]
null
null
null
networks/mobilenet.py
softsys4ai/FlexiBO
1406d67e5bd14d6b7210e724e6b239889f210db6
[ "MIT" ]
3
2020-01-06T10:49:12.000Z
2020-04-20T03:26:33.000Z
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# MobileNet 224 (2017)
# Paper: https://arxiv.org/pdf/1704.04861.pdf

import os
import tensorflow as tf
from tensorflow.keras import layers, Input, Model


def stem(inputs, alpha, n_filters, filter_size):
    """ Construct the stem group
        inputs : input tensor
        alpha  : width multiplier
    """
    # Convolutional block
    x = layers.ZeroPadding2D(padding=((0, 1), (0, 1)))(inputs)
    x = layers.Conv2D(n_filters, (filter_size, filter_size), strides=(2, 2), padding='valid')(x)
    x = layers.BatchNormalization()(x)
    x = layers.ReLU()(x)

    # Depthwise Separable Convolution Block
    x = depthwise_block(x, 64, alpha, (1, 1))
    return x


def classifier(x, alpha, dropout, n_classes):
    """ Construct the classifier group
        x         : input to the classifier
        alpha     : width multiplier
        dropout   : dropout percentage
        n_classes : number of output classes
    """
    # Flatten the feature maps into 1D feature maps (?, N)
    x = layers.GlobalAveragePooling2D()(x)

    # Reshape the feature maps to (?, 1, 1, 1024)
    shape = (1, 1, int(1024 * alpha))
    x = layers.Reshape(shape)(x)
    # Perform dropout for preventing overfitting
    x = layers.Dropout(dropout)(x)

    # Use convolution for classifying (emulates a fully connected layer)
    x = layers.Conv2D(n_classes, (1, 1), padding='same')(x)
    x = layers.Activation('softmax')(x)
    # Reshape the resulting output to 1D vector of number of classes
    x = layers.Reshape((n_classes, ))(x)

    return x


def depthwise_block(x, n_filters, alpha, strides):
    """ Construct a Depthwise Separable Convolution block
        x         : input to the block
        n_filters : number of filters
        alpha     : width multiplier
        strides   : strides
    """
    # Apply the width filter to the number of feature maps
    filters = int(n_filters * alpha)

    # Strided convolution to match number of filters
    if strides == (2, 2):
        x = layers.ZeroPadding2D(padding=((0, 1), (0, 1)))(x)
        padding = 'valid'
    else:
        padding = 'same'

    # Depthwise Convolution
    x = layers.DepthwiseConv2D((3, 3), strides, padding=padding)(x)
    x = layers.BatchNormalization()(x)
    x = layers.ReLU()(x)

    # Pointwise Convolution
    x = layers.Conv2D(filters, (1, 1), strides=(1, 1), padding='same')(x)
    x = layers.BatchNormalization()(x)
    x = layers.ReLU()(x)

    return x


def get_configurable_hyperparams():
    """This function is used to get the configurable hyperparameters
    """
    import yaml
    with open("cur_config.yaml") as fp:
        cur_cfg = yaml.safe_load(fp)  # safe_load avoids PyYAML's unsafe default loader
    # Six hyperparameters are unpacked in __main__ below, so six entries are returned
    # here (the original returned only five, which would raise a ValueError at unpack).
    return (cur_cfg["cur_conf"][0],
            cur_cfg["cur_conf"][1],
            cur_cfg["cur_conf"][2],
            cur_cfg["cur_conf"][3],
            cur_cfg["cur_conf"][4],
            cur_cfg["cur_conf"][5])


def get_data():
    """This function is used to get train and test data
    """
    from tensorflow.keras.datasets import cifar10
    import numpy as np
    (x_train, y_train), (x_test, y_test) = cifar10.load_data()
    x_train = (x_train / 255.0).astype(np.float32)
    x_test = (x_test / 255.0).astype(np.float32)
    return x_train, y_train, x_test, y_test


if __name__ == "__main__":
    # get configurable hyperparams (comma after stem_filter_size was missing in the original)
    (stem_n_filters,
     stem_filter_size,
     depthwise_block1_n_filters,
     depthwise_block2_n_filters,
     depthwise_block3_n_filters,
     depthwise_block4_n_filters,) = get_configurable_hyperparams()

    alpha = 1         # width multiplier
    dropout = 0.5     # dropout percentage
    n_classes = 1000  # number of classes

    inputs = Input(shape=(224, 224, 3))

    # Create the stem group
    x = stem(inputs, alpha, stem_n_filters, stem_filter_size)

    # First Depthwise Separable Convolution Group
    # Strided convolution - feature map size reduction
    x = depthwise_block(x, depthwise_block1_n_filters, alpha, strides=(2, 2))
    x = depthwise_block(x, depthwise_block1_n_filters, alpha, strides=(1, 1))

    # Second Depthwise Separable Convolution Group
    # Strided convolution - feature map size reduction
    x = depthwise_block(x, depthwise_block2_n_filters, alpha, strides=(2, 2))
    x = depthwise_block(x, depthwise_block2_n_filters, alpha, strides=(1, 1))

    # Third Depthwise Separable Convolution Group
    # Strided convolution - feature map size reduction
    x = depthwise_block(x, depthwise_block3_n_filters, alpha, strides=(2, 2))
    for _ in range(5):
        x = depthwise_block(x, depthwise_block3_n_filters, alpha, strides=(1, 1))

    # Fourth Depthwise Separable Convolution Group
    # Strided convolution - feature map size reduction
    x = depthwise_block(x, depthwise_block4_n_filters, alpha, strides=(2, 2))
    x = depthwise_block(x, depthwise_block4_n_filters, alpha, strides=(1, 1))

    # Create the classifier
    outputs = classifier(x, alpha, dropout, n_classes)

    # Instantiate the Model
    model = Model(inputs, outputs)
    model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['acc'])
    model.summary()

    # Note: the model expects 224x224x3 inputs while CIFAR-10 images are 32x32x3;
    # the images need to be resized (or the Input shape adjusted) before fit() will run.
    x_train, y_train, x_test, y_test = get_data()

    # train model (variable names now match the get_data() unpacking above)
    model.fit(x_train, y_train, epochs=10, batch_size=32, validation_split=0.1, verbose=1)

    # save model
    fmodel = os.path.join(os.getcwd(), "model.h5")
    model.save(fmodel)
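For reference, a hedged sketch of the cur_config.yaml layout that get_configurable_hyperparams() above assumes: a single cur_conf list whose six entries map, in order, onto the six unpacked hyperparameter names. The numeric values below are illustrative, not taken from the FlexiBO repository.

# cur_config.yaml -- illustrative values only
cur_conf:
  - 32      # stem_n_filters
  - 3       # stem_filter_size
  - 128     # depthwise_block1_n_filters
  - 256     # depthwise_block2_n_filters
  - 512     # depthwise_block3_n_filters
  - 1024    # depthwise_block4_n_filters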
35.209581
96
0.671769
0
0
0
0
0
0
0
0
2,489
0.423299
37d2de39d6a42eafed34788e36c34749e153b301
500
py
Python
info.py
altfool/mri_face_detection
3117f7f00c98efe2260936146ce6b5454b059672
[ "MIT" ]
1
2021-11-13T02:42:49.000Z
2021-11-13T02:42:49.000Z
info.py
altfool/mri_face_detection
3117f7f00c98efe2260936146ce6b5454b059672
[ "MIT" ]
null
null
null
info.py
altfool/mri_face_detection
3117f7f00c98efe2260936146ce6b5454b059672
[ "MIT" ]
null
null
null
import numpy as np

img_dtype = np.float32
imgX, imgY, imgZ = (256, 256, 150)
imgs_path_withfaces = '../dataset/withfaces'
imgs_path_nofaces = '../dataset/nofaces'

imgX_dwt1, imgY_dwt1, imgZ_dwt1 = (128, 128, 75)
imgs_path_withfaces_dwt = './dataset/withfaces'
imgs_path_nofaces_dwt = './dataset/nofaces'

dwt_flag = (True, False)[0]
if dwt_flag:
    imgX, imgY, imgZ = imgX_dwt1, imgY_dwt1, imgZ_dwt1
    imgs_path_withfaces = imgs_path_withfaces_dwt
    imgs_path_nofaces = imgs_path_nofaces_dwt
27.777778
54
0.752
0
0
0
0
0
0
0
0
82
0.164
37d34e7f40c00147044227bceb687730996c355b
10,288
py
Python
biggan/paddorch/paddorch/vision/functional.py
zzz2010/Contrib
d351d83da718145cef9f6c98598f7fedc027efe5
[ "Apache-2.0" ]
20
2020-03-13T13:40:32.000Z
2022-03-10T07:31:48.000Z
biggan/paddorch/paddorch/vision/functional.py
zzz2010/Contrib
d351d83da718145cef9f6c98598f7fedc027efe5
[ "Apache-2.0" ]
34
2020-02-20T11:04:58.000Z
2022-03-12T00:54:26.000Z
biggan/paddorch/paddorch/vision/functional.py
zzz2010/Contrib
d351d83da718145cef9f6c98598f7fedc027efe5
[ "Apache-2.0" ]
41
2020-02-14T09:34:39.000Z
2022-03-10T07:31:42.000Z
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys import collections import random import math import cv2 import numbers import numpy as np if sys.version_info < (3, 3): Sequence = collections.Sequence Iterable = collections.Iterable else: Sequence = collections.abc.Sequence Iterable = collections.abc.Iterable __all__ = ['flip', 'resize', 'pad', 'rotate', 'to_grayscale'] def flip(image, code): """ Accordding to the code (the type of flip), flip the input image Args: image: Input image, with (H, W, C) shape code: Code that indicates the type of flip. -1 : Flip horizontally and vertically 0 : Flip vertically 1 : Flip horizontally Examples: .. code-block:: python import numpy as np from paddle.incubate.hapi.vision.transforms import functional as F fake_img = np.random.rand(224, 224, 3) # flip horizontally and vertically F.flip(fake_img, -1) # flip vertically F.flip(fake_img, 0) # flip horizontally F.flip(fake_img, 1) """ return cv2.flip(image, flipCode=code) def resize(img, size, interpolation=cv2.INTER_LINEAR): """ resize the input data to given size Args: input: Input data, could be image or masks, with (H, W, C) shape size: Target size of input data, with (height, width) shape. interpolation: Interpolation method. Examples: .. code-block:: python import numpy as np from paddle.incubate.hapi.vision.transforms import functional as F fake_img = np.random.rand(256, 256, 3) F.resize(fake_img, 224) F.resize(fake_img, (200, 150)) """ if isinstance(interpolation, Sequence): interpolation = random.choice(interpolation) if isinstance(size, int): h, w = img.shape[:2] if (w <= h and w == size) or (h <= w and h == size): return img if w < h: ow = size oh = int(size * h / w) return cv2.resize(img, (ow, oh), interpolation=interpolation) else: oh = size ow = int(size * w / h) return cv2.resize(img, (ow, oh), interpolation=interpolation) else: return cv2.resize(img, tuple(size[::-1]), interpolation=interpolation) def pad(img, padding, fill=(0, 0, 0), padding_mode='constant'): """Pads the given CV Image on all sides with speficified padding mode and fill value. Args: img (np.ndarray): Image to be padded. padding (int|tuple): Padding on each border. If a single int is provided this is used to pad all borders. If tuple of length 2 is provided this is the padding on left/right and top/bottom respectively. If a tuple of length 4 is provided this is the padding for the left, top, right and bottom borders respectively. fill (int|tuple): Pixel fill value for constant fill. Default is 0. If a tuple of length 3, it is used to fill R, G, B channels respectively. This value is only used when the padding_mode is constant padding_mode: Type of padding. Should be: constant, edge, reflect or symmetric. Default is constant. ``constant`` means padding with a constant value, this value is specified with fill. ``edge`` means padding with the last value at the edge of the image. 
``reflect`` means padding with reflection of image (without repeating the last value on the edge) padding ``[1, 2, 3, 4]`` with 2 elements on both sides in reflect mode will result in ``[3, 2, 1, 2, 3, 4, 3, 2]``. ``symmetric`` menas pads with reflection of image (repeating the last value on the edge) padding ``[1, 2, 3, 4]`` with 2 elements on both sides in symmetric mode will result in ``[2, 1, 1, 2, 3, 4, 4, 3]``. Returns: numpy ndarray: Padded image. Examples: .. code-block:: python import numpy as np from paddle.incubate.hapi.vision.transforms.functional import pad fake_img = np.random.rand(500, 500, 3).astype('float32') fake_img = pad(fake_img, 2) print(fake_img.shape) """ if not isinstance(padding, (numbers.Number, list, tuple)): raise TypeError('Got inappropriate padding arg') if not isinstance(fill, (numbers.Number, str, list, tuple)): raise TypeError('Got inappropriate fill arg') if not isinstance(padding_mode, str): raise TypeError('Got inappropriate padding_mode arg') if isinstance(padding, collections.Sequence) and len(padding) not in [2, 4]: raise ValueError( "Padding must be an int or a 2, or 4 element tuple, not a " + "{} element tuple".format(len(padding))) assert padding_mode in ['constant', 'edge', 'reflect', 'symmetric'], \ 'Expected padding mode be either constant, edge, reflect or symmetric, but got {}'.format(padding_mode) PAD_MOD = { 'constant': cv2.BORDER_CONSTANT, 'edge': cv2.BORDER_REPLICATE, 'reflect': cv2.BORDER_DEFAULT, 'symmetric': cv2.BORDER_REFLECT } if isinstance(padding, int): pad_left = pad_right = pad_top = pad_bottom = padding if isinstance(padding, collections.Sequence) and len(padding) == 2: pad_left = pad_right = padding[0] pad_top = pad_bottom = padding[1] if isinstance(padding, collections.Sequence) and len(padding) == 4: pad_left, pad_top, pad_right, pad_bottom = padding if isinstance(fill, numbers.Number): fill = (fill,) * (2 * len(img.shape) - 3) if padding_mode == 'constant': assert (len(fill) == 3 and len(img.shape) == 3) or (len(fill) == 1 and len(img.shape) == 2), \ 'channel of image is {} but length of fill is {}'.format(img.shape[-1], len(fill)) img = cv2.copyMakeBorder( src=img, top=pad_top, bottom=pad_bottom, left=pad_left, right=pad_right, borderType=PAD_MOD[padding_mode], value=fill) return img def rotate(img, angle, interpolation=cv2.INTER_LINEAR, expand=False, center=None): """Rotates the image by angle. Args: img (numpy.ndarray): Image to be rotated. angle (float|int): In degrees clockwise order. interpolation (int, optional): interpolation: Interpolation method. expand (bool|optional): Optional expansion flag. If true, expands the output image to make it large enough to hold the entire rotated image. If false or omitted, make the output image the same size as the input image. Note that the expand flag assumes rotation around the center and no translation. center (2-tuple|optional): Optional center of rotation. Origin is the upper left corner. Default is the center of the image. Returns: numpy ndarray: Rotated image. Examples: .. 
code-block:: python import numpy as np from paddle.incubate.hapi.vision.transforms.functional import rotate fake_img = np.random.rand(500, 500, 3).astype('float32') fake_img = rotate(fake_img, 10) print(fake_img.shape) """ dtype = img.dtype h, w, _ = img.shape point = center or (w / 2, h / 2) M = cv2.getRotationMatrix2D(point, angle=-angle, scale=1) if expand: if center is None: cos = np.abs(M[0, 0]) sin = np.abs(M[0, 1]) nW = int((h * sin) + (w * cos)) nH = int((h * cos) + (w * sin)) M[0, 2] += (nW / 2) - point[0] M[1, 2] += (nH / 2) - point[1] dst = cv2.warpAffine(img, M, (nW, nH)) else: xx = [] yy = [] for point in (np.array([0, 0, 1]), np.array([w - 1, 0, 1]), np.array([w - 1, h - 1, 1]), np.array([0, h - 1, 1])): target = np.dot(M, point) xx.append(target[0]) yy.append(target[1]) nh = int(math.ceil(max(yy)) - math.floor(min(yy))) nw = int(math.ceil(max(xx)) - math.floor(min(xx))) M[0, 2] += (nw - w) / 2 M[1, 2] += (nh - h) / 2 dst = cv2.warpAffine(img, M, (nw, nh), flags=interpolation) else: dst = cv2.warpAffine(img, M, (w, h), flags=interpolation) return dst.astype(dtype) def to_grayscale(img, num_output_channels=1): """Converts image to grayscale version of image. Args: img (numpy.ndarray): Image to be converted to grayscale. Returns: numpy.ndarray: Grayscale version of the image. if num_output_channels == 1, returned image is single channel if num_output_channels == 3, returned image is 3 channel with r == g == b Examples: .. code-block:: python import numpy as np from paddle.incubate.hapi.vision.transforms.functional import to_grayscale fake_img = np.random.rand(500, 500, 3).astype('float32') fake_img = to_grayscale(fake_img) print(fake_img.shape) """ if num_output_channels == 1: img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY) elif num_output_channels == 3: img = cv2.cvtColor( cv2.cvtColor(img, cv2.COLOR_RGB2GRAY), cv2.COLOR_GRAY2RGB) else: raise ValueError('num_output_channels should be either 1 or 3') return img
38.38806
111
0.602061
0
0
0
0
0
0
0
0
6,004
0.583593
37d5209ef3010122c779cf4e6e97b119c2f9a504
14,267
py
Python
ground_battle.py
ashhansen6/minigames
5b2e0db14b3567c9b6220206105ed448fb303551
[ "MIT" ]
null
null
null
ground_battle.py
ashhansen6/minigames
5b2e0db14b3567c9b6220206105ed448fb303551
[ "MIT" ]
3
2021-03-25T02:39:44.000Z
2021-06-16T17:53:36.000Z
ground_battle.py
ashhansen6/minigames
5b2e0db14b3567c9b6220206105ed448fb303551
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- """ Created on Fri Jan 29 13:38:35 2021 GROUND INVASION! The Game @author: Ashton Hansen (ashhansen6@outlook.com) """ # Packages used: import numpy as np import pandas as pd import random as rng from termcolor import colored # Defining starting forces ## Defenders: def_force = 1250 def_reserves = 400 defenders = def_force + def_reserves def_strength = def_force def_guard = def_force ## Attackers: att_force = 900 att_reserves = 1000 attackers = att_force + att_reserves att_strength = att_force att_guard = att_force # Defining strategies: ## Defenders: def_strat = ["draft", "turtle"] ### Draft def draft(def_force, def_reserves): global def_pair global def_strength global def_guard # Defender Strategy Information print(colored("########## INTELLIGENCE REPORT ##########", on_color = "on_cyan")) print("You hear news that a draft decree was issued...") print("Intelligence suggests that there will be more enemy combatants.") print("You expect the drafted soldiers to have decreased combat effectiveness.") # Defender Strategy Effects if def_reserves >= 100: def_danger = def_force + 100 def_safe = def_reserves - 100 print("Defender's fielded forces:", def_danger) print("Defender's forces still in reserve:", def_safe) else: def_danger = def_force + def_reserves def_safe = 0 print("Defender's fielded forces:", def_danger) print("Defender's forces still in reserve:", def_safe) def_power = def_danger * 0.980 def_protection = def_danger * 0.95 def_deployment = [def_danger, def_safe, def_power, def_protection] return(def_deployment) ### Turtle def turtle(def_force, def_reserves): global def_pair global def_strength global def_guard # Defender Strategy Information print(colored("########## INTELLIGENCE REPORT ##########", on_color = "on_cyan")) print("The defenders appear to bolster their defenses in preparation.") print("Intelligence suggests that their defenses will be difficult to penetrate.") print("It is likely that the defenders will try to keep soldiers out of harm's way.") # Defender Strategy Effects if def_force > 1100: def_danger = def_force def_safe = def_reserves + (def_danger - 1100) def_danger = 1100 print("Defender's fielded forces:", def_danger) print("Defender's forces still in reserve:", def_safe) else: def_danger = def_force def_safe = def_reserves print("Defender's fielded forces:", def_danger) print("Defender's forces still in reserve:", def_safe) def_power = def_danger * 0.975 def_protection = def_danger * 1.15 def_deployment = [def_danger, def_safe, def_power, def_protection] return(def_deployment) ## Attackers: att_strat = ["blitz", "guerilla"] ### Blitz def blitz(att_force, att_reserves): global att_pair global att_strength global att_guard # Attacker Strategy Information print(colored("########## OFFICERS' REPORTS #########", on_color = "on_cyan")) print("Your officers grimly accept your orders...") print("There is an air of apprehension as the troops prepare to deploy.") print("While offensive effectiveness will improve, heavier losses are expected.") # Attacker Strategy Effects if att_reserves >= 200: att_danger = att_force + 200 att_safe = att_reserves - 200 print("Attacker's fielded forces:", att_danger) print("Attacker's forces still in reserve:", att_safe) else: att_danger = att_force + att_reserves att_safe = 0 print("Attacker's fielded forces:", att_danger) print("Attacker's forces still in reserve:", att_reserves) att_power = att_danger * 1.10 att_protection = att_danger * 0.90 att_deployment = [att_danger, att_safe, att_power, att_protection] 
return(att_deployment) ### Guerilla def guerilla(att_force, att_reserves): global att_pair global att_strength global att_guard # Attacker Strategy Information print(colored("########## OFFICERS' REPORTS #########", on_color = "on_cyan")) print("Your officers immediately begin plans to target strategic weak points.") print("Soldiers move out in small forces and keep the enemy guessing.") print("While not as effective offensively, troop survival rates should be higher.") # Attacker Strategy Effects if att_force > 750: att_danger = att_force att_safe = att_reserves + (att_force - 750) att_danger = 750 else: att_danger = att_force att_safe = att_reserves print("Attacker's fielded forces:", att_danger) print("Attacker's forces still in reserve:", att_safe) att_power = att_danger * 0.95 att_protection = att_danger * 1.25 att_deployment = [att_danger, att_safe, att_power, att_protection] return(att_deployment) # Ground Battle Event (Player == Attacker) wave = 0 player = input("Attacker or Defender? [A/D]:") while (attackers > 0) and (defenders > 0): # Wave Information wave = wave + 1 if wave == 1: print("############################################################") print("PREPARE FOR BATTLE! THE FIRST WAVE OF THE BATTLE BEGINS NOW.") print("############################################################") else: print("########## WAVE:", wave, "##########") print("#############################") print("Defending force strength:", def_force) print("Defending forces in reserve:", def_reserves) print("Attacking force strength:", att_force) print("Attacking forces in reserve:", att_reserves) if player =="A": # Active Player (Attacker) att_strat_chosen = input(colored("How should we proceed, commander? [blitz/guerilla]:", "yellow")) elif player == "D": # CPU Attacker att_strat_chosen = rng.choice(att_strat) # Defender Setup if player == "A": # CPU Defender if def_reserves > 0: def_strat = ["none", "draft", "draft", "draft", "draft", "draft", "draft", "turtle", "turtle", "turtle"] def_strat_chosen = rng.choice(def_strat) else: def_strat = ["none", "none", "turtle", "turtle", "turtle" ,"turtle", "turtle", "turtle", "turtle", "turtle"] def_strat_chosen = rng.choice(def_strat) elif player == "D": # Active Player (defender) def_strat_chosen = input(colored("How should we proceed, commander? 
[draft/turtle]:", "yellow")) if def_strat_chosen == "draft": draft_results = draft(def_force, def_reserves) def_force = draft_results[0] def_reserves = draft_results[1] def_strength = draft_results[2] def_guard = draft_results[3] elif def_strat_chosen == "turtle": turtle_results = turtle(def_force, def_reserves) def_force = turtle_results[0] def_reserves = turtle_results[1] def_strength = turtle_results[2] def_guard = turtle_results[3] elif def_strat_chosen == "none": print(colored("########## INTELLIGENCE REPORT ##########", on_color = "on_cyan")) print("It appears that the enemy will employ standard tactics...") def_force = def_force def_reserves = def_reserves def_strength = def_force def_guard = def_force print("Defending force strength:", def_force) print("Forces kept in reserve:", def_reserves) # Attacker Setup if att_strat_chosen == "blitz": blitz_results = blitz(att_force, att_reserves) att_force = blitz_results[0] att_reserves = blitz_results[1] att_strength = blitz_results[2] att_guard = blitz_results[3] elif att_strat_chosen == "guerilla": guerilla_results = guerilla(att_force, att_reserves) att_force = guerilla_results[0] att_reserves = guerilla_results[1] att_strength = guerilla_results[2] att_guard = guerilla_results[3] # Combat # Attacker damage def_guard = np.random.normal(def_guard, def_guard/10) * 0.50 att_strength = att_strength - def_guard if att_strength < 0: att_strength = 0 def_force = def_force - np.random.normal(att_strength, att_strength/10)//2 - (0.1*att_strength)//1 if def_force < 0: def_force = 0 # Defender damage att_guard = np.random.normal(att_guard, att_guard/10) * 0.50 - 0.1 def_strength = def_strength - att_guard if def_strength < 0: def_strength = 0 att_force = att_force - np.random.normal(def_strength, def_strength/10)//2 - (0.1*def_strength)//1 if att_force < 0: att_force = 0 # Post-wave results: print(colored("########## POST-WAVE RESULTS ##########", on_color = "on_cyan")) print(colored("Defenders:", on_color = "on_blue")) print("Surviving defensive forces:", def_force) print("Defensive forces kept in reserve:", def_reserves) print("Defender strength estimate:", def_strength) print("Defender guard estimate:", def_guard) print(colored("Attackers:", on_color = "on_red")) print("Surviving attacker forces:", att_force) print("Attacker forces kept in reserve:", att_reserves) print("Attacker strength estimate:", att_strength) print("Attacker guard estimate:", att_guard) # Reset allocations # Defender reallocations: def_reserves = def_reserves + def_force def_force = 0 if def_reserves >= 1250: def_reserves = def_reserves - 1250 def_force = 1250 def_guard = def_force else: def_force = def_reserves def_reserves = 0 def_guard = def_force # Attacker reallocations: att_reserves = att_reserves + att_force att_force = 0 if att_reserves >= 900: att_reserves = att_reserves - 900 att_force = 900 att_guard = att_force else: att_force = att_reserves att_reserves = 0 att_guard = att_force defenders = def_force + def_reserves attackers = att_force + att_reserves # End of wave conditionals if (attackers > 0) and (defenders > 0) and (player == "A"): fightflight = input(colored("Continue or retreat?: [continue/retreat]:", "yellow")) if fightflight == "retreat": print(colored("########## WITHDRAWAL ##########", on_color = "on_blue")) print("You choose to withdraw your troops...") print(colored("######### INVASION STATISTICS ##########", on_color = "on_cyan")) print("Troops remaining:", attackers) print("Total losses:", (1900 - attackers)) print("Survival rate:", 
(attackers)/1900) print("Total assault waves:", wave) break else: print("The battle will continue next turn...") elif attackers <= 0 and player == "A": print(colored("########## FAILURE! ##########", on_color = "on_red")) print("Your assault has been repelled!") print("You return home, wondering what punishment for your failure awaits...") print(colored("######### INVASION STATISTICS ##########", on_color = "on_cyan")) print("Troops remaining:", attackers) print("Total losses:", (1900 - attackers)) print("Survival rate:", (attackers)/1900) print("Total assault waves:", wave) elif defenders <= 0 and player == "A": print(colored("########## SUCCESS! ##########", on_color = "on_green")) print("The defenders have been routed!") print("You may now decide the fate of the defending population...") print(colored("######### INVASION STATISTICS ##########", on_color = "on_cyan")) print("Troops remaining:", attackers) print("Total losses:", (1900 - attackers)) print("Survival rate:", (attackers)/1900) print("Total assault waves:", wave) elif (attackers > 0) and (defenders > 0) and (player == "D"): fightflight = input(colored("Defend or retreat?: [defend/retreat]:", "yellow")) if fightflight == "retreat": print(colored("########## WITHDRAWAL ##########", on_color = "on_blue")) print("You choose to withdraw your troops from the region...") print(colored("######### INVASION STATISTICS ##########", on_color = "on_cyan")) print("Troops remaining:", defenders) print("Total losses:", (1900 - defenders)) print("Survival rate:", (defenders)/1900) print("Total assault waves:", wave) break else: print("The battle will continue next turn...") elif defenders <= 0 and player == "D": print(colored("########## FAILURE! ##########", on_color = "on_red")) print("Your defense has been broken!") print("Enemy troops now occupy your lands and have claimed dominion...") print(colored("######### INVASION STATISTICS ##########", on_color = "on_cyan")) print("Troops remaining:", defenders) print("Total losses:", (1650 - defenders)) print("Survival rate:", (defenders)/1650) print("Total assault waves:", wave) elif attackers <= 0 and player == "D": print(colored("########## SUCCESS! ##########", on_color = "on_green")) print("The attackers have been repelled!") print("The storm has passed, and your people live another day...") print(colored("######### INVASION STATISTICS ##########", on_color = "on_cyan")) print("Troops remaining:", defenders) print("Total losses:", (1650 - defenders)) print("Survival rate:", (defenders)/1650) print("Total assault waves:", wave) print("#############################")
41.961765
107
0.604892
0
0
0
0
0
0
0
0
5,302
0.371627
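Illustrative sketch (my own code, not part of the record above): the per-wave casualty arithmetic from the game's combat section, isolated so a single exchange can be checked by hand. The defender's guard absorbs half of a noisy draw of its value, and the defender's losses are then a noisy half of the attacker's remaining strength plus a flat 10% share; the function name and input numbers below are illustrative.

import numpy as np

def wave_losses(att_strength, def_guard, rng=None):
    # Guard soaks up half of a noisy draw of its own value...
    rng = rng or np.random.default_rng(0)
    absorbed = rng.normal(def_guard, def_guard / 10) * 0.50
    remaining = max(att_strength - absorbed, 0)
    # ...and the defender then loses a noisy half of the remaining strength plus a flat 10% component.
    losses = rng.normal(remaining, remaining / 10) // 2 + (0.1 * remaining) // 1
    return remaining, losses

remaining, losses = wave_losses(att_strength=1200.0, def_guard=1250.0)
print(round(remaining), round(losses))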
37d53dc9e4eafc3370db20f7342e6ffdb10aeb9f
24,609
py
Python
src/pretalx/orga/urls.py
martinheidegger/pretalx
d812e665c1c5ce29df3eafc1985af08e4d986fef
[ "Apache-2.0" ]
null
null
null
src/pretalx/orga/urls.py
martinheidegger/pretalx
d812e665c1c5ce29df3eafc1985af08e4d986fef
[ "Apache-2.0" ]
null
null
null
src/pretalx/orga/urls.py
martinheidegger/pretalx
d812e665c1c5ce29df3eafc1985af08e4d986fef
[ "Apache-2.0" ]
null
null
null
from django.conf.urls import include, url from django.views.generic.base import RedirectView from pretalx.event.models.event import SLUG_CHARS from pretalx.orga.views import cards from .views import ( admin, auth, cfp, dashboard, event, mails, organiser, person, plugins, review, schedule, speaker, submission, ) app_name = "orga" urlpatterns = [ url("^login/$", auth.LoginView.as_view(), name="login"), url("^logout/$", auth.logout_view, name="logout"), url("^reset/$", auth.ResetView.as_view(), name="auth.reset"), url(r"^reset/(?P<token>\w+)$", auth.RecoverView.as_view(), name="auth.recover"), url("^$", RedirectView.as_view(url="event", permanent=False)), url("^admin/$", admin.AdminDashboard.as_view(), name="admin.dashboard"), url("^admin/update/$", admin.UpdateCheckView.as_view(), name="admin.update"), url("^me$", event.UserSettings.as_view(), name="user.view"), url("^me/subuser$", person.SubuserView.as_view(), name="user.subuser"), url( r"^invitation/(?P<code>\w+)$", event.InvitationView.as_view(), name="invitation.view", ), url( "^organiser/$", dashboard.DashboardOrganiserListView.as_view(), name="organiser.list", ), url( "^organiser/new$", organiser.OrganiserDetail.as_view(), name="organiser.create" ), url( f"^organiser/(?P<organiser>[{SLUG_CHARS}]+)/", include( [ url("^$", organiser.OrganiserDetail.as_view(), name="organiser.view"), url( "^delete$", organiser.OrganiserDelete.as_view(), name="organiser.delete", ), url("^teams/$", organiser.TeamDetail.as_view(), name="organiser.teams"), url( "^teams/new$", organiser.TeamDetail.as_view(), name="organiser.teams.create", ), url( "^teams/(?P<pk>[0-9]+)/$", organiser.TeamDetail.as_view(), name="organiser.teams.view", ), url( "^teams/(?P<pk>[0-9]+)/delete$", organiser.TeamDelete.as_view(), name="organiser.teams.delete", ), url( "^teams/(?P<pk>[0-9]+)/tracks$", organiser.TeamTracks.as_view(), name="organiser.teams.tracks", ), url( "^teams/(?P<pk>[0-9]+)/delete/(?P<user_pk>[0-9]+)$", organiser.TeamDelete.as_view(), name="organiser.teams.delete_member", ), url( "^teams/(?P<pk>[0-9]+)/reset/(?P<user_pk>[0-9]+)$", organiser.TeamResetPassword.as_view(), name="organiser.team.password_reset", ), url( "^teams/(?P<pk>[0-9]+)/uninvite$", organiser.TeamUninvite.as_view(), name="organiser.teams.uninvite", ), url( "^teams/(?P<pk>[0-9]+)/resend$", organiser.TeamResend.as_view(), name="organiser.teams.resend", ), ] ), ), url("^event/new/$", event.EventWizard.as_view(), name="event.create"), url("^event/typeahead/$", event.event_list, name="event.typeahead"), url("^event/$", dashboard.DashboardEventListView.as_view(), name="event.list"), url( f"^event/(?P<event>[{SLUG_CHARS}]+)/", include( [ url( "^$", dashboard.EventDashboardView.as_view(), name="event.dashboard" ), url("^login/$", auth.LoginView.as_view(), name="event.login"), url("^reset/$", auth.ResetView.as_view(), name="event.auth.reset"), url( r"^reset/(?P<token>\w+)$", auth.RecoverView.as_view(), name="event.auth.recover", ), url("^delete$", event.EventDelete.as_view(), name="event.delete"), url("^live$", event.EventLive.as_view(), name="event.live"), url("^api/users$", person.UserList.as_view(), name="event.user_list"), url( "^cfp/$", RedirectView.as_view(pattern_name="orga:cfp.text.view"), name="cfp", ), url("^cfp/flow/$", cfp.CfPFlowEditor.as_view(), name="cfp.flow"), url( "^cfp/questions/$", cfp.CfPQuestionList.as_view(), name="cfp.questions.view", ), url( "^cfp/questions/new$", cfp.CfPQuestionDetail.as_view(), name="cfp.questions.create", ), url( "^cfp/questions/remind$", 
cfp.CfPQuestionRemind.as_view(), name="cfp.questions.remind", ), url( "^cfp/questions/(?P<pk>[0-9]+)/$", cfp.CfPQuestionDetail.as_view(), name="cfp.question.view", ), url( "^cfp/questions/(?P<pk>[0-9]+)/up$", cfp.question_move_up, name="cfp.questions.up", ), url( "^cfp/questions/(?P<pk>[0-9]+)/down$", cfp.question_move_down, name="cfp.questions.down", ), url( "^cfp/questions/(?P<pk>[0-9]+)/delete$", cfp.CfPQuestionDelete.as_view(), name="cfp.question.delete", ), url( "^cfp/questions/(?P<pk>[0-9]+)/edit$", cfp.CfPQuestionDetail.as_view(), name="cfp.question.edit", ), url( "^cfp/questions/(?P<pk>[0-9]+)/toggle$", cfp.CfPQuestionToggle.as_view(), name="cfp.question.toggle", ), url("^cfp/text$", cfp.CfPTextDetail.as_view(), name="cfp.text.view"), url( "^cfp/types/$", cfp.SubmissionTypeList.as_view(), name="cfp.types.view", ), url( "^cfp/types/new$", cfp.SubmissionTypeDetail.as_view(), name="cfp.types.create", ), url( "^cfp/types/(?P<pk>[0-9]+)/$", cfp.SubmissionTypeDetail.as_view(), name="cfp.type.view", ), url( "^cfp/types/(?P<pk>[0-9]+)/delete$", cfp.SubmissionTypeDelete.as_view(), name="cfp.type.delete", ), url( "^cfp/types/(?P<pk>[0-9]+)/default$", cfp.SubmissionTypeDefault.as_view(), name="cfp.type.default", ), url("^cfp/tracks/$", cfp.TrackList.as_view(), name="cfp.tracks.view"), url( "^cfp/tracks/new$", cfp.TrackDetail.as_view(), name="cfp.track.create", ), url( "^cfp/tracks/(?P<pk>[0-9]+)/$", cfp.TrackDetail.as_view(), name="cfp.track.view", ), url( "^cfp/tracks/(?P<pk>[0-9]+)/delete$", cfp.TrackDelete.as_view(), name="cfp.track.delete", ), url( "^cfp/access-codes/$", cfp.AccessCodeList.as_view(), name="cfp.access_code.view", ), url( "^cfp/access-codes/new$", cfp.AccessCodeDetail.as_view(), name="cfp.access_code.create", ), url( "^cfp/access-codes/(?P<code>[A-z0-9]+)/$", cfp.AccessCodeDetail.as_view(), name="cfp.access_code.view", ), url( "^cfp/access-codes/(?P<code>[A-z0-9]+)/send$", cfp.AccessCodeSend.as_view(), name="cfp.access_code.send", ), url( "^cfp/access-codes/(?P<code>[A-z0-9]+)/delete$", cfp.AccessCodeDelete.as_view(), name="cfp.access_code.delete", ), url( "^mails/", include( [ url( "^(?P<pk>[0-9]+)/$", mails.MailDetail.as_view(), name="mails.outbox.mail.view", ), url( "^(?P<pk>[0-9]+)/copy$", mails.MailCopy.as_view(), name="mails.outbox.mail.copy", ), url( "^(?P<pk>[0-9]+)/delete$", mails.OutboxPurge.as_view(), name="mails.outbox.mail.delete", ), url( "^(?P<pk>[0-9]+)/send$", mails.OutboxSend.as_view(), name="mails.outbox.mail.send", ), url( "^templates/$", mails.TemplateList.as_view(), name="mails.templates.list", ), url( "^templates/new$", mails.TemplateDetail.as_view(), name="mails.templates.create", ), url( "^templates/(?P<pk>[0-9]+)/$", mails.TemplateDetail.as_view(), name="mails.templates.view", ), url( "^templates/(?P<pk>[0-9]+)/delete$", mails.TemplateDelete.as_view(), name="mails.templates.delete", ), url( "^compose$", mails.ComposeMail.as_view(), name="mails.compose", ), url("^sent$", mails.SentMail.as_view(), name="mails.sent"), url( "^outbox/$", mails.OutboxList.as_view(), name="mails.outbox.list", ), url( "^outbox/send$", mails.OutboxSend.as_view(), name="mails.outbox.send", ), url( "^outbox/purge$", mails.OutboxPurge.as_view(), name="mails.outbox.purge", ), ] ), ), url( "^submissions/$", submission.SubmissionList.as_view(), name="submissions.list", ), url( "^submissions/new$", submission.SubmissionContent.as_view(), name="submissions.create", ), url( "^submissions/cards/$", cards.SubmissionCards.as_view(), name="submissions.cards", ), url( 
"^submissions/feed/$", submission.SubmissionFeed(), name="submissions.feed", ), url( "^submissions/statistics/$", submission.SubmissionStats.as_view(), name="submissions.statistics", ), url( "^submissions/feedback/$", submission.AllFeedbacksList.as_view(), name="submissions.feedback", ), url( r"^submissions/(?P<code>[\w-]+)/", include( [ url( "^$", submission.SubmissionContent.as_view(), name="submissions.content.view", ), url( "^submit$", submission.SubmissionStateChange.as_view(), name="submissions.submit", ), url( "^accept$", submission.SubmissionStateChange.as_view(), name="submissions.accept", ), url( "^reject$", submission.SubmissionStateChange.as_view(), name="submissions.reject", ), url( "^confirm", submission.SubmissionStateChange.as_view(), name="submissions.confirm", ), url( "^withdraw$", submission.SubmissionStateChange.as_view(), name="submissions.withdraw", ), url( "^delete", submission.SubmissionStateChange.as_view(), name="submissions.delete", ), url( "^cancel", submission.SubmissionStateChange.as_view(), name="submissions.cancel", ), url( "^speakers/$", submission.SubmissionSpeakers.as_view(), name="submissions.speakers.view", ), url( "^speakers/add$", submission.SubmissionSpeakersAdd.as_view(), name="submissions.speakers.add", ), url( "^speakers/delete$", submission.SubmissionSpeakersDelete.as_view(), name="submissions.speakers.delete", ), url( "^reviews/$", review.ReviewSubmission.as_view(), name="submissions.reviews", ), url( "^reviews/delete$", review.ReviewSubmissionDelete.as_view(), name="submissions.reviews.submission.delete", ), url( "^feedback/$", submission.FeedbackList.as_view(), name="submissions.feedback.list", ), url( "^toggle_featured$", submission.ToggleFeatured.as_view(), name="submissions.toggle_featured", ), url( "^anonymise/$", submission.Anonymise.as_view(), name="submissions.anonymise", ), ] ), ), url("^speakers/$", speaker.SpeakerList.as_view(), name="speakers.list"), url( "^speakers/(?P<pk>[0-9]+)/$", speaker.SpeakerDetail.as_view(), name="speakers.view", ), url( "^speakers/(?P<pk>[0-9]+)/reset$", speaker.SpeakerPasswordReset.as_view(), name="speakers.reset", ), url( "^speakers/(?P<pk>[0-9]+)/toggle-arrived$", speaker.SpeakerToggleArrived.as_view(), name="speakers.arrived", ), url( "^info/$", speaker.InformationList.as_view(), name="speakers.information.list", ), url( "^info/new$", speaker.InformationDetail.as_view(), name="speakers.information.create", ), url( "^info/(?P<pk>[0-9]+)/$", speaker.InformationDetail.as_view(), name="speakers.information.view", ), url( "^info/(?P<pk>[0-9]+)/delete$", speaker.InformationDelete.as_view(), name="speakers.information.delete", ), url( "^reviews/$", review.ReviewDashboard.as_view(), name="reviews.dashboard", ), url( "^reviews/regenerate/$", review.RegenerateDecisionMails.as_view(), name="reviews.regenerate", ), url( "^settings/$", event.EventDetail.as_view(), name="settings.event.view", ), url( "^settings/mail$", event.EventMailSettings.as_view(), name="settings.mail.view", ), url( "^settings/plugins$", plugins.EventPluginsView.as_view(), name="settings.plugins.select", ), url( "^settings/widget$", event.WidgetSettings.as_view(), name="settings.widget", ), url( "^settings/review/$", event.EventReviewSettings.as_view(), name="settings.review", ), url( "^settings/review/phase/(?P<pk>[0-9]+)/up$", event.phase_move_up, name="settings.review.phase.up", ), url( "^settings/review/phase/(?P<pk>[0-9]+)/down$", event.phase_move_down, name="settings.review.phase.down", ), url( 
"^settings/review/phase/(?P<pk>[0-9]+)/delete$", event.PhaseDelete.as_view(), name="settings.review.phasedelete", ), url( "^settings/review/phase/(?P<pk>[0-9]+)/activate$", event.PhaseActivate.as_view(), name="settings.review.phasedelete", ), url( "^schedule/$", schedule.ScheduleView.as_view(), name="schedule.main" ), url( "^schedule/export/$", schedule.ScheduleExportView.as_view(), name="schedule.export", ), url( "^schedule/export/trigger$", schedule.ScheduleExportTriggerView.as_view(), name="schedule.export.trigger", ), url( "^schedule/export/download$", schedule.ScheduleExportDownloadView.as_view(), name="schedule.export.download", ), url( "^schedule/release$", schedule.ScheduleReleaseView.as_view(), name="schedule.release", ), url( r"^schedule/quick/(?P<code>\w+)/$", schedule.QuickScheduleView.as_view(), name="schedule.quick", ), url( "^schedule/reset$", schedule.ScheduleResetView.as_view(), name="schedule.reset", ), url( "^schedule/toggle$", schedule.ScheduleToggleView.as_view(), name="schedule.toggle", ), url( "^schedule/resend_mails$", schedule.ScheduleResendMailsView.as_view(), name="schedule.resend_mails", ), url( "^schedule/rooms/$", schedule.RoomList.as_view(), name="schedule.rooms.list", ), url( "^schedule/rooms/new$", schedule.RoomDetail.as_view(), name="schedule.rooms.create", ), url( "^schedule/rooms/(?P<pk>[0-9]+)/$", schedule.RoomDetail.as_view(), name="schedule.rooms.view", ), url( "^schedule/rooms/(?P<pk>[0-9]+)/delete$", schedule.RoomDelete.as_view(), name="schedule.rooms.delete", ), url( "^schedule/rooms/(?P<pk>[0-9]+)/up$", schedule.room_move_up, name="schedule.rooms.up", ), url( "^schedule/rooms/(?P<pk>[0-9]+)/down$", schedule.room_move_down, name="schedule.rooms.down", ), url( "^schedule/api/talks/$", schedule.TalkList.as_view(), name="schedule.api.talks", ), url( "^schedule/api/talks/(?P<pk>[0-9]+)/$", schedule.TalkUpdate.as_view(), name="schedule.api.update", ), url( "^schedule/api/availabilities/(?P<talkid>[0-9]+)/(?P<roomid>[0-9]+)/$", schedule.RoomTalkAvailabilities.as_view(), name="schedule.api.availabilities", ), ] ), ), ]
40.54201
91
0.358771
0
0
0
0
0
0
0
0
5,680
0.23081
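One quirk worth noting in the access-code routes of the file above: the character class [A-z0-9] also spans the ASCII characters that sit between 'Z' and 'a'. The snippet below is an illustrative check (my own, not pretalx code) showing that codes containing an underscore or caret would still match the pattern.

import re

pattern = re.compile(r"^cfp/access-codes/(?P<code>[A-z0-9]+)/$")
print(bool(pattern.match("cfp/access-codes/a1B2/")))   # True, as intended
print(bool(pattern.match("cfp/access-codes/a_b^c/")))  # also True: [A-z] includes '[', '\', ']', '^', '_', '`'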
37d57b222d4daa1969049535271df3dff47b0edb
1,925
py
Python
ws2122-lspm/Lib/site-packages/pm4py/statistics/overlap/utils/compute.py
Malekhy/ws2122-lspm
e4dc8b801d12f862b8ef536a0f125f346f085a00
[ "MIT" ]
1
2022-01-19T04:02:46.000Z
2022-01-19T04:02:46.000Z
ws2122-lspm/Lib/site-packages/pm4py/statistics/overlap/utils/compute.py
Malekhy/ws2122-lspm
e4dc8b801d12f862b8ef536a0f125f346f085a00
[ "MIT" ]
1
2021-11-19T07:21:48.000Z
2021-11-19T07:21:48.000Z
ws2122-lspm/Lib/site-packages/pm4py/statistics/overlap/utils/compute.py
Malekhy/ws2122-lspm
e4dc8b801d12f862b8ef536a0f125f346f085a00
[ "MIT" ]
1
2022-01-14T17:15:38.000Z
2022-01-14T17:15:38.000Z
''' This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de). PM4Py is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. PM4Py is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with PM4Py. If not, see <https://www.gnu.org/licenses/>. ''' from enum import Enum from typing import Optional, Dict, Any, Tuple, List, Union from intervaltree import Interval, IntervalTree from pm4py.util import exec_utils class Parameters(Enum): EPSILON = "epsilon" def apply(points: List[Tuple[float, float]], parameters: Optional[Dict[Union[str, Parameters], Any]] = None) -> List[int]: """ Computes the overlap statistic given a list of points, expressed as (min_timestamp, max_timestamp) Parameters ----------------- points List of points with the aforementioned features parameters Parameters of the method, including: - Parameters.EPSILON Returns ----------------- overlap List associating to each point the number of intersecting points """ if parameters is None: parameters = {} epsilon = exec_utils.get_param_value(Parameters.EPSILON, parameters, 10 ** (-5)) points = [(x[0] - epsilon, x[1] + epsilon) for x in points] sorted_points = sorted(points) tree = IntervalTree() for p in sorted_points: tree.add(Interval(p[0], p[1])) overlap = [] for p in points: overlap.append(len(tree[p[0]:p[1]])) return overlap
31.048387
122
0.676883
47
0.024416
0
0
0
0
0
0
1,154
0.599481
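A minimal usage sketch for the overlap statistic above, assuming the module is importable as pm4py.statistics.overlap.utils.compute (per the file path shown) and that its intervaltree dependency is installed; the intervals are made up.

from pm4py.statistics.overlap.utils import compute

points = [(0.0, 5.0), (3.0, 8.0), (10.0, 12.0)]  # first two intervals overlap each other, third stands alone
print(compute.apply(points))  # expected: [2, 2, 1]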
37d597714762fd1b5295ccfa14750529f2501042
1,775
py
Python
webapp/apps/Base Quiz/baseui_gen.py
sk-Prime/webapp
c21d7d49de4e4442f9af29ba9f08f37b5abbd20d
[ "MIT" ]
4
2021-12-11T16:01:10.000Z
2021-12-22T19:47:51.000Z
webapp/apps/Base Quiz/baseui_gen.py
sk-Prime/webapp
c21d7d49de4e4442f9af29ba9f08f37b5abbd20d
[ "MIT" ]
null
null
null
webapp/apps/Base Quiz/baseui_gen.py
sk-Prime/webapp
c21d7d49de4e4442f9af29ba9f08f37b5abbd20d
[ "MIT" ]
null
null
null
from htmlman import HTMLMan from styleman import Template page=HTMLMan() page.make_responsive() page.add_title("Base Quiz") style=Template('antartica') page.add_body_class(style['page']) page.add_js("baseui.js") page.create_section('main',append=True) page['main'].add_style_class(style['main']) title=page.create_section('title') title.add_style_class(style['title']) title.add_content("Base Quiz") widget=page.create_section("widget") widget.add_style_class(style['widget']) label = page.create_section('label',ID='label') #label.add_style_class(style['center']) label.add_style(name='label',mode="class") label.style_to_cssman(style) label.style( "font-size","20pt", "font-family","monospace", "height","50px", "border-bottom","1px solid #ccd", ) label.add_content("0x0") answer_l=page.create_section("answer_l1",ID="label_t") answer_l.add_style_class(style["label"]) answer_l2=page.create_section("answer_l2",ID="label_b") answer_l2.add_style_class(style["label"]) controls = page.create_section("control") controls.add_style(name="control",mode="class",cssman_obj=style) controls.style( "display","grid", "grid-template-columns","1fr 1fr", "gap","10px", "padding","10px" ) rand_b=page.create_section('random',tag="button",inner_html="Random") rand_b.config_attr("type","button","onclick","randomize()") answer_b=page.create_section('answer_b',tag="button",inner_html="Answer") answer_b.config_attr("type","button","onclick","answer()") controls.add_content(rand_b) controls.add_content(answer_b) widget.add_content(label) widget.add_content(answer_l) widget.add_content(answer_l2) widget.add_content(controls) page['main'].add_content(title) page['main'].add_content(widget) page.render(style,html_path="baseui.html")
26.102941
73
0.750423
0
0
0
0
0
0
0
0
552
0.310986
37d5b6f804f5b3c1c18198672cc73bf3cc33a2a6
514
py
Python
cluster_config/cluster.py
srcc-msu/job_statistics
74680a4e4c105ebcff94f089e07fcb44dbcc12d9
[ "MIT" ]
null
null
null
cluster_config/cluster.py
srcc-msu/job_statistics
74680a4e4c105ebcff94f089e07fcb44dbcc12d9
[ "MIT" ]
null
null
null
cluster_config/cluster.py
srcc-msu/job_statistics
74680a4e4c105ebcff94f089e07fcb44dbcc12d9
[ "MIT" ]
null
null
null
name = "cluster" num_cores = 1000 GENERAL_PARTITIONS = ["regular"] GPU_PARTITIONS = ["gpu"] PARTITIONS = GENERAL_PARTITIONS + GPU_PARTITIONS ACTIVE_JOB_STATES = ["RUNNING", "COMPLETING"] FINISHED_JOB_STATES = ["COMPLETED", "NODE_FAIL", "TIMEOUT", "FAILED", "CANCELLED"] JOB_STATES = ACTIVE_JOB_STATES + FINISHED_JOB_STATES def node2int(node): """Convert a node name to an int by keeping only its digits; e.g. 'node1-001-01' becomes 100101.""" return int(''.join(filter(lambda x: x.isdigit(), node)))
27.052632
82
0.741245
0
0
0
0
0
0
0
0
201
0.391051
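A quick check of the node-name helper above; the function body is copied here so the snippet runs standalone, and the second node name is made up.

def node2int(node):
    return int(''.join(filter(lambda x: x.isdigit(), node)))

print(node2int("node1-001-01"))  # 100101
print(node2int("cn042"))         # 42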
37d62e06868fc1146c429cff23d726ebbfa8afd8
7,146
py
Python
room_assistance/indico_room_assistance/plugin.py
OmeGak/indico-plugins-cern
6e32bc158877080085ceffd021ab1d2247192f75
[ "MIT" ]
4
2019-02-12T05:08:56.000Z
2022-03-09T23:43:18.000Z
room_assistance/indico_room_assistance/plugin.py
OmeGak/indico-plugins-cern
6e32bc158877080085ceffd021ab1d2247192f75
[ "MIT" ]
40
2017-11-08T15:08:50.000Z
2022-03-28T15:09:51.000Z
room_assistance/indico_room_assistance/plugin.py
OmeGak/indico-plugins-cern
6e32bc158877080085ceffd021ab1d2247192f75
[ "MIT" ]
15
2017-11-08T12:35:59.000Z
2022-01-13T15:16:42.000Z
# This file is part of the CERN Indico plugins. # Copyright (C) 2014 - 2021 CERN # # The CERN Indico plugins are free software; you can redistribute # them and/or modify them under the terms of the MIT License; see # the LICENSE file for more details. import dateutil.parser import pytz from flask import flash, request, session from flask_pluginengine import render_plugin_template, url_for_plugin from indico.core import signals from indico.core.config import config from indico.core.plugins import IndicoPlugin from indico.core.settings.converters import ModelListConverter from indico.modules.events.requests.models.requests import Request, RequestState from indico.modules.events.requests.views import WPRequestsEventManagement from indico.modules.rb.models.rooms import Room from indico.modules.users import User from indico.util.string import natural_sort_key from indico.web.forms.base import IndicoForm from indico.web.forms.fields import EmailListField, IndicoQuerySelectMultipleField, PrincipalListField from indico.web.menu import TopMenuItem from indico_room_assistance import _ from indico_room_assistance.blueprint import blueprint from indico_room_assistance.definition import RoomAssistanceRequest from indico_room_assistance.util import (can_request_assistance_for_event, event_has_room_with_support_attached, is_room_assistance_support) def _order_func(object_list): return sorted(object_list, key=lambda r: natural_sort_key(r[1].full_name)) class RoomAssistanceForm(IndicoForm): _fieldsets = [ ('Startup assistance emails', ['room_assistance_recipients', 'rooms_with_assistance', 'room_assistance_support']), ] room_assistance_recipients = EmailListField(_('Recipients'), description=_('Notifications about room assistance requests are sent ' 'to these email addresses (one per line)')) rooms_with_assistance = IndicoQuerySelectMultipleField('Rooms', query_factory=lambda: Room.query, description=_('Rooms for which users can request startup ' 'assistance'), get_label='full_name', collection_class=set, render_kw={'size': 20}, modify_object_list=_order_func) room_assistance_support = PrincipalListField(_('Room assistance support'), allow_groups=True, description=_('List of users who can view the list of events with ' 'room startup assistance.')) class RoomAssistancePlugin(IndicoPlugin): """Room assistance request This plugin lets users request assistance for meeting rooms. 
""" configurable = True settings_form = RoomAssistanceForm settings_converters = { 'rooms_with_assistance': ModelListConverter(Room) } acl_settings = {'room_assistance_support'} default_settings = { 'room_assistance_recipients': [], 'rooms_with_assistance': [], } def init(self): super().init() self.inject_bundle('main.css', WPRequestsEventManagement, subclasses=False, condition=lambda: request.view_args.get('type') == RoomAssistanceRequest.name) self.template_hook('event-actions', self._room_assistance_action) self.connect(signals.menu.items, self._extend_services_menu, sender='top-menu') self.connect(signals.plugin.get_event_request_definitions, self._get_room_assistance_request) self.connect(signals.event.updated, self._on_event_update) def get_blueprints(self): return blueprint def _room_assistance_action(self, event, **kwargs): return render_plugin_template('room_assistance_action.html', event=event, can_request_assistance=can_request_assistance_for_event(event)) def _extend_services_menu(self, reservation, **kwargs): if not session.user or not is_room_assistance_support(session.user): return return TopMenuItem('services-cern-room-assistance', _('Room assistance'), url_for_plugin('room_assistance.request_list'), section='services') def _get_room_assistance_request(self, sender, **kwargs): return RoomAssistanceRequest def _on_event_update(self, event, **kwargs): changes = kwargs['changes'] if not changes.keys() & {'location_data', 'start_dt', 'end_dt'}: return request = Request.find_latest_for_event(event, RoomAssistanceRequest.name) if not request or request.state != RequestState.accepted: return if 'location_data' in changes and not event_has_room_with_support_attached(event): request.definition.reject(request, {'comment': render_plugin_template('auto_reject_no_supported_room.txt')}, User.get_system_user()) request.data = dict(request.data, occurrences=[]) flash(_("The new event location is not in the list of the rooms supported by the room assistance team. " "Room assistance request has been rejected and support will not be provided."), 'warning') if changes.keys() & {'start_dt', 'end_dt'}: tz = pytz.timezone(config.DEFAULT_TIMEZONE) occurrences = {dateutil.parser.parse(occ).astimezone(tz) for occ in request.data['occurrences']} req_dates = {occ.date() for occ in occurrences} event_dates = set(event.iter_days()) old_dates = req_dates - event_dates has_overlapping_dates = req_dates & event_dates if not has_overlapping_dates: request.definition.reject(request, {'comment': render_plugin_template('auto_reject_no_overlapping_dates.txt')}, User.get_system_user()) request.data = dict(request.data, occurrences=[]) flash(_("The new event dates don't overlap with the existing room assistance request for this event. " "Room assistance request has been rejected and support will not be provided."), 'warning') elif old_dates and has_overlapping_dates: new_data = dict(request.data) new_data['occurrences'] = [occ.astimezone(pytz.utc).isoformat() for occ in occurrences if occ.date() in req_dates & event_dates] request.data = new_data flash(_("Room assistance had been requested for days that are not between the updated start/end " "dates. Support will not be provided on these days anymore."), 'warning')
52.160584
120
0.645116
5,629
0.787713
0
0
0
0
0
0
1,711
0.239435
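A reduced sketch (my own, with made-up dates) of the set arithmetic that _on_event_update in the plugin above uses to decide whether a room-assistance request is rejected outright or merely trimmed when an event's dates change.

import datetime as dt

req_dates = {dt.date(2021, 6, 1), dt.date(2021, 6, 2)}    # days assistance was requested for
event_dates = {dt.date(2021, 6, 2), dt.date(2021, 6, 3)}  # days the rescheduled event now covers
old_dates = req_dates - event_dates   # requested days the event no longer includes
overlap = req_dates & event_dates     # requested days that remain valid
print(sorted(old_dates), sorted(overlap))  # one stale day, one surviving day: the request is trimmed, not rejected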
37d69c9affc9004808d91089e961fe9861840f56
6,808
py
Python
datamart/materializers/wikidata_spo_materializer.py
liangmuxin/datamart
495a21588db39c9ad239409208bec701dca07f30
[ "MIT" ]
7
2018-10-02T01:32:23.000Z
2020-10-08T00:42:35.000Z
datamart/materializers/wikidata_spo_materializer.py
liangmuxin/datamart
495a21588db39c9ad239409208bec701dca07f30
[ "MIT" ]
47
2018-10-02T05:41:13.000Z
2021-02-02T21:50:31.000Z
datamart/materializers/wikidata_spo_materializer.py
liangmuxin/datamart
495a21588db39c9ad239409208bec701dca07f30
[ "MIT" ]
19
2018-10-01T22:27:20.000Z
2019-02-28T18:59:53.000Z
from datamart.materializers.materializer_base import MaterializerBase import os import urllib.request import sys import csv import copy import json from typing import List from pprint import pprint import re import typing from pandas import DataFrame import traceback class WikidataSPOMaterializer(MaterializerBase): property = "" def __init__(self, **kwargs): """ initialization and loading the city name to city id map """ MaterializerBase.__init__(self, **kwargs) def get(self, metadata: dict = None, constrains: dict = None ) -> typing.Optional[DataFrame]: materialization_arguments = metadata["materialization"].get("arguments", {}) self.property = materialization_arguments.get("property", "") materialization_arguments = metadata["materialization"].get("arguments", {}) self.property = materialization_arguments.get("property", "") prefix = 'http://sitaware.isi.edu:8080/bigdata/namespace/wdq/sparql?query=' format = '&format=json' result = dict() property_label = "" main_query_encoded = self._encode_url(self._formulate_main_query(self.property)) try: # print(prefix + main_query_encoded + format) main_query_req = urllib.request.Request(prefix + main_query_encoded + format) result, property_label = self._process_main_query(self._get_query_result(main_query_req)) except Exception as err: print(err) traceback.print_tb(err.__traceback__) count = 0 while(True): try: main_query_encoded = self._encode_url(self._next(self._formulate_main_query(self.property), offset=count)) main_query_req = urllib.request.Request(prefix + main_query_encoded + format) temp, property_label = self._process_main_query(self._get_query_result(main_query_req)) # property_label = re.sub(r"\s+", '_', property_label) count += 1 result.update(temp) except: # print("property ", property, "count ", count) break property_label = re.sub(r"\s+", '_', property_label) sep = ";" values = list(result.values()) columns = ["source", "subject_label", "category", "prop_value", "value_label"] # for val in values: # col_name = col_name.union(set(val.keys())) # columns = list(col_name) rows = list() for k, v in result.items(): v['value_label'] = list(filter(None, v['value_label'])) v['value_label'] = list() if not any(v['value_label']) else list(v['value_label']) for k1, v1 in v.items(): if k1 != "source": # print(k1, v1) v[k1] = sep.join(v1) rows.append(v) df = DataFrame(rows, columns=columns) # print(df) return df @staticmethod def _formulate_main_query(property): main_query = 'select distinct ?source ?source_l ?category ?prop_l ?prop_value ?know_as where{\ ?source wdt:' + property + ' ?prop_value.\ ?source rdfs:label ?source_l.\ ?source wdt:P31/rdfs:label ?category.\ filter (lang(?category)="en")\ filter (lang(?source_l)="en")\ wd:' + property + ' rdfs:label ?prop_l.\ filter (lang(?prop_l)="en")\ optional {?prop_value rdfs:label ?know_as.\ filter (lang(?know_as)="en")}\ }' return main_query @staticmethod def _formulate_id_category_query(property): id_category_query = \ 'select distinct ?identifier ?l where{\ ?source wdt:' + property + ' ?value.\ ?source ?id ?idValue.\ ?identifier ?ref ?id.\ optional {?value rdfs:label ?know_as.\ filter (lang(?know_as)="en")}\ ?identifier wikibase:directClaim ?id.\ ?identifier wikibase:propertyType wikibase:ExternalId.\ ?identifier rdfs:label ?l.\ ?identifier schema:description ?desc.\ filter (lang(?desc)="en")\ filter (lang(?l)="en")\ }\ ORDER BY ?identifier' return id_category_query @staticmethod def _next(query_sent, offset): query_sent = query_sent + " LIMIT 1000 " + "OFFSET " + str(1000 * offset) return 
query_sent @staticmethod def _encode_url(url): encoded_url = urllib.parse.quote(url) return encoded_url @staticmethod def _get_query_result(query_req) -> List[dict]: data = {} with urllib.request.urlopen(query_req) as r: data = json.loads(r.read().decode('utf-8')) result = data['results']['bindings'] return result @staticmethod def _process_id_category_query(data): ids = dict() for item in data: identifier = item['l']['value'] ids[identifier] = set() return ids @staticmethod def _process_main_query(data): result = {} property_label = "" for item in data: category = item['category']['value'].strip() property_label = item['prop_l']['value'].strip() source = item['source']['value'].strip() prop_value = item['prop_value']['value'].strip() know_as = item['know_as']['value'].strip() if 'know_as' in item.keys() else None subject_l = item['source_l']['value'].strip() # id = item['id']['value'].strip() # id_l = item['id_l']['value'].strip() # id_value = item['id_value']['value'].strip() if source not in result.keys(): result[source] = dict() result[source]['source'] = source result[source]['category'] = set() result[source]['prop_value'] = set() result[source]['subject_label'] = set() result[source]['value_label'] = set() # result[source].update(copy.deepcopy(ids)) result[source]['prop_value'].add(prop_value) result[source]['category'].add(category) result[source]['subject_label'].add(subject_l) result[source]['value_label'].add(know_as) # result[source][id_l].add(id_value) # pprint("ss", result) return result, property_label
38.03352
126
0.552732
6,536
0.960047
0
0
3,723
0.546857
0
0
2,431
0.35708
37d6ae677936f62a1cad64182feb228714d24c7d
1,402
py
Python
axelrod/load_data_.py
danilobellini/Axelrod
2c9212553e06095c24adcb82a5979279cbdf45fb
[ "MIT" ]
null
null
null
axelrod/load_data_.py
danilobellini/Axelrod
2c9212553e06095c24adcb82a5979279cbdf45fb
[ "MIT" ]
1
2019-01-22T09:59:52.000Z
2019-01-22T09:59:52.000Z
axelrod/load_data_.py
danilobellini/Axelrod
2c9212553e06095c24adcb82a5979279cbdf45fb
[ "MIT" ]
null
null
null
from typing import Dict, List, Tuple import pkg_resources def load_file(filename: str, directory: str) -> List[List[str]]: """Loads a data file stored in the Axelrod library's data subdirectory, likely for parameters for a strategy.""" path = "/".join((directory, filename)) data_bytes = pkg_resources.resource_string(__name__, path) data = data_bytes.decode("UTF-8", "replace") rows = [] for line in data.split("\n"): if line.startswith("#") or len(line) == 0: continue s = line.split(", ") rows.append(s) return rows def load_weights( filename: str = "ann_weights.csv", directory: str = "data" ) -> Dict[str, Tuple[int, int, List[float]]]: """Load Neural Network Weights.""" rows = load_file(filename, directory) d = dict() for row in rows: name = str(row[0]) num_features = int(row[1]) num_hidden = int(row[2]) weights = list(map(float, row[3:])) d[name] = (num_features, num_hidden, weights) return d def load_pso_tables(filename="pso_gambler.csv", directory="data"): """Load lookup tables.""" rows = load_file(filename, directory) d = dict() for row in rows: name, a, b, c, = str(row[0]), int(row[1]), int(row[2]), int(row[3]) values = list(map(float, row[4:])) d[(name, int(a), int(b), int(c))] = values return d
31.155556
75
0.601284
0
0
0
0
0
0
0
0
251
0.17903
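An illustrative sketch of the row format that load_file() and load_weights() above expect: a strategy name, the two layer sizes, then the flattened weights, separated by comma-space. The name and numbers here are made up; the real rows ship in the library's data directory.

row = "EvolvedANN, 17, 10, 0.25, -0.5, 1.0".split(", ")
name, num_features, num_hidden = row[0], int(row[1]), int(row[2])
weights = list(map(float, row[3:]))
print(name, num_features, num_hidden, weights)  # EvolvedANN 17 10 [0.25, -0.5, 1.0]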
37d85e09c27d6497523862946e45ed0db97f77b6
5,248
py
Python
prescryptchain/api/views.py
genobank-io/CryptoVault
7c2f6c4c55df7d9e172058aad334a26786ea839f
[ "Apache-2.0" ]
3
2018-05-03T18:40:48.000Z
2019-06-09T19:04:44.000Z
prescryptchain/api/views.py
genobank-io/CryptoVault
7c2f6c4c55df7d9e172058aad334a26786ea839f
[ "Apache-2.0" ]
6
2018-06-27T00:14:46.000Z
2018-10-29T20:51:45.000Z
prescryptchain/api/views.py
genobank-io/CryptoVault
7c2f6c4c55df7d9e172058aad334a26786ea839f
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*- from __future__ import unicode_literals # REST from rest_framework.viewsets import ViewSetMixin from rest_framework import routers, serializers, viewsets from rest_framework.authentication import SessionAuthentication, BasicAuthentication, TokenAuthentication from rest_framework.permissions import IsAuthenticated, BasePermission from rest_framework.decorators import api_view, authentication_classes, permission_classes from rest_framework.views import APIView from rest_framework import mixins, generics from rest_framework.response import Response from rest_framework.authtoken.models import Token # our models from blockchain.models import Block, Prescription, Transaction, Address from blockchain.utils import pubkey_string_to_rsa, savify_key, pubkey_base64_to_rsa, pubkey_base64_from_uri from .exceptions import NonValidPubKey # Define router router = routers.DefaultRouter() class PrescriptionSerializer(serializers.ModelSerializer): """ Prescription serializer """ timestamp = serializers.DateTimeField(read_only=False) data = serializers.JSONField(binary=False, read_only=False, required=False) files = serializers.JSONField(binary=False, read_only=False, required=False) previous_hash = serializers.CharField(read_only=False, required=False, default="0") class Meta: model = Prescription fields = ( 'id', 'public_key', 'data', "files", 'timestamp', 'signature', 'previous_hash', 'raw_size', 'hash_id', 'is_valid', 'transaction', 'readable', ) read_only_fields = ('id', 'hash_id', 'is_valid',' transaction',) def validate(self, data): ''' Method to control Extra Keys on Payload!''' extra_keys = set(self.initial_data.keys()) - set(self.fields.keys()) if extra_keys: print(extra_keys) return data def create(self, validated_data): return Transaction.objects.create_tx(data=validated_data) class PrescriptionViewSet(viewsets.ModelViewSet): """ Prescription Viewset """ # Temporally without auth # authentication_classes = (TokenAuthentication, BasicAuthentication, ) # permission_classes = (IsAuthenticated, ) serializer_class = PrescriptionSerializer lookup_field = "hash_id" http_method_names = ['get', 'post', 'options'] def get_queryset(self): ''' Custom Get queryset ''' raw_public_key = self.request.query_params.get('public_key', None) if raw_public_key: try: pub_key = pubkey_string_to_rsa(raw_public_key) except: pub_key , raw_public_key = pubkey_base64_to_rsa(raw_public_key) hex_raw_pub_key = savify_key(pub_key) return Prescription.objects.filter(public_key=hex_raw_pub_key).order_by('-id') else: return Prescription.objects.all().order_by('-id') # add patient filter by email, after could modify with other router.register(r'rx-endpoint', PrescriptionViewSet, 'prescription-endpoint') class BlockSerializer(serializers.ModelSerializer): """ Prescription serializer """ class Meta: model = Block fields = ( 'id', 'hash_block', 'previous_hash', 'raw_size', 'data', 'timestamp', 'merkleroot', 'hashcash', 'nonce', ) read_only_fields = ('id', 'hash_block','timestamp','previous_hash', 'raw_size', 'data', 'merkleroot','hashcash','nonce',) class BlockViewSet(viewsets.ModelViewSet): """ Prescription Viewset """ serializer_class = BlockSerializer def get_queryset(self): return Block.objects.all().order_by('-timestamp') # add patient filter by email, after could modify with other router.register(r'block', BlockViewSet, 'block-endpoint') class AddressSerializer(serializers.ModelSerializer): """ Address serializer """ pub_key = serializers.CharField(read_only=True,allow_null=True, source="get_pub_key" ) class 
Meta: model = Address fields = ( 'public_key_b64', 'address', 'is_valid', 'pub_key', ) read_only_fields = ('address','pub_key', ) class AddressViewSet(viewsets.ModelViewSet): """ Prescription Viewset """ serializer_class = AddressSerializer lookup_field = "address" http_method_names = ['get', 'options'] def get_queryset(self): ''' Custom Get queryset ''' raw_public_key = self.request.query_params.get('public_key', None) if raw_public_key: try: pub_key_b64 = pubkey_base64_from_uri(raw_public_key) except Exception as e: raise NonValidPubKey else: _address = Address.objects.get_or_create_rsa_address(pub_key_b64) return Address.objects.filter(address=_address) else: return Address.objects.all() # add patient filter by email, after could modify with other router.register(r'address', AddressViewSet, 'address_endpoint')
33.858065
129
0.664444
3,933
0.749428
0
0
0
0
0
0
1,241
0.236471
37d92a06667232ad4a4f6ca14ad0257dd6a2e56a
2,484
py
Python
client/commands/incremental.py
stvreumi/pyre-check
94d13c8df37b53843ae92544b81042347b64315d
[ "MIT" ]
null
null
null
client/commands/incremental.py
stvreumi/pyre-check
94d13c8df37b53843ae92544b81042347b64315d
[ "MIT" ]
null
null
null
client/commands/incremental.py
stvreumi/pyre-check
94d13c8df37b53843ae92544b81042347b64315d
[ "MIT" ]
null
null
null
# Copyright (c) 2016-present, Facebook, Inc. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import atexit import logging import os import subprocess import sys from typing import List from .command import ClientException, ExitCode, State from .reporting import Reporting from .start import Start LOG = logging.getLogger(__name__) class Incremental(Reporting): NAME = "incremental" def __init__(self, arguments, configuration, analysis_directory) -> None: super(Incremental, self).__init__(arguments, configuration, analysis_directory) def _run(self) -> None: if self._state() == State.DEAD: LOG.warning("Starting server at `%s`.", self._analysis_directory.get_root()) arguments = self._arguments arguments.terminal = False arguments.no_watchman = False Start(arguments, self._configuration, self._analysis_directory).run() if self._state() != State.DEAD: LOG.info("Waiting for server...") result = self._call_client(command=self.NAME) try: result.check() errors = self._get_errors(result) self._print(errors) except ClientException as exception: LOG.error("Error while waiting for server.") LOG.error("Run `%s restart` in order to restart the server.", sys.argv[0]) self._exit_code = ExitCode.FAILURE def _flags(self) -> List[str]: flags = super()._flags() flags.extend( [ "-typeshed", self._configuration.typeshed, "-expected-binary-version", self._configuration.version_hash, ] ) search_path = self._configuration.search_path if search_path: flags.extend(["-search-path", ",".join(search_path)]) return flags # pyre-ignore: T31696900 def _read_stderr(self, _stream, analysis_directory) -> None: stderr_file = os.path.join(analysis_directory, ".pyre/server/server.stdout") with subprocess.Popen( ["tail", "-f", stderr_file], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL, ) as stderr_tail: atexit.register(stderr_tail.terminate) super(Incremental, self)._read_stderr( stderr_tail.stdout, analysis_directory )
31.846154
88
0.625201
2,068
0.832528
0
0
0
0
0
0
428
0.172303
37da81bd71be1d388df7554cdc71e1b8d0bef4e9
26,540
py
Python
main_random_policy.py
rish-raghu/Object-Goal-Navigation
d2c882f3a97396c691fc75b46bd94bb7077f7d0f
[ "MIT" ]
null
null
null
main_random_policy.py
rish-raghu/Object-Goal-Navigation
d2c882f3a97396c691fc75b46bd94bb7077f7d0f
[ "MIT" ]
null
null
null
main_random_policy.py
rish-raghu/Object-Goal-Navigation
d2c882f3a97396c691fc75b46bd94bb7077f7d0f
[ "MIT" ]
null
null
null
from collections import deque, defaultdict import os import sys import logging import time import json import gym import torch.nn as nn import torch import numpy as np import matplotlib.pyplot as plt from model import RL_Policy, Semantic_Mapping from utils.storage import GlobalRolloutStorage from envs import make_vec_envs from arguments import get_args import algo os.environ["OMP_NUM_THREADS"] = "1" def main(): args = get_args() np.random.seed(args.seed) torch.manual_seed(args.seed) if args.cuda: torch.cuda.manual_seed(args.seed) # Setup Logging log_dir = "{}/models/{}/".format(args.dump_location, args.exp_name) dump_dir = "{}/dump/{}/".format(args.dump_location, args.exp_name) if not os.path.exists(log_dir): os.makedirs(log_dir) if not os.path.exists(dump_dir): os.makedirs(dump_dir) logging.basicConfig( filename=log_dir + 'train.log', level=logging.INFO) print("Dumping at {}".format(log_dir)) print(args) logging.info(args) # Logging and loss variables num_scenes = args.num_processes num_episodes = int(args.num_eval_episodes) device = args.device = torch.device("cuda:0" if args.cuda else "cpu") g_masks = torch.ones(num_scenes).float().to(device) best_g_reward = -np.inf # one episode per process for both train and eval # for eval, one scene per process if args.eval: episode_success = [] episode_spl = [] episode_dist = [] for _ in range(args.num_processes): episode_success.append(deque(maxlen=num_episodes)) episode_spl.append(deque(maxlen=num_episodes)) episode_dist.append(deque(maxlen=num_episodes)) # for train, different episodes of same scene per process else: episode_success = deque(maxlen=1000) episode_spl = deque(maxlen=1000) episode_dist = deque(maxlen=1000) finished = np.zeros((args.num_processes)) wait_env = np.zeros((args.num_processes)) g_episode_rewards = deque(maxlen=1000) g_value_losses = deque(maxlen=1000) g_action_losses = deque(maxlen=1000) g_dist_entropies = deque(maxlen=1000) per_step_g_rewards = deque(maxlen=1000) g_process_rewards = np.zeros((num_scenes)) # Starting environments torch.set_num_threads(1) envs = make_vec_envs(args) obs, infos = envs.reset() full_episode_data = [] episode_data = [None] * num_scenes for e, info in enumerate(infos): cInfo = info.copy() cInfo["episode_data"]["positions"] = [] cInfo["episode_data"]["gt_positions"] = [] cInfo["episode_data"]["goal_rewards"] = [] cInfo["episode_data"]["explore_rewards"] = [] cInfo["episode_data"]["policy_goals"] = [] cInfo["episode_data"]["used_policy"] = [] episode_data[e] = cInfo["episode_data"] torch.set_grad_enabled(False) # Initialize map variables: # Full map consists of multiple channels containing the following: # 1. Obstacle Map # 2. Exploread Area (places that are known to be free or occupied) # 3. Current Agent Location # 4. Past Agent Locations # 5,6,7,.. 
: Semantic Categories nc = args.num_sem_categories + 4 # num channels # Calculating full and local map sizes map_size = args.map_size_cm // args.map_resolution full_w, full_h = map_size, map_size local_w = int(full_w / args.global_downscaling) local_h = int(full_h / args.global_downscaling) # Initializing full and local map full_map = torch.zeros(num_scenes, nc, full_w, full_h).float().to(device) local_map = torch.zeros(num_scenes, nc, local_w, local_h).float().to(device) # Initial full and local pose full_pose = torch.zeros(num_scenes, 3).float().to(device) local_pose = torch.zeros(num_scenes, 3).float().to(device) # Origin of local map origins = np.zeros((num_scenes, 3)) # Local Map Boundaries lmb = np.zeros((num_scenes, 4)).astype(int) # Planner pose inputs has 7 dimensions # 1-3 store continuous global agent location # 4-7 store local map boundaries planner_pose_inputs = np.zeros((num_scenes, 7)) # get local boundary (x1, x2, y1, y2) given local agent position (x, y) and map size def get_local_map_boundaries(agent_loc, local_sizes, full_sizes): loc_r, loc_c = agent_loc local_w, local_h = local_sizes full_w, full_h = full_sizes if args.global_downscaling > 1: gx1, gy1 = loc_r - local_w // 2, loc_c - local_h // 2 gx2, gy2 = gx1 + local_w, gy1 + local_h if gx1 < 0: gx1, gx2 = 0, local_w if gx2 > full_w: gx1, gx2 = full_w - local_w, full_w if gy1 < 0: gy1, gy2 = 0, local_h if gy2 > full_h: gy1, gy2 = full_h - local_h, full_h else: gx1, gx2, gy1, gy2 = 0, full_w, 0, full_h return [gx1, gx2, gy1, gy2] # initialize global and local maps and poses given that initial position # is at map center with 0 orientation def init_map_and_pose(): full_map.fill_(0.) full_pose.fill_(0.) full_pose[:, :2] = args.map_size_cm / 100.0 / 2.0 locs = full_pose.cpu().numpy() planner_pose_inputs[:, :3] = locs for e in range(num_scenes): r, c = locs[e, 1], locs[e, 0] loc_r, loc_c = [int(r * 100.0 / args.map_resolution), int(c * 100.0 / args.map_resolution)] # 3x3 grid around agent location is considered explored full_map[e, 2:4, loc_r - 1:loc_r + 2, loc_c - 1:loc_c + 2] = 1.0 lmb[e] = get_local_map_boundaries((loc_r, loc_c), (local_w, local_h), (full_w, full_h)) planner_pose_inputs[e, 3:] = lmb[e] origins[e] = [lmb[e][2] * args.map_resolution / 100.0, lmb[e][0] * args.map_resolution / 100.0, 0.] for e in range(num_scenes): local_map[e] = full_map[e, :, lmb[e, 0]:lmb[e, 1], lmb[e, 2]:lmb[e, 3]] local_pose[e] = full_pose[e] - \ torch.from_numpy(origins[e]).to(device).float() # identical to above, except for specific environment def init_map_and_pose_for_env(e): full_map[e].fill_(0.) full_pose[e].fill_(0.) full_pose[e, :2] = args.map_size_cm / 100.0 / 2.0 locs = full_pose[e].cpu().numpy() planner_pose_inputs[e, :3] = locs r, c = locs[1], locs[0] loc_r, loc_c = [int(r * 100.0 / args.map_resolution), int(c * 100.0 / args.map_resolution)] full_map[e, 2:4, loc_r - 1:loc_r + 2, loc_c - 1:loc_c + 2] = 1.0 lmb[e] = get_local_map_boundaries((loc_r, loc_c), (local_w, local_h), (full_w, full_h)) planner_pose_inputs[e, 3:] = lmb[e] origins[e] = [lmb[e][2] * args.map_resolution / 100.0, lmb[e][0] * args.map_resolution / 100.0, 0.] 
local_map[e] = full_map[e, :, lmb[e, 0]:lmb[e, 1], lmb[e, 2]:lmb[e, 3]] local_pose[e] = full_pose[e] - \ torch.from_numpy(origins[e]).to(device).float() # reward is the newly explored area in a given step (in m^2) def update_intrinsic_rew(e): prev_explored_area = full_map[e, 1].sum(1).sum(0) full_map[e, :, lmb[e, 0]:lmb[e, 1], lmb[e, 2]:lmb[e, 3]] = \ local_map[e] curr_explored_area = full_map[e, 1].sum(1).sum(0) intrinsic_rews[e] = curr_explored_area - prev_explored_area intrinsic_rews[e] *= (args.map_resolution / 100.)**2 # to m^2 def get_random_goal(e): for _ in range(20): goal = np.random.rand(2) goal = [int(goal[0] * local_w), int(goal[1] * local_w)] goal = [min(goal[0], int(local_w-1)), min(goal[1], int(local_w-1))] if not local_map[e, 1, goal[0], goal[1]]: break return goal init_map_and_pose() # Global policy observation space ngc = 8 + args.num_sem_categories es = 2 g_observation_space = gym.spaces.Box(0, 1, # binary local map (ngc, local_w, local_h), dtype='uint8') # Semantic Mapping sem_map_module = Semantic_Mapping(args).to(device) sem_map_module.eval() intrinsic_rews = torch.zeros(num_scenes).to(device) # Predict semantic map from frame 1 poses = torch.from_numpy(np.asarray( [infos[env_idx]['sensor_pose'] for env_idx in range(num_scenes)]) ).float().to(device) # args (obs, pose_obs, maps_last, poses_last) _, local_map, _, local_pose = \ sem_map_module(obs, poses, local_map, local_pose) locs = local_pose.cpu().numpy() for e in range(num_scenes): r, c = locs[e, 1], locs[e, 0] loc_r, loc_c = [int(r * 100.0 / args.map_resolution), int(c * 100.0 / args.map_resolution)] local_map[e, 2:4, loc_r - 1:loc_r + 2, loc_c - 1:loc_c + 2] = 1. episode_data[e]["positions"].append([int(loc_r + lmb[e, 0]), int(loc_c + lmb[e, 2]), int(locs[e, 2])]) episode_data[e]["gt_positions"].append(list(infos[e]["gt_pos"])) global_goals = [get_random_goal(e) for e in range(num_scenes)] goal_maps = [np.zeros((local_w, local_h)) for _ in range(num_scenes)] for e in range(num_scenes): goal_maps[e][global_goals[e][0], global_goals[e][1]] = 1 episode_data[e]["policy_goals"].append(((global_goals[e] + lmb[e, [0,2]]).tolist(), 0)) episode_data[e]["used_policy"].append(True) planner_inputs = [{} for e in range(num_scenes)] for e, p_input in enumerate(planner_inputs): p_input['map_pred'] = local_map[e, 0, :, :].cpu().numpy() # obstacles p_input['exp_pred'] = local_map[e, 1, :, :].cpu().numpy() # explored p_input['pose_pred'] = planner_pose_inputs[e] # global location+local map bounds p_input['goal'] = goal_maps[e] # global_goals[e] p_input['new_goal'] = 1 p_input['found_goal'] = 0 p_input['wait'] = wait_env[e] or finished[e] if args.visualize or args.print_images: local_map[e, -1, :, :] = 1e-5 # TODO: what is this? 
# single channel where each grid loc is cat ID p_input['sem_map_pred'] = local_map[e, 4:, :, : ].argmax(0).cpu().numpy() obs, _, done, infos = envs.plan_act_and_preprocess(planner_inputs) start = time.time() g_reward = 0 torch.set_grad_enabled(False) spl_per_category = defaultdict(list) success_per_category = defaultdict(list) for step in range(args.num_training_frames // args.num_processes + 1): if finished.sum() == args.num_processes: break g_step = (step // args.num_local_steps) % args.num_global_steps # global step num in PPO l_step = step % args.num_local_steps # local step num in global step # ------------------------------------------------------------------ # Reinitialize variables when episode ends l_masks = torch.FloatTensor([0 if x else 1 for x in done]).to(device) g_masks *= l_masks for e, x in enumerate(done): if x: spl = infos[e]['spl'] success = infos[e]['success'] dist = infos[e]['distance_to_goal'] spl_per_category[infos[e]['goal_name']].append(spl) success_per_category[infos[e]['goal_name']].append(success) if args.eval: episode_success[e].append(success) episode_spl[e].append(spl) episode_dist[e].append(dist) if len(episode_success[e]) == num_episodes: finished[e] = 1 episode_data[e]["success"] = success episode_data[e]["spl"] = spl episode_data[e]["distance_to_goal"] = dist full_map[e, :, lmb[e, 0]:lmb[e, 1], lmb[e, 2]:lmb[e, 3]] = local_map[e] episode_data[e]["explored_area"] = full_map[e, 1].sum(1).sum(0).item() scene = episode_data[e]["scene_id"][16:-4] if args.save_maps: np.save('{}/maparr_{}_{}'.format(dump_dir, scene, episode_data[e]['episode_id']), full_map[e].cpu().numpy()) full_episode_data.append(episode_data[e]) cInfo = infos[e].copy() cInfo["episode_data"]["positions"] = [] cInfo["episode_data"]["gt_positions"] = [] cInfo["episode_data"]["goal_rewards"] = [] cInfo["episode_data"]["explore_rewards"] = [] cInfo["episode_data"]["policy_goals"] = [] cInfo["episode_data"]["used_policy"] = [] episode_data[e] = cInfo["episode_data"] else: episode_success.append(success) episode_spl.append(spl) episode_dist.append(dist) wait_env[e] = 1. update_intrinsic_rew(e) init_map_and_pose_for_env(e) # ------------------------------------------------------------------ # ------------------------------------------------------------------ # Semantic Mapping Module poses = torch.from_numpy(np.asarray( [infos[env_idx]['sensor_pose'] for env_idx in range(num_scenes)]) ).float().to(device) _, local_map, _, local_pose = \ sem_map_module(obs, poses, local_map, local_pose) locs = local_pose.cpu().numpy() planner_pose_inputs[:, :3] = locs + origins local_map[:, 2, :, :].fill_(0.) # Resetting current location channel # update current location for e in range(num_scenes): r, c = locs[e, 1], locs[e, 0] loc_r, loc_c = [int(r * 100.0 / args.map_resolution), int(c * 100.0 / args.map_resolution)] local_map[e, 2:4, loc_r - 2:loc_r + 3, loc_c - 2:loc_c + 3] = 1. if args.eval and not wait_env[e]: episode_data[e]["positions"].append([int(loc_r + lmb[e, 0]), int(loc_c + lmb[e, 2]), int(locs[e, 2])]) episode_data[e]["gt_positions"].append(list(infos[e]["gt_pos"])) # ------------------------------------------------------------------ # ------------------------------------------------------------------ # Global Policy if l_step == args.num_local_steps - 1: # For every global step, update the full and local maps for e in range(num_scenes): if wait_env[e] == 1: # New episode wait_env[e] = 0. 
else: update_intrinsic_rew(e) # update global map and pose based on new position in old local frame full_map[e, :, lmb[e, 0]:lmb[e, 1], lmb[e, 2]:lmb[e, 3]] = \ local_map[e] full_pose[e] = local_pose[e] + \ torch.from_numpy(origins[e]).to(device).float() # center the local frame based on new position locs = full_pose[e].cpu().numpy() r, c = locs[1], locs[0] loc_r, loc_c = [int(r * 100.0 / args.map_resolution), int(c * 100.0 / args.map_resolution)] lmb[e] = get_local_map_boundaries((loc_r, loc_c), (local_w, local_h), (full_w, full_h)) # compute new local map and pose based on new local frame planner_pose_inputs[e, 3:] = lmb[e] origins[e] = [lmb[e][2] * args.map_resolution / 100.0, lmb[e][0] * args.map_resolution / 100.0, 0.] local_map[e] = full_map[e, :, lmb[e, 0]:lmb[e, 1], lmb[e, 2]:lmb[e, 3]] local_pose[e] = full_pose[e] - \ torch.from_numpy(origins[e]).to(device).float() locs = local_pose.cpu().numpy() # Get exploration reward and metrics g_reward = torch.from_numpy(np.asarray( [infos[env_idx]['g_reward'] for env_idx in range(num_scenes)]) ).float().to(device) g_reward += args.intrinsic_rew_coeff * intrinsic_rews.detach() for e in range(num_scenes): if args.eval and not wait_env[e]: episode_data[e]["goal_rewards"].append(infos[e]["g_reward"]) episode_data[e]["explore_rewards"].append(intrinsic_rews[e].item()) g_process_rewards += g_reward.cpu().numpy() g_total_rewards = g_process_rewards * \ (1 - g_masks.cpu().numpy()) g_process_rewards *= g_masks.cpu().numpy() per_step_g_rewards.append(np.mean(g_reward.cpu().numpy())) if np.sum(g_total_rewards) != 0: for total_rew in g_total_rewards: if total_rew != 0: g_episode_rewards.append(total_rew) global_goals = [get_random_goal(e) for e in range(num_scenes)] for e in range(num_scenes): if args.eval and not wait_env[e]: episode_data[e]["policy_goals"].append(((global_goals[e] + lmb[e, [0,2]]).tolist(), 0)) g_reward = 0 g_masks = torch.ones(num_scenes).float().to(device) # ------------------------------------------------------------------ # ------------------------------------------------------------------ # Update long-term goal if target object is found found_goal = [0 for _ in range(num_scenes)] goal_maps = [np.zeros((local_w, local_h)) for _ in range(num_scenes)] # If goal category not found in map, goal is the location sampled by # policy for e in range(num_scenes): goal_maps[e][global_goals[e][0], global_goals[e][1]] = 1 if args.eval and not wait_env[e]: episode_data[e]["used_policy"].append(True) # Else if goal category found in map, use all locations where prob of goal # obj existing is > 0 as the goal map for planner for e in range(num_scenes): cn = infos[e]['goal_cat_id'] + 4 if local_map[e, cn, :, :].sum() != 0.: cat_semantic_map = local_map[e, cn, :, :].cpu().numpy() cat_semantic_scores = cat_semantic_map cat_semantic_scores[cat_semantic_scores > 0] = 1. 
goal_maps[e] = cat_semantic_scores found_goal[e] = 1 if args.eval and not wait_env[e]: episode_data[e]["used_policy"][-1] = False # ------------------------------------------------------------------ # ------------------------------------------------------------------ # Take action and get next observation planner_inputs = [{} for e in range(num_scenes)] for e, p_input in enumerate(planner_inputs): p_input['map_pred'] = local_map[e, 0, :, :].cpu().numpy() p_input['exp_pred'] = local_map[e, 1, :, :].cpu().numpy() p_input['pose_pred'] = planner_pose_inputs[e] p_input['goal'] = goal_maps[e] # global_goals[e] p_input['new_goal'] = l_step == args.num_local_steps - 1 p_input['found_goal'] = found_goal[e] p_input['wait'] = wait_env[e] or finished[e] if args.visualize or args.print_images: local_map[e, -1, :, :] = 1e-5 p_input['sem_map_pred'] = local_map[e, 4:, :, :].argmax(0).cpu().numpy() obs, _, done, infos = envs.plan_act_and_preprocess(planner_inputs) # ------------------------------------------------------------------ # Logging if len(full_episode_data) % args.episode_save_interval == 0: with open('{}/{}_episode_data.json'.format( dump_dir, args.split), 'w') as f: json.dump(full_episode_data, f) if step % args.log_interval == 0: end = time.time() time_elapsed = time.gmtime(end - start) log = " ".join([ "Time: {0:0=2d}d".format(time_elapsed.tm_mday - 1), "{},".format(time.strftime("%Hh %Mm %Ss", time_elapsed)), "num timesteps {},".format(step * num_scenes), "FPS {},".format(int(step * num_scenes / (end - start))) ]) log += "\n\tRewards:" if len(g_episode_rewards) > 0: log += " ".join([ " Global step mean/med rew:", "{:.4f}/{:.4f},".format( np.mean(per_step_g_rewards), np.median(per_step_g_rewards)), " Global eps mean/med/min/max eps rew:", "{:.3f}/{:.3f}/{:.3f}/{:.3f},".format( np.mean(g_episode_rewards), np.median(g_episode_rewards), np.min(g_episode_rewards), np.max(g_episode_rewards)) ]) if args.eval: total_success = [] total_spl = [] total_dist = [] for e in range(args.num_processes): for acc in episode_success[e]: total_success.append(acc) for dist in episode_dist[e]: total_dist.append(dist) for spl in episode_spl[e]: total_spl.append(spl) if len(total_spl) > 0: log += " ObjectNav succ/spl/dtg:" log += " {:.3f}/{:.3f}/{:.3f}({:.0f}),".format( np.mean(total_success), np.mean(total_spl), np.mean(total_dist), len(total_spl)) else: if len(episode_success) > 100: log += " ObjectNav succ/spl/dtg:" log += " {:.3f}/{:.3f}/{:.3f}({:.0f}),".format( np.mean(episode_success), np.mean(episode_spl), np.mean(episode_dist), len(episode_spl)) log += "\n\tLosses:" if len(g_value_losses) > 0 and not args.eval: log += " ".join([ " Policy Loss value/action/dist:", "{:.3f}/{:.3f}/{:.3f},".format( np.mean(g_value_losses), np.mean(g_action_losses), np.mean(g_dist_entropies)) ]) print(log) logging.info(log) # ------------------------------------------------------------------ # ------------------------------------------------------------------ # Save best models if (step * num_scenes) % args.save_interval < \ num_scenes: if len(g_episode_rewards) >= 1000 and \ (np.mean(g_episode_rewards) >= best_g_reward) \ and not args.eval: torch.save(g_policy.state_dict(), os.path.join(log_dir, "model_best.pth")) best_g_reward = np.mean(g_episode_rewards) # Save periodic models if (step * num_scenes) % args.save_periodic < \ num_scenes: total_steps = step * num_scenes if not args.eval: torch.save(g_policy.state_dict(), os.path.join(dump_dir, "periodic_{}.pth".format(total_steps))) # 
------------------------------------------------------------------ # Print and save model performance numbers during evaluation if args.eval: print("Dumping eval details...") total_success = [] total_spl = [] total_dist = [] for e in range(args.num_processes): for acc in episode_success[e]: total_success.append(acc) for dist in episode_dist[e]: total_dist.append(dist) for spl in episode_spl[e]: total_spl.append(spl) if len(total_spl) > 0: log = "Final ObjectNav succ/spl/dtg:" log += " {:.3f}/{:.3f}/{:.3f}({:.0f}),".format( np.mean(total_success), np.mean(total_spl), np.mean(total_dist), len(total_spl)) print(log) logging.info(log) # Save the spl per category log = "Success | SPL per category\n" for key in success_per_category: log += "{}: {} | {}\n".format(key, sum(success_per_category[key]) / len(success_per_category[key]), sum(spl_per_category[key]) / len(spl_per_category[key])) print(log) logging.info(log) with open('{}/{}_spl_per_cat_pred_thr.json'.format( dump_dir, args.split), 'w') as f: json.dump(spl_per_category, f) with open('{}/{}_success_per_cat_pred_thr.json'.format( dump_dir, args.split), 'w') as f: json.dump(success_per_category, f) with open('{}/{}_episode_data.json'.format( dump_dir, args.split), 'w') as f: json.dump(full_episode_data, f) if __name__ == "__main__": main()
41.020093
132
0.511492
0
0
0
0
0
0
0
0
4,860
0.18312
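A simplified, self-contained sketch of the intrinsic exploration reward computed in update_intrinsic_rew above: the newly explored area between two map snapshots, converted to square metres. The 5 cm map resolution and the array sizes are assumed values for illustration, not taken from the original configuration.

import numpy as np

MAP_RESOLUTION_CM = 5  # assumption; the original reads this from args.map_resolution

def intrinsic_reward(prev_explored_mask, curr_explored_mask):
    # Count newly explored cells this step and convert the cell count to m^2.
    newly_explored = curr_explored_mask.sum() - prev_explored_mask.sum()
    return newly_explored * (MAP_RESOLUTION_CM / 100.0) ** 2

before = np.zeros((480, 480), dtype=bool)
after = before.copy()
after[100:140, 100:140] = True              # 1600 newly explored cells
print(intrinsic_reward(before, after))      # 4.0 m^2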
37db93135f06b7cc7a06b9ea9f0839b0af335d54
6,889
py
Python
src/ITN/srmg/core/RiemannianRight.py
Yulv-git/Awesome-Ultrasound-Standard-Plane-Detection
2e35afaa891badf5a235b5d995102e4dc8a4cf0d
[ "MIT" ]
1
2022-03-24T06:54:36.000Z
2022-03-24T06:54:36.000Z
src/ITN/srmg/core/RiemannianRight.py
Yulv-git/Awesome-Ultrasound-Standard-Plane-Detection
2e35afaa891badf5a235b5d995102e4dc8a4cf0d
[ "MIT" ]
null
null
null
src/ITN/srmg/core/RiemannianRight.py
Yulv-git/Awesome-Ultrasound-Standard-Plane-Detection
2e35afaa891badf5a235b5d995102e4dc8a4cf0d
[ "MIT" ]
null
null
null
#!/usr/bin/env python # coding=utf-8 ''' Author: Shuangchi He / Yulv Email: yulvchi@qq.com Date: 2022-03-19 10:33:38 Motto: Entities should not be multiplied unnecessarily. LastEditors: Shuangchi He LastEditTime: 2022-03-23 00:52:55 FilePath: /Awesome-Ultrasound-Standard-Plane-Detection/src/ITN/srmg/core/RiemannianRight.py Description: Modify here please Init from https://github.com/yuanwei1989/plane-detection Author: Yuanwei Li (3 Oct 2018) # Copyright (c) 2006-2017, Nina Milone, Bishesh Kanal, Benjamin Hou # Copyright (c) 2006-2017, Imperial College of Science, Technology and Medicine # Produced at Biomedical Image Analysis Group # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of the copyright holders nor the names of any # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. Statistics on Riemannian Manifolds and Groups --------------------------------------------- This is a set of codes to compare the computing of the different types of means on Lie groups. These codes can be used to reproduce the experiments illustrated in the video developed for the MICCAI Educational challenge 2014, available at: url of the video. :Authors: `Nina Miolane <website>` `Bishesh Khanal <website>` :Organization: Asclepios Team, INRIA Sophia Antipolis. :Version: 2017.07.05 Requirements ------------ * `Numpy 1.11 <http://www.numpy.org>`_ Notes ----- ---------- (1) Defining a mean on Lie group. Nina Miolane. Medical Imaging. 2013. <hal-00938320> ''' import numpy import math from srmg.common.group import * from srmg.common.util import * EPS = 1e-5 def riemExpR(a,f0,v): """ start: TODO What the function does clearer function name ? Inputs description: Outputs description: end: TODO Riemannian exponential and logarithm from any point f0 (for left- and right-invariant metric) """ f = grpCompose((riemExpIdR(a, numpy.linalg.lstsq(jR(f0),v)[0])), f0) return f def riemExpIdR(a,v): """ start: TODO What the function does clearer function name ? 
Inputs description: Outputs description: end: TODO Riemannian exponential and logarithm from Id (for left- and right-invariant metric) """ v=grpReg(-v); f = numpy.zeros(6) f[0:3] = v[0:3] f[3:6] = a * v[3:6] f = grpInv(f) return f def sigma2R(a,m,tabf,tabw): """ start: TODO What the function does clearer function name ? Inputs description: Outputs description: end: TODO """ siz = tabf.shape[0] if siz < 2: print('Error: Calculating variance requires at least 2 points') return 0 s = 0 for i in range(0,siz): s = s + tabw[i] * normA2R(a,m,riemLogR(a,m,tabf[i,:])); return s def riemLogR(a,f0,f): """ DESCRIPTION Attributes: a: ????? f0: ???? f: ???? Return: v: ????? """ v=numpy.dot(jR(f0),riemLogIdR(a,grpCompose(f,grpInv(f0)))) return v def riemLogIdR(a,f): """ DESCRIPTION Attributes: a: ????? f: ???? Return: v: ????? """ v = numpy.zeros(6) v[0:3] = f[0:3] v[3:6] = numpy.dot(rotMat(-f[0:3]),f[3:6]); return v def qR(a,f): """ Left- and right- invariant inner product in the principal chart (propagation of Frobenius inner product) Attributes: a: ????? f: ???? Return: g: ????? """ f = grpReg(f) g0 = numpy.zeros([6,6]) g0[0:3,0:3] = numpy.eye(3) g0[3:6,3:6] = a * numpy.eye(3) g = numpy.dot(numpy.dot(numpy.linalg.inv(jR(f).T) , g0) , numpy.linalg.inv(jR(f))) return g def jR(f): """ Differentials of the left and right translations for SO(3) in the principal chart Attributes: r: ????? Return: Jl: ????? """ #f = makeColVector(f,6); # unnecessary if 1D f = grpReg(f); Jr = numpy.zeros([6,6]) Jr[0:3,0:3] = jRotR(f[0:3]); Jr[3:6,0:3] = -skew(f[3:6]); Jr[3:6,3:6] = numpy.eye(3); return Jr def normA2R(a,f,v): """ This function calculates the normalised left Attributes: a: ????? f: ????? v: ????? Return: n: normalised vector """ v=grpReg(v); n=numpy.dot(numpy.dot(v.T,qR(a,f)),v); return n def frechetR(a,tabf,tabw): """ This function computes the frechet-L mean Attributes: img: The fixed image that will be transformed (simpleitk type) a: ????? tabf: SE3 data points (Nx6 vector) tabw: data point weights (Nx1 vector) Return: m: The mean """ siz = tabf.shape[0] if siz < 2: print('Error: Calculating mean requires at least 2 points') m = tabf[0,:] # Iteration 0 mbis=m; print('mbisR=' + str(mbis)) aux=numpy.zeros(6); for i in range (0,siz): aux=aux+tabw[i]*riemLogR(a,mbis,tabf[i,:]); m=riemExpR(a,mbis,aux); # Iteration 1 until converges while (normA2R(a,mbis,riemLogR(a,mbis,m))>EPS*sigma2R(a,mbis,tabf,tabw)): mbis=m; print('mbisR=' + str(mbis)) aux=numpy.zeros(6); for i in range (0,siz): aux=aux+tabw[i]*riemLogR(a,mbis,tabf[i,:]); m=riemExpR(a,mbis,aux); return m
27.556
108
0.609958
0
0
0
0
0
0
0
0
4,961
0.720134
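The frechetR routine above iterates m <- Exp_m(sum_i w_i * Log_m(x_i)) until the update norm is negligible. As a hedged illustration of that fixed-point pattern (not part of the original srmg package), the same iteration on the 1-D rotation group, where the exp/log maps reduce to angle arithmetic:

import math

def log_map(m, x):
    # Smallest signed angle taking m to x (a tangent vector at m).
    return (x - m + math.pi) % (2 * math.pi) - math.pi

def exp_map(m, v):
    return (m + v) % (2 * math.pi)

def frechet_mean(angles, weights, eps=1e-9, max_iter=100):
    m = angles[0]
    for _ in range(max_iter):
        step = sum(w * log_map(m, a) for a, w in zip(angles, weights))
        m_new = exp_map(m, step)
        if abs(log_map(m, m_new)) < eps:
            return m_new
        m = m_new
    return m

angles = [0.1, 0.2, 6.2]               # 6.2 rad sits just below 2*pi
weights = [1.0 / 3] * 3
print(frechet_mean(angles, weights))   # ~0.072, unlike the naive average of ~2.17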
37dc25007d47db4fa96ca0730b82167ce6738233
4,658
py
Python
v0449gRpc_pb2.py
StormDev87/VPH_bot_python
ae83a0b61e234912c0136ef0f176e7a88603ff28
[ "MIT" ]
1
2022-02-28T16:20:33.000Z
2022-02-28T16:20:33.000Z
v0449gRpc_pb2.py
StormDev87/VPH_bot_python
ae83a0b61e234912c0136ef0f176e7a88603ff28
[ "MIT" ]
null
null
null
v0449gRpc_pb2.py
StormDev87/VPH_bot_python
ae83a0b61e234912c0136ef0f176e7a88603ff28
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: v0449gRpc.proto """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x0fv0449gRpc.proto\x12\tv0449gRpc\"\x1b\n\x0b\x64\x61taRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"\x1a\n\x08\x64\x61ta2Plc\x12\x0e\n\x06xmlSer\x18\x01 \x01(\t\"\x1f\n\x0cslaveReq2Plc\x12\x0f\n\x07request\x18\x01 \x01(\x05\"\x1a\n\x08\x64\x61ta2Hmi\x12\x0e\n\x06xmlSer\x18\x01 \x01(\t\"\x1b\n\ndata2PlcJs\x12\r\n\x05jsSer\x18\x01 \x01(\t\"\x1b\n\ndata2HmiJs\x12\r\n\x05jsSer\x18\x01 \x01(\t\"\x1c\n\ndata2PlcPb\x12\x0e\n\x06xmlSer\x18\x01 \x01(\t\"\x1d\n\ndataAnswer\x12\x0f\n\x07message\x18\x01 \x01(\t2\x93\x01\n\x0cv0449gRpcSvc\x12=\n\x0bxchRtDataJs\x12\x15.v0449gRpc.data2PlcJs\x1a\x15.v0449gRpc.data2HmiJs\"\x00\x12\x44\n\x10xchRtDataJsSlave\x12\x17.v0449gRpc.slaveReq2Plc\x1a\x15.v0449gRpc.data2HmiJs\"\x00\x62\x06proto3') _DATAREQUEST = DESCRIPTOR.message_types_by_name['dataRequest'] _DATA2PLC = DESCRIPTOR.message_types_by_name['data2Plc'] _SLAVEREQ2PLC = DESCRIPTOR.message_types_by_name['slaveReq2Plc'] _DATA2HMI = DESCRIPTOR.message_types_by_name['data2Hmi'] _DATA2PLCJS = DESCRIPTOR.message_types_by_name['data2PlcJs'] _DATA2HMIJS = DESCRIPTOR.message_types_by_name['data2HmiJs'] _DATA2PLCPB = DESCRIPTOR.message_types_by_name['data2PlcPb'] _DATAANSWER = DESCRIPTOR.message_types_by_name['dataAnswer'] dataRequest = _reflection.GeneratedProtocolMessageType('dataRequest', (_message.Message,), { 'DESCRIPTOR' : _DATAREQUEST, '__module__' : 'v0449gRpc_pb2' # @@protoc_insertion_point(class_scope:v0449gRpc.dataRequest) }) _sym_db.RegisterMessage(dataRequest) data2Plc = _reflection.GeneratedProtocolMessageType('data2Plc', (_message.Message,), { 'DESCRIPTOR' : _DATA2PLC, '__module__' : 'v0449gRpc_pb2' # @@protoc_insertion_point(class_scope:v0449gRpc.data2Plc) }) _sym_db.RegisterMessage(data2Plc) slaveReq2Plc = _reflection.GeneratedProtocolMessageType('slaveReq2Plc', (_message.Message,), { 'DESCRIPTOR' : _SLAVEREQ2PLC, '__module__' : 'v0449gRpc_pb2' # @@protoc_insertion_point(class_scope:v0449gRpc.slaveReq2Plc) }) _sym_db.RegisterMessage(slaveReq2Plc) data2Hmi = _reflection.GeneratedProtocolMessageType('data2Hmi', (_message.Message,), { 'DESCRIPTOR' : _DATA2HMI, '__module__' : 'v0449gRpc_pb2' # @@protoc_insertion_point(class_scope:v0449gRpc.data2Hmi) }) _sym_db.RegisterMessage(data2Hmi) data2PlcJs = _reflection.GeneratedProtocolMessageType('data2PlcJs', (_message.Message,), { 'DESCRIPTOR' : _DATA2PLCJS, '__module__' : 'v0449gRpc_pb2' # @@protoc_insertion_point(class_scope:v0449gRpc.data2PlcJs) }) _sym_db.RegisterMessage(data2PlcJs) data2HmiJs = _reflection.GeneratedProtocolMessageType('data2HmiJs', (_message.Message,), { 'DESCRIPTOR' : _DATA2HMIJS, '__module__' : 'v0449gRpc_pb2' # @@protoc_insertion_point(class_scope:v0449gRpc.data2HmiJs) }) _sym_db.RegisterMessage(data2HmiJs) data2PlcPb = _reflection.GeneratedProtocolMessageType('data2PlcPb', (_message.Message,), { 'DESCRIPTOR' : _DATA2PLCPB, '__module__' : 'v0449gRpc_pb2' # @@protoc_insertion_point(class_scope:v0449gRpc.data2PlcPb) }) _sym_db.RegisterMessage(data2PlcPb) dataAnswer = 
_reflection.GeneratedProtocolMessageType('dataAnswer', (_message.Message,), { 'DESCRIPTOR' : _DATAANSWER, '__module__' : 'v0449gRpc_pb2' # @@protoc_insertion_point(class_scope:v0449gRpc.dataAnswer) }) _sym_db.RegisterMessage(dataAnswer) _V0449GRPCSVC = DESCRIPTOR.services_by_name['v0449gRpcSvc'] if _descriptor._USE_C_DESCRIPTORS == False: DESCRIPTOR._options = None _DATAREQUEST._serialized_start=30 _DATAREQUEST._serialized_end=57 _DATA2PLC._serialized_start=59 _DATA2PLC._serialized_end=85 _SLAVEREQ2PLC._serialized_start=87 _SLAVEREQ2PLC._serialized_end=118 _DATA2HMI._serialized_start=120 _DATA2HMI._serialized_end=146 _DATA2PLCJS._serialized_start=148 _DATA2PLCJS._serialized_end=175 _DATA2HMIJS._serialized_start=177 _DATA2HMIJS._serialized_end=204 _DATA2PLCPB._serialized_start=206 _DATA2PLCPB._serialized_end=234 _DATAANSWER._serialized_start=236 _DATAANSWER._serialized_end=265 _V0449GRPCSVC._serialized_start=268 _V0449GRPCSVC._serialized_end=415 # @@protoc_insertion_point(module_scope)
43.12963
790
0.800129
0
0
0
0
0
0
0
0
1,944
0.417347
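A minimal round-trip with the generated message classes above; the module name matches the generated file, but the payload string is an arbitrary example.

import v0449gRpc_pb2 as pb

msg = pb.data2PlcJs(jsSer='{"speed": 12.5}')   # example payload, not from a real PLC
raw = msg.SerializeToString()                  # wire-format bytes
decoded = pb.data2PlcJs.FromString(raw)
print(decoded.jsSer)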
37dd454cd95fe5c19347e66dba4d2c8da8d4857f
14,423
py
Python
api/resources_portal/test/views/test_search_endpoint.py
AlexsLemonade/resources-portal
d91c6c8d6135461faccbc78ef2b0be3f9b358f21
[ "BSD-3-Clause" ]
null
null
null
api/resources_portal/test/views/test_search_endpoint.py
AlexsLemonade/resources-portal
d91c6c8d6135461faccbc78ef2b0be3f9b358f21
[ "BSD-3-Clause" ]
536
2019-11-13T15:49:03.000Z
2022-03-28T20:17:24.000Z
api/resources_portal/test/views/test_search_endpoint.py
AlexsLemonade/resources-portal
d91c6c8d6135461faccbc78ef2b0be3f9b358f21
[ "BSD-3-Clause" ]
1
2020-04-03T02:07:29.000Z
2020-04-03T02:07:29.000Z
import datetime from django.core.management import call_command from django.urls import reverse from rest_framework import status from rest_framework.test import APITestCase from resources_portal.management.commands.populate_dev_database import populate_dev_database from resources_portal.models import Material, Organization, User class SearchMaterialsEndpointTestCase(APITestCase): """ Tests /search/materials operations. """ @classmethod def setUpClass(cls): super(SearchMaterialsEndpointTestCase, cls).setUpClass() populate_dev_database() # Put newly created materials in the search index call_command("search_index", "-f", "--rebuild") cls.primary_prof = User.objects.get(username="PrimaryProf") cls.secondary_prof = User.objects.get(username="SecondaryProf") cls.post_doc = User.objects.get(username="PostDoc") cls.primary_lab = Organization.objects.get(name="PrimaryLab") cls.material1 = Material.objects.get(title="Melanoma Reduction Plasmid") cls.material2 = Material.objects.get(title="Allele Extraction Protocol") @classmethod def tearDownClass(cls): super(SearchMaterialsEndpointTestCase, cls).tearDownClass() # Rebuild search index with what's actaully in the django database call_command("search_index", "-f", "--rebuild") def test_search_for_title_finds_a_given_material(self): self.client.force_authenticate(user=self.primary_prof) search_url = reverse("search-materials-list") + "?search=" + self.material1.title response = self.client.get(search_url) self.assertEqual(response.status_code, status.HTTP_200_OK) first_result_id = int(response.json()["results"][0]["id"]) self.assertEqual(first_result_id, self.material1.id) def test_filter_on_organization_retrieves_all_organization_materials(self): # Archive one material to make sure it goes to the bottom of the list. 
archived_material = Material.objects.first() archived_material.is_archived = True archived_material.save() self.client.force_authenticate(user=self.primary_prof) search_url = ( reverse("search-materials-list") + "?organization=" + self.primary_lab.name + "&limit=25" ) response = self.client.get(search_url) self.assertEqual(response.status_code, status.HTTP_200_OK) response_json = response.json() material_count = int(response_json["count"]) # Make sure archived materials are last: self.assertEqual(response_json["results"][-1]["id"], archived_material.id) material_titles = [] for material in response_json["results"]: material_titles.append(material["title"]) self.assertEqual(material_count, len(self.primary_lab.materials.all())) for title in material_titles: self.assertTrue( Material.objects.filter(title=title, organization=self.primary_lab).exists() ) def test_filter_on_category_retrieves_all_materials_of_a_given_category(self): self.client.force_authenticate(user=self.primary_prof) search_url = reverse("search-materials-list") + "?category=" + "MODEL_ORGANISM" response = self.client.get(search_url) self.assertEqual(response.status_code, status.HTTP_200_OK) material_count = int(response.json()["count"]) material_titles = [] for material in response.json()["results"]: material_titles.append(material["title"]) self.assertEqual(material_count, len(Material.objects.filter(category="MODEL_ORGANISM"))) for title in material_titles: self.assertTrue( Material.objects.filter(title=title, category="MODEL_ORGANISM").exists() ) def test_filter_on_organisms_retrieves_all_materials_with_one_organism(self): self.client.force_authenticate(user=self.primary_prof) # Search with one organism name search_url = reverse("search-materials-list") + "?organisms=" + "danio rerio" response = self.client.get(search_url) self.assertEqual(response.status_code, status.HTTP_200_OK) organism_count = int(response.json()["count"]) material_titles = [] for material in response.json()["results"]: material_titles.append(material["title"]) database_organism_count = 0 database_titles = [] for material in Material.objects.all(): if material.organisms: if "Danio rerio" in material.organisms: database_organism_count += 1 database_titles.append(material.title) self.assertEqual(organism_count, database_organism_count) for title in material_titles: self.assertTrue(title in database_titles) def test_filter_on_organisms_retrieves_all_materials_with_multiple_organisms(self): self.client.force_authenticate(user=self.primary_prof) # Search with one organism name search_url = ( reverse("search-materials-list") + "?organisms=" + "danio rerio" + "&organisms=" + "mus musculus" ) response = self.client.get(search_url) self.assertEqual(response.status_code, status.HTTP_200_OK) organism_count = int(response.json()["count"]) material_titles = [] for material in response.json()["results"]: material_titles.append(material["title"]) database_organism_count = 0 database_titles = [] for material in Material.objects.all(): if material.organisms: if ("Danio rerio" in material.organisms) or ("Mus musculus" in material.organisms): database_organism_count += 1 database_titles.append(material.title) self.assertEqual(organism_count, database_organism_count) for title in material_titles: self.assertTrue(title in database_titles) def test_ordering_on_updated_at_succeeds(self): self.client.force_authenticate(user=self.primary_prof) search_url = reverse("search-materials-list") + "?ordering=" + "updated_at" response = self.client.get(search_url) 
self.assertEqual(response.status_code, status.HTTP_200_OK) material_dates = [] for material in response.json()["results"]: date = datetime.datetime.strptime( material["updated_at"], "%Y-%m-%dT%H:%M:%S.%f%z" ).date() material_dates.append(date) self.assertEqual(material_dates, sorted(material_dates)) def test_combine_search_and_filter_and_ordering_succeeds(self): self.client.force_authenticate(user=self.primary_prof) search_url = ( reverse("search-materials-list") + "?search=MODEL_ORGANISM" + "ordering=updated_at" + "has_pre_print=true" ) response = self.client.get(search_url) self.assertEqual(response.status_code, status.HTTP_200_OK) material_dates = [] material_titles = [] for material in response.json()["results"]: material_titles.append(material["title"]) date = datetime.datetime.strptime( material["updated_at"], "%Y-%m-%dT%H:%M:%S.%f%z" ).date() material_dates.append(date) self.assertEqual(material_dates, sorted(material_dates)) for title in material_titles: self.assertTrue( Material.objects.filter( title=title, category="MODEL_ORGANISM", has_pre_print=True ).exists() ) def test_facets_return_number_of_materials(self): self.client.force_authenticate(user=self.primary_prof) # Search with no params search_url = reverse("search-materials-list") response = self.client.get(search_url) self.assertEqual(response.status_code, status.HTTP_200_OK) model_organism_count = int(response.json()["facets"]["category"]["MODEL_ORGANISM"]) self.assertEqual( model_organism_count, len(Material.objects.filter(category="MODEL_ORGANISM")) ) # Search for only danio rerio organisms search_url = reverse("search-materials-list") + "?search=danio rerio" response = self.client.get(search_url) self.assertEqual(response.status_code, status.HTTP_200_OK) model_organism_count = int(response.json()["facets"]["category"]["MODEL_ORGANISM"]) database_count = 0 for material in Material.objects.all(): if material.organisms: if ("Danio rerio" in material.organisms) and ( material.category == "MODEL_ORGANISM" ): database_count += 1 self.assertEqual(model_organism_count, database_count) def test_empty_search_returns_no_results(self): self.client.force_authenticate(user=self.primary_prof) search_url = reverse("search-materials-list") + "?search=" response = self.client.get(search_url) self.assertEqual(response.status_code, status.HTTP_200_OK) material_count = int(response.json()["count"]) self.assertEqual(material_count, 0) class SearchUsersEndpointTestCase(APITestCase): """ Tests /search/users operations. 
""" @classmethod def setUpClass(cls): super(SearchUsersEndpointTestCase, cls).setUpClass() populate_dev_database() # Put newly created materials in the search index call_command("search_index", "-f", "--rebuild") cls.primary_prof = User.objects.get(username="PrimaryProf") @classmethod def tearDownClass(cls): super(SearchUsersEndpointTestCase, cls).tearDownClass() # Rebuild search index with what's actaully in the django database call_command("search_index", "-f", "--rebuild") def test_search_for_name_returns_given_user(self): self.client.force_authenticate(user=self.primary_prof) search_url = ( reverse("search-users-list") + "?search=" + self.primary_prof.first_name + " " + self.primary_prof.last_name ) response = self.client.get(search_url) self.assertEqual(response.status_code, status.HTTP_200_OK) first_result_id = response.json()["results"][0]["id"] self.assertEqual(first_result_id, str(self.primary_prof.id)) def test_order_by_published_name_succeeds(self): self.client.force_authenticate(user=self.primary_prof) search_url = reverse("search-users-list") + "?ordering=published_name" response = self.client.get(search_url) self.assertEqual(response.status_code, status.HTTP_200_OK) user_published_names = [] for user in response.json()["results"]: if user["published_name"]: user_published_names.append(user["published_name"]) self.assertEqual(user_published_names, sorted(user_published_names)) def test_empty_search_returns_no_results(self): self.client.force_authenticate(user=self.primary_prof) search_url = reverse("search-users-list") + "?search=" response = self.client.get(search_url) self.assertEqual(response.status_code, status.HTTP_200_OK) user_count = int(response.json()["count"]) self.assertEqual(user_count, 0) class SearchOrganizationsEndpointTestCase(APITestCase): """ Tests /search/organizations operations. 
""" @classmethod def setUpClass(cls): super(SearchOrganizationsEndpointTestCase, cls).setUpClass() populate_dev_database() # Put newly created materials in the search index call_command("search_index", "-f", "--rebuild") cls.primary_prof = User.objects.get(username="PrimaryProf") cls.primary_lab = Organization.objects.get(name="PrimaryLab") @classmethod def tearDownClass(cls): super(SearchOrganizationsEndpointTestCase, cls).tearDownClass() # Rebuild search index with what's actaully in the django database call_command("search_index", "-f", "--rebuild") def test_search_for_organization_name_returns_given_organization(self): self.client.force_authenticate(user=self.primary_prof) search_url = reverse("search-organizations-list") + "?search=" + self.primary_lab.name response = self.client.get(search_url) self.assertEqual(response.status_code, status.HTTP_200_OK) first_result_id = int(response.json()["results"][0]["id"]) self.assertEqual(first_result_id, self.primary_lab.id) def test_search_for_owner_attribute_returns_related_organizations(self): self.client.force_authenticate(user=self.primary_prof) search_url = reverse("search-organizations-list") + "?search=" + self.primary_prof.email response = self.client.get(search_url) self.assertEqual(response.status_code, status.HTTP_200_OK) organization_count = int(response.json()["count"]) organization_names = [] for org in response.json()["results"]: organization_names.append(org["name"]) self.assertEqual( organization_count, len(Organization.objects.filter(owner=self.primary_prof)) ) for name in organization_names: self.assertTrue( Organization.objects.filter(name=name, owner=self.primary_prof).exists() ) def test_ordering_on_updated_at_succeeds(self): self.client.force_authenticate(user=self.primary_prof) search_url = reverse("search-organizations-list") + "?ordering=" + "updated_at" response = self.client.get(search_url) self.assertEqual(response.status_code, status.HTTP_200_OK) organization_dates = [] for org in response.json()["results"]: date = datetime.datetime.strptime(org["updated_at"], "%Y-%m-%dT%H:%M:%S.%f%z").date() organization_dates.append(date) self.assertEqual(organization_dates, sorted(organization_dates))
35.264059
99
0.662969
14,080
0.976219
0
0
2,113
0.146502
0
0
2,338
0.162102
37ddb9f83521ff471c035e9cd6a4902772e590bf
5,107
py
Python
mindarmour/utils/logger.py
hboshnak/mindarmour
0609a4eaea875a84667bed279add9305752880cc
[ "Apache-2.0" ]
139
2020-03-28T02:37:07.000Z
2022-03-24T15:35:39.000Z
mindarmour/utils/logger.py
hboshnak/mindarmour
0609a4eaea875a84667bed279add9305752880cc
[ "Apache-2.0" ]
2
2020-04-02T09:50:21.000Z
2020-05-09T06:52:57.000Z
mindarmour/utils/logger.py
hboshnak/mindarmour
0609a4eaea875a84667bed279add9305752880cc
[ "Apache-2.0" ]
12
2020-03-28T02:52:42.000Z
2021-07-15T08:05:06.000Z
# Copyright 2019 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Util for log module. """ import logging _LOGGER = logging.getLogger('MA') def _find_caller(): """ Bind findCaller() method, which is used to find the stack frame of the caller so that we can note the source file name, line number and function name. """ return _LOGGER.findCaller() class LogUtil: """ Logging module. Raises: SyntaxError: If create this class. """ _instance = None _logger = None _extra_fmt = ' [%s] [%s] ' def __init__(self): raise SyntaxError('can not instance, please use get_instance.') @staticmethod def get_instance(): """ Get instance of class `LogUtil`. Returns: Object, instance of class `LogUtil`. """ if LogUtil._instance is None: LogUtil._instance = object.__new__(LogUtil) LogUtil._logger = _LOGGER LogUtil._init_logger() return LogUtil._instance @staticmethod def _init_logger(): """ Initialize logger. """ LogUtil._logger.setLevel(logging.WARNING) log_fmt = '[%(levelname)s] %(name)s(%(process)d:%(thread)d,' \ '%(processName)s):%(asctime)s%(message)s' log_fmt = logging.Formatter(log_fmt) # create console handler with a higher log level console_handler = logging.StreamHandler() console_handler.setFormatter(log_fmt) # add the handlers to the logger LogUtil._logger.handlers = [] LogUtil._logger.addHandler(console_handler) LogUtil._logger.propagate = False def set_level(self, level): """ Set the logging level of this logger, level must be an integer or a string. Supported levels are 'NOTSET'(integer: 0), 'ERROR'(integer: 1-40), 'WARNING'('WARN', integer: 1-30), 'INFO'(integer: 1-20) and 'DEBUG'(integer: 1-10). For example, if logger.set_level('WARNING') or logger.set_level(21), then logger.warn() and logger.error() in scripts would be printed while running, while logger.info() or logger.debug() would not be printed. Args: level (Union[int, str]): Level of logger. """ self._logger.setLevel(level) def add_handler(self, handler): """ Add other handler supported by logging module. Args: handler (logging.Handler): Other handler supported by logging module. Raises: ValueError: If handler is not an instance of logging.Handler. """ if isinstance(handler, logging.Handler): self._logger.addHandler(handler) else: raise ValueError('handler must be an instance of logging.Handler,' ' but got {}'.format(type(handler))) def debug(self, tag, msg, *args): """ Log '[tag] msg % args' with severity 'DEBUG'. Args: tag (str): Logger tag. msg (str): Logger message. args (Any): Auxiliary value. """ caller_info = _find_caller() file_info = ':'.join([caller_info[0], str(caller_info[1])]) self._logger.debug(self._extra_fmt + msg, file_info, tag, *args) def info(self, tag, msg, *args): """ Log '[tag] msg % args' with severity 'INFO'. Args: tag (str): Logger tag. msg (str): Logger message. args (Any): Auxiliary value. 
""" caller_info = _find_caller() file_info = ':'.join([caller_info[0], str(caller_info[1])]) self._logger.info(self._extra_fmt + msg, file_info, tag, *args) def warn(self, tag, msg, *args): """ Log '[tag] msg % args' with severity 'WARNING'. Args: tag (str): Logger tag. msg (str): Logger message. args (Any): Auxiliary value. """ caller_info = _find_caller() file_info = ':'.join([caller_info[0], str(caller_info[1])]) self._logger.warning(self._extra_fmt + msg, file_info, tag, *args) def error(self, tag, msg, *args): """ Log '[tag] msg % args' with severity 'ERROR'. Args: tag (str): Logger tag. msg (str): Logger message. args (Any): Auxiliary value. """ caller_info = _find_caller() file_info = ':'.join([caller_info[0], str(caller_info[1])]) self._logger.error(self._extra_fmt + msg, file_info, tag, *args)
32.119497
91
0.595457
4,204
0.823184
0
0
1,013
0.198355
0
0
2,959
0.579401
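A minimal usage sketch for the LogUtil singleton above; the import path follows the file location (mindarmour/utils/logger.py) and the tag string is an arbitrary example.

import logging

from mindarmour.utils.logger import LogUtil

LOGGER = LogUtil.get_instance()
LOGGER.set_level('INFO')                 # string level names or logging constants both work

TAG = 'Demo'
LOGGER.info(TAG, 'loaded %d samples', 128)
LOGGER.error(TAG, 'failed to open %s', 'config.yaml')

# Extra handlers (e.g. a log file) can be attached through add_handler:
LOGGER.add_handler(logging.FileHandler('run.log'))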
37de891f427c0291be7aba179849ea2f6a86e5c6
281
py
Python
Python/Programming Basics/Simple Calculations/17. Daily Earnings.py
teodoramilcheva/softuni-software-engineering
98dc9faa66f42570f6538fd7ef186d2bd1d39bff
[ "MIT" ]
null
null
null
Python/Programming Basics/Simple Calculations/17. Daily Earnings.py
teodoramilcheva/softuni-software-engineering
98dc9faa66f42570f6538fd7ef186d2bd1d39bff
[ "MIT" ]
null
null
null
Python/Programming Basics/Simple Calculations/17. Daily Earnings.py
teodoramilcheva/softuni-software-engineering
98dc9faa66f42570f6538fd7ef186d2bd1d39bff
[ "MIT" ]
null
null
null
workdays = float(input())
daily_tips = float(input())
exchange_rate = float(input())

salary = workdays * daily_tips
annual_income = salary * 12 + salary * 2.5
net_income = annual_income - annual_income * 25 / 100
result = net_income / 365 * exchange_rate

print('%.2f' % result)
23.416667
53
0.711744
0
0
0
0
0
0
0
0
6
0.021352
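A worked example of the calculation above with assumed inputs (21 workdays, 26.00 in daily tips, exchange rate 1.95):

workdays, daily_tips, exchange_rate = 21.0, 26.0, 1.95

salary = workdays * daily_tips                          # 546.00 per month
annual_income = salary * 12 + salary * 2.5              # 7917.00 (14.5 salaries)
net_income = annual_income - annual_income * 25 / 100   # 5937.75 after 25% tax
result = net_income / 365 * exchange_rate               # per-day amount, converted

print('%.2f' % result)                                  # 31.72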
37e09e1c599fd41f037cb54000938dba1d33127b
7,483
py
Python
bert_rerannker_eval.py
satya77/transformer_rankers
0d2c20bd26041d887fb65102020a0b609ec967fc
[ "MIT" ]
null
null
null
bert_rerannker_eval.py
satya77/transformer_rankers
0d2c20bd26041d887fb65102020a0b609ec967fc
[ "MIT" ]
null
null
null
bert_rerannker_eval.py
satya77/transformer_rankers
0d2c20bd26041d887fb65102020a0b609ec967fc
[ "MIT" ]
null
null
null
from transformer_rankers.trainers import transformer_trainer from transformer_rankers.datasets import dataset, preprocess_scisumm_ranked from transformer_rankers.eval import results_analyses_tools from transformers import BertTokenizer, BertForSequenceClassification from sacred.observers import FileStorageObserver from sacred import Experiment import numpy as np import torch import pandas as pd import argparse import logging import sys ex = Experiment('BERT-ranker experiment') logging.basicConfig( level=logging.INFO, format="%(asctime)s [%(levelname)s] %(message)s", handlers=[ logging.StreamHandler(sys.stdout) ] ) @ex.main def run_experiment(args): args.run_id = str(ex.current_run._id) tokenizer = BertTokenizer.from_pretrained('bert-base-cased') train, valid, test = preprocess_scisumm_ranked.transform_to_dfs( args.path_to_ranked_file,args.path_to_ranked_test,args.path_to_ranked_dev) # Choose the negative candidate sampler ns_train=None ns_val=None # Create the loaders for the datasets, with the respective negative samplers dataloader = dataset.QueryDocumentDataLoader(train, valid, test, tokenizer, ns_train, ns_val, 'classification', args.val_batch_size, args.val_batch_size, 512, 0, args.data_folder + "/scisumm_ranked") with_ranked_list=True train_loader, val_loader, test_loader = dataloader.get_pytorch_dataloaders(with_ranked_list) # Instantiate transformer model to be used model = BertForSequenceClassification.from_pretrained('bert-base-cased') model.resize_token_embeddings(len(dataloader.tokenizer)) e = torch.load(args.model_dir) model.load_state_dict(e) model.eval() # Instantiate trainer that handles fitting. trainer = transformer_trainer.TransformerTrainer(model, train_loader, val_loader, test_loader, 0, "classification", tokenizer, False, 0, 0 ,0, 0) # Predict for test logging.info("Predicting") preds, labels, doc_ids, all_queries, preds_without_acc = trainer.test() # res = results_analyses_tools.evaluate_and_aggregate(preds, labels, ['R_10@1', # 'R_10@2', # 'R_10@5', # 'R_2@1', # 'accuracy_0.3', # 'accuracy_0.3_upto_1', # 'precision_0.3', # 'recall_0.3', # 'f_score_0.3', # 'accuracy_0.4', # 'accuracy_0.4_upto_1', # 'precision_0.4', # 'recall_0.4', # 'f_score_0.4', # 'accuracy_0.5', # 'accuracy_0.5_upto_1', # 'precision_0.5', # 'recall_0.5', # 'f_score_0.5' # ]) # for metric, v in res.items(): # logging.info("Test {} : {:4f}".format(metric, v)) # # Saving predictions and labels to a file # max_preds_column = max([len(l) for l in preds]) # preds_df = pd.DataFrame(preds, columns=["prediction_" + str(i) for i in range(max_preds_column)]) # preds_df.to_csv(args.output_dir + "/" + args.run_id + "/predictions.csv", index=False) # # labels_df = pd.DataFrame(labels, columns=["label_" + str(i) for i in range(max_preds_column)]) # labels_df.to_csv(args.output_dir + "/" + args.run_id + "/labels.csv", index=False) # # predict on the test set # preds, labels, doc_ids, all_queries, preds_without_acc = trainer.test() new_preds=list((np.array(preds_without_acc)> 0.4).astype(int)) d = {'query': all_queries, 'doc_id': doc_ids,'label': new_preds, 'similiarity':preds_without_acc} df_doc_ids = pd.DataFrame(d) import pdb pdb.set_trace() df_doc_ids = df_doc_ids.groupby('query').agg(list).reset_index() # df_doc_ids_ones = df_doc_ids[df_doc_ids['label']==1] # df_doc_ids_ones = df_doc_ids_ones.groupby('query').agg(list).reset_index() # df_doc_ids_non_ones = df_doc_ids.groupby('query').agg(list).reset_index() # new_df=[] # for i,row in df_doc_ids_non_ones.iterrows(): # if all([v == 0 for v in row['label']]): # 
highest_value=[x for _, x in sorted(zip(row['similiarity'], row['doc_id']), key=lambda pair: pair[0])] # highest_value_sim=[x for x in sorted(row['similiarity'])] # # row['label'] = [1] # row[ 'doc_id'] = [highest_value[0]] # row[ 'similiarity'] = [highest_value_sim[0]] # # new_df.append(row) # result = pd.concat([df_doc_ids,pd.DataFrame(new_df)]) df_doc_ids.to_csv(args.output_dir + "/" + args.run_id + "/doc_ids_test_all_results.csv", index=False, sep='\t') return trainer.best_ndcg def main(): parser = argparse.ArgumentParser() parser.add_argument("--data_folder", default=None, type=str, required=True, help="the folder containing data") parser.add_argument("--model_dir", default=None, type=str, required=True, help="the folder that the model is saved in.") parser.add_argument("--val_batch_size", default=32, type=int, required=False, help="Validation and test batch size.") parser.add_argument("--path_to_ranked_file", default=None, type=str, required=False, help="if there is a ranked file this will be the path to it. ") parser.add_argument("--path_to_ranked_test", default=None, type=str, required=False, help="if there is a ranked test file this will be the path to it. ") parser.add_argument("--path_to_ranked_dev", default=None, type=str, required=False, help="if there is a ranked test file this will be the path to it. ") parser.add_argument("--output_dir", default=None, type=str, required=True, help="the folder to output predictions") args = parser.parse_args() args.sacred_ex = ex ex.observers.append(FileStorageObserver(args.output_dir)) ex.add_config({'args': args}) return ex.run() if __name__ == "__main__": main()
47.967949
116
0.526928
0
0
0
0
5,447
0.727917
0
0
3,938
0.52626
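The evaluation script above turns raw reranker scores into binary relevance labels with a fixed 0.4 threshold before grouping document ids per query; a small illustration of that step with made-up scores:

import numpy as np

preds_without_acc = [0.12, 0.55, 0.40, 0.73]                      # assumed raw scores
new_preds = list((np.array(preds_without_acc) > 0.4).astype(int))
print(new_preds)                                                  # [0, 1, 0, 1]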
37e0cdbd73052a4cfa66dd46c357ae89f7505242
424
py
Python
python/p21.py
tonyfg/project_euler
3a9e6352a98faaa506056b42160c91bffe93838c
[ "WTFPL" ]
null
null
null
python/p21.py
tonyfg/project_euler
3a9e6352a98faaa506056b42160c91bffe93838c
[ "WTFPL" ]
null
null
null
python/p21.py
tonyfg/project_euler
3a9e6352a98faaa506056b42160c91bffe93838c
[ "WTFPL" ]
null
null
null
#Q: Evaluate the sum of all the amicable numbers under 10000.
#A: 31626

def divisor_sum(n):
    return sum([i for i in xrange(1, n//2+1) if not n%i])


def sum_amicable(start, end):
    sum = 0
    for i in xrange(start, end):
        tmp = divisor_sum(i)
        if i == divisor_sum(tmp) and i != tmp:
            sum += i+tmp
    return sum/2  # each pair is found twice, so divide by 2 ;)


print sum_amicable(1, 10000)
26.5
61
0.610849
0
0
0
0
0
0
0
0
114
0.268868
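The divisor_sum above tries every candidate up to n//2, which is O(n) per number. A hedged Python 3 sketch of the same amicable-pair computation using factor pairs up to sqrt(n) (an illustrative port, not part of the original repository):

import math

def divisor_sum(n):
    # Sum of proper divisors via factor pairs up to sqrt(n).
    total = 1 if n > 1 else 0
    for i in range(2, math.isqrt(n) + 1):
        if n % i == 0:
            total += i
            if n // i != i:
                total += n // i
    return total

def sum_amicable(start, end):
    total = 0
    for i in range(start, end):
        partner = divisor_sum(i)
        if i == divisor_sum(partner) and i != partner:
            total += i + partner
    return total // 2  # each pair is counted twice

print(sum_amicable(1, 10000))  # 31626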
37e13b4fd890037fc4d7192b2e7467ef9a1cb201
4,033
py
Python
check.py
Dysoncat/student-services-slas-chat-bot
5d9c7105cef640c34018d260249b6a05b959e73f
[ "MIT" ]
null
null
null
check.py
Dysoncat/student-services-slas-chat-bot
5d9c7105cef640c34018d260249b6a05b959e73f
[ "MIT" ]
null
null
null
check.py
Dysoncat/student-services-slas-chat-bot
5d9c7105cef640c34018d260249b6a05b959e73f
[ "MIT" ]
null
null
null
import long_responses as long # Returns the probability of a message matching the responses that we have def messageProb(userMessage, recognizedWords, isSingleResponse=False, requiredWords=[]): messageCertainty = 0 hasRequiredWords = True # Counts how many words are present in each predefined message for word in userMessage: if word in recognizedWords: messageCertainty += 1 # Calculates the percent of recognized words in a user message percentage = float(messageCertainty) / float(len(recognizedWords)) # Checks that the required words are in the string for word in requiredWords: if word not in userMessage: hasRequiredWords = False break # Must either have the required words, or be a single response if hasRequiredWords or isSingleResponse: return int(percentage * 100) else: return 0 # Checks all the responses using the probability of the messages def checkAllMesages(message): highest_prob_list = {} ignore_list = {} def ignoreResponse(bot_response, list_of_words, single_response=False, required_words=[]): nonlocal ignore_list ignore_list[bot_response] = messageProb( message, list_of_words, single_response, required_words) # Simplifies response creation / adds it to the dict def response(bot_response, list_of_words, single_response=False, required_words=[]): nonlocal highest_prob_list highest_prob_list[bot_response] = messageProb( message, list_of_words, single_response, required_words) # Responses ------------------------------------------------------------------------------------------------------- response('Hello!', ['hello', 'hi', 'hey', 'sup', 'heyo'], single_response=True) response('See you!', ['bye', 'goodbye'], single_response=True) response('I\'m doing fine, and you?', [ 'how', 'are', 'you', 'doing'], required_words=['how', "you"]) response('You\'re welcome!', ['thank', 'thanks'], single_response=True) response("You can borrow a computer from room 315", ["how", "do", "i", "borrow", "a", "computer"], required_words=["borrow", "computer"]) response("You can apply for a new locker key in room 310", ["how", "can", "i", "apply", "for", "a", "new", "locker", "key"], ["new", "locker", "key"]) response("The guidance office is on the third floor", [ "where", "is", "the", "guidance", "office"], required_words=["guidance", "office"]) response("You can apply for the ID in room 310", [ "how", "can", "i", "get", "new", "id"], ["new", "id"]) response("A student ID costs 25 RMB, and it has to be in cash", [ "how", "much", "does", "a", "new", "id", "cost"], ["id", "cost"]) response("The secondary computer classroom is on the fifth floor, and is number 521", [ "where", "is", "the", "secondary", "computer", "classroom"], ["secondary", "computer"]) response("Don't worry about it.", ["sorry", "sry"], ["sorry", "sry"]) # Ignored Responses ignoreResponse("Good to hear", [ "i", "doing", "good", "fine", "ok"], required_words=["i", "good"]) best_ignore_match = max(ignore_list, key=ignore_list.get) # Longer responses response(long.R_ADVICE, ['give', 'advice'], required_words=['advice']) response(long.R_EATING, ['what', 'you', 'eat'], required_words=['you', 'eat']) response(long.R_SWEARING, [ "fuck", "shit", "motherfucker", "fuck", "you"]) best_match = max(highest_prob_list, key=highest_prob_list.get) # DEBUGGING TOOLS IF NEEDED print(highest_prob_list) print("") print( f'Best match = {best_match} | Score: {highest_prob_list[best_match]}') if highest_prob_list[best_match] < ignore_list[best_ignore_match]: return best_ignore_match elif highest_prob_list[best_match] < 1: return long.unknown() else: return 
best_match
40.33
154
0.623109
0
0
0
0
0
0
0
0
1,623
0.40243
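For reference, how messageProb above scores a tokenized user message (the example sentence is made up):

user_message = "how are you doing today".split()

score = messageProb(user_message,
                    ['how', 'are', 'you', 'doing'],
                    requiredWords=['how', 'you'])
print(score)  # 100 -- all four recognized words occur and both required words are present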
37e16ab061f36f12398b74b8a1440f3cc6768529
1,446
py
Python
image_predictor/utils.py
jdalzatec/streamlit-manizales-tech-talks
619af5edc79a22ed4cc9f50dd2d0379399357549
[ "MIT" ]
2
2022-02-05T15:48:55.000Z
2022-02-05T15:57:40.000Z
image_predictor/utils.py
jdalzatec/streamlit-manizales-tech-talks
619af5edc79a22ed4cc9f50dd2d0379399357549
[ "MIT" ]
null
null
null
image_predictor/utils.py
jdalzatec/streamlit-manizales-tech-talks
619af5edc79a22ed4cc9f50dd2d0379399357549
[ "MIT" ]
4
2022-02-05T15:49:02.000Z
2022-02-05T15:58:14.000Z
from io import StringIO

import numpy as np
from h5py import File
from keras.models import load_model as keras_load_model
from PIL import Image, ImageOps


def predict(image, model):
    # Create the array of the right shape to feed into the keras model
    # The 'length' or number of images you can put into the array is
    # determined by the first position in the shape tuple, in this case 1.
    data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)

    # Replace this with the path to your image
    image = Image.open(image)

    # resize the image to a 224x224 with the same strategy as in TM2:
    # resizing the image to be at least 224x224 and then cropping from the center
    size = (224, 224)
    image = ImageOps.fit(image, size, Image.ANTIALIAS)

    # turn the image into a numpy array
    image_array = np.asarray(image)

    # Normalize the image
    normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1

    # Load the image into the array
    data[0] = normalized_image_array

    # run the inference
    prediction = model.predict(data)
    return prediction[0]


def read_labels(labels_file):
    labels = []
    lines = StringIO(labels_file.getvalue().decode()).readlines()
    for line in lines:
        _, *remaining = line.split()
        label = " ".join(remaining).strip()
        labels.append(label)
    return labels


def load_model(model_file):
    return keras_load_model(File(model_file))
32.133333
81
0.697095
0
0
0
0
0
0
0
0
493
0.340941
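A hypothetical wiring of the helpers above; the file names and the "index label" format of labels.txt are assumptions. read_labels expects an object exposing getvalue() (such as a Streamlit upload), so a BytesIO wrapper stands in for it here.

import io

with open("labels.txt", "rb") as f:
    labels = read_labels(io.BytesIO(f.read()))   # BytesIO provides .getvalue()

model = load_model("keras_model.h5")
scores = predict("test_photo.jpg", model)        # one score per class

best = int(scores.argmax())
print(labels[best], float(scores[best]))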
37e2c12beb329286ae2d567a8dedde433414f28a
417
py
Python
client/setup.py
emilywoods/docker-workshop
46fef25ed06ab33f653bebffdd837ee4cc31c373
[ "MIT" ]
1
2022-03-21T07:32:36.000Z
2022-03-21T07:32:36.000Z
client/setup.py
emilywoods/docker-workshop
46fef25ed06ab33f653bebffdd837ee4cc31c373
[ "MIT" ]
null
null
null
client/setup.py
emilywoods/docker-workshop
46fef25ed06ab33f653bebffdd837ee4cc31c373
[ "MIT" ]
null
null
null
from setuptools import setup

setup(
    name="workshop-client",
    install_requires=["flask==1.1.1", "requests==2.22.0"],
    python_requires=">=3.7",
    classifiers=[
        "Development Status :: 4 - Beta",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
    ],
)
27.8
58
0.580336
0
0
0
0
0
0
0
0
243
0.582734
37e304a5ab34e95d070c76d96d91559914adff14
561
py
Python
tests/facebook/models/test_photo.py
Socian-Ltd/python-facebook-1
e9a4f626b37541103c9534a29342ef6033c09c06
[ "Apache-2.0" ]
2
2021-03-16T02:58:10.000Z
2021-03-16T16:53:23.000Z
tests/facebook/models/test_photo.py
nedsons/python-facebook
bf2b4a70ef0e0a67a142f5856586ea318f9807ea
[ "Apache-2.0" ]
null
null
null
tests/facebook/models/test_photo.py
nedsons/python-facebook
bf2b4a70ef0e0a67a142f5856586ea318f9807ea
[ "Apache-2.0" ]
1
2021-06-02T07:15:35.000Z
2021-06-02T07:15:35.000Z
import json
import unittest

import pyfacebook.models as models


class PhotoModelTest(unittest.TestCase):
    BASE_PATH = "testdata/facebook/models/photos/"

    with open(BASE_PATH + 'photo.json', 'rb') as f:
        PHOTO_INFO = json.loads(f.read().decode('utf-8'))

    def testPhoto(self):
        m = models.Photo.new_from_json_dict(self.PHOTO_INFO)
        self.assertEqual(m.id, "166370841591183")
        self.assertEqual(m.album.id, "108824087345859")
        self.assertEqual(len(m.images), 8)
        self.assertEqual(m.webp_images[0].height, 800)
28.05
60
0.686275
494
0.88057
0
0
0
0
0
0
91
0.16221
37e4a1783cf1d5a9318a74c7d860d1f54e64ee4e
5,837
py
Python
airbyte-integrations/connectors/source-scaffold-source-python/source_scaffold_source_python/source.py
curanaj/airbyte-dbt-demo
f6b8ccd8f8e57b7ea84caf814b14d836338e8007
[ "MIT" ]
null
null
null
airbyte-integrations/connectors/source-scaffold-source-python/source_scaffold_source_python/source.py
curanaj/airbyte-dbt-demo
f6b8ccd8f8e57b7ea84caf814b14d836338e8007
[ "MIT" ]
null
null
null
airbyte-integrations/connectors/source-scaffold-source-python/source_scaffold_source_python/source.py
curanaj/airbyte-dbt-demo
f6b8ccd8f8e57b7ea84caf814b14d836338e8007
[ "MIT" ]
null
null
null
# MIT License # # Copyright (c) 2020 Airbyte # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import json from datetime import datetime from typing import Dict, Generator from airbyte_cdk.logger import AirbyteLogger from airbyte_cdk.models import ( AirbyteCatalog, AirbyteConnectionStatus, AirbyteMessage, AirbyteRecordMessage, AirbyteStream, ConfiguredAirbyteCatalog, Status, Type, ) from airbyte_cdk.sources import Source class SourceScaffoldSourcePython(Source): def check(self, logger: AirbyteLogger, config: json) -> AirbyteConnectionStatus: """ Tests if the input configuration can be used to successfully connect to the integration e.g: if a provided Stripe API token can be used to connect to the Stripe API. :param logger: Logging object to display debug/info/error to the logs (logs will not be accessible via airbyte UI if they are not passed to this logger) :param config: Json object containing the configuration of this source, content of this json is as specified in the properties of the spec.json file :return: AirbyteConnectionStatus indicating a Success or Failure """ try: # Not Implemented return AirbyteConnectionStatus(status=Status.SUCCEEDED) except Exception as e: return AirbyteConnectionStatus(status=Status.FAILED, message=f"An exception occurred: {str(e)}") def discover(self, logger: AirbyteLogger, config: json) -> AirbyteCatalog: """ Returns an AirbyteCatalog representing the available streams and fields in this integration. For example, given valid credentials to a Postgres database, returns an Airbyte catalog where each postgres table is a stream, and each table column is a field. :param logger: Logging object to display debug/info/error to the logs (logs will not be accessible via airbyte UI if they are not passed to this logger) :param config: Json object containing the configuration of this source, content of this json is as specified in the properties of the spec.json file :return: AirbyteCatalog is an object describing a list of all available streams in this source. 
A stream is an AirbyteStream object that includes: - its stream name (or table name in the case of Postgres) - json_schema providing the specifications of expected schema for this stream (a list of columns described by their names and types) """ streams = [] stream_name = "TableName" # Example json_schema = { # Example "$schema": "http://json-schema.org/draft-07/schema#", "type": "object", "properties": {"columnName": {"type": "string"}}, } # Not Implemented streams.append(AirbyteStream(name=stream_name, json_schema=json_schema)) return AirbyteCatalog(streams=streams) def read( self, logger: AirbyteLogger, config: json, catalog: ConfiguredAirbyteCatalog, state: Dict[str, any] ) -> Generator[AirbyteMessage, None, None]: """ Returns a generator of the AirbyteMessages generated by reading the source with the given configuration, catalog, and state. :param logger: Logging object to display debug/info/error to the logs (logs will not be accessible via airbyte UI if they are not passed to this logger) :param config: Json object containing the configuration of this source, content of this json is as specified in the properties of the spec.json file :param catalog: The input catalog is a ConfiguredAirbyteCatalog which is almost the same as AirbyteCatalog returned by discover(), but in addition, it's been configured in the UI! For each particular stream and field, there may have been provided with extra modifications such as: filtering streams and/or columns out, renaming some entities, etc :param state: When a Airbyte reads data from a source, it might need to keep a checkpoint cursor to resume replication in the future from that saved checkpoint. This is the object that is provided with state from previous runs and avoid replicating the entire set of data everytime. :return: A generator that produces a stream of AirbyteRecordMessage contained in AirbyteMessage object. """ stream_name = "TableName" # Example data = {"columnName": "Hello World"} # Example # Not Implemented yield AirbyteMessage( type=Type.RECORD, record=AirbyteRecordMessage(stream=stream_name, data=data, emitted_at=int(datetime.now().timestamp()) * 1000), )
47.072581
122
0.704471
4,367
0.748158
1,818
0.311461
0
0
0
0
4,314
0.739078
37e4f2c4b90817314cd77bae4c4800a1c5a1cfd8
11,933
py
Python
alerter/src/monitorables/nodes/chainlink_node.py
SimplyVC/panic
2f5c327ea0d14b6a49dc8f4599a255048bc2ff6d
[ "Apache-2.0" ]
41
2019-08-23T12:40:42.000Z
2022-03-28T11:06:02.000Z
alerter/src/monitorables/nodes/chainlink_node.py
SimplyVC/panic
2f5c327ea0d14b6a49dc8f4599a255048bc2ff6d
[ "Apache-2.0" ]
147
2019-08-30T22:09:48.000Z
2022-03-30T08:46:26.000Z
alerter/src/monitorables/nodes/chainlink_node.py
SimplyVC/panic
2f5c327ea0d14b6a49dc8f4599a255048bc2ff6d
[ "Apache-2.0" ]
3
2019-09-03T21:12:28.000Z
2021-08-18T14:27:56.000Z
from datetime import datetime from typing import Optional, Dict, List, Union from schema import Schema, Or from src.monitorables.nodes.node import Node from src.utils.exceptions import InvalidDictSchemaException class ChainlinkNode(Node): def __init__(self, node_name: str, node_id: str, parent_id: str) -> None: super().__init__(node_name, node_id, parent_id) # Metrics self._went_down_at_prometheus = None self._current_height = None self._total_block_headers_received = None self._max_pending_tx_delay = None self._process_start_time_seconds = None self._total_gas_bumps = None self._total_gas_bumps_exceeds_limit = None self._no_of_unconfirmed_txs = None self._total_errored_job_runs = None self._current_gas_price_info = { 'percentile': None, 'price': None, } self._eth_balance_info = {} # This variable stores the url of the source used to get prometheus node # data. Note that this had to be done because multiple prometheus # sources can be associated with the same node, where at the same time # only one source is available, and sources switch from time to time. self._last_prometheus_source_used = None # This stores the timestamp of the last successful monitoring round. self._last_monitored_prometheus = None @property def is_down_prometheus(self) -> bool: return self._went_down_at_prometheus is not None @property def went_down_at_prometheus(self) -> Optional[float]: return self._went_down_at_prometheus @property def current_height(self) -> Optional[int]: return self._current_height @property def total_block_headers_received(self) -> Optional[int]: return self._total_block_headers_received @property def max_pending_tx_delay(self) -> Optional[int]: return self._max_pending_tx_delay @property def process_start_time_seconds(self) -> Optional[float]: return self._process_start_time_seconds @property def total_gas_bumps(self) -> Optional[int]: return self._total_gas_bumps @property def total_gas_bumps_exceeds_limit(self) -> Optional[int]: return self._total_gas_bumps_exceeds_limit @property def no_of_unconfirmed_txs(self) -> Optional[int]: return self._no_of_unconfirmed_txs @property def total_errored_job_runs(self) -> Optional[int]: return self._total_errored_job_runs @property def current_gas_price_info(self) -> Dict[str, Optional[float]]: return self._current_gas_price_info @property def eth_balance_info(self) -> Dict[str, Union[str, float]]: return self._eth_balance_info @property def last_prometheus_source_used(self) -> Optional[str]: return self._last_prometheus_source_used @property def last_monitored_prometheus(self) -> Optional[float]: return self._last_monitored_prometheus @staticmethod def get_int_prometheus_metric_attributes() -> List[str]: """ :return: A list of all variable names representing integer prometheus : metrics. """ return [ 'current_height', 'total_block_headers_received', 'max_pending_tx_delay', 'total_gas_bumps', 'total_gas_bumps_exceeds_limit', 'no_of_unconfirmed_txs', 'total_errored_job_runs' ] @staticmethod def get_float_prometheus_metric_attributes() -> List[str]: """ :return: A list of all variable names representing float prometheus : metrics. """ return [ 'went_down_at_prometheus', 'process_start_time_seconds', 'last_monitored_prometheus' ] @staticmethod def get_dict_prometheus_metric_attributes() -> List[str]: """ :return: A list of all variable names representing dict prometheus : metrics. 
""" return ['current_gas_price_info', 'eth_balance_info'] @staticmethod def get_str_prometheus_metric_attributes() -> List[str]: """ :return: A list of all variable names representing string prometheus : metrics. """ return ['last_prometheus_source_used'] def get_all_prometheus_metric_attributes(self) -> List[str]: """ :return: A list of all variable names representing prometheus metrics """ str_prometheus_metric_attributes = \ self.get_str_prometheus_metric_attributes() int_prometheus_metric_attributes = \ self.get_int_prometheus_metric_attributes() float_prometheus_metric_attributes = \ self.get_float_prometheus_metric_attributes() dict_prometheus_metric_attributes = \ self.get_dict_prometheus_metric_attributes() return [ *str_prometheus_metric_attributes, *int_prometheus_metric_attributes, *float_prometheus_metric_attributes, *dict_prometheus_metric_attributes ] def get_int_metric_attributes(self) -> List[str]: """ :return: A list of all variable names representing int metrics. """ int_prometheus_metric_attributes = \ self.get_int_prometheus_metric_attributes() return [*int_prometheus_metric_attributes] def get_float_metric_attributes(self) -> List[str]: """ :return: A list of all variable names representing float metrics. """ float_prometheus_metric_attributes = \ self.get_float_prometheus_metric_attributes() return [*float_prometheus_metric_attributes] def get_dict_metric_attributes(self) -> List[str]: """ :return: A list of all variable names representing dict metrics. """ dict_prometheus_metric_attributes = \ self.get_dict_prometheus_metric_attributes() return [*dict_prometheus_metric_attributes] def get_str_metric_attributes(self) -> List[str]: """ :return: A list of all variable names representing str metrics. """ str_prometheus_metric_attributes = \ self.get_str_prometheus_metric_attributes() return [*str_prometheus_metric_attributes] def get_all_metric_attributes(self) -> List[str]: """ :return: A list of all variable names representing metrics """ prometheus_metric_attributes = \ self.get_all_prometheus_metric_attributes() return [*prometheus_metric_attributes] def set_went_down_at_prometheus( self, went_down_at_prometheus: Optional[float]) -> None: self._went_down_at_prometheus = went_down_at_prometheus def set_prometheus_as_down(self, downtime: Optional[float]) -> None: """ This function sets the node's prometheus interface as down. It sets the time that the interface was initially down to the parameter 'downtime' if it is not None, otherwise it sets it to the current timestamp. :param downtime: :return: """ if downtime is None: self.set_went_down_at_prometheus(datetime.now().timestamp()) else: self.set_went_down_at_prometheus(downtime) def set_prometheus_as_up(self) -> None: """ This function sets a node's prometheus interface as up. A node's interface is said to be up if went_down_at_prometheus is None. 
        :return: None
        """
        self.set_went_down_at_prometheus(None)

    def set_current_height(self, new_height: Optional[int]) -> None:
        self._current_height = new_height

    def set_total_block_headers_received(
            self, new_total_block_headers_received: Optional[int]) -> None:
        self._total_block_headers_received = new_total_block_headers_received

    def set_max_pending_tx_delay(
            self, new_max_pending_tx_delay: Optional[int]) -> None:
        self._max_pending_tx_delay = new_max_pending_tx_delay

    def set_process_start_time_seconds(
            self, new_process_start_time_seconds: Optional[float]) -> None:
        self._process_start_time_seconds = new_process_start_time_seconds

    def set_total_gas_bumps(self, new_total_gas_bumps: Optional[int]) -> None:
        self._total_gas_bumps = new_total_gas_bumps

    def set_total_gas_bumps_exceeds_limit(
            self, new_total_gas_bumps_exceeds_limit: Optional[int]) -> None:
        self._total_gas_bumps_exceeds_limit = new_total_gas_bumps_exceeds_limit

    def set_no_of_unconfirmed_txs(
            self, new_no_of_unconfirmed_txs: Optional[int]) -> None:
        self._no_of_unconfirmed_txs = new_no_of_unconfirmed_txs

    def set_total_errored_job_runs(
            self, new_total_errored_job_runs: Optional[int]) -> None:
        self._total_errored_job_runs = new_total_errored_job_runs

    def set_current_gas_price_info(self, new_percentile: Optional[float],
                                   new_price: Optional[float]) -> None:
        """
        This method sets the current_gas_price_info dict based on the new
        percentile and price. This is done in this way to protect the Dict
        schema.
        :param new_percentile: The new percentile to be stored
        :param new_price: The new gas price to be stored
        :return: None
        """
        self._current_gas_price_info['percentile'] = new_percentile
        self._current_gas_price_info['price'] = new_price

    @staticmethod
    def _new_eth_balance_info_valid(new_eth_balance_info: Dict) -> bool:
        """
        This method checks that the new eth_balance_info dict obeys the
        required schema.
        :param new_eth_balance_info: The dict to check
        :return: True if the dict obeys the required schema
               : False otherwise
        """
        schema = Schema(Or({
            'address': str,
            'balance': float,
            'latest_usage': float,
        }, {}))
        return schema.is_valid(new_eth_balance_info)

    def set_eth_balance_info(
            self, new_eth_balance_info: Dict[str, Union[str, float]]) -> None:
        """
        This method sets the new_eth_balance_info. It first checks that the
        new dict obeys the required schema. If not, an
        InvalidDictSchemaException is raised.
        :param new_eth_balance_info: The new eth_balance_info to store.
        :return: None
        """
        if self._new_eth_balance_info_valid(new_eth_balance_info):
            self._eth_balance_info = new_eth_balance_info
        else:
            raise InvalidDictSchemaException('new_eth_balance_info')

    def set_last_prometheus_source_used(
            self, new_last_prometheus_source_used: Optional[str]) -> None:
        self._last_prometheus_source_used = new_last_prometheus_source_used

    def set_last_monitored_prometheus(
            self, new_last_monitored_prometheus: Optional[float]) -> None:
        self._last_monitored_prometheus = new_last_monitored_prometheus

    def reset(self) -> None:
        """
        This method resets all metrics to their initial state
        :return: None
        """
        self.set_went_down_at_prometheus(None)
        self.set_current_height(None)
        self.set_total_block_headers_received(None)
        self.set_max_pending_tx_delay(None)
        self.set_process_start_time_seconds(None)
        self.set_total_gas_bumps(None)
        self.set_total_gas_bumps_exceeds_limit(None)
        self.set_no_of_unconfirmed_txs(None)
        self.set_total_errored_job_runs(None)
        self.set_current_gas_price_info(None, None)
        self.set_eth_balance_info({})
        self.set_last_prometheus_source_used(None)
        self.set_last_monitored_prometheus(None)
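For orientation, here is a brief usage sketch of the class above. The node name, ids and metric values are made-up placeholders, and it assumes the Node base class and InvalidDictSchemaException import as they do in this module.

from datetime import datetime

node = ChainlinkNode("chainlink-node-1", "node_1234", "chain_abcd")  # placeholder ids

# Prometheus metrics are stored through explicit setters.
node.set_current_height(15_000_000)
node.set_eth_balance_info({"address": "0xabc", "balance": 12.5, "latest_usage": 0.3})
node.set_last_monitored_prometheus(datetime.now().timestamp())

assert not node.is_down_prometheus
node.set_prometheus_as_down(None)  # None means "record the current timestamp"
assert node.is_down_prometheus
node.set_prometheus_as_up()

# A dict that violates the expected schema is rejected.
try:
    node.set_eth_balance_info({"address": "0xabc"})  # missing balance / latest_usage
except InvalidDictSchemaException:
    pass

node.reset()  # all metrics back to their initial None/empty state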
37.407524
80
0.67636
11,716
0.981815
0
0
3,409
0.285678
0
0
3,190
0.267326
37e57878ec351c326eab8dff88096e5a9b705681
8,983
py
Python
experiments/vgg16/VGG16_utils.py
petrapoklukar/DCA
e5b3f3481433306a4b33e712272f8bbf5e9d05ce
[ "MIT" ]
2
2022-02-14T15:54:22.000Z
2022-02-15T18:43:36.000Z
experiments/vgg16/VGG16_utils.py
petrapoklukar/DCA
e5b3f3481433306a4b33e712272f8bbf5e9d05ce
[ "MIT" ]
null
null
null
experiments/vgg16/VGG16_utils.py
petrapoklukar/DCA
e5b3f3481433306a4b33e712272f8bbf5e9d05ce
[ "MIT" ]
null
null
null
import pickle import numpy as np import os def _analyze_query_point_assignment( query_data_dict: dict, init_Rdata_dict: dict, init_Edata_dict: dict, num_R: int, query_point_assignment_array: np.ndarray, root: str, n_points_to_copy=50, ): """ Analyzes and visualizes qDCA results. :param query_data_dict: raw query data. :param init_Rdata_dict: raw R data. :param init_Edata_dict: raw E data. :param num_R: total number of R points. :param query_point_assignment_array: query point assignments results. :param root: root directory of the experiment. :param n_points_to_copy: number of images to save. :return: accuracy of qDCA assignments; list of (R, query) points with same label; list of (R, query) points with different label """ true_query_data_labels = query_data_dict["labels"] assigned_R = query_point_assignment_array[ query_point_assignment_array[:, 1] < num_R, 1 ] assigned_E = query_point_assignment_array[ query_point_assignment_array[:, 1] >= num_R, 1 ] assigned_R_labels = init_Rdata_dict["labels"][assigned_R] assigned_E_labels = init_Edata_dict["labels"][assigned_E - num_R] assigned_query_data_labels = np.empty( shape=query_point_assignment_array.shape[0] ).astype(np.int32) assigned_query_data_labels[ query_point_assignment_array[:, 1] < num_R ] = assigned_R_labels assigned_query_data_labels[ query_point_assignment_array[:, 1] >= num_R ] = assigned_E_labels accuracy = ( true_query_data_labels == assigned_query_data_labels ).sum() / assigned_query_data_labels.shape[0] same_label_idx = np.where(true_query_data_labels == assigned_query_data_labels)[0] wrong_label_idx = np.where(true_query_data_labels != assigned_query_data_labels)[0] correct_pairs = [] for i in query_point_assignment_array[same_label_idx]: query_idx, init_idx = i if init_idx < num_R: correct_pairs.append( [ query_data_dict["paths"].astype(object)[query_idx], init_Rdata_dict["paths"].astype(object)[init_idx], query_data_dict["labels"][query_idx], init_Rdata_dict["labels"][init_idx], ] ) else: correct_pairs.append( [ query_data_dict["paths"].astype(object)[query_idx], init_Edata_dict["paths"].astype(object)[init_idx - num_R], query_data_dict["labels"][query_idx], init_Edata_dict["labels"][init_idx - num_R], ] ) wrong_pairs = [] for i in query_point_assignment_array[wrong_label_idx]: query_idx, init_idx = i if init_idx < num_R: wrong_pairs.append( [ query_data_dict["paths"].astype(object)[query_idx], init_Rdata_dict["paths"].astype(object)[init_idx], query_data_dict["labels"][query_idx], init_Rdata_dict["labels"][init_idx], ] ) else: wrong_pairs.append( [ query_data_dict["paths"].astype(object)[query_idx], init_Edata_dict["paths"].astype(object)[init_idx - num_R], query_data_dict["labels"][query_idx], init_Edata_dict["labels"][init_idx - num_R], ] ) with open( os.path.join(root, "logs", "analyzed_query_point_assignments.pkl"), "wb" ) as f: pickle.dump( { "accuracy": accuracy, "same_label_idx": same_label_idx, "wrong_label_idx": wrong_label_idx, "correct_pairs": correct_pairs, "wrong_pairs": wrong_pairs, "query_point_assignment_array": query_point_assignment_array, }, f, ) same_label_image_path = os.path.join(root, "visualization", "same_label_images") wrong_label_image_path = os.path.join(root, "visualization", "wrong_label_images") if not os.path.exists(wrong_label_image_path): os.mkdir(wrong_label_image_path) if not os.path.exists(same_label_image_path): os.mkdir(same_label_image_path) for i in range(n_points_to_copy): query_image_path, init_image_path, query_label, init_label = correct_pairs[i] path_to_copy = os.path.join( 
same_label_image_path, "i{0}_init_image_querylabel{1}_initlabel{2}.png".format( str(i), str(query_label), str(init_label) ), ) os.system("cp {0} {1}".format(init_image_path, path_to_copy)) path_to_copy2 = os.path.join( same_label_image_path, "i{0}_query_image_querylabel{1}_initlabel{2}.png".format( str(i), str(query_label), str(init_label) ), ) os.system("cp {0} {1}".format(query_image_path, path_to_copy2)) ( w_query_image_path, w_init_image_path, w_query_label, w_init_label, ) = wrong_pairs[i] path_to_copy_w = os.path.join( wrong_label_image_path, "i{0}_init_image_querylabel{1}_initlabel{2}.png".format( str(i), str(w_query_label), str(w_init_label) ), ) os.system("cp {0} {1}".format(w_init_image_path, path_to_copy_w)) path_to_copy_w2 = os.path.join( wrong_label_image_path, "i{0}_query_image_querylabel{1}_initlabel{2}.png".format( i, w_query_label, w_init_label ), ) os.system("cp {0} {1}".format(w_query_image_path, path_to_copy_w2)) return accuracy, correct_pairs, wrong_pairs def _generate_query_sets(version: str, N: int = 5000): """ Generates query sets for qDCA experiment in Section 4.3. :param version: either version1 (dogs vs kitchen utils) or version2 (random). :param N: number of points to sample for R used in DCA. """ with open(f"representations/vgg16/{version}/Rfeatures.pkl", "rb") as f: Rdata_v1 = pickle.load(f) with open(f"representations/vgg16/{version}/Efeatures.pkl", "rb") as f: Edata_v1 = pickle.load(f) init_Ridxs = np.random.choice( np.arange(len(Rdata_v1["feat_lin1"])), size=N, replace=False ) query_Ridxs = np.setdiff1d(np.arange(len(Rdata_v1["feat_lin1"])), init_Ridxs) init_Eidxs = np.random.choice( np.arange(len(Edata_v1["feat_lin1"])), size=N, replace=False ) query_Eidxs = np.setdiff1d(np.arange(len(Edata_v1["feat_lin1"])), init_Eidxs) with open(f"representations/vgg16/{version}/sampled_Rfeatures.pkl", "wb") as f: pickle.dump( { "feat_lin1": Rdata_v1["feat_lin1"][init_Ridxs], "feat_lin2": Rdata_v1["feat_lin2"][init_Ridxs], "labels": Rdata_v1["labels"][init_Ridxs], "paths": np.array(Rdata_v1["paths"])[init_Ridxs], "init_Ridx": init_Ridxs, "query_Ridx": query_Ridxs, }, f, ) with open(f"representations/vgg16/{version}/sampled_Efeatures.pkl", "wb") as f: pickle.dump( { "feat_lin1": Edata_v1["feat_lin1"][init_Eidxs], "feat_lin2": Edata_v1["feat_lin2"][init_Eidxs], "labels": Edata_v1["labels"][init_Eidxs], "paths": np.array(Edata_v1["paths"])[init_Eidxs], "init_Eidx": init_Eidxs, "query_Eidx": query_Eidxs, }, f, ) with open(f"representations/vgg16/{version}/query_features.pkl", "wb") as f: pickle.dump( { "feat_lin1": np.concatenate( [ Rdata_v1["feat_lin1"][query_Ridxs], Edata_v1["feat_lin1"][query_Eidxs], ] ), "feat_lin2": np.concatenate( [ Rdata_v1["feat_lin2"][query_Ridxs], Edata_v1["feat_lin2"][query_Eidxs], ] ), "labels": np.concatenate( [Rdata_v1["labels"][query_Ridxs], Edata_v1["labels"][query_Eidxs]] ), "paths": np.concatenate( [ np.array(Rdata_v1["paths"])[query_Ridxs], np.array(Edata_v1["paths"])[query_Eidxs], ] ), "init_Eidxs": init_Eidxs, "query_Eidxs": query_Eidxs, "init_Ridxs": init_Ridxs, "query_Ridxs": query_Ridxs, }, f, )
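Most of _analyze_query_point_assignment is bookkeeping that maps each query point's assigned index back to an R label (index < num_R) or an E label (index >= num_R) and scores the agreement with the true labels. A tiny self-contained sketch of that step, with invented arrays in place of the pickled VGG16 features:

import numpy as np

num_R = 3                                   # first 3 init points are R, the rest are E
R_labels = np.array([0, 1, 2])              # labels of the R anchor points
E_labels = np.array([2, 0])                 # labels of the E anchor points
true_query_labels = np.array([0, 2, 0, 1])  # ground-truth labels of 4 query points

# column 0: query index, column 1: assigned init index (R if < num_R, else E)
query_point_assignment_array = np.array([
    [0, 0],  # query 0 -> R point 0 (label 0)  correct
    [1, 3],  # query 1 -> E point 0 (label 2)  correct
    [2, 4],  # query 2 -> E point 1 (label 0)  correct
    [3, 2],  # query 3 -> R point 2 (label 2)  wrong
])

assigned = np.empty(query_point_assignment_array.shape[0], dtype=np.int32)
is_R = query_point_assignment_array[:, 1] < num_R
assigned[is_R] = R_labels[query_point_assignment_array[is_R, 1]]
assigned[~is_R] = E_labels[query_point_assignment_array[~is_R, 1] - num_R]

accuracy = (assigned == true_query_labels).mean()
print(accuracy)  # 0.75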
36.815574
87
0.571969
0
0
0
0
0
0
0
0
2,036
0.22665
37e640e884ea7efdcb34d9809f129977c3b8f796
2,905
py
Python
back-end/RawFishSheep/app_cart/views.py
Coldarra/RawFishSheep
266bd9d8d9832d5c692b63e7515d45fdc4f6acc4
[ "Apache-2.0" ]
null
null
null
back-end/RawFishSheep/app_cart/views.py
Coldarra/RawFishSheep
266bd9d8d9832d5c692b63e7515d45fdc4f6acc4
[ "Apache-2.0" ]
4
2021-10-06T22:49:52.000Z
2022-02-27T12:28:18.000Z
back-end/RawFishSheep/app_cart/views.py
Coldarra/RawFishSheep
266bd9d8d9832d5c692b63e7515d45fdc4f6acc4
[ "Apache-2.0" ]
null
null
null
from .models import *
from decorator import *
from app_goods.views import getGoodsByID


# Query all cart entries belonging to the given user
def getCartByUser(user_id=None):
    if user_id == None:
        raise ParamException()
    return Cart.objects.filter(user_id=user_id)


def getSelectedCart(user_id=None):
    if user_id == None:
        raise ParamException()
    return Cart.objects.filter(user_id=user_id, selection="1")


def getCartByGoods(user_id=None, goods_id=None):
    if None in [user_id, goods_id]:
        raise ParamException()
    if Cart.objects.filter(user_id=user_id, goods_id=goods_id).count() <= 0:
        raise RFSException("40012", "无效购物车商品")
    return Cart.objects.get(user_id=user_id, goods_id=goods_id)


def checkCartByGoods(user_id, goods_id):
    return Cart.objects.filter(user_id=user_id, goods_id=goods_id).count() > 0


def createCart(user_id=None, goods_id=None, amount=None):
    if None in [user_id, goods_id, amount]:
        raise ParamException()
    # If the goods are already in the cart, increase the amount instead of
    # creating a duplicate row.
    if checkCartByGoods(user_id, goods_id):
        return appendToCart(user_id, goods_id, amount)
    return Cart.objects.create(
        user_id=user_id, goods_id=goods_id, amount=amount)


def appendToCart(user_id=None, goods_id=None, amount=None):
    if None in [user_id, goods_id, amount]:
        raise ParamException()
    amount = int(amount)
    if getGoodsByID(goods_id).remain < amount:
        raise RFSException("40013", "商品余辆不足")
    if checkCartByGoods(user_id, goods_id):
        cart_obj = getCartByGoods(user_id, goods_id)
        cart_obj.amount += amount
        cart_obj.save()
        return cart_obj
    else:
        return createCart(user_id, goods_id, amount)


def deleteCartByGoods(user_id=None, goods_id=None):
    if None in [user_id, goods_id]:
        raise ParamException()
    Cart.objects.filter(user_id=user_id, goods_id=goods_id).delete()


def deleteCartByUser(user_id=None):
    if user_id == None:
        raise ParamException()
    Cart.objects.filter(user_id=user_id).delete()


def deleteSelectedCart(user_id=None):
    if user_id == None:
        raise ParamException()
    Cart.objects.filter(user_id=user_id, selection="1").delete()


def setCartAmount(user_id=None, goods_id=None, amount=None):
    if None in [user_id, goods_id, amount]:
        raise ParamException()
    amount = int(amount)
    cart = getCartByGoods(user_id, goods_id)
    if amount <= 0:
        raise RFSException("40033", "购物车商品数量非法")
    cart.amount = amount
    cart.save()
    return cart


def setCartSelection(user_id=None, goods_id=None, selection=None):
    # Validate the parameters
    if None in [user_id, goods_id, selection]:
        raise ParamException()
    cart = getCartByGoods(user_id, goods_id)
    # Validate the current selection state
    if cart.selection != "0" and cart.selection != "1":
        raise RFSException("40033", "状态非法")
    # Update the selection state
    cart.selection = selection
    cart.save()
    return cart
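A rough usage sketch of the helpers above. It assumes a configured Django project with the Cart and Goods models migrated; the ids passed in are placeholders.

def example_cart_flow(user_id: int, goods_id: int) -> None:
    # Add 2 units; creates the row if it does not exist, otherwise increments it.
    appendToCart(user_id=user_id, goods_id=goods_id, amount=2)

    # Overwrite the quantity and mark the entry as selected for checkout.
    setCartAmount(user_id=user_id, goods_id=goods_id, amount=5)
    setCartSelection(user_id=user_id, goods_id=goods_id, selection="1")

    # Selected entries can then be listed and cleared once the order is placed.
    for cart in getSelectedCart(user_id=user_id):
        print(cart.goods_id, cart.amount)
    deleteSelectedCart(user_id=user_id)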
29.05
78
0.685714
0
0
0
0
0
0
0
0
248
0.081767
37e6a1c12c2e7ca4fa6cc0bc35bd20189bfd7063
7,704
py
Python
extensions/catsum.py
johannesgiorgis/my-timewarrior-extensions
1a8b83359298d3cbf002148f02b5ef6f1693a797
[ "MIT" ]
null
null
null
extensions/catsum.py
johannesgiorgis/my-timewarrior-extensions
1a8b83359298d3cbf002148f02b5ef6f1693a797
[ "MIT" ]
1
2022-02-14T16:53:54.000Z
2022-02-14T16:53:54.000Z
extensions/catsum.py
xoiopure/my-timewarrior-extensions
1a8b83359298d3cbf002148f02b5ef6f1693a797
[ "MIT" ]
1
2021-08-29T00:32:18.000Z
2021-08-29T00:32:18.000Z
#!/usr/bin/env python3 ############################################################################### # # Category Summaries # # ############################################################################### import datetime import io import json import logging import pprint import sys from typing import Dict, Any from dateutil import tz # set logging logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) # create handler c_handler = logging.StreamHandler() c_handler.setLevel(logging.INFO) # Create formatters and add it to handlers LOG_FORMAT = "[%(asctime)s - %(levelname)-8s - %(module)s:%(name)s ] %(message)s" c_format = logging.Formatter(LOG_FORMAT) c_handler.setFormatter(c_format) # Add handlers to the logger logger.addHandler(c_handler) DATE_FORMAT = "%Y%m%dT%H%M%SZ" # TODO: Convert to defaultdict # https://www.accelebrate.com/blog/using-defaultdict-python # https://stackoverflow.com/questions/9358983/dictionaries-and-default-values # https://docs.python.org/2/library/collections.html#collections.defaultdict CATEGORIES: dict = { "PT": "Personal Time", "PW": "Planned Work", "UW": "Unplanned Work", "OW": "Other Work", } def main(): print("~" * 100) totals = calculate_totals(sys.stdin) # print(totals) if not totals: sys.exit(0) categories_total = extract_categories(totals) # All Categories Statistics category_percent_breakdown = get_category_percent_breakdown(categories_total) formatted_category_breakdown = format_category_breakdown(category_percent_breakdown) display_category_breakdown(formatted_category_breakdown) # remove personal category categories_total.pop("Personal Time", None) work_category_percent_breakdown = get_category_percent_breakdown(categories_total) formatted_work_category_breakdown = format_category_breakdown(work_category_percent_breakdown) display_category_breakdown(formatted_work_category_breakdown) # formatted_category_breakdown.pop("Personal Time", None) # formatted # print(type(formatted_category_breakdown)) # print(formatted_category_breakdown.keys()) def format_seconds(seconds: int) -> str: """ Convert seconds to a formatted string Convert seconds: 3661 To formatted: " 1:01:01" """ # print(seconds, type(seconds)) hours = seconds // 3600 minutes = seconds % 3600 // 60 seconds = seconds % 60 return f"{hours:4d}:{minutes:02d}:{seconds:02d}" def calculate_totals(input_stream: io.TextIOWrapper) -> Dict[str, datetime.timedelta]: from_zone = tz.tzutc() to_zone = tz.tzlocal() # Extract the configuration settings. 
header = 1 configuration = dict() body = "" for line in input_stream: if header: if line == "\n": header = 0 else: fields = line.strip().split(": ", 2) if len(fields) == 2: configuration[fields[0]] = fields[1] else: configuration[fields[0]] = "" else: body += line # Sum the seconds tracked by tag totals = dict() untagged = None j = json.loads(body) for object in j: start = datetime.datetime.strptime(object["start"], DATE_FORMAT) if "end" in object: end = datetime.datetime.strptime(object["end"], DATE_FORMAT) else: end = datetime.datetime.utcnow() tracked = end - start if "tags" not in object or object["tags"] == []: if untagged is None: untagged = tracked else: untagged += tracked else: for tag in object["tags"]: if tag in totals: totals[tag] += tracked else: totals[tag] = tracked if "temp.report.start" not in configuration: print("There is no data in the database") return totals start_utc = datetime.datetime.strptime(configuration["temp.report.start"], DATE_FORMAT) start_utc = start_utc.replace(tzinfo=from_zone) start = start_utc.astimezone(to_zone) if "temp.report.end" in configuration: end_utc = datetime.datetime.strptime(configuration["temp.report.end"], DATE_FORMAT) end_utc = end_utc.replace(tzinfo=from_zone) end = end_utc.astimezone(to_zone) else: end = datetime.datetime.now() if len(totals) == 0 and untagged is None: print(f"No data in the range {start:%Y-%m-%d %H:%M:%S} - {end:%Y-%m-%d %H:%M:%S}") return totals print(f"\nCategory Summary Data for {start:%Y-%m-%d %H:%M:%S} - {end:%Y-%m-%d %H:%M:%S}") return totals def extract_categories(totals: Dict[str, datetime.timedelta]) -> Dict[str, datetime.timedelta]: categories_total = {} for category, category_full_name in CATEGORIES.items(): categories_total[category_full_name] = totals.get(category, datetime.timedelta(0)) return categories_total def get_category_percent_breakdown( category_run_times: Dict[str, datetime.timedelta] ) -> Dict[str, Any]: logger.debug("Getting category percentage breakdown...") total_time = sum([run_time.total_seconds() for run_time in category_run_times.values()]) logger.debug(f"Total Time:{total_time}") category_percentage_breakdown: dict = {} for category, run_time in category_run_times.items(): category_percent = run_time.total_seconds() / total_time category_percentage_breakdown[category] = { "percent": category_percent, "duration": run_time.total_seconds() / 60, "run_time": format_seconds(int(run_time.total_seconds())), } # add total time statistics category_percentage_breakdown["Total"] = { "percent": total_time / total_time, "duration": total_time / 60, "run_time": format_seconds(int(total_time)), } logger.debug(pprint.pformat(category_percentage_breakdown)) return category_percentage_breakdown def format_category_breakdown(category_breakdown: dict) -> Dict[str, Any]: # print(type(category_breakdown)) # pprint.pprint(category_breakdown) formatted_category_breakdown = {} for category, category_statistics in category_breakdown.items(): formatted_category_breakdown[category] = { # convert duration to mins "duration": round(category_statistics["duration"], 2), "percent": round(category_statistics["percent"] * 100, 2), "run_time": category_statistics["run_time"], } return formatted_category_breakdown def display_category_breakdown(category_breakdown: dict, title: str = "Category Breakdown"): # Determine largest width max_width = len("Category") for category_statistics in category_breakdown.values(): if len(category_statistics) > max_width: max_width = len(category_statistics) print_dotted_line() 
print(f"\t\t{title.capitalize():>{max_width}}") print( f"{'Category':{max_width}}\t" f"{'Duration':{max_width}}\t" f"{'Run_Time':>{max_width + 2}}\t" f"{'Percent':{max_width + 1}}" ) for category, category_statistics in category_breakdown.items(): print( f"{category:{max_width}}\t" f"{category_statistics['duration']:{max_width}}\t" f"{category_statistics['run_time']:}\t" f"{category_statistics['percent']}%" ) print_dotted_line() def print_dotted_line(width: int = 72): """Print a dotted (rather 'dashed') line""" print("-" * width) if __name__ == "__main__": main()
31.57377
98
0.641874
0
0
0
0
0
0
0
0
2,241
0.290888
37e90c8995ed6a6f4dbc2bb7d6d0c967a69b04ab
3,881
py
Python
resources/hotel.py
jnascimentocode/REST-API-COM-PYTHON-E-FLASK
c55dca53f3a864c6c1aba8bbde63dcadc3c19347
[ "MIT" ]
null
null
null
resources/hotel.py
jnascimentocode/REST-API-COM-PYTHON-E-FLASK
c55dca53f3a864c6c1aba8bbde63dcadc3c19347
[ "MIT" ]
null
null
null
resources/hotel.py
jnascimentocode/REST-API-COM-PYTHON-E-FLASK
c55dca53f3a864c6c1aba8bbde63dcadc3c19347
[ "MIT" ]
null
null
null
from typing import ParamSpecArgs from flask_restful import Resource, reqparse from models.hotel import HotelModel from flask_jwt_extended import jwt_required from models.site import SiteModel from resources.filtros import * import sqlite3 path_params = reqparse.RequestParser() path_params.add_argument('cidade', type=str) path_params.add_argument('estrelas_min', type=float) path_params.add_argument('estrelas_max', type=float) path_params.add_argument('diaria_min', type=float) path_params.add_argument('diaria_max', type=float) path_params.add_argument('limit', type=float) path_params.add_argument('offset', type=float) class Hoteis(Resource): def get(self): connection = sqlite3.connect('banco.db') cursor = connection.cursor() dados = path_params.parse_args() dados_validos = {chave:dados[chave] for chave in dados if dados[chave] is not None} parametros = normalize_path_params(**dados_validos) if not parametros.get('cidade'): tupla = tuple([parametros[chave] for chave in parametros]) resultado = cursor.execute(consulta_sem_cidade, tupla) else: tupla = tuple([parametros[chave] for chave in parametros]) resultado = cursor.execute(consulta_com_cidade, tupla) hoteis = [] for linha in resultado: hoteis.append({ 'hotel_id': linha[0], 'nome': linha[1], 'estrelas': linha[2], 'diaria': linha[3], 'cidade': linha[4], 'site_id': linha[5] }) return {'hoteis': hoteis} class Hotel(Resource): argumentos = reqparse.RequestParser() argumentos.add_argument('nome', type=str, required=True, help="The field 'nome' cannot be left blank") argumentos.add_argument('estrelas', type=float, required=True, help="The field 'estrelas' cannot be left blank") argumentos.add_argument('diaria') argumentos.add_argument('cidade') argumentos.add_argument('site_id', type=int, required=True, help="Every hotel needs to be linked with site") def get(self, hotel_id): hotel = HotelModel.find_hotel(hotel_id) if hotel: return hotel.json() return {'message': 'Hotel not found.'}, 404 @jwt_required() def post(self, hotel_id): if HotelModel.find_hotel(hotel_id): return {"message": "Hotel id '{}' already exists.".format(hotel_id)}, 400 dados = Hotel.argumentos.parse_args() hotel = HotelModel(hotel_id, **dados) if not SiteModel.find_by_id(dados.get('site_id')): return {'message': 'The hotel must be associated to a valid site id'}, 400 try: hotel.save_hotel() except: return {'message': 'An internal error occurred trying to save hotel.'}, 500 return hotel.json() @jwt_required() def put(self, hotel_id): dados = Hotel.argumentos.parse_args() hotel_encontrado = HotelModel.find_hotel(hotel_id) if hotel_encontrado: hotel_encontrado.update_hotel(**dados) hotel_encontrado.save_hotel() return hotel_encontrado.json(), 200 hotel = HotelModel(hotel_id, **dados) try: hotel.save_hotel() except: return {'message': 'An internal error occurred trying to save hotel.'}, 500 return hotel.json(), 201 #created @jwt_required() def delete(self, hotel_id): global hoteis hotel = HotelModel.find_hotel(hotel_id) if hotel: try: hotel.delete_hotel() except: return {'message': 'An error occurred trying to delete hotel.'}, 500 return {'message': 'Hotel deleted.'} return {'message': 'Hotel not found.'}, 404
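A minimal sketch of wiring these resources into a Flask-RESTful application. The import path matches this file's location (resources/hotel.py); the app factory, JWT secret and exact route strings are assumptions about the surrounding project, not taken from it.

from flask import Flask
from flask_restful import Api
from flask_jwt_extended import JWTManager

from resources.hotel import Hoteis, Hotel

app = Flask(__name__)
app.config["JWT_SECRET_KEY"] = "change-me"  # placeholder secret
api = Api(app)
jwt = JWTManager(app)

api.add_resource(Hoteis, "/hoteis")                       # GET with filter query params
api.add_resource(Hotel, "/hoteis/<string:hotel_id>")      # GET/POST/PUT/DELETE per hotel

if __name__ == "__main__":
    app.run(debug=True)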
34.345133
116
0.631538
3,249
0.837155
0
0
1,571
0.404793
0
0
681
0.17547
37e97b75428a1033eda5441303e4da93aa132446
221
py
Python
src/wormhole/__main__.py
dmgolembiowski/magic-wormhole
d517a10282d5e56f300db462b1a6eec517202af7
[ "MIT" ]
2,801
2021-01-10T16:37:14.000Z
2022-03-31T19:02:50.000Z
src/wormhole/__main__.py
dmgolembiowski/magic-wormhole
d517a10282d5e56f300db462b1a6eec517202af7
[ "MIT" ]
52
2021-01-10T01:54:00.000Z
2022-03-11T13:12:41.000Z
src/wormhole/__main__.py
dmgolembiowski/magic-wormhole
d517a10282d5e56f300db462b1a6eec517202af7
[ "MIT" ]
106
2021-01-21T14:32:22.000Z
2022-03-18T10:33:09.000Z
from __future__ import absolute_import, print_function, unicode_literals if __name__ == "__main__": from .cli import cli cli.wormhole() else: # raise ImportError('this module should not be imported') pass
27.625
72
0.737557
0
0
0
0
0
0
0
0
67
0.303167
37eaf107409d84d5c2fde68eaa08ffa5c4d85c18
2,413
py
Python
testing/berge_equilibrium_cndp.py
Eliezer-Beczi/CNDP
73decdfaef1c9e546ad94dd7448c89078af27034
[ "MIT" ]
1
2021-08-13T09:14:40.000Z
2021-08-13T09:14:40.000Z
testing/berge_equilibrium_cndp.py
Eliezer-Beczi/CNDP
73decdfaef1c9e546ad94dd7448c89078af27034
[ "MIT" ]
null
null
null
testing/berge_equilibrium_cndp.py
Eliezer-Beczi/CNDP
73decdfaef1c9e546ad94dd7448c89078af27034
[ "MIT" ]
null
null
null
import networkx as nx import utils.connectivity_metrics as connectivity_metric from platypus import NSGAII, EpsMOEA, NSGAIII, EpsNSGAII, Problem, Dominance, Subset, TournamentSelector, \ HypervolumeFitnessEvaluator, Archive import statistics import multiprocessing as mp G = nx.read_adjlist("input/Ventresca/BarabasiAlbert_n500m1.txt") k = 50 num_of_tests = 10 def get_pairwise_connectivity(exclude=None): if exclude is None: exclude = {} S = set(exclude) subgraph = nx.subgraph_view(G, filter_node=lambda n: n not in S) return connectivity_metric.pairwise_connectivity(subgraph) class CNDP(Problem): def __init__(self): super(CNDP, self).__init__(1, 1) self.types[:] = Subset(list(G), k) def evaluate(self, solution): solution.objectives[0] = get_pairwise_connectivity(solution.variables[0]) class BergeDominance(Dominance): def __init__(self): super(BergeDominance, self).__init__() def compare(self, x, y): k1 = 0 k2 = 0 nodes_x = x.variables[0][:] nodes_y = y.variables[0][:] metric_x = x.objectives[0] metric_y = y.objectives[0] for i in range(k): tmp = nodes_y[i] nodes_y[i] = nodes_x[i] if get_pairwise_connectivity(nodes_y) < metric_x: k1 += 1 nodes_y[i] = tmp for i in range(k): tmp = nodes_x[i] nodes_x[i] = nodes_y[i] if get_pairwise_connectivity(nodes_x) < metric_y: k2 += 1 nodes_x[i] = tmp if k1 < k2: return -1 elif k1 > k2: return 1 else: return 0 class BergeArchive(Archive): def __init__(self): super(BergeArchive, self).__init__(dominance=BergeDominance()) def get_critical_nodes(): algorithm = NSGAII(CNDP(), selector=TournamentSelector(dominance=BergeDominance()), archive=BergeArchive()) algorithm.run(1000) fitness = algorithm.result[0].objectives[0] print(fitness) return fitness if __name__ == '__main__': pool = mp.Pool(mp.cpu_count()) samples = pool.starmap_async(get_critical_nodes, [() for _ in range(num_of_tests)]).get() pool.close() avg = sum(samples) / len(samples) stdev = statistics.stdev(samples) print(f"Average: {avg}") print(f"Standard Deviation: {stdev}")
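A single-process sketch of the same pipeline on a toy graph, useful for a quick sanity run without the multiprocessing pool. It assumes the snippet sits in this module (so rebinding G and k affects the globals the classes read) and uses a much smaller graph, subset size and evaluation budget than the real experiment.

import networkx as nx
from platypus import NSGAII, TournamentSelector

G = nx.barabasi_albert_graph(60, 1)  # rebinds the module-level graph to a toy instance
k = 5                                # rebinds the module-level subset size

algorithm = NSGAII(
    CNDP(),
    selector=TournamentSelector(dominance=BergeDominance()),
    archive=BergeArchive(),
)
algorithm.run(200)  # far fewer evaluations than the 1000 used above

best = algorithm.result[0]
print(best.variables[0])   # candidate set of k critical nodes
print(best.objectives[0])  # residual pairwise connectivity after their removal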
25.135417
111
0.63075
1,216
0.503937
0
0
0
0
0
0
100
0.041442