Dataset columns:

| column | type |
|---|---|
| hexsha | string (length 40) |
| size | int64 (4 to 1.02M) |
| ext | string (8 classes) |
| lang | string (1 class) |
| max_stars_repo_path | string (length 4 to 209) |
| max_stars_repo_name | string (length 5 to 121) |
| max_stars_repo_head_hexsha | string (length 40) |
| max_stars_repo_licenses | list (length 1 to 10) |
| max_stars_count | int64 (1 to 191k, nullable) |
| max_stars_repo_stars_event_min_datetime | string (length 24, nullable) |
| max_stars_repo_stars_event_max_datetime | string (length 24, nullable) |
| max_issues_repo_path | string (length 4 to 209) |
| max_issues_repo_name | string (length 5 to 121) |
| max_issues_repo_head_hexsha | string (length 40) |
| max_issues_repo_licenses | list (length 1 to 10) |
| max_issues_count | int64 (1 to 67k, nullable) |
| max_issues_repo_issues_event_min_datetime | string (length 24, nullable) |
| max_issues_repo_issues_event_max_datetime | string (length 24, nullable) |
| max_forks_repo_path | string (length 4 to 209) |
| max_forks_repo_name | string (length 5 to 121) |
| max_forks_repo_head_hexsha | string (length 40) |
| max_forks_repo_licenses | list (length 1 to 10) |
| max_forks_count | int64 (1 to 105k, nullable) |
| max_forks_repo_forks_event_min_datetime | string (length 24, nullable) |
| max_forks_repo_forks_event_max_datetime | string (length 24, nullable) |
| content | string (length 4 to 1.02M) |
| avg_line_length | float64 (1.07 to 66.1k) |
| max_line_length | int64 (4 to 266k) |
| alphanum_fraction | float64 (0.01 to 1) |

hexsha: 4b396d7ef57b81a9da0897b5335821bdb75365f3 | size: 703 | ext: py | lang: Python
repo_path (same for stars/issues/forks): final_project/machinetranslation/tests/test_translator.py
repo_name: iamnikaa/xzceb-flask_eng_fr | head_hexsha: 7bfcfc14c301b783cd2fa12b77970d8dcd11e02c | licenses: ["Apache-2.0"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
"""Unittest module for testing translator module"""
import unittest

from translator import englishToFrench
from translator import frenchToEnglish


class TranslatorTest(unittest.TestCase):
    """Test functions for english to french and french to english translator functions"""

    def test_englishToFrench(self):
        """Tests english_to_french function"""
        self.assertNotEqual(englishToFrench("Hello"), "")
        self.assertEqual(englishToFrench("Hello"), "Bonjour")

    def test_frenchToEnglish(self):
        """Tests french_to_english function"""
        self.assertNotEqual(frenchToEnglish("Hello"), "")
        self.assertEqual(frenchToEnglish("Bonjour"), "Hello")


unittest.main()
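
For context, a minimal stand-in for the `translator` module these tests import is sketched below. It is hypothetical (the real course-project module calls an external translation service) and is included only so the interface the tests assume is visible.

# Hypothetical translator.py stub illustrating the interface the tests above expect.
def englishToFrench(english_text):
    # A real implementation would call a translation API; empty input yields "".
    return "Bonjour" if english_text == "Hello" else ""

def frenchToEnglish(french_text):
    return "Hello" if french_text == "Bonjour" else ""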
avg_line_length: 35.15 | max_line_length: 89 | alphanum_fraction: 0.72973

hexsha: cc8d41d87b3706845606fa4c1c65216f03301cb9 | size: 1,870 | ext: py | lang: Python
repo_path (same for stars/issues/forks): StringMatching.py
repo_name: tasselcui/misc | head_hexsha: 4dfe0d9386b472a574d419492df0cdc9dedb164a | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 10 21:30:42 2018
@author: lenovo
"""

P = 'ababaca'
T = 'cuiababacababaca'
# 3 and 9 shifts

#------------------------------------------------------------------------------
def Naive(T, P):
    """Naive string matching: compare the pattern directly at every shift."""
    n = len(T)
    m = len(P)
    for s in range(n - m + 1):
        if P == T[s : s + m]:
            print(P, ' occurs in ', T, ' with ', s, ' shifts')

#------------------------------------------------------------------------------
def RabinKarp(T, P, d, q):
    """Rabin-Karp matching over a radix-d alphabet (lowercase letters), modulo q."""
    n = len(T)
    m = len(P)
    h = pow(d, m - 1) % q
    p = 0
    t = 0
    for i in range(m):
        p = (d * p + ord(P[i]) - 97) % q
        t = (d * t + ord(T[i]) - 97) % q
    for s in range(n - m + 1):
        if p == t:
            # Hash hit: verify with a direct comparison.
            if P == T[s : s + m]:
                print(P, ' occurs in ', T, ' with ', s, ' shifts')
        if s < n - m:
            t = (d * (t - h * (ord(T[s]) - 97)) + (ord(T[s + m]) - 97)) % q

#------------------------------------------------------------------------------
def compute(P):
    """KMP prefix function: pi[q] is the index of the end of the longest proper
    prefix of P[:q + 1] that is also its suffix, or -1 if there is none."""
    m = len(P)
    pi = [-1 for i in range(m)]  # -1 (not 0) so the fallback loop below terminates
    k = -1
    for q in range(1, m):
        while k > -1 and P[k + 1] != P[q]:
            k = pi[k]
        if P[k + 1] == P[q]:
            k += 1
        pi[q] = k
    return pi

#------------------------------------------------------------------------------
def KMP(T, P):
    """Knuth-Morris-Pratt matching; the pattern (not the text) is compared against T."""
    n = len(T)
    m = len(P)
    pi = compute(P)
    q = -1
    for i in range(n):
        while q > -1 and P[q + 1] != T[i]:
            q = pi[q]
        if P[q + 1] == T[i]:
            q += 1
        if q == m - 1:
            print(P, ' occurs in ', T, ' with ', i - m + 1, ' shifts')
            q = pi[q]

#Naive(T, P)
#RabinKarp(T, P, 26, 13)
test = compute(P)
#KMP(T, P)
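
To exercise the three matchers on the module's sample strings, a small driver like the one below can be appended (illustrative, not part of the original file); each matcher should report occurrences at shifts 3 and 9, matching the comment near the top.

# Illustrative driver for the matchers defined above (not in the original file).
Naive(T, P)              # expected: shifts 3 and 9
RabinKarp(T, P, 26, 13)  # expected: shifts 3 and 9; hash hits are re-checked directly
KMP(T, P)                # expected: shifts 3 and 9
print(compute(P))        # prefix table for 'ababaca': [-1, -1, 0, 1, 2, -1, 0]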
avg_line_length: 26.714286 | max_line_length: 80 | alphanum_fraction: 0.28877

hexsha: ef71e4a404c2846a5bd9c00b112836b1c4378c11 | size: 1,220 | ext: py | lang: Python
repo_path (same for stars/issues/forks): perfkitbenchmarker/scripts/object_storage_api_test_scripts/gcs.py
repo_name: msidana/PerfKitBenchmarker | head_hexsha: 2784642d3e6b20b3f474c4e27edb1ef163804f66 | licenses: ["Apache-2.0"]
max_stars_count: 1 (2018-08-28T19:33:21.000Z) | max_issues_count: 1 (2021-03-26T00:41:05.000Z) | max_forks_count: null
content:
# Copyright 2016 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An interface to Google Cloud Storage, using the boto library."""

import time

from absl import flags

import boto_service
import gcs_oauth2_boto_plugin  # noqa

FLAGS = flags.FLAGS


class GCSService(boto_service.BotoService):

    def __init__(self):
        super(GCSService, self).__init__('gs', host_to_connect=None)

    def WriteObjectFromBuffer(self, bucket, object, stream, size):
        stream.seek(0)
        start_time = time.time()
        object_uri = self._StorageURI(bucket, object)
        object_uri.set_contents_from_file(stream, size=size)
        latency = time.time() - start_time
        return start_time, latency
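
A rough usage sketch for the service above follows. The bucket and object names are hypothetical, and it assumes the PerfKitBenchmarker `boto_service` module and GCS boto credentials are available; it is not part of the original file.

# Hypothetical example of timing a single upload with GCSService.
import io

service = GCSService()
payload = io.BytesIO(b'\0' * 1024)  # 1 KiB dummy object
start, latency = service.WriteObjectFromBuffer(
    'my-test-bucket', 'object_0', payload, size=1024)
print('upload started at %f, took %.4f s' % (start, latency))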
avg_line_length: 32.105263 | max_line_length: 74 | alphanum_fraction: 0.759016

hexsha: 12d392a9f839c3bb37aa64da265c1d6d4d7f5aed | size: 7,552 | ext: py | lang: Python
repo_path (same for stars/issues/forks): train.py
repo_name: jmnickerson05/Udacity_ImageClassifier_CLI | head_hexsha: 9305db86ed3789e187548d52197e7c7c857e3701 | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
from workspace_utils import active_session
import torch
from torch import nn, optim
from torchvision import datasets, transforms, models
import os, copy, time, json, argparse


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('data_directory')
    parser.add_argument('--save_dir', default='.')
    parser.add_argument('--learning_rate', default=0.01, type=float)  # parse CLI value as a number
    parser.add_argument('--epochs', default=25, type=int)  # parse CLI value as an integer
    parser.add_argument('--gpu', default=True)
    # NOPE -- Not sure I would just change models on the fly in the real world.
    # parser.add_argument('--arch', default='vgg16')
    # IS THIS NEEDED?
    # parser.add_argument('--hidden_units', default=512)
    global args
    args = parser.parse_args()
    model = initialize_model(num_classes=102, feature_extract=True)[0]
    train_and_save(model)


# Adapted From: https://pytorch.org/tutorials/beginner/finetuning_torchvision_models_tutorial.html
def set_parameter_requires_grad(model, feature_extracting):
    if feature_extracting:
        for param in model.parameters():
            param.requires_grad = False


def initialize_model(num_classes, feature_extract, use_pretrained=True):
    model_ft = None
    input_size = 0
    model_ft = models.vgg16_bn(pretrained=use_pretrained)
    set_parameter_requires_grad(model_ft, feature_extract)
    num_ftrs = model_ft.classifier[6].in_features
    model_ft.classifier[6] = nn.Linear(num_ftrs, num_classes)
    input_size = 224
    return model_ft, input_size


# Adapted From: https://pytorch.org/tutorials/beginner/finetuning_torchvision_models_tutorial.html
def train_model(model, dataloaders, criterion, optimizer, num_epochs=25, is_inception=False):
    since = time.time()
    val_acc_history = []
    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0
    device = torch.device("cuda:0" if (torch.cuda.is_available() and args.gpu is True) else "cpu")
    print(device)
    model.to(device)
    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)
        for phase in ['train', 'valid']:
            if phase == 'train':
                model.train()
            else:
                model.eval()
            running_loss = 0.0
            running_corrects = 0
            for inputs, labels in dataloaders[phase]:
                inputs = inputs.to(device)
                labels = labels.to(device)
                optimizer.zero_grad()
                with torch.set_grad_enabled(phase == 'train'):
                    if is_inception and phase == 'train':
                        # From https://discuss.pytorch.org/t/how-to-optimize-inception-model-with-auxiliary-classifiers/7958
                        outputs, aux_outputs = model(inputs)
                        loss1 = criterion(outputs, labels)
                        loss2 = criterion(aux_outputs, labels)
                        loss = loss1 + 0.4 * loss2
                    else:
                        outputs = model(inputs)
                        loss = criterion(outputs, labels)
                    _, preds = torch.max(outputs, 1)
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()
                # statistics
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)
            epoch_loss = running_loss / len(dataloaders[phase].dataset)
            epoch_acc = running_corrects.double() / len(dataloaders[phase].dataset)
            print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc))
            if phase == 'valid' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())
            if phase == 'valid':
                val_acc_history.append(epoch_acc)
        print()
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:4f}'.format(best_acc))
    # load best model weights
    model.load_state_dict(best_model_wts)
    return model, val_acc_history


# Adapted From: https://pytorch.org/tutorials/beginner/finetuning_torchvision_models_tutorial.html
def train_and_save(model):
    with active_session():
        data_dir = args.data_directory
        data_transforms = {
            'train': transforms.Compose([transforms.RandomRotation(30),
                                         transforms.RandomResizedCrop(224),
                                         transforms.RandomHorizontalFlip(),
                                         transforms.ToTensor(),
                                         transforms.Normalize([0.485, 0.456, 0.406],
                                                              [0.229, 0.224, 0.225])]),
            'test': transforms.Compose([transforms.Resize(255),
                                        transforms.CenterCrop(224),
                                        transforms.ToTensor(),
                                        transforms.Normalize([0.485, 0.456, 0.406],
                                                             [0.229, 0.224, 0.225])]),
            'valid': transforms.Compose([transforms.Resize(255),
                                         transforms.CenterCrop(224),
                                         transforms.ToTensor(),
                                         transforms.Normalize([0.485, 0.456, 0.406],
                                                              [0.229, 0.224, 0.225])])
        }
        image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),
                                                  data_transforms[x])
                          for x in ['train', 'test', 'valid']}
        dataloaders_dict = {x: torch.utils.data.DataLoader(image_datasets[x],
                                                           batch_size=8, shuffle=True,
                                                           num_workers=4)
                            for x in ['train', 'valid']}
        feature_extract = True
        params_to_update = model.parameters()
        print("Params to learn:")
        if feature_extract:
            params_to_update = []
            for name, param in model.named_parameters():
                if param.requires_grad == True:
                    params_to_update.append(param)
                    print("\t", name)
        else:
            for name, param in model.named_parameters():
                if param.requires_grad == True:
                    print("\t", name)
        optimizer_ft = optim.SGD(params_to_update, lr=args.learning_rate, momentum=0.9)
        criterion = nn.CrossEntropyLoss()
        model, hist = train_model(model, dataloaders_dict,
                                  num_epochs=args.epochs,
                                  criterion=criterion,
                                  optimizer=optimizer_ft)
        # Uncomment this to save the model during an unattended run
        torch.save(model, f'{args.save_dir}/cli_checkpoint.pth')


main()
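
A typical invocation of the script above might look like the line below. The `flowers/` directory name is illustrative only; whatever directory is passed must contain `train/`, `valid/`, and `test/` subfolders, because the script builds ImageFolder datasets from those names.

python train.py flowers/ --save_dir checkpoints --learning_rate 0.01 --epochs 25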
avg_line_length: 44.163743 | max_line_length: 124 | alphanum_fraction: 0.521319

hexsha: 2d17582f56c15c4883c16e11279f690712571482 | size: 833 | ext: py | lang: Python
repo_path (same for stars/issues/forks): CCC-2019/WaterlooWinningScoreProblem2019J1.py
repo_name: akkik04/University-of-Waterloo-CCC-Solutions | head_hexsha: c5398a0aff36ccfc472413390a293db20637a5be | licenses: ["MIT"]
max_stars_count: 3 (2021-06-26T21:47:35.000Z to 2021-08-25T01:37:11.000Z) | max_issues_count: null | max_forks_count: null
content:
# 2019 CCC PROBLEM J1'S SOLUTION:

# declaring variables to store input for the apples team.
three_point_apples = int(input())
two_point_apples = int(input())
one_point_apples = int(input())

# declaring variables to store input for the bananas team.
three_point_bananas = int(input())
two_point_bananas = int(input())
one_point_bananas = int(input())

# arithmetic calculation to determine the sum of points for both of the teams.
sum_points_apple = (3 * three_point_apples) + (2 * two_point_apples) + one_point_apples
sum_points_bananas = (3 * three_point_bananas) + (2 * two_point_bananas) + one_point_bananas

# if-statement to determine which team has more points.
if sum_points_apple > sum_points_bananas:
    print("A")
elif sum_points_bananas > sum_points_apple:
    print("B")
else:
    print("T")
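
As a worked example with made-up input (not from the original file): if the apples team records 2, 1, and 3 successful three-, two-, and one-point shots and the bananas team records 1, 1, and 1, the six input lines are 2, 1, 3, 1, 1, 1. The sums are 3*2 + 2*1 + 3 = 11 for apples and 3*1 + 2*1 + 1 = 6 for bananas, so the program prints A.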
avg_line_length: 33.32 | max_line_length: 93 | alphanum_fraction: 0.735894

hexsha: cbf5638ab4dba5f4bfc8f4433c8d6b173b7a80a1 | size: 47,105 | ext: py | lang: Python
repo_path (same for stars/issues/forks): tensorflow/python/keras/distribute/distributed_training_utils.py
repo_name: Caochuanhao/tensorflow | head_hexsha: 9aaf74d733a38cf587a75f2ffaa05d8a51d8c32b | licenses: ["Apache-2.0"]
max_stars_count: 1 (2019-07-18T09:16:43.000Z) | max_issues_count: null | max_forks_count: null
content:
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities related to distributed training."""
# pylint:disable=protected-access
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.distribute import distribute_coordinator_context as dc_context
from tensorflow.python.distribute import distribution_strategy_context as ds_context
from tensorflow.python.distribute import multi_worker_util
from tensorflow.python.distribute import reduce_util
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import callbacks
from tensorflow.python.keras import metrics as metrics_module
from tensorflow.python.keras import optimizers
from tensorflow.python.keras.engine import training_utils
from tensorflow.python.keras.optimizer_v2 import optimizer_v2
from tensorflow.python.keras.utils.mode_keys import ModeKeys
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import nest
from tensorflow.python.util import tf_contextlib
def set_weights(distribution_strategy, dist_model, weights):
"""Sets the weights of the replicated models.
The weights of the replicated models are set to the weights of the original
model. The weights of the replicated model are Mirrored variables and hence
we need to use the `update` call within a DistributionStrategy scope.
Args:
distribution_strategy: DistributionStrategy used to distribute training
and validation.
dist_model: The replicated models on the different devices.
weights: The weights of the original model.
"""
assign_ops = []
for layer in dist_model.layers:
num_param = len(layer.weights)
layer_weights = weights[:num_param]
for sw, w in zip(layer.weights, layer_weights):
if ops.executing_eagerly_outside_functions():
sw.assign(w)
else:
assign_ops.append(distribution_strategy.unwrap(sw.assign(w)))
weights = weights[num_param:]
if not ops.executing_eagerly_outside_functions():
K.get_session(assign_ops).run(assign_ops)
def unwrap_values(distribution_strategy, grouped_inputs, grouped_outputs,
grouped_updates=None, grouped_session_args=None,
with_loss_tensor=False):
"""Unwrap the list of values contained in the PerReplica parameters.
This function calls `flatten_per_replica_values` to parse each of the input
parameters into a list of values on the different devices. If we set
`with_loss_tensor` to be True, we also call `reduce` on the list of losses on
the different devices to give us one loss tensor.
Args:
distribution_strategy: DistributionStrategy used to distribute training and
validation.
grouped_inputs: PerReplica inputs returned from the train or test function
that we ran on each device.
grouped_outputs: PerReplica outputs returned from the train or test function
that we ran on each device.
grouped_updates: PerReplica updates returned from the train or test function
that we ran on each device.
grouped_session_args: PerReplica session args returned from the train or
test function that we ran on each device.
with_loss_tensor: Boolean that indicates if we need to add the reduced loss
tensor as one of the outputs.
Returns:
Values of each of the PerReplica parameters.
"""
# Unwrap per device values returned from each model's train function.
# This will be used to construct the main train function.
all_inputs = flatten_per_replica_values(distribution_strategy,
grouped_inputs)
all_outputs = unwrap_outputs(distribution_strategy, grouped_outputs,
with_loss_tensor)
if grouped_updates:
all_updates = flatten_per_replica_values(distribution_strategy,
grouped_updates)
else:
all_updates = None
all_session_args = {}
if grouped_session_args:
grouped_feed_dict = grouped_session_args.get('feed_dict')
if grouped_feed_dict:
all_session_args['feed_dict'] = flatten_per_replica_values(
distribution_strategy, grouped_feed_dict)
grouped_fetches = grouped_session_args.get('fetches')
if grouped_fetches:
all_session_args['fetches'] = flatten_per_replica_values(
distribution_strategy, grouped_fetches)
# TODO(priyag): Return only non empty/None values
return all_inputs, all_outputs, all_updates, all_session_args
def unwrap_outputs(distribution_strategy, grouped_outputs,
with_loss_tensor=False):
"""Unwrap the list of outputs contained in the PerReplica parameters.
This function calls `flatten_per_replica_values` to parse each of the input
parameters into a list of outputs on the different devices. If we set
`with_loss_tensor` to be True, we also call `reduce` on the list of losses on
the different devices to give us one loss tensor.
Args:
distribution_strategy: DistributionStrategy used to distribute training and
validation.
grouped_outputs: PerReplica outputs returned from the train or test function
that we ran on each device.
with_loss_tensor: Boolean that indicates if we need to add the reduced loss
tensor as one of the outputs.
Returns:
Values of each of the PerReplica outputs.
"""
if not with_loss_tensor:
return flatten_per_replica_values(distribution_strategy,
grouped_outputs)
if not isinstance(grouped_outputs, list):
grouped_outputs = [grouped_outputs]
# reduce loss tensor before adding it to the list of fetches
loss = distribution_strategy.reduce(reduce_util.ReduceOp.SUM,
grouped_outputs[0], axis=None)
all_outputs = flatten_per_replica_values(distribution_strategy,
grouped_outputs[1:])
if (is_tpu_strategy(distribution_strategy) and
ops.executing_eagerly_outside_functions()):
# Choose 1 value per replica in the TPU case since all replicas produce the
# same output.
# We only do this in eager mode for now since this function is used in
# both graph and eager mode and in the graph case we currently don't use
# experimental_run so would need to be removed when we converge the graph
# code path as well.
all_outputs = all_outputs[::distribution_strategy.num_replicas_in_sync]
return [loss] + all_outputs
def flatten_per_replica_values(distribution_strategy, per_replica_values):
"""Unwraps and flattens a nest of PerReplica parameters.
PerReplica values have one value associated with each device. Each entry in
the PerReplica dict has a device `key` and the corresponding value on the
device as the `value`. In this function we take a PerReplica value or a list
of PerReplica values and return all the values in the PerReplica dict.
Args:
distribution_strategy: DistributionStrategy used to distribute training and
validation.
per_replica_values: List of PerReplica object or a single PerReplica object.
Returns:
List of values of all the PerReplica objects.
"""
# pylint: disable=g-complex-comprehension
# This function takes a PerReplica object or a list of PerReplica objects and
# returns all the values associated with it.
return [e for flattened in nest.flatten(per_replica_values)
for e in distribution_strategy.unwrap(flattened)]
def validate_callbacks(input_callbacks, optimizer):
"""Validate whether given callbacks are supported by DistributionStrategy.
Args:
input_callbacks: List of callbacks passed by the user to fit.
optimizer: Optimizer instance used to train the model.
Raises:
ValueError: If `LearningRateScheduler` or `ReduceLROnPlateau` is one of the
callbacks passed.
ValueError: If `histogram_freq` or `write_grads` is one of the parameters
passed as part of the TensorBoard callback.
"""
if input_callbacks:
for callback in input_callbacks:
if isinstance(callback, (callbacks.LearningRateScheduler,
callbacks.ReduceLROnPlateau)):
if not isinstance(optimizer, optimizer_v2.OptimizerV2):
raise ValueError('You must specify a Keras Optimizer V2 when using '
'%s callback with DistributionStrategy.' % callback)
# If users want to use the TensorBoard callback they cannot use certain
# features of the callback that involve accessing model attributes and
# running ops.
if isinstance(callback, callbacks.TensorBoard):
if getattr(callback, 'histogram_freq', False):
logging.warning(
UserWarning(
'`histogram_freq` in the TensorBoard callback is not '
'supported when using DistributionStrategy. Setting '
'`histogram_freq` to `0`.'))
callback.histogram_freq = 0
if getattr(callback, 'write_grads', False):
logging.warning(
UserWarning(
'`write_grads` in the TensorBoard callback is not supported '
'when using DistributionStrategy. Setting `write_grads` '
'to `False`.'))
callback.histogram_freq = False
def validate_distributed_dataset_inputs(distribution_strategy, x, y,
sample_weights=None):
"""Validate all the components of a DistributedValue Dataset input.
Args:
distribution_strategy: The current DistributionStrategy used to call
`fit`/`evaluate`.
x: Input Dataset DistributedValue object. For example, when we use
`MirroredStrategy` this is a PerReplica object with a tensor for each
device set in the dict. x can also be a tuple or dict. The keys of the
dict should match the names of the input layers of the model.
y: Target Dataset DistributedValue object. For example, when we use
`MirroredStrategy` this is a PerReplica object with a tensor for each
device set in the dict. y can also be a tuple or dict. The keys of the
dict should match the names of the output layers of the model.
sample_weights: Sample weights Dataset DistributedValue object. For example,
when we use `MirroredStrategy` this is a PerReplica object with a tensor
for each device set in the dict.
Returns:
The unwrapped values list of the x and y DistributedValues inputs.
Raises:
ValueError: If x and y do not have support for being evaluated as tensors.
or if x and y contain elements that are not tensors or if x and y
contain elements that have a shape or dtype mismatch.
"""
# If the input and target used to call the model are not dataset tensors,
# we need to raise an error. When using a DistributionStrategy, the input
# and targets to a model should be from a `tf.data.Dataset`.
# If each element of x and y are not tensors, we cannot standardize and
# validate the input and targets.
x_values_list = validate_per_replica_inputs(distribution_strategy, x)
if y is not None:
y_values_list = validate_per_replica_inputs(distribution_strategy, y)
else:
y_values_list = None
if sample_weights is not None:
sample_weights_list = validate_per_replica_inputs(distribution_strategy,
sample_weights)
else:
sample_weights_list = None
# Return the unwrapped values to avoid calling `unwrap` a second time.
return x_values_list, y_values_list, sample_weights_list
def validate_per_replica_inputs(distribution_strategy, x):
"""Validates PerReplica dataset input list.
Args:
distribution_strategy: The current DistributionStrategy used to call
`fit`, `evaluate` and `predict`.
x: A list of PerReplica objects that represent the input or
target values.
Returns:
List containing the first element of each of the PerReplica objects in
the input list.
Raises:
ValueError: If any of the objects in the `per_replica_list` is not a tensor.
"""
# Convert the inputs and targets into a list of PerReplica objects.
per_replica_list = nest.flatten(x)
x_values_list = []
for x in per_replica_list:
if not tensor_util.is_tensor(x):
raise ValueError('Dataset input to the model should be tensors instead '
'they are of type {}'.format(type(x)))
# At this point both x and y contain tensors in the `DistributedValues`
# structure.
x_values = distribution_strategy.unwrap(x)
if not context.executing_eagerly():
# Validate that the shape and dtype of all the elements in x are the same.
validate_all_tensor_shapes(x, x_values)
validate_all_tensor_types(x, x_values)
x_values_list.append(x_values[0])
return x_values_list
def validate_all_tensor_types(x, x_values):
x_dtype = x_values[0].dtype
for i in range(1, len(x_values)):
if x_dtype != x_values[i].dtype:
raise ValueError('Input tensor dtypes do not match for distributed tensor'
' inputs {}'.format(x))
def validate_all_tensor_shapes(x, x_values):
# Validate that the shape of all the elements in x have the same shape
x_shape = x_values[0].shape.as_list()
for i in range(1, len(x_values)):
if x_shape != x_values[i].shape.as_list():
raise ValueError('Input tensor shapes do not match for distributed tensor'
' inputs {}'.format(x))
def _wait_for_variable_initialization(session):
"""Utility to wait for variables to be initialized."""
all_variables = K._get_variables(K.get_graph()) # pylint: disable=protected-access
candidate_vars = []
for v in all_variables:
if not getattr(v, '_keras_initialized', False):
candidate_vars.append(v)
if not candidate_vars:
return
while True:
is_initialized = session.run(
[variables.is_variable_initialized(v) for v in candidate_vars])
uninitialized_vars = []
for flag, v in zip(is_initialized, candidate_vars):
if not flag:
uninitialized_vars.append(v)
v._keras_initialized = True # pylint: disable=protected-access
if not uninitialized_vars:
break
def init_restore_or_wait_for_variables():
"""Initialize or restore variables or wait for variables to be initialized."""
session = K._get_session() # pylint: disable=protected-access
if not multi_worker_util.has_worker_context(
) or multi_worker_util.should_load_checkpoint():
# TODO(yuefengz): if checkpoints exist, restore from checkpoint.
K._initialize_variables(session) # pylint: disable=protected-access
else:
_wait_for_variable_initialization(session)
def validate_inputs(x, y):
"""Validate inputs when using DistributionStrategy.
Args:
x: Model Inputs.
y: Model Targets.
Raises:
ValueError: if input is not a Dataset or a numpy array(when we use
MirroredStrategy).
"""
if (isinstance(x, iterator_ops.Iterator) or
isinstance(y, iterator_ops.Iterator)):
raise ValueError('`DistributionStrategy` does not support inputs of type '
'Iterator. You must pass a `tf.data.Dataset` object or a '
'numpy array as input.')
# TODO(b/118776054): Currently we support global batch size for TPUStrategy and
# core MirroredStrategy only. Remove this check when contrib MirroredStrategy is
# no longer needed.
def global_batch_size_supported(distribution_strategy):
return distribution_strategy.extended._global_batch_size # pylint: disable=protected-access
# TODO(sourabhbajaj): Remove this once we use the same API for all strategies.
def is_tpu_strategy(strategy):
"""We're executing TPU Strategy."""
return (strategy is not None and
strategy.__class__.__name__.startswith('TPUStrategy'))
def is_dataset_shape_fully_defined(dataset):
"""Returns whether a dataset contains a final partial batch."""
shapes = nest.flatten(dataset_ops.get_legacy_output_shapes(dataset))
unknown_shapes = [s for s in shapes if not s.is_fully_defined()]
return not unknown_shapes
def process_batch_and_step_size(
strategy, inputs, batch_size, steps_per_epoch, mode):
"""Process the batch size and step size based on input and dist strategy."""
first_x_value = nest.flatten(inputs)[0]
if isinstance(first_x_value, np.ndarray):
# Until support for partial batch is implemented across all
# functions and distribution strategy, we pass `mode` to selectively
# relax the constraint to consume all the training samples.
steps_per_epoch, batch_size = get_input_params(strategy,
first_x_value,
steps_per_epoch,
batch_size,
mode=mode)
return batch_size, steps_per_epoch
def get_input_params(distribution_strategy, first_x_value, steps, batch_size,
mode=None):
"""Calculate the number of batches and steps/steps_per_epoch.
Args:
distribution_strategy: The DistributionStrategy used to compile the model.
first_x_value: This is the first input numpy array that is passed in as the
model input.
steps: The specified number of steps.
batch_size: The specified batch_size.
mode: ModeKey representing whether input will be used for training,
evaluation, or prediction. This is used to relax the constraints on
consuming all the training samples to keep compatibility till we
support partial batches. If none, then partial batches are not allowed.
Returns:
steps: The steps or steps_per_epoch argument depending on if a user is
calling `fit`, `evaluate` or `predict`. If the is_training flag is set
we don't require the number of samples to be used completely.
batch_size: The batch size to be used in model iterations.
Raises:
ValueError: If the number of batches or steps evaluates to 0.
"""
num_samples = first_x_value.shape[0]
# TODO(b/118776054): Use global batch size for Keras/DS support.
# Currently this is only supported in TPUStrategy and CoreMirroredStrategy.
use_per_replica_batch = not global_batch_size_supported(
distribution_strategy)
# TODO(b/128995245): In eager mode, uneven batch sizes are allowed except for
# `fit()` on TPUStrategy.
# In graph mode, the zero batch case in batch norm is not handled due to
# XLA-GPU regression. Uneven batch sizes are not allowed except
# for `test()` and `predict()` on TPUStrategy.
if context.executing_eagerly():
allow_partial_batch = (mode != ModeKeys.TRAIN or
not is_tpu_strategy(distribution_strategy))
else:
allow_partial_batch = (mode == ModeKeys.TRAIN or
((mode == ModeKeys.PREDICT or mode == ModeKeys.TEST)
and is_tpu_strategy(distribution_strategy)))
if steps is None:
if batch_size is None:
# If neither the batch size or number of steps are set. We choose the
# global batch size as the minimum of number of samples and 32. 32 is
# chosen to provide backward compatibility.
global_batch_size = min(num_samples, 32)
else:
# If the user provided the batch size we need to handle the case
# between different strategies that use the global/per-replica batch size
global_batch_size = batch_size
if use_per_replica_batch:
global_batch_size *= distribution_strategy.num_replicas_in_sync
if allow_partial_batch:
steps = np.ceil(num_samples / global_batch_size).astype(int)
else:
if num_samples % global_batch_size:
raise ValueError('The number of samples %s is not divisible by '
'batch size %s.' % (num_samples, global_batch_size))
steps = num_samples // global_batch_size
else:
if batch_size is None:
# We calculate the batch size based on the number of steps specified
if num_samples % steps:
raise ValueError('The number of samples %s is not divisible by '
'steps %s. Please change the number of steps to a '
'value that can consume all the samples' % (
num_samples, steps))
global_batch_size = num_samples // steps
else:
# If the user provided the batch size we need to handle the case
# between different strategies that use the global/per-replica batch size
global_batch_size = batch_size
if use_per_replica_batch:
global_batch_size *= distribution_strategy.num_replicas_in_sync
min_num_samples = global_batch_size * steps
if allow_partial_batch:
min_num_samples = global_batch_size * (steps-1) + 1 if steps > 1 else 0
if num_samples < min_num_samples:
raise ValueError('Number of samples %s is less than samples required '
'for specified batch_size %s and steps %s' % (
num_samples, global_batch_size, steps))
# We need to return the per replica or global batch size based on the strategy
if use_per_replica_batch:
if global_batch_size % distribution_strategy.num_replicas_in_sync:
raise ValueError(
'The batch size (%s) could not be sharded evenly across the sync '
'replicas (%s) in the distribution strategy.' % (
global_batch_size, distribution_strategy.num_replicas_in_sync))
batch_size = global_batch_size // distribution_strategy.num_replicas_in_sync
else:
batch_size = global_batch_size
return steps, batch_size
def get_batch_dimension(iterator):
shapes = nest.flatten(dataset_ops.get_legacy_output_shapes(iterator))
# Take the batch size from the first element, as it should be the same for
# all.
dims = shapes[0].dims
return dims[0] if dims else None
def get_iterator(dataset, distribution_strategy):
with distribution_strategy.scope():
iterator = distribution_strategy.make_dataset_iterator(dataset)
initialize_iterator(iterator, distribution_strategy)
return iterator
def initialize_iterator(iterator, distribution_strategy):
with distribution_strategy.scope():
init_op = control_flow_ops.group(iterator.initialize())
if not context.executing_eagerly():
K.get_session((init_op,)).run(init_op)
def _get_input_from_iterator(iterator, model):
"""Get elements from the iterator and verify the input shape and type."""
next_element = iterator.get_next()
# `len(nest.flatten(x))` is going to not count empty elements such as {}.
# len(nest.flatten([[0,1,2], {}])) is 3 and not 4. The `next_element` is
# going to get flattened in `_prepare_feed_values` to work around that. Empty
# elements are going to get filtered out as part of the flattening.
if len(nest.flatten(next_element)) == len(model.inputs):
x = next_element
y = None
sample_weights = None
elif len(nest.flatten(next_element)) == (len(model.inputs) +
len(model.outputs)):
x, y = next_element
sample_weights = None
else:
x, y, sample_weights = next_element
# Validate that all the elements in x and y are of the same type and shape.
validate_distributed_dataset_inputs(
model._distribution_strategy, x, y, sample_weights)
return x, y, sample_weights
def _prepare_feed_values(model, inputs, targets, sample_weights, mode):
"""Prepare feed values to the model execution function.
Arguments:
model: Model to prepare feed values for.
inputs: List or dict of model inputs.
targets: Optional list of model targets.
sample_weights: Optional list of sample weight arrays.
mode: One of ModeKeys.TRAIN/ModeKeys.TEST/ModeKeys.PREDICT.
Returns:
Feed values for the model in the given mode.
"""
strategy = model._distribution_strategy
inputs, targets, sample_weights = _get_input_from_iterator(inputs, model)
if is_tpu_strategy(strategy):
if sample_weights is not None:
raise ValueError('TPUStrategy does not support sample weights.')
# When the inputs are dict, then we want to flatten it in the same order as
# the input layers, such that the data are fed into the input layers in the
# correct order.
if isinstance(inputs, dict):
inputs = [inputs[key] for key in model._feed_input_names]
if is_distributing_by_cloning(model):
inputs = flatten_per_replica_values(strategy, inputs)
targets = flatten_per_replica_values(strategy, targets)
# Expand 1-dimensional inputs.
# TODO(b/124535720): Remove once this standarize data logic is shared with
# main flow.
inputs, targets = nest.map_structure(
training_utils.standardize_single_array, (inputs, targets))
else:
inputs = training_utils.ModelInputs(inputs).as_list()
if mode == ModeKeys.PREDICT:
sample_weights = []
targets = []
elif sample_weights is not None and is_distributing_by_cloning(model):
if context.executing_eagerly() and not model._compile_distribution:
raise NotImplementedError('`sample_weight` is not supported when using '
'tf.distribute.Strategy in eager mode and '
'cloning=True.')
sample_weights = flatten_per_replica_values(strategy, sample_weights)
ins = [inputs, targets, sample_weights]
return tuple(ins)
def is_distributing_by_cloning(model):
"""Decide whether this model is going to be distributed via cloning.
We are going to distribute the model by cloning if the user has signaled
that intent by setting `cloning=True` in `Model.compile()` unless we are in
graph mode.
Args:
model: Keras model to distribute.
Returns:
True if the `model` is going to be distributed using cloning and False
otherwise.
"""
if (is_tpu_strategy(model._distribution_strategy) and
context.executing_eagerly): # b/137580852
return False
elif ops.executing_eagerly_outside_functions():
return bool(model._compile_distribution)
return True
def _custom_compile_for_predict(model):
"""Custom compile for TPU predict mode."""
if not model.built:
# Model is not compilable because it does not know its number of inputs
# and outputs, nor their shapes and names. We will compile after the first
# time the model gets called on training data.
return
model._is_compiled = True
model.total_loss = None
model.train_function = None
model.test_function = None
model.predict_function = None
def _build_network_on_replica(model, mode, inputs=None, targets=None):
"""Build an updated model on replicas.
We create a new Keras model while sharing the variables from the old graph.
Building a new sub-graph is required since the original keras model creates
placeholders for the input and the output that are not accessible till we
call iterator.get_next() inside the step_fn for `fit`/`evaluate`/`predict`.
The sharing of weights and layers between the old and the new model gaurantee
that we're using Strategy variables and any updates on either model are
reflected correctly in callbacks and loop iterations.
We need to make sure we share the optimizers between the old and the new model
as well so that optimizer state is not lost if the user is running fit
multiple times.
Args:
model: Model to be replicated across Replicas
mode: Which of fit/eval/predict is building the distributed network
inputs: Input variables to be passed to the model
targets: Target tensor to be passed to model.compile
Returns:
A new model with shared layers with the old model.
"""
# Need to do imports here since we run into a circular dependency error.
from tensorflow.python.keras import models # pylint: disable=g-import-not-at-top
from tensorflow.python.keras.engine import sequential # pylint: disable=g-import-not-at-top
# We rely on the internal methods to avoid having share_weights weights in the
# public API.
if isinstance(model, sequential.Sequential):
updated_model = models._clone_sequential_model(
model, input_tensors=inputs, layer_fn=models.share_weights)
else:
updated_model = models._clone_functional_model(
model, input_tensors=inputs, layer_fn=models.share_weights)
# Callable losses added directly to a functional Model need to be added
# here.
updated_model._callable_losses = model._callable_losses
# Recast all low precision outputs back to float32 since we only casted
# the inputs to bfloat16 and not targets. This is done so that we can preserve
# precision when calculating the loss value.
def _upcast_low_precision_outputs(output):
if output.dtype == dtypes.bfloat16:
return math_ops.cast(output, dtypes.float32)
else:
return output
updated_model.outputs = [_upcast_low_precision_outputs(o)
for o in updated_model.outputs]
if isinstance(targets, tuple):
targets = nest.flatten(targets)
if mode == ModeKeys.PREDICT and inputs is not None: # TPU predict case
_custom_compile_for_predict(updated_model)
else:
updated_model.compile(
model.optimizer,
model.loss,
metrics=metrics_module.clone_metrics(model._compile_metrics),
loss_weights=model.loss_weights,
sample_weight_mode=model.sample_weight_mode,
weighted_metrics=metrics_module.clone_metrics(
model._compile_weighted_metrics),
target_tensors=targets)
return updated_model
def _build_distributed_network(model, strategy, mode, inputs=None,
targets=None):
"""Create a cloned model on each replica."""
with K.get_graph().as_default(), strategy.scope():
distributed_model = strategy.extended.call_for_each_replica(
_build_network_on_replica,
args=(model, mode, inputs, targets))
set_distributed_model(model, mode, distributed_model)
def _clone_and_build_model(model, mode, inputs=None, targets=None):
"""Clone and build the given keras_model."""
# We need to set the import here since we run into a circular dependency
# error.
from tensorflow.python.keras import models # pylint: disable=g-import-not-at-top
cloned_model = models.clone_model(model, input_tensors=inputs)
# Compile and build model.
if isinstance(model.optimizer, optimizers.TFOptimizer):
optimizer = model.optimizer
else:
optimizer_config = model.optimizer.get_config()
optimizer = model.optimizer.__class__.from_config(optimizer_config)
# Recast all low precision outputs back to float32 since we only casted
# the inputs to bfloat16 and not targets. This is done so that we can preserve
# precision when calculating the loss value.
def _upcast_low_precision_outputs(output):
if output.dtype == dtypes.bfloat16:
return math_ops.cast(output, dtypes.float32)
else:
return output
cloned_model.outputs = [_upcast_low_precision_outputs(o)
for o in cloned_model.outputs]
if isinstance(targets, tuple):
targets = nest.flatten(targets)
if mode == ModeKeys.PREDICT and inputs is not None: # TPU predict case
_custom_compile_for_predict(cloned_model)
else:
cloned_model.compile(
optimizer,
model.loss,
metrics=metrics_module.clone_metrics(model._compile_metrics),
loss_weights=model.loss_weights,
sample_weight_mode=model.sample_weight_mode,
weighted_metrics=metrics_module.clone_metrics(
model._compile_weighted_metrics),
target_tensors=targets)
return cloned_model
def clone_model_on_replicas(model, strategy, mode, inputs=None, targets=None):
"""Create a cloned model on each replica."""
with K.get_graph().as_default(), strategy.scope():
distributed_model = strategy.extended.call_for_each_replica(
_clone_and_build_model, args=(model, mode, inputs, targets))
set_distributed_model(model, mode, distributed_model)
if mode == ModeKeys.TRAIN:
model._make_callback_model(distributed_model)
def _make_execution_function(model, mode):
"""Makes or reuses function to run one step of distributed model execution."""
if is_distributing_by_cloning(model):
return _make_execution_function_with_cloning(model, mode)
distributed_function = get_distributed_function(model, mode)
if distributed_function:
return distributed_function
distribution_function = _make_execution_function_without_cloning(model, mode)
set_distributed_function(model, mode, distribution_function)
return distribution_function
def _make_execution_function_without_cloning(model, mode):
"""Creates a function to run one step of distributed model execution."""
strategy = model._distribution_strategy
with strategy.scope():
per_replica_function = _make_replica_execution_function(model, mode)
def distributed_function(input_fn):
"""A single step of the distributed execution across replicas."""
x, y, sample_weights = input_fn()
# Call `Model.{train,test,predict}_on_batch` on every replica passing
# PerReplicas as arguments. On every replica inside this call, each
# PerReplica object will return the value for that replica. The outputs
# are PerReplicas too.
outputs = strategy.experimental_run_v2(
per_replica_function, args=(x, y, sample_weights))
# Out of PerReplica outputs reduce or pick values to return.
all_outputs = unwrap_outputs(
strategy, outputs, with_loss_tensor=(mode != ModeKeys.PREDICT))
return all_outputs
if not model.run_eagerly:
distributed_function = def_function.function(distributed_function)
def execution_function(input_fn):
# `numpy` translates Tensors to values in Eager mode.
return [out.numpy() for out in distributed_function(input_fn)]
else:
execution_function = distributed_function
return execution_function
def _make_replica_execution_function(model, mode):
"""A single step of the distributed execution on a replica."""
if mode == ModeKeys.TRAIN:
func = model.train_on_batch
elif mode == ModeKeys.TEST:
func = model.test_on_batch
else:
def predict_on_batch(x, y=None, sample_weights=None):
del y, sample_weights
return model.predict_on_batch(x)
func = predict_on_batch
if mode != ModeKeys.PREDICT:
# `reset_metrics` is set to False to maintain stateful metrics across
# batch-level calls.
func = functools.partial(func, reset_metrics=False)
return func
def _make_replicated_models_with_cloning(model, mode):
"""Build models on each replica."""
strategy = model._distribution_strategy
# If distributed_model is not built, create one for `mode`.
if model._compile_distribution:
clone_model_on_replicas(model, strategy, mode)
else:
_build_distributed_network(model, strategy, mode)
def _make_execution_function_with_cloning(model, mode):
"""Clones or re-uses models to run one step of distributed model execution."""
distributed_model = get_distributed_model(model, mode)
# TODO(b/134069401): Create a cache for the distributed model and exec
# function that incorporates additional attributes to be part of the cache key
# than just the mode.
# If distributed model for a particular `mode` is already built, use the
# `_distribution_function` on that distributed model.
# If you have updated the sample_weight_mode on the model, then you will need
# to recompile metrics and recreate the execution function. This is indicated
# by the `_recompile_exec_function` property.
if (distributed_model and hasattr(distributed_model, '_distribution_function')
and not (hasattr(distributed_model, '_recompile_exec_function') and
distributed_model._recompile_exec_function)):
return distributed_model._distributed_function
if not distributed_model:
_make_replicated_models_with_cloning(model, mode)
distributed_model = get_distributed_model(model, mode)
assert distributed_model
# Also create an execution fuction on that distributed model.
if context.executing_eagerly():
distributed_function = _make_eager_execution_function(model, mode)
else:
distributed_function = _make_graph_execution_function(model, mode)
# We cache the distributed execution function on the model since creating
# distributed models and execution functions are expensive.
distributed_model._distributed_function = distributed_function
distributed_model._recompile_exec_function = False
return distributed_function
def _make_graph_execution_function(model, mode):
"""Makes function to run one step of distributed model in graph mode."""
def _per_replica_function(model):
f = model._make_execution_function(mode)
return (f.inputs, f.outputs, f.updates_op, f.session_kwargs)
strategy = model._distribution_strategy
with strategy.scope():
# Create train ops on each of the devices when we call
# `_per_replica_fit_function`.
(grouped_inputs, grouped_outputs, grouped_updates,
grouped_session_args) = strategy.extended.call_for_each_replica(
_per_replica_function, args=(get_distributed_model(model, mode),))
# Initialize the variables in the replicated model. This is necessary for
# multi-worker training because on some workers, initialization is not
# needed. This method does initialization or waiting for initialization
# according to the context object of distribute coordinator.
init_restore_or_wait_for_variables()
# Unwrap all the per device values returned from `call_for_each_replica`.
# Unwrapping per device values gives you a list of values that can be
# used to construct a new train function that is composed of update ops on
# all the devices over which the model is distributed.
(all_inputs, all_outputs, all_updates, all_session_args) = unwrap_values(
strategy,
grouped_inputs,
grouped_outputs,
grouped_updates,
grouped_session_args,
with_loss_tensor=(mode != ModeKeys.PREDICT))
return K.function(
all_inputs,
all_outputs,
updates=all_updates,
name='distributed_{}_function'.format(mode),
**all_session_args)
def _make_eager_execution_function(model, mode):
"""Makes function to run one step of distributed model eager execution."""
def _per_replica_function(model):
f = model._make_execution_function(mode)
return (f.inputs, f.outputs)
# NOTE(priyag): Try creating a new FuncGraph within DS scope instead of using
# the global one.
strategy = model._distribution_strategy
global_graph = K.get_graph()
with global_graph.as_default(), strategy.scope():
# First we gather the relevant portions of the model across all replicas.
# `K._scratch_graph(global_graph)` signals to Keras that it should not
# lift to a separate graph when creating the per-replica functions.
with K._scratch_graph(global_graph):
# Create train ops on each of the devices when we call
# `_per_replica_fit_function`.
grouped = strategy.extended.call_for_each_replica(
_per_replica_function, args=(get_distributed_model(model, mode),))
grouped_inputs, grouped_outputs = grouped
# Unwrap all the per device values returned from `call_for_each_replica`.
# Unwrapping per device values gives you a list of values that can be
# used to construct a new train function that is composed of
# inputs/outputs on all the devices over which the model is distributed.
(all_inputs, all_outputs, _, _) = unwrap_values(
strategy,
grouped_inputs,
grouped_outputs,
with_loss_tensor=(mode != ModeKeys.PREDICT))
# Finally, a joint Keras function is created; this one will be created in
# a separate FuncGraph.
return K.function(
all_inputs,
all_outputs,
name='eager_distributed_{}_function'.format(mode))
def _copy_weights_to_distributed_model(original_model, mode):
"""Copies weights from original model to distributed models."""
strategy = original_model._distribution_strategy
distributed_model = get_distributed_model(original_model, mode)
if strategy:
# Copy the weights from the original model to each of the replicated
# models.
orig_model_weights = original_model.get_weights()
first_model = strategy.unwrap(distributed_model)[0]
set_weights(strategy, first_model, orig_model_weights)
def _copy_weights_to_original_model(model, mode):
"""Copies weights from first distributed model back to original model."""
if model._distribution_strategy and mode == ModeKeys.TRAIN:
distributed_model = get_distributed_model(model, mode)
updated_weights = model._distribution_strategy.unwrap(
distributed_model)[0].get_weights()
model.set_weights(updated_weights)
def _per_replica_aggregate_batch(batch_outs, model, mode):
"""Aggregates the per-replica batch-level outputs from a distributed step."""
if model._distribution_strategy is not None and mode == ModeKeys.PREDICT:
total_batch_outs = []
for i in range(len(model.outputs)):
num_replicas = model._distribution_strategy.num_replicas_in_sync
nested_outs = batch_outs[i * num_replicas:i * num_replicas + num_replicas]
total_batch_outs.append(np.concatenate(nest.flatten(nested_outs)))
return total_batch_outs
return batch_outs
def _reset_metrics(model):
if model._distribution_strategy:
for mode in [ModeKeys.TRAIN, ModeKeys.TEST, ModeKeys.PREDICT]:
distributed_model = get_distributed_model(model, mode)
if distributed_model:
first_model = model._distribution_strategy.unwrap(distributed_model)[0]
first_model.reset_metrics()
def get_distributed_model(model, mode):
key = _generate_cache_key(mode)
return model._distributed_model_cache.get(key, None)
def set_distributed_model(model, mode, distributed_model):
key = _generate_cache_key(mode)
model._distributed_model_cache[key] = distributed_model
def get_distributed_function(model, mode):
key = _generate_cache_key(mode)
return model._distributed_function_cache.get(key, None)
def set_distributed_function(model, mode, distributed_function):
key = _generate_cache_key(mode)
model._distributed_function_cache[key] = distributed_function
def _generate_cache_key(mode):
key = hash(mode)
return key
@tf_contextlib.contextmanager
def distributed_scope(strategy, learning_phase):
with strategy.scope(), K.learning_phase_scope(learning_phase):
yield
def call_replica_local_fn(fn, *args, **kwargs):
"""Call a function that uses replica-local variables.
This function correctly handles calling `fn` in a cross-replica
context.
Arguments:
fn: The function to call.
*args: Positional arguments to the `fn`.
**kwargs: Keyword argument to `fn`.
Returns:
The result of calling `fn`.
"""
# TODO(b/132666209): Remove this function when we support assign_*
# for replica-local variables.
strategy = None
if 'strategy' in kwargs:
strategy = kwargs.pop('strategy')
else:
if ds_context.has_strategy():
strategy = ds_context.get_strategy()
# TODO(b/120571621): TPUStrategy does not implement replica-local variables.
is_tpu = is_tpu_strategy(strategy)
if ((not is_tpu) and strategy and ds_context.in_cross_replica_context()):
with strategy.scope():
return strategy.extended.call_for_each_replica(fn, args, kwargs)
return fn(*args, **kwargs)
def is_current_worker_chief():
return dc_context.get_current_worker_context().is_chief
def filter_distributed_callbacks(callbacks_list):
"""Filter Callbacks based on the worker context when running multi-worker.
Arguments:
callbacks_list: A list of `Callback` instances.
Returns:
The list of `Callback` instances that should be run on this worker.
"""
if not multi_worker_util.in_multi_worker_mode():
raise ValueError(
'filter_distributed_callbacks() should only be called when Keras '
'is in multi worker mode.')
callbacks_list = callbacks_list or []
if not [
c for c in callbacks_list if isinstance(c, callbacks.ModelCheckpoint)
]:
# TODO(rchao): Consider providing a ModelCheckpoint here if the user
# fails to (possibly with tempfile directory).
logging.warning('ModelCheckpoint callback is not provided. '
'Workers will need to restart training if any fails.')
if callbacks_list is None or is_current_worker_chief():
return callbacks_list
# Some Callbacks should only run on the chief worker.
return [
callback for callback in callbacks_list if not callback._chief_worker_only
] # pylint: disable=protected-access
def _update_sample_weight_modes(model, mode, sample_weights):
"""Update sample_weight_mode of the distributed model."""
if is_distributing_by_cloning(model):
distributed_model = get_distributed_model(model, mode)
if not distributed_model:
_make_replicated_models_with_cloning(model, mode)
distributed_model = get_distributed_model(model, mode)
distributed_model._recompile_exec_function = any(
[e.sample_weights_mismatch() for e in model._training_endpoints])
if sample_weights:
distributed_models = flatten_per_replica_values(
model._distribution_strategy, distributed_model)
# sample_weights is a tuple of 1 list where the number of elements in the
# list is equal to the number of replicas in sync.
sample_weights = sample_weights[0]
if sample_weights and None not in sample_weights:
for m, sw in zip(distributed_models, sample_weights):
m._update_sample_weight_modes(sample_weights=[sw])
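
To make the batch/step bookkeeping in `get_input_params` above easier to follow, here is a simplified standalone sketch of the same arithmetic for the common case where the strategy uses per-replica batch sizes and partial batches are allowed. It is a toy re-derivation under those assumptions, not the library function, and the sample numbers are made up.

# Toy sketch of the steps/global-batch arithmetic in get_input_params
# (per-replica batch sizes, partial batches allowed). Not TensorFlow code.
import numpy as np

def sketch_steps_and_batch(num_samples, batch_size, num_replicas):
    if batch_size is None:
        global_batch = min(num_samples, 32)       # backward-compatible default
    else:
        global_batch = batch_size * num_replicas  # per-replica -> global batch
    steps = int(np.ceil(num_samples / global_batch))
    return steps, global_batch // num_replicas    # steps and per-replica batch size

print(sketch_steps_and_batch(num_samples=1000, batch_size=16, num_replicas=4))  # (16, 16)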
avg_line_length: 40.607759 | max_line_length: 94 | alphanum_fraction: 0.728458

hexsha: 113003d6f60040230b1a5cf46f7d7ff2d1f6fca0 | size: 226 | ext: py | lang: Python
repo_path (same for stars/issues/forks): SimPEG/EM/__init__.py
repo_name: kimjaed/simpeg | head_hexsha: b8d716f86a4ea07ba3085fabb24c2bc974788040 | licenses: ["MIT"]
max_stars_count: 3 (2020-11-27T03:18:28.000Z to 2022-03-18T01:29:58.000Z) | max_issues_count: null | max_forks_count: 1 (2019-12-27T15:58:49.000Z)
content:
from __future__ import absolute_import
from scipy.constants import mu_0, epsilon_0
from . import TDEM
from . import FDEM
from . import NSEM
from . import Static
from . import Base
from . import Analytics
from . import Utils
| 18.833333 | 43 | 0.787611 |
8ce6c67ce5f5c768169c5ad457994ee099e207c5 | 46,185 | py | Python | tests/functions/test_conditional_join.py | samukweku/pyjanitor | 9a6cadfc7fa6e109e805722703af6247a6707711 | ["MIT"] | null | null | null | tests/functions/test_conditional_join.py | samukweku/pyjanitor | 9a6cadfc7fa6e109e805722703af6247a6707711 | ["MIT"] | 3 | 2020-09-29T04:41:33.000Z | 2021-07-26T07:11:23.000Z | tests/functions/test_conditional_join.py | samukweku/pyjanitor | 9a6cadfc7fa6e109e805722703af6247a6707711 | ["MIT"] | null | null | null |
import numpy as np
import pandas as pd
import pytest
from hypothesis import given, settings
from pandas.testing import assert_frame_equal
from janitor.testing_utils.strategies import (
conditional_df,
conditional_right,
conditional_series,
)
@pytest.mark.xfail(reason="empty object will pass thru")
@given(s=conditional_series())
def test_df_empty(s):
"""Raise ValueError if `df` is empty."""
df = pd.DataFrame([], dtype="int", columns=["A"])
with pytest.raises(ValueError):
df.conditional_join(s, ("A", "non", "=="))
@pytest.mark.xfail(reason="empty object will pass thru")
@given(df=conditional_df())
def test_right_empty(df):
"""Raise ValueError if `right` is empty."""
s = pd.Series([], dtype="int", name="A")
with pytest.raises(ValueError):
df.conditional_join(s, ("A", "non", "=="))
@given(df=conditional_df())
def test_right_df(df):
"""Raise TypeError if `right` is not a Series/DataFrame."""
with pytest.raises(TypeError):
df.conditional_join({"non": [2, 3, 4]}, ("A", "non", "=="))
@given(df=conditional_df(), s=conditional_series())
def test_right_series(df, s):
"""Raise ValueError if `right` is not a named Series."""
with pytest.raises(ValueError):
df.conditional_join(s, ("A", "non", "=="))
@given(df=conditional_df())
def test_df_MultiIndex(df):
"""Raise ValueError if `df` columns is a MultiIndex."""
with pytest.raises(ValueError):
df.columns = [list("ABCDE"), list("FGHIJ")]
df.conditional_join(
pd.Series([2, 3, 4], name="A"), (("A", "F"), "non", "==")
)
@given(df=conditional_df())
def test_right_MultiIndex(df):
"""Raise ValueError if `right` columns is a MultiIndex."""
with pytest.raises(ValueError):
right = df.copy()
right.columns = [list("ABCDE"), list("FGHIJ")]
df.conditional_join(right, (("A", "F"), "non", ">="))
@given(df=conditional_df(), s=conditional_series())
def test_check_conditions_exist(df, s):
"""Raise ValueError if no condition is provided."""
with pytest.raises(ValueError):
s.name = "B"
df.conditional_join(s)
@given(df=conditional_df(), s=conditional_series())
def test_check_condition_type(df, s):
"""Raise TypeError if any condition in conditions is not a tuple."""
with pytest.raises(TypeError):
s.name = "B"
df.conditional_join(s, ("A", "B", ""), ["A", "B"])
@given(df=conditional_df(), s=conditional_series())
def test_check_condition_length(df, s):
"""Raise ValueError if any condition is not length 3."""
with pytest.raises(ValueError):
s.name = "B"
df.conditional_join(s, ("A", "B", "C", "<"))
df.conditional_join(s, ("A", "B", ""), ("A", "B"))
@given(df=conditional_df(), s=conditional_series())
def test_check_left_on_type(df, s):
"""Raise TypeError if left_on is not a string."""
with pytest.raises(TypeError):
s.name = "B"
df.conditional_join(s, (1, "B", "<"))
@given(df=conditional_df(), s=conditional_series())
def test_check_right_on_type(df, s):
"""Raise TypeError if right_on is not a string."""
with pytest.raises(TypeError):
s.name = "B"
df.conditional_join(s, ("B", 1, "<"))
@given(df=conditional_df(), s=conditional_series())
def test_check_op_type(df, s):
"""Raise TypeError if the operator is not a string."""
with pytest.raises(TypeError):
s.name = "B"
df.conditional_join(s, ("B", "B", 1))
@given(df=conditional_df(), s=conditional_series())
def test_check_column_exists_df(df, s):
"""
Raise ValueError if `left_on`
can not be found in `df`.
"""
with pytest.raises(ValueError):
s.name = "B"
df.conditional_join(s, ("C", "B", "<"))
@given(df=conditional_df(), s=conditional_series())
def test_check_column_exists_right(df, s):
"""
Raise ValueError if `right_on`
can not be found in `right`.
"""
with pytest.raises(ValueError):
s.name = "B"
df.conditional_join(s, ("B", "A", ">="))
@given(df=conditional_df(), s=conditional_series())
def test_check_op_correct(df, s):
"""
Raise ValueError if `op` is not any of
`!=`, `<`, `>`, `>=`, `<=`.
"""
with pytest.raises(ValueError):
s.name = "B"
df.conditional_join(s, ("B", "B", "=!"))
@given(df=conditional_df(), s=conditional_series())
def test_check_how_type(df, s):
"""
Raise TypeError if `how` is not a string.
"""
with pytest.raises(TypeError):
s.name = "B"
df.conditional_join(s, ("B", "B", "<"), how=1)
@given(df=conditional_df(), s=conditional_series())
def test_check_how_value(df, s):
"""
Raise ValueError if `how` is not one of
`inner`, `left`, or `right`.
"""
with pytest.raises(ValueError):
s.name = "B"
df.conditional_join(s, ("B", "B", "<"), how="INNER")
@given(df=conditional_df(), right=conditional_right())
def test_dtype_strings_non_equi(df, right):
"""
Raise ValueError if the dtypes are both strings
on a non-equi operator.
"""
with pytest.raises(ValueError):
df.conditional_join(right, ("C", "Strings", "<"))
@given(df=conditional_df(), s=conditional_series())
def test_dtype_not_permitted(df, s):
"""
Raise ValueError if dtype of column in `df`
is not an acceptable type.
"""
df["F"] = pd.Timedelta("1 days")
with pytest.raises(ValueError):
s.name = "A"
df.conditional_join(s, ("F", "A", "<"))
@given(df=conditional_df(), s=conditional_series())
def test_dtype_str(df, s):
"""
Raise ValueError if dtype of column in `df`
does not match the dtype of column from `right`.
"""
with pytest.raises(ValueError):
s.name = "A"
df.conditional_join(s, ("C", "A", "<"))
@given(df=conditional_df(), s=conditional_series())
def test_dtype_category_non_equi(df, s):
"""
Raise ValueError if dtype is category,
and op is non-equi.
"""
with pytest.raises(ValueError):
s.name = "A"
s = s.astype("category")
df["C"] = df["C"].astype("category")
df.conditional_join(s, ("C", "A", "<"))
@given(df=conditional_df(), s=conditional_series())
def test_check_sort_by_appearance_type(df, s):
"""
Raise TypeError if `sort_by_appearance` is not a boolean.
"""
with pytest.raises(TypeError):
s.name = "B"
df.conditional_join(s, ("B", "B", "<"), sort_by_appearance="True")
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_less_than_floats(df, right):
"""Test output for a single condition. "<"."""
left_on, right_on = ["B", "Numeric"]
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.query(f"{left_on} < {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, "<"), how="inner", sort_by_appearance=True
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
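def _toy_less_than_example():
    """Illustrative sketch only; not part of the original test suite.

    A concrete instance of the "<" join asserted above: each left value is
    paired with every strictly greater right value. Assumes the janitor import
    at the top of this module has registered `conditional_join` on DataFrame.
    """
    left = pd.DataFrame({"B": [1.0, 5.0]})
    other = pd.DataFrame({"Numeric": [2.0, 6.0]})
    out = left.conditional_join(other, ("B", "Numeric", "<"), how="inner")
    # Expected pairs: (1.0, 2.0), (1.0, 6.0) and (5.0, 6.0).
    return out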
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_less_than_ints(df, right):
"""Test output for a single condition. "<"."""
left_on, right_on = ["A", "Integers"]
expected = (
df.assign(t=1)
.merge(right.assign(t=1, C="2"), on="t")
.query(f"{left_on} < {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, "<"), how="inner", sort_by_appearance=True
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_less_than_ints_extension_array(df, right):
"""Test output for a single condition. "<"."""
df = df.assign(A=df["A"].astype("Int64"))
right = right.assign(Integers=right["Integers"].astype(pd.Int64Dtype()))
left_on, right_on = ["A", "Integers"]
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.query(f"{left_on} < {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, "<"), how="inner", sort_by_appearance=True
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_less_than_equal(df, right):
"""Test output for a single condition. "<=". DateTimes"""
left_on, right_on = ["E", "Dates"]
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.query(f"{left_on} <= {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, "<="), how="inner", sort_by_appearance=True
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_less_than_date(df, right):
"""Test output for a single condition. "<". Dates"""
left_on, right_on = ["E", "Dates"]
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.query(f"{left_on} < {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, "<"), how="inner", sort_by_appearance=True
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_greater_than_datetime(df, right):
"""Test output for a single condition. ">". Datetimes"""
left_on, right_on = ["E", "Dates"]
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.query(f"{left_on} > {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, ">"), how="inner", sort_by_appearance=True
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_greater_than_ints(df, right):
"""Test output for a single condition. ">="."""
left_on, right_on = ["A", "Integers"]
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.query(f"{left_on} >= {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, ">="), how="inner", sort_by_appearance=True
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_greater_than_floats_floats(df, right):
"""Test output for a single condition. ">"."""
left_on, right_on = ["B", "Numeric"]
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.query(f"{left_on} > {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, ">"), how="inner", sort_by_appearance=True
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_greater_than_ints_extension_array(df, right):
"""Test output for a single condition. ">="."""
left_on, right_on = ["A", "Integers"]
df = df.assign(A=df["A"].astype("Int64"))
right = right.assign(Integers=right["Integers"].astype(pd.Int64Dtype()))
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.query(f"{left_on} > {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, ">"), how="inner", sort_by_appearance=True
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_not_equal_numeric(df, right):
"""Test output for a single condition. "!="."""
left_on, right_on = ["A", "Integers"]
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.dropna(subset=["A", "Integers"])
.query(f"{left_on} != {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, "!="), how="inner", sort_by_appearance=True
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_not_equal_ints_only(df, right):
"""Test output for a single condition. "!="."""
left_on, right_on = ["A", "Integers"]
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.dropna(subset=["A", "Integers"])
.query(f"{left_on} != {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, "!="), how="inner", sort_by_appearance=True
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_not_equal_floats_only(df, right):
"""Test output for a single condition. "!="."""
left_on, right_on = ["B", "Numeric"]
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.dropna(subset=["B", "Numeric"])
.query(f"{left_on} != {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, "!="), how="inner", sort_by_appearance=True
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_not_equal_datetime(df, right):
"""Test output for a single condition. "!="."""
left_on, right_on = ["E", "Dates"]
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.dropna(subset=["E", "Dates"])
.query(f"{left_on} != {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, "!="), how="inner", sort_by_appearance=True
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_equality_string(df, right):
"""Test output for a single condition. "=="."""
left_on, right_on = ["C", "Strings"]
expected = df.dropna(subset=[left_on]).merge(
right.dropna(subset=[right_on]), left_on=left_on, right_on=right_on
)
expected = expected.reset_index(drop=True)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, "=="), how="inner", sort_by_appearance=False
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@pytest.mark.xfail(
reason="""sometimes, categories are coerced to objects;
might be a pandas version issue.
"""
)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_equality_category(df, right):
"""Test output for a single condition. "=="."""
left_on, right_on = ["C", "Strings"]
df = df.assign(C=df["C"].astype("category"))
right = right.assign(Strings=right["Strings"].astype("category"))
expected = df.dropna(subset=[left_on]).merge(
right.dropna(subset=[right_on]), left_on=left_on, right_on=right_on
)
expected = expected.reset_index(drop=True)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, "=="), how="inner", sort_by_appearance=False
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_equality_numeric(df, right):
"""Test output for a single condition. "=="."""
left_on, right_on = ["A", "Integers"]
df = df.assign(A=df["A"].astype("Int64"))
right = right.assign(Integers=right["Integers"].astype(pd.Int64Dtype()))
df.loc[0, "A"] = pd.NA
right.loc[0, "Integers"] = pd.NA
expected = df.dropna(subset=[left_on]).merge(
right.dropna(subset=[right_on]), left_on=left_on, right_on=right_on
)
expected = expected.reset_index(drop=True)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, "=="), how="inner", sort_by_appearance=False
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_equality_datetime(df, right):
"""Test output for a single condition. "=="."""
left_on, right_on = ["E", "Dates"]
expected = df.dropna(subset=[left_on]).merge(
right.dropna(subset=[right_on]), left_on=left_on, right_on=right_on
)
expected = expected.reset_index(drop=True)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, "=="), how="inner", sort_by_appearance=False
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_how_left(df, right):
"""Test output when `how==left`. "<="."""
left_on, right_on = ["A", "Integers"]
expected = (
df.assign(t=1, index=np.arange(len(df)))
.merge(right.assign(t=1), on="t")
.query(f"{left_on} <= {right_on}")
)
expected = expected.set_index("index")
expected.index.name = None
expected = df.join(
expected.filter(right.columns), how="left", sort=False
).reset_index(drop=True)
actual = df.conditional_join(
right, (left_on, right_on, "<="), how="left", sort_by_appearance=True
)
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_how_right(df, right):
"""Test output when `how==right`. ">"."""
left_on, right_on = ["E", "Dates"]
expected = (
df.assign(t=1)
.merge(right.assign(t=1, index=np.arange(len(right))), on="t")
.query(f"{left_on} > {right_on}")
)
expected = expected.set_index("index")
expected.index.name = None
expected = (
expected.filter(df.columns)
.join(right, how="right", sort=False)
.reset_index(drop=True)
)
actual = df.conditional_join(
right, (left_on, right_on, ">"), how="right", sort_by_appearance=True
)
assert_frame_equal(expected, actual)
@settings(deadline=None)
@given(df=conditional_df(), right=conditional_right())
def test_dual_conditions_gt_and_lt_dates(df, right):
"""Test output for interval conditions."""
middle, left_on, right_on = ("E", "Dates", "Dates_Right")
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.query(f"{left_on} < {middle} < {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, middle, right_on])
actual = df.conditional_join(
right,
(middle, left_on, ">"),
(middle, right_on, "<"),
how="inner",
sort_by_appearance=True,
)
actual = actual.filter([left_on, middle, right_on])
assert_frame_equal(expected, actual)
@settings(deadline=None)
@given(df=conditional_df(), right=conditional_right())
def test_dual_conditions_ge_and_le_dates(df, right):
"""Test output for interval conditions."""
middle, left_on, right_on = ("E", "Dates", "Dates_Right")
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.query(f"{left_on} <= {middle} <= {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, middle, right_on])
actual = df.conditional_join(
right,
(middle, left_on, ">="),
(middle, right_on, "<="),
how="inner",
sort_by_appearance=True,
)
actual = actual.filter([left_on, middle, right_on])
assert_frame_equal(expected, actual)
@settings(deadline=None)
@given(df=conditional_df(), right=conditional_right())
def test_dual_conditions_le_and_ge_dates(df, right):
"""Test output for interval conditions."""
middle, left_on, right_on = ("E", "Dates", "Dates_Right")
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.query(f"{left_on} <= {middle} <= {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, middle, right_on])
actual = df.conditional_join(
right,
(middle, right_on, "<="),
(middle, left_on, ">="),
how="inner",
sort_by_appearance=True,
)
actual = actual.filter([left_on, middle, right_on])
assert_frame_equal(expected, actual)
@settings(deadline=None)
@given(df=conditional_df(), right=conditional_right())
def test_dual_conditions_ge_and_le_numbers(df, right):
"""Test output for interval conditions."""
middle, left_on, right_on = ("B", "Numeric", "Floats")
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.query(f"{left_on} <= {middle} <= {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, middle, right_on])
actual = df.conditional_join(
right,
(middle, left_on, ">="),
(middle, right_on, "<="),
how="inner",
sort_by_appearance=True,
)
actual = actual.filter([left_on, middle, right_on])
assert_frame_equal(expected, actual)
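def _toy_between_example():
    """Illustrative sketch only; not part of the original test suite.

    The pair of conditions above emulates a "between" join: a row pair is kept
    when Numeric <= B <= Floats.
    """
    events = pd.DataFrame({"B": [3.0]})
    windows = pd.DataFrame({"Numeric": [1.0, 6.0], "Floats": [5.0, 9.0]})
    out = events.conditional_join(
        windows, ("B", "Numeric", ">="), ("B", "Floats", "<="), how="inner"
    )
    # B == 3.0 falls inside [1.0, 5.0] only, so exactly one row comes back.
    return out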
@settings(deadline=None)
@given(df=conditional_df(), right=conditional_right())
def test_dual_conditions_le_and_ge_numbers(df, right):
"""Test output for interval conditions."""
middle, left_on, right_on = ("B", "Numeric", "Floats")
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.query(f"{left_on} <= {middle} <= {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, middle, right_on])
actual = df.conditional_join(
right,
(middle, right_on, "<="),
(middle, left_on, ">="),
how="inner",
sort_by_appearance=True,
)
actual = actual.filter([left_on, middle, right_on])
assert_frame_equal(expected, actual)
@settings(deadline=None)
@given(df=conditional_df(), right=conditional_right())
def test_dual_conditions_gt_and_lt_numbers(df, right):
"""Test output for interval conditions."""
middle, left_on, right_on = ("B", "Numeric", "Floats")
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.query(f"{left_on} < {middle} < {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, middle, right_on])
actual = df.conditional_join(
right,
(middle, left_on, ">"),
(middle, right_on, "<"),
how="inner",
sort_by_appearance=True,
)
actual = actual.filter([left_on, middle, right_on])
assert_frame_equal(expected, actual)
@settings(deadline=None)
@given(df=conditional_df(), right=conditional_right())
def test_dual_conditions_gt_and_lt_numbers_(df, right):
"""
Test output for multiple conditions.
"""
first, second, third = ("Numeric", "Floats", "B")
expected = (
right.assign(t=1)
.merge(df.assign(t=1), on="t")
.query(f"{first} > {third} and {second} < {third}")
.reset_index(drop=True)
)
expected = expected.filter([first, second, third])
actual = right.conditional_join(
df,
(first, third, ">"),
(second, third, "<"),
how="inner",
sort_by_appearance=True,
)
actual = actual.filter([first, second, third])
assert_frame_equal(expected, actual)
@settings(deadline=None)
@given(df=conditional_df(), right=conditional_right())
def test_dual_conditions_gt_and_lt_numbers_left_join(df, right):
"""
Test output for multiple conditions, and how is `left`.
"""
first, second, third = ("Numeric", "Floats", "B")
right = right.assign(t=1, check=range(len(right)))
df = df.assign(t=1)
expected = right.merge(df, on="t").query(
f"{first} > {third} and {second} < {third}"
)
drop = right.columns.difference(["check"])
expected = right.merge(
expected.drop(columns=[*drop]), on="check", how="left", sort=False
)
expected = expected.filter([first, second, third])
actual = right.conditional_join(
df,
(first, third, ">"),
(second, third, "<"),
how="left",
sort_by_appearance=True,
)
actual = actual.droplevel(0, 1)
actual = actual.loc[:, [first, second, third]]
assert_frame_equal(expected, actual)
@settings(deadline=None)
@given(df=conditional_df(), right=conditional_right())
def test_dual_conditions_gt_and_lt_numbers_right_join(df, right):
"""
Test output for multiple conditions, and how is `right`.
"""
first, second, third = ("Numeric", "Floats", "B")
df = df.assign(t=1, check=range(len(df)))
right = right.assign(t=1)
expected = right.merge(df, on="t").query(
f"{first} > {third} and {second} < {third}"
)
drop = df.columns.difference(["check"])
expected = expected.drop(columns=[*drop]).merge(
df, on="check", how="right", sort=False
)
expected = expected.filter([first, second, third])
actual = right.conditional_join(
df,
(first, third, ">"),
(second, third, "<"),
how="right",
sort_by_appearance=True,
)
actual = actual.droplevel(0, 1)
actual = actual.loc[:, [first, second, third]]
assert_frame_equal(expected, actual)
@settings(deadline=None)
@given(df=conditional_df(), right=conditional_right())
def test_dual_ne(df, right):
"""
Test output for multiple conditions. `!=`
"""
filters = ["A", "Integers", "B", "Numeric"]
df = df.assign(A=df["A"].astype("Int64"))
right = right.assign(Integers=right["Integers"].astype(pd.Int64Dtype()))
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.query("A != Integers and B != Numeric")
.reset_index(drop=True)
)
expected = expected.filter(filters)
actual = df.conditional_join(
right,
("A", "Integers", "!="),
("B", "Numeric", "!="),
how="inner",
sort_by_appearance=True,
)
actual = actual.filter(filters)
assert_frame_equal(expected, actual)
@settings(deadline=None)
@given(df=conditional_df(), right=conditional_right())
def test_dual_ne_extension(df, right):
"""
Test output for multiple conditions. `!=`
"""
filters = ["A", "Integers", "B", "Numeric"]
df = df.assign(A=df["A"].astype("Int64"))
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.query("A != Integers and B != Numeric")
.reset_index(drop=True)
)
expected = expected.filter(filters)
actual = df.conditional_join(
right,
("A", "Integers", "!="),
("B", "Numeric", "!="),
how="inner",
sort_by_appearance=True,
)
actual = actual.filter(filters)
assert_frame_equal(expected, actual)
@settings(deadline=None)
@given(df=conditional_df(), right=conditional_right())
def test_dual_ne_extension_right(df, right):
"""
Test output for multiple conditions. `!=`
"""
filters = ["A", "Integers", "B", "Numeric"]
right = right.assign(Integers=right["Integers"].astype(pd.Int64Dtype()))
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.query("A != Integers and B != Numeric")
.reset_index(drop=True)
)
expected = expected.filter(filters)
actual = df.conditional_join(
right,
("A", "Integers", "!="),
("B", "Numeric", "!="),
how="inner",
sort_by_appearance=True,
)
actual = actual.filter(filters)
assert_frame_equal(expected, actual)
@settings(deadline=None)
@given(df=conditional_df(), right=conditional_right())
def test_dual_ne_dates(df, right):
"""
Test output for multiple conditions. `!=`
"""
filters = ["A", "Integers", "E", "Dates"]
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.query("A != Integers and E != Dates")
.reset_index(drop=True)
)
expected = expected.filter(filters)
actual = df.conditional_join(
right,
("A", "Integers", "!="),
("E", "Dates", "!="),
how="inner",
sort_by_appearance=True,
)
actual = actual.filter(filters)
assert_frame_equal(expected, actual)
@settings(deadline=None)
@given(df=conditional_df(), right=conditional_right())
def test_multiple_ne_dates(df, right):
"""
Test output for multiple conditions. `!=`
"""
filters = ["A", "Integers", "E", "Dates", "B", "Numeric"]
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.query("A != Integers and E != Dates and B != Numeric")
.reset_index(drop=True)
)
expected = expected.filter(filters)
actual = df.conditional_join(
right,
("A", "Integers", "!="),
("E", "Dates", "!="),
("B", "Numeric", "!="),
how="inner",
sort_by_appearance=True,
)
actual = actual.filter(filters)
assert_frame_equal(expected, actual)
@settings(deadline=None)
@given(df=conditional_df(), right=conditional_right())
def test_dual_conditions_eq_and_ne(df, right):
"""Test output for equal and not equal conditions."""
A, B, C, D = ("B", "Numeric", "E", "Dates")
expected = (
df.merge(right, left_on=A, right_on=B)
.dropna(subset=[A, B])
.query(f"{C} != {D}")
.reset_index(drop=True)
)
expected = expected.filter([A, B, C, D])
actual = df.conditional_join(
right,
(A, B, "=="),
(C, D, "!="),
how="inner",
sort_by_appearance=False,
)
actual = actual.filter([A, B, C, D])
assert_frame_equal(expected, actual)
@settings(deadline=None)
@given(df=conditional_df(), right=conditional_right())
def test_dual_conditions_ne_and_eq(df, right):
"""Test output for equal and not equal conditions."""
A, B, C, D = ("A", "Integers", "E", "Dates")
expected = (
df.merge(right, left_on=C, right_on=D)
.dropna(subset=[C, D])
.query(f"{A} != {B}")
.reset_index(drop=True)
)
expected = expected.filter([A, B, C, D])
actual = df.conditional_join(
right,
(A, B, "!="),
(C, D, "=="),
how="inner",
sort_by_appearance=False,
)
actual = actual.filter([A, B, C, D])
assert_frame_equal(expected, actual)
@settings(deadline=None)
@given(df=conditional_df(), right=conditional_right())
def test_gt_lt_ne_conditions(df, right):
"""
Test output for multiple conditions.
"""
filters = ["A", "Integers", "B", "Numeric", "E", "Dates"]
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.query("A > Integers and B < Numeric and E != Dates")
.reset_index(drop=True)
)
expected = expected.filter(filters)
actual = df.conditional_join(
right,
("A", "Integers", ">"),
("B", "Numeric", "<"),
("E", "Dates", "!="),
how="inner",
sort_by_appearance=True,
)
actual = actual.filter(filters)
assert_frame_equal(expected, actual)
@settings(deadline=None)
@given(df=conditional_df(), right=conditional_right())
def test_gt_lt_ne_start(df, right):
"""
Test output for multiple conditions.
"""
filters = ["A", "Integers", "B", "Numeric", "E", "Dates"]
expected = (
df.assign(t=1)
.merge(right.assign(t=1, C="C"), on="t")
.query("A > Integers and B < Numeric and E != Dates")
.reset_index(drop=True)
)
expected = expected.filter(filters)
actual = df.conditional_join(
right,
("E", "Dates", "!="),
("A", "Integers", ">"),
("B", "Numeric", "<"),
how="inner",
sort_by_appearance=True,
)
actual = actual.filter(filters)
assert_frame_equal(expected, actual)
@settings(deadline=None)
@given(df=conditional_df(), right=conditional_right())
def test_ge_le_ne_extension_array(df, right):
"""
Test output for multiple conditions.
"""
filters = ["A", "Integers", "B", "Numeric", "E", "Dates"]
df = df.assign(A=df["A"].astype("Int64"))
right = right.assign(Integers=right["Integers"].astype(pd.Int64Dtype()))
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.query("A != Integers and B < Numeric and E >= Dates")
.reset_index(drop=True)
)
expected = expected.filter(filters)
actual = df.conditional_join(
right,
("E", "Dates", ">="),
("A", "Integers", "!="),
("B", "Numeric", "<"),
how="inner",
sort_by_appearance=True,
)
actual = actual.filter(filters)
assert_frame_equal(expected, actual)
@settings(deadline=None)
@given(df=conditional_df(), right=conditional_right())
def test_ge_lt_ne_extension(df, right):
"""
Test output for multiple conditions.
"""
filters = ["A", "Integers", "B", "Numeric", "E", "Dates"]
df = df.assign(A=df["A"].astype("Int64"))
right = right.assign(Integers=right["Integers"].astype(pd.Int64Dtype()))
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.query(
"A < Integers and B != Numeric and E >= Dates and E != Dates_Right"
)
.reset_index(drop=True)
)
expected = expected.filter(filters)
actual = df.conditional_join(
right,
("E", "Dates", ">="),
("B", "Numeric", "!="),
("A", "Integers", "<"),
("E", "Dates_Right", "!="),
how="inner",
sort_by_appearance=True,
)
actual = actual.filter(filters)
assert_frame_equal(expected, actual)
@settings(deadline=None)
@given(df=conditional_df(), right=conditional_right())
def test_eq_ge_and_le_numbers(df, right):
"""Test output for multiple conditions."""
l_eq, l_ge, l_le = ["B", "A", "E"]
r_eq, r_ge, r_le = ["Floats", "Integers", "Dates"]
columns = ["B", "A", "E", "Floats", "Integers", "Dates"]
expected = (
df.merge(right, left_on=l_eq, right_on=r_eq, how="inner", sort=False)
.dropna(subset=[l_eq, r_eq])
.query(f"{l_ge} >= {r_ge} and {l_le} <= {r_le}")
.reset_index(drop=True)
)
expected = expected.filter(columns)
actual = df.conditional_join(
right,
(l_eq, r_eq, "=="),
(l_ge, r_ge, ">="),
(l_le, r_le, "<="),
how="inner",
sort_by_appearance=False,
)
actual = actual.filter(columns)
assert_frame_equal(expected, actual)
@settings(deadline=None)
@given(df=conditional_df(), right=conditional_right())
def test_dual_ge_and_le_diff_numbers(df, right):
"""Test output for multiple conditions."""
l_ge, l_le = ["A", "E"]
r_ge, r_le = ["Integers", "Dates"]
columns = ["B", "A", "E", "Floats", "Integers", "Dates"]
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t", how="inner", sort=False)
.query(f"{l_ge} > {r_ge} and {l_le} <= {r_le}")
.reset_index(drop=True)
)
expected = expected.filter(columns)
actual = df.conditional_join(
right,
(l_le, r_le, "<="),
(l_ge, r_ge, ">"),
how="inner",
sort_by_appearance=True,
)
actual = actual.filter(columns)
assert_frame_equal(expected, actual)
@settings(deadline=None)
@given(df=conditional_df(), right=conditional_right())
def test_ge_lt_ne_extension_variant(df, right):
"""
Test output for multiple conditions.
"""
filters = ["A", "Integers", "B", "Numeric", "E", "Dates"]
df = df.assign(A=df["A"].astype("Int64"))
right = right.assign(Integers=right["Integers"].astype(pd.Int64Dtype()))
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.query(
"A != Integers and B < Numeric and E >= Dates and E != Dates_Right"
)
.reset_index(drop=True)
)
expected = expected.filter(filters)
actual = df.conditional_join(
right,
("E", "Dates", ">="),
("B", "Numeric", "<"),
("A", "Integers", "!="),
("E", "Dates_Right", "!="),
how="inner",
sort_by_appearance=True,
)
actual = actual.filter(filters)
assert_frame_equal(expected, actual)
@settings(deadline=None)
@given(df=conditional_df(), right=conditional_right())
def test_ge_eq_and_le_numbers_variant(df, right):
"""Test output for multiple conditions."""
l_eq, l_ge, l_le = ["B", "A", "E"]
r_eq, r_ge, r_le = ["Floats", "Integers", "Dates"]
columns = ["B", "A", "E", "Floats", "Integers", "Dates"]
expected = (
df.merge(right, left_on=l_eq, right_on=r_eq, how="inner", sort=False)
.dropna(subset=[l_eq, r_eq])
.query(f"{l_ge} >= {r_ge} and {l_le} <= {r_le}")
.reset_index(drop=True)
)
expected = expected.filter(columns)
actual = df.conditional_join(
right,
(l_ge, r_ge, ">="),
(l_le, r_le, "<="),
(l_eq, r_eq, "=="),
how="inner",
sort_by_appearance=True,
)
# actual = actual.droplevel(0, 1)
actual = actual.filter(columns)
assert_frame_equal(expected, actual)
@settings(deadline=None)
@given(df=conditional_df(), right=conditional_right())
def test_multiple_eqs_variant(df, right):
"""Test output for multiple conditions."""
columns = ["B", "A", "E", "Floats", "Integers", "Dates"]
expected = (
df.merge(
right,
left_on=["B", "A"],
right_on=["Floats", "Integers"],
how="inner",
sort=False,
)
.dropna(subset=["B", "A", "Floats", "Integers"])
.query("E != Dates")
.reset_index(drop=True)
)
expected = expected.filter(columns)
actual = df.conditional_join(
right,
("E", "Dates", "!="),
("B", "Floats", "=="),
("A", "Integers", "=="),
how="inner",
sort_by_appearance=False,
)
actual = actual.filter(columns)
assert_frame_equal(expected, actual)
@settings(deadline=None)
@given(df=conditional_df(), right=conditional_right())
def test_dual_ge_and_le_range_numbers(df, right):
"""Test output for multiple conditions."""
l_ge, l_le = ["A", "E"]
r_ge, r_le = ["Integers", "Dates"]
columns = ["B", "A", "E", "Floats", "Integers", "Dates"]
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t", how="inner", sort=False)
.query(f"{l_ge} >= {r_ge} and {l_le} < {r_le}")
.reset_index(drop=True)
)
expected = expected.filter(columns)
actual = df.conditional_join(
right,
(l_le, r_le, "<"),
(l_ge, r_ge, ">="),
how="inner",
sort_by_appearance=True,
)
actual = actual.filter(columns)
assert_frame_equal(expected, actual)
@settings(deadline=None)
@given(df=conditional_df(), right=conditional_right())
def test_ge_eq_and_le_numbers(df, right):
"""Test output for multiple conditions."""
l_eq, l_ge, l_le = ["B", "A", "E"]
r_eq, r_ge, r_le = ["Floats", "Integers", "Dates"]
columns = ["B", "A", "E", "Floats", "Integers", "Dates"]
expected = (
df.merge(right, left_on=l_eq, right_on=r_eq, how="inner", sort=False)
.dropna(subset=[l_eq, r_eq])
.query(f"{l_ge} >= {r_ge} and {l_le} <= {r_le}")
.reset_index(drop=True)
)
expected = expected.filter(columns)
actual = df.conditional_join(
right,
(l_ge, r_ge, ">="),
(l_le, r_le, "<="),
(l_eq, r_eq, "=="),
how="inner",
sort_by_appearance=True,
)
# actual = actual.droplevel(0, 1)
actual = actual.filter(columns)
assert_frame_equal(expected, actual)
@settings(deadline=None)
@given(df=conditional_df(), right=conditional_right())
def test_multiple_non_equi(df, right):
"""Test output for multiple conditions."""
l_eq, l_ge, l_le = ["B", "A", "E"]
r_eq, r_ge, r_le = ["Floats", "Integers", "Dates"]
columns = ["B", "A", "E", "Floats", "Integers", "Dates"]
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t", how="inner", sort=False)
.query(f"{l_ge} >= {r_ge} and {l_le} <= {r_le} and {l_eq} < {r_eq}")
.reset_index(drop=True)
)
expected = expected.filter(columns)
actual = df.conditional_join(
right,
(l_ge, r_ge, ">="),
(l_le, r_le, "<="),
(l_eq, r_eq, "<"),
how="inner",
sort_by_appearance=True,
)
# actual = actual.droplevel(0, 1)
actual = actual.filter(columns)
assert_frame_equal(expected, actual)
@settings(deadline=None)
@given(df=conditional_df(), right=conditional_right())
def test_multiple_non_equii(df, right):
"""Test output for multiple conditions."""
l_eq, l_ge, l_le, ex1 = ["B", "A", "E", "B"]
r_eq, r_ge, r_le, ex2 = ["Floats", "Integers", "Dates", "Numeric"]
columns = ["B", "A", "E", "Floats", "Integers", "Dates", "Numeric"]
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t", how="inner", sort=False)
.query(
f"{l_ge} >= {r_ge} and {l_le} <= {r_le} and {l_eq} < {r_eq} and {ex1} > {ex2}" # noqa: E501
)
.reset_index(drop=True)
)
expected = expected.filter(columns)
actual = df.conditional_join(
right,
(l_ge, r_ge, ">="),
(l_le, r_le, "<="),
(l_eq, r_eq, "<"),
(ex1, ex2, ">"),
how="inner",
sort_by_appearance=True,
)
# actual = actual.droplevel(0, 1)
actual = actual.filter(columns)
assert_frame_equal(expected, actual)
@settings(deadline=None)
@given(df=conditional_df(), right=conditional_right())
def test_multiple_eqs(df, right):
"""Test output for multiple conditions."""
columns = ["B", "A", "E", "Floats", "Integers", "Dates"]
expected = (
df.merge(
right,
left_on=["B", "A"],
right_on=["Floats", "Integers"],
how="inner",
sort=False,
)
.dropna(subset=["B", "A", "Floats", "Integers"])
.query("E != Dates")
.reset_index(drop=True)
)
expected = expected.filter(columns)
actual = df.conditional_join(
right,
("E", "Dates", "!="),
("B", "Floats", "=="),
("A", "Integers", "=="),
how="inner",
sort_by_appearance=False,
)
actual = actual.filter(columns)
assert_frame_equal(expected, actual)
@settings(deadline=None)
@given(df=conditional_df(), right=conditional_right())
def test_multiple_eqs_extension_array(df, right):
"""Test output for multiple conditions."""
columns = ["B", "A", "E", "Floats", "Integers", "Dates"]
df = df.assign(A=df["A"].astype("Int64"))
right = right.assign(Integers=right["Integers"].astype(pd.Int64Dtype()))
expected = (
df.merge(
right,
left_on=["B", "E"],
right_on=["Floats", "Dates"],
how="inner",
sort=False,
)
.dropna(subset=["B", "E", "Floats", "Integers"])
.query("A != Integers")
.reset_index(drop=True)
)
expected = expected.filter(columns)
actual = df.conditional_join(
right,
("E", "Dates", "=="),
("B", "Floats", "=="),
("A", "Integers", "!="),
how="inner",
sort_by_appearance=False,
)
actual = actual.filter(columns)
assert_frame_equal(expected, actual)
@settings(deadline=None)
@given(df=conditional_df(), right=conditional_right())
def test_multiple_eqs_only(df, right):
"""Test output for multiple conditions."""
columns = ["B", "A", "E", "Floats", "Integers", "Dates"]
df = df.assign(A=df["A"].astype("Int64"), C=df["C"].astype("string"))
right = right.assign(
Integers=right["Integers"].astype(pd.Int64Dtype()),
Strings=right["Strings"].astype("string"),
)
df.loc[0, "A"] = pd.NA
right.loc[0, "Integers"] = pd.NA
expected = (
df.merge(
right,
left_on=["B", "A", "E"],
right_on=["Floats", "Integers", "Dates"],
how="inner",
sort=False,
)
.dropna(subset=columns)
.reset_index(drop=True)
)
expected = expected.filter(columns)
actual = df.conditional_join(
right,
("E", "Dates", "=="),
("B", "Floats", "=="),
("A", "Integers", "=="),
how="inner",
sort_by_appearance=False,
)
actual = actual.filter(columns)
assert_frame_equal(expected, actual)
| 29.81601 | 104 | 0.598744 |
7755937efb9ed31f4b4e28eecd0bb9a221a20dc0 | 2,913 | py | Python | elementary_datastructures/linked_list/Sentinel_LinkedList.py | apri-me/datastructure-journey | 315cbce29a263a899a966fd6041293c08db7a8d0 | ["MIT"] | 3 | 2021-11-01T22:53:03.000Z | 2022-01-22T11:49:56.000Z | elementary_datastructures/linked_list/Sentinel_LinkedList.py | MobinNesari81/datastructure-journey | d6ad2e710fe9238fcf75f8838bab6e813e408789 | ["MIT"] | null | null | null | elementary_datastructures/linked_list/Sentinel_LinkedList.py | MobinNesari81/datastructure-journey | d6ad2e710fe9238fcf75f8838bab6e813e408789 | ["MIT"] | 4 | 2021-11-28T07:44:20.000Z | 2021-12-12T08:49:17.000Z |
'''
A doubly linked list implemented with the sentinel-element method.
The sentinel is a node called nil. As a safety measure, nil.val is None, so an
exception is raised if any part of the program accidentally compares it with a
real node's value.
'''
class Node: # A node holds an arbitrary value plus references to the next and previous nodes.
def __init__(self, value) -> None: # Constructor
self.val = value
self.next = None
self.prev = None
class Linked_List: # Doubly linked list built around the nil sentinel node.
    def __init__(self) -> None: # Constructor; creates the nil sentinel pointing to itself.
self.nil = Node(None)
self.nil.next = self.nil
self.nil.prev = self.nil
    def get_head(self) -> Node: # Returns the head (first real node) of the linked list.
return self.nil.next
    def get_tail(self) -> Node: # Returns the tail (last real node) of the linked list.
return self.nil.prev
    def append(self, key) -> None: # Appends a new node with the given key to the end of the linked list.
tmp = Node(key)
tail = self.get_tail()
tail.next = tmp
tmp.prev = tail
tmp.next = self.nil
self.nil.prev = tmp
    def prepend(self, key) -> None: # Inserts a new node with the given key at the head of the linked list.
        tmp = Node(key)
        head = self.get_head()
        head.prev = tmp
        tmp.next = head
        tmp.prev = self.nil  # link back to the sentinel so backward traversal keeps working
        self.nil.next = tmp
    def printer(self) -> None: # Prints each node's value, one per line.
head = self.get_head()
while head != self.nil:
print(head.val)
head = head.next
def get_all_elements(self) -> list: # get_all_elements() function will return a list of nodes' values.
answer = []
head = self.get_head()
while head != self.nil:
answer.append(head.val)
head = head.next
return answer
    def search(self, key) -> Node: # Returns the first node whose value equals key, or None if no such node exists.
head = self.get_head()
while head != self.nil:
if head.val == key:
return head
head = head.next
return None
    def delete_node(self, key) -> None: # Uses search() to find the node with the given key and unlinks it from the list.
target = self.search(key)
if target:
target.prev.next = target.next
target.next.prev = target.prev
    def __str__(self) -> str: # Returns all element values joined into a single space-separated string.
answer = self.get_all_elements()
string = ""
for i in answer:
string += str(i) + ' '
return string[:len(string) - 1]
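# Example usage (illustrative; not part of the original module):
if __name__ == "__main__":
    lst = Linked_List()
    lst.append(2)
    lst.append(3)
    lst.prepend(1)                    # list is now 1 -> 2 -> 3
    print(lst)                        # prints "1 2 3"
    lst.delete_node(2)
    print(lst.get_all_elements())     # prints [1, 3]
    print(lst.search(42))             # prints None; 42 is not in the list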
| 39.90411 | 159 | 0.611397 |
e968d90f2cae63e521cd8445a62d3b7ccd981fa1 | 14,739 | py | Python | examples/classify/semi_supervised/img/libml/augment/randaugment/augment_ops.py | parmarsuraj99/objax | 111cd78960f5812885505b5ec02552b98a789973 | ["Apache-2.0"] | 715 | 2020-08-20T07:23:03.000Z | 2022-03-31T18:17:54.000Z | examples/classify/semi_supervised/img/libml/augment/randaugment/augment_ops.py | parmarsuraj99/objax | 111cd78960f5812885505b5ec02552b98a789973 | ["Apache-2.0"] | 145 | 2020-08-21T07:42:36.000Z | 2022-03-18T16:51:37.000Z | examples/classify/semi_supervised/img/libml/augment/randaugment/augment_ops.py | parmarsuraj99/objax | 111cd78960f5812885505b5ec02552b98a789973 | ["Apache-2.0"] | 59 | 2020-08-20T07:30:53.000Z | 2022-01-05T23:00:06.000Z |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Various ops for augmentation."""
import math
import tensorflow as tf
import tensorflow_addons as tfa
# Default replace value
REPLACE_VALUE = 128
def blend(image1, image2, factor):
"""Blend image1 and image2 using 'factor'.
A value of factor 0.0 means only image1 is used.
A value of 1.0 means only image2 is used. A value between 0.0 and
1.0 means we linearly interpolate the pixel values between the two
images. A value greater than 1.0 "extrapolates" the difference
between the two pixel values, and we clip the results to values
between 0 and 255.
Args:
image1: An image Tensor.
image2: An image Tensor.
      factor: A floating point blending factor (0.0 keeps image1, 1.0 gives image2).
Returns:
A blended image Tensor.
"""
image1 = tf.cast(image1, tf.float32)
image2 = tf.cast(image2, tf.float32)
return tf.saturate_cast(image1 + factor * (image2 - image1), tf.uint8)
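def _blend_demo():
    """Illustrative sketch only; not in the original file.

    A factor of 0.5 lands halfway between the two images, while a factor of 2.0
    extrapolates past image2 and is clipped back into uint8 range by
    saturate_cast.
    """
    black = tf.zeros([1, 1, 3], tf.uint8)
    gray = tf.fill([1, 1, 3], tf.constant(200, tf.uint8))
    halfway = blend(black, gray, 0.5)       # every channel becomes 100
    extrapolated = blend(black, gray, 2.0)  # 0 + 2 * 200 = 400, clipped to 255
    return halfway, extrapolated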
def wrap(image):
"""Returns 'image' with an extra channel set to all 1s."""
shape = tf.shape(image)
extended_channel = tf.ones([shape[0], shape[1], 1], image.dtype)
extended = tf.concat([image, extended_channel], 2)
return extended
def unwrap(image):
"""Unwraps an image produced by wrap.
Where there is a 0 in the last channel for every spatial position,
the rest of the three channels in that spatial dimension are grayed
(set to 128). Operations like translate and shear on a wrapped
Tensor will leave 0s in empty locations. Some transformations look
at the intensity of values to do preprocessing, and we want these
empty pixels to assume the 'average' value, rather than pure black.
Args:
image: A 3D Image Tensor with 4 channels.
Returns:
image: A 3D image Tensor with 3 channels.
"""
image_shape = tf.shape(image)
# Flatten the spatial dimensions.
flattened_image = tf.reshape(image, [-1, image_shape[2]])
# Find all pixels where the last channel is zero.
alpha_channel = tf.expand_dims(flattened_image[:, image_shape[2] - 1], 1)
replace = tf.constant([REPLACE_VALUE, REPLACE_VALUE, REPLACE_VALUE, 1],
image.dtype)
# Where they are zero, fill them in with 'replace'.
flattened_image = tf.where(
tf.equal(alpha_channel, 0),
tf.ones_like(flattened_image, dtype=image.dtype) * replace,
flattened_image)
image = tf.reshape(flattened_image, image_shape)
image = tf.slice(
image,
[0, 0, 0],
[image_shape[0], image_shape[1], image_shape[2] - 1])
return image
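def _wrap_unwrap_demo():
    """Illustrative sketch only; not in the original file.

    wrap() appends an all-ones fourth channel; a geometric transform leaves
    zeros in that channel wherever it created empty pixels, and unwrap() fills
    those pixels with REPLACE_VALUE before dropping the extra channel again.
    """
    image = tf.zeros([2, 2, 3], tf.uint8)
    wrapped = wrap(image)       # shape [2, 2, 4], last channel all ones
    restored = unwrap(wrapped)  # back to shape [2, 2, 3]
    return wrapped, restored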
def solarize(image, threshold=128):
# For each pixel in the image, select the pixel
# if the value is less than the threshold.
# Otherwise, subtract 255 from the pixel.
threshold = tf.saturate_cast(threshold, image.dtype)
return tf.where(image < threshold, image, 255 - image)
def solarize_add(image, addition=0, threshold=128):
# For each pixel in the image less than threshold
# we add 'addition' amount to it and then clip the
# pixel value to be between 0 and 255. The value
# of 'addition' is between -128 and 128
threshold = tf.saturate_cast(threshold, image.dtype)
added_im = tf.cast(image, tf.int32) + tf.cast(addition, tf.int32)
added_im = tf.saturate_cast(added_im, tf.uint8)
return tf.where(image < threshold, added_im, image)
def invert(image):
"""Inverts the image pixels."""
return 255 - tf.convert_to_tensor(image)
def invert_blend(image, factor):
"""Implements blend of invert with original image."""
return blend(invert(image), image, factor)
def color(image, factor):
"""Equivalent of PIL Color."""
degenerate = tf.image.grayscale_to_rgb(tf.image.rgb_to_grayscale(image))
return blend(degenerate, image, factor)
def contrast(image, factor):
"""Equivalent of PIL Contrast."""
grayscale_im = tf.image.rgb_to_grayscale(image)
mean = tf.reduce_mean(tf.cast(grayscale_im, tf.float32))
mean = tf.saturate_cast(mean + 0.5, tf.uint8)
degenerate = tf.ones_like(grayscale_im, dtype=tf.uint8) * mean
degenerate = tf.image.grayscale_to_rgb(degenerate)
return blend(degenerate, image, factor)
def brightness(image, factor):
"""Equivalent of PIL Brightness."""
degenerate = tf.zeros_like(image)
return blend(degenerate, image, factor)
def posterize(image, bits):
"""Equivalent of PIL Posterize."""
shift = tf.cast(8 - bits, image.dtype)
return tf.bitwise.left_shift(tf.bitwise.right_shift(image, shift), shift)
def rotate(image, degrees):
"""Equivalent of PIL Rotation."""
# Convert from degrees to radians
degrees_to_radians = math.pi / 180.0
radians = degrees * degrees_to_radians
# In practice, we should randomize the rotation degrees by flipping
# it negatively half the time, but that's done on 'degrees' outside
# of the function.
image = tfa.image.transform_ops.rotate(wrap(image), radians)
return unwrap(image)
def translate_x(image, pixels):
"""Equivalent of PIL Translate in X dimension."""
image = tfa.image.translate_ops.translate(wrap(image), [-pixels, 0])
return unwrap(image)
def translate_y(image, pixels):
"""Equivalent of PIL Translate in Y dimension."""
image = tfa.image.translate_ops.translate(wrap(image), [0, -pixels])
return unwrap(image)
def shear_x(image, level):
"""Equivalent of PIL Shearing in X dimension."""
# Shear parallel to x axis is a projective transform
# with a matrix form of:
# [1 level
# 0 1]
image = tfa.image.transform_ops.transform(
wrap(image), [1., level, 0., 0., 1., 0., 0., 0.])
return unwrap(image)
def shear_y(image, level):
"""Equivalent of PIL Shearing in Y dimension."""
# Shear parallel to y axis is a projective transform
# with a matrix form of:
# [1 0
# level 1]
image = tfa.image.transform_ops.transform(
wrap(image), [1., 0., 0., level, 1., 0., 0., 0.])
return unwrap(image)
def autocontrast(image):
"""Implements Autocontrast function from PIL using TF ops."""
def scale_channel(channel):
"""Scale the 2D image using the autocontrast rule."""
        # A possibly cheaper version could use cumsum/unique_with_counts over the
        # histogram values, rather than iterating over the entire image, to
        # compute the mins and maxes.
lo = tf.cast(tf.reduce_min(channel), tf.float32)
hi = tf.cast(tf.reduce_max(channel), tf.float32)
# Scale the image, making the lowest value 0 and the highest value 255.
def scale_values(im):
scale = 255.0 / (hi - lo)
offset = -lo * scale
im = tf.cast(im, tf.float32) * scale + offset
return tf.saturate_cast(im, tf.uint8)
result = tf.cond(hi > lo, lambda: scale_values(channel), lambda: channel)
return result
# Assumes RGB for now. Scales each channel independently
# and then stacks the result.
s1 = scale_channel(image[:, :, 0])
s2 = scale_channel(image[:, :, 1])
s3 = scale_channel(image[:, :, 2])
image = tf.stack([s1, s2, s3], 2)
return image
def autocontrast_blend(image, factor):
"""Implements blend of autocontrast with original image."""
return blend(autocontrast(image), image, factor)
def sharpness(image, factor):
"""Implements Sharpness function from PIL using TF ops."""
orig_im = image
image = tf.cast(image, tf.float32)
# Make image 4D for conv operation
image = tf.expand_dims(image, 0)
# SMOOTH PIL Kernel
kernel = tf.constant(
[[1, 1, 1], [1, 5, 1], [1, 1, 1]], dtype=tf.float32,
shape=[3, 3, 1, 1]) / 13.
# Tile across channel dimension
kernel = tf.tile(kernel, [1, 1, 3, 1])
strides = [1, 1, 1, 1]
degenerate = tf.nn.depthwise_conv2d(
image, kernel, strides, padding='VALID', dilations=[1, 1])
degenerate = tf.squeeze(tf.saturate_cast(degenerate, tf.uint8), [0])
# For the borders of the resulting image, fill in the values of the
# original image.
mask = tf.ones_like(degenerate)
padded_mask = tf.pad(mask, [[1, 1], [1, 1], [0, 0]])
padded_degenerate = tf.pad(degenerate, [[1, 1], [1, 1], [0, 0]])
result = tf.where(tf.equal(padded_mask, 1), padded_degenerate, orig_im)
# Blend the final result
return blend(result, orig_im, factor)
def equalize(image):
"""Implements Equalize function from PIL using TF ops."""
def scale_channel(im, c):
"""Scale the data in the channel to implement equalize."""
im = tf.cast(im[:, :, c], tf.int32)
# Compute the histogram of the image channel.
histo = tf.histogram_fixed_width(im, [0, 255], nbins=256)
# For the purposes of computing the step, filter out the nonzeros.
nonzero = tf.where(tf.not_equal(histo, 0))
nonzero_histo = tf.reshape(tf.gather(histo, nonzero), [-1])
step = (tf.reduce_sum(nonzero_histo) - nonzero_histo[-1]) // 255
def build_lut(histo, step):
# Compute the cumulative sum, shifting by step // 2
# and then normalization by step.
lut = (tf.cumsum(histo) + (step // 2)) // step
# Shift lut, prepending with 0.
lut = tf.concat([[0], lut[:-1]], 0)
# Clip the counts to be in range. This is done
# in the C code for image.point.
return tf.clip_by_value(lut, 0, 255)
# If step is zero, return the original image. Otherwise, build
# lut from the full histogram and step and then index from it.
result = tf.cond(tf.equal(step, 0),
lambda: im,
lambda: tf.gather(build_lut(histo, step), im))
return tf.cast(result, tf.uint8)
# Assumes RGB for now. Scales each channel independently
# and then stacks the result.
s1 = scale_channel(image, 0)
s2 = scale_channel(image, 1)
s3 = scale_channel(image, 2)
image = tf.stack([s1, s2, s3], 2)
return image
def equalize_blend(image, factor):
"""Implements blend of equalize with original image."""
return blend(equalize(image), image, factor)
def _convolve_image_with_kernel(image, kernel):
num_channels = tf.shape(image)[-1]
kernel = tf.tile(kernel, [1, 1, num_channels, 1])
image = tf.expand_dims(image, axis=0)
convolved_im = tf.nn.depthwise_conv2d(
tf.cast(image, tf.float32), kernel, strides=[1, 1, 1, 1], padding='SAME')
# adding 0.5 for future rounding, same as in PIL:
# https://github.com/python-pillow/Pillow/blob/555e305a60d7fcefd1ad4aa6c8fd879e2f474192/src/libImaging/Filter.c#L101 # pylint: disable=line-too-long
convolved_im = convolved_im + 0.5
return tf.squeeze(convolved_im, axis=0)
def blur(image, factor):
"""Blur with the same kernel as ImageFilter.BLUR."""
# See https://github.com/python-pillow/Pillow/blob/master/src/PIL/ImageFilter.py # pylint: disable=line-too-long
# class BLUR(BuiltinFilter):
# name = "Blur"
# # fmt: off
# filterargs = (5, 5), 16, 0, (
# 1, 1, 1, 1, 1,
# 1, 0, 0, 0, 1,
# 1, 0, 0, 0, 1,
# 1, 0, 0, 0, 1,
# 1, 1, 1, 1, 1,
# )
# # fmt: on
#
# filterargs are following:
# (kernel_size_x, kernel_size_y), divisor, offset, kernel
#
blur_kernel = tf.constant([[1., 1., 1., 1., 1.],
[1., 0., 0., 0., 1.],
[1., 0., 0., 0., 1.],
[1., 0., 0., 0., 1.],
[1., 1., 1., 1., 1.]],
dtype=tf.float32,
shape=[5, 5, 1, 1]) / 16.0
blurred_im = _convolve_image_with_kernel(image, blur_kernel)
return blend(image, blurred_im, factor)
def smooth(image, factor):
"""Smooth with the same kernel as ImageFilter.SMOOTH."""
# See https://github.com/python-pillow/Pillow/blob/master/src/PIL/ImageFilter.py # pylint: disable=line-too-long
# class SMOOTH(BuiltinFilter):
# name = "Smooth"
# # fmt: off
# filterargs = (3, 3), 13, 0, (
# 1, 1, 1,
# 1, 5, 1,
# 1, 1, 1,
# )
# # fmt: on
#
# filterargs are following:
# (kernel_size_x, kernel_size_y), divisor, offset, kernel
#
smooth_kernel = tf.constant([[1., 1., 1.],
[1., 5., 1.],
[1., 1., 1.]],
dtype=tf.float32,
shape=[3, 3, 1, 1]) / 13.0
smoothed_im = _convolve_image_with_kernel(image, smooth_kernel)
return blend(image, smoothed_im, factor)
def rescale(image, level):
"""Rescales image and enlarged cornet."""
# TODO: should we do center crop instead?
# TODO: add support of other resize methods
# See tf.image.ResizeMethod for full list
size = image.shape[:2]
scale = level * 0.25
scale_height = tf.cast(scale * size[0], tf.int32)
scale_width = tf.cast(scale * size[1], tf.int32)
cropped_image = tf.image.crop_to_bounding_box(
image,
offset_height=scale_height,
offset_width=scale_width,
target_height=size[0] - scale_height,
target_width=size[1] - scale_width)
rescaled = tf.image.resize(cropped_image, size, tf.image.ResizeMethod.BICUBIC)
return tf.saturate_cast(rescaled, tf.uint8)
NAME_TO_FUNC = {
'Identity': tf.identity,
'AutoContrast': autocontrast,
'AutoContrastBlend': autocontrast_blend,
'Equalize': equalize,
'EqualizeBlend': equalize_blend,
'Invert': invert,
'InvertBlend': invert_blend,
'Rotate': rotate,
'Posterize': posterize,
'Solarize': solarize,
'SolarizeAdd': solarize_add,
'Color': color,
'Contrast': contrast,
'Brightness': brightness,
'Sharpness': sharpness,
'ShearX': shear_x,
'ShearY': shear_y,
'TranslateX': translate_x,
'TranslateY': translate_y,
'Blur': blur,
'Smooth': smooth,
'Rescale': rescale,
}
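# --- Illustrative sketch, not part of the original module ---
# One way the NAME_TO_FUNC table might be consumed. The helper name
# `apply_augmentation` and the split between single-argument and two-argument
# ops are assumptions for illustration, not the module's actual dispatch code.
def apply_augmentation(image, op_name, magnitude):
  """Look up an op by name and apply it, passing `magnitude` only when the op takes one."""
  func = NAME_TO_FUNC[op_name]
  if op_name in ('Identity', 'AutoContrast', 'Equalize', 'Invert'):
    return func(image)  # these ops take only the image
  return func(image, magnitude)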
| 34.92654
| 153
| 0.631997
|
ea07bcd1f5518f5c822e44897090367504b6191b
| 2,757
|
py
|
Python
|
tests/test_prep_tasks.py
|
rednafi/rush
|
216d0a8f85ec90853608de4226fb61b2d287cf9f
|
[
"MIT"
] | 32
|
2019-12-20T15:01:32.000Z
|
2021-04-30T18:58:17.000Z
|
tests/test_prep_tasks.py
|
rednafi/rush
|
216d0a8f85ec90853608de4226fb61b2d287cf9f
|
[
"MIT"
] | 22
|
2019-12-21T08:24:26.000Z
|
2020-01-22T19:30:53.000Z
|
tests/test_prep_tasks.py
|
rednafi/rush
|
216d0a8f85ec90853608de4226fb61b2d287cf9f
|
[
"MIT"
] | 4
|
2020-01-16T22:41:28.000Z
|
2021-05-31T21:30:01.000Z
|
from collections import OrderedDict
import pytest
import yaml
from mock import patch
from rush_cli.prep_tasks import PrepTasks
from rush_cli.prep_tasks import Views
@pytest.fixture
def make_preptasks():
obj = PrepTasks()
return obj
def test_clean_tasks(make_preptasks):
obj = make_preptasks
assert obj._clean_tasks(
{
"task_1": 'echo "task1 is running"\n',
"task_2": 'task_1\necho "task2 is running"\n',
}
) == OrderedDict(
[
("task_1", ['echo "task1 is running"']),
("task_2", ["task_1", 'echo "task2 is running"']),
]
)
def test_replace_placeholder_tasks(make_preptasks):
obj = make_preptasks
assert obj._replace_placeholder_tasks(
["task_1", 'echo "task"'], {"task_1": "hello"}
) == ["hello", 'echo "task"']
def test_flatten_task_chunk(make_preptasks):
obj = make_preptasks
assert obj._flatten_task_chunk(
[["hello"], ["from", ["the", ["other"]], "side"]]
) == ["hello", "from", "the", "other", "side"]
def test_filter_tasks(make_preptasks):
obj = make_preptasks
assert obj._filter_tasks(
{"task_1": "ay", "task_2": "g", "task_3": "homie"}, "task_1", "task_3"
) == {"task_1": "ay", "task_3": "homie"}
with pytest.raises(SystemExit):
obj._filter_tasks(
{"task_1": "ay", "task_2": "g", "task_3": "homie"}, "task_1", "task_4"
)
@pytest.fixture(autouse=True)
def make_tmpdir(tmpdir):
tmp_dir = tmpdir.mkdir("folder")
tmp_path = tmp_dir.join("rushfile.yml")
return tmp_dir, tmp_path
@pytest.fixture(autouse=True)
def make_cwd(request, make_tmpdir):
tmp_dir, tmp_path = make_tmpdir
patched = patch("os.getcwd", return_value=tmp_dir)
request.addfinalizer(lambda: patched.__exit__())
return patched.__enter__()
# find_rushfile
@pytest.fixture(autouse=True)
def make_rushfile(make_tmpdir):
"""Creating dummy rushfile.yml."""
# dummy rushfile path
tmp_dir, tmp_path = make_tmpdir
# dummy rushfile contents
content = """task_1: |
echo "task1 is running"
task_2: |
# Task chaining [task_1 is a dependency of task_2]
task_1
echo "task2 is running"
"""
# loading dummy rushfile
yml_content = yaml.load(content, Loader=yaml.FullLoader)
# saving dummy rushfile to tmp dir
with open(tmp_path, "w") as f:
yaml.dump(yml_content, f)
return yml_content
@pytest.fixture
def make_views():
obj = Views()
return obj
def test_view_rushpath(capsys, make_views):
obj = make_views
obj.view_rushpath
captured = capsys.readouterr()
print(captured.out)
assert captured.out.rstrip().split("/")[-1] == "rushfile.yml"
| 23.168067
| 82
| 0.635111
|
db741988bd36b8dd8cb04415185731c74e951572
| 63,899
|
py
|
Python
|
Test.py
|
Tony031218/manim-projects
|
b243dec0f0a007649a92938e90d60eccb4c7dd15
|
[
"Apache-2.0"
] | 45
|
2019-10-08T23:58:20.000Z
|
2020-05-20T03:49:15.000Z
|
Test.py
|
Tony031218/manim-projects
|
b243dec0f0a007649a92938e90d60eccb4c7dd15
|
[
"Apache-2.0"
] | null | null | null |
Test.py
|
Tony031218/manim-projects
|
b243dec0f0a007649a92938e90d60eccb4c7dd15
|
[
"Apache-2.0"
] | 12
|
2019-08-15T08:07:22.000Z
|
2020-05-09T12:34:14.000Z
|
from manimlib.imports import *
from manim_projects.tony_useful.imports import *
from random import randint
'''
Test code I wrote while answering questions from group members in the chat (started 2020.02.03)
The purpose and effect of each test is given in its docstring
'''
class Test0(Scene):
def construct(self):
circle = Circle(radius=3)
poly = []
for i in range(3, 11):
po = Polygon(
*[
UP * np.sin(j * 2 * PI / i) + RIGHT * np.cos(j * 2 * PI / i)
for j in range(i)
]
)
poly.append(po.scale(3, about_point=ORIGIN))
self.play(ShowCreation(circle))
self.play(ShowCreation(poly[0]))
self.wait()
for i in range(1, 8):
self.play(Transform(poly[0], poly[i]))
self.wait()
self.wait(2)
class Test1(Scene):
'''The index layout of the entries inside the Matrix class'''
def construct(self):
mat = Matrix([['0', '-1', '2'], ['1', '0', '12'], ['3', '2', 'x']])
self.add(mat)
debugTeX(self, mat[0])
class Test2(Scene):
'''Using \tt to get a typewriter font in TextMobject'''
def construct(self):
text = VGroup(
TextMobject("\\tt UR=np.array([ 1, 1, 0])", tex_to_color_map={"=":RED, "array":BLUE}),
TextMobject("\\tt UL=np.array([-1, 1, 0])", tex_to_color_map={"=":RED, "array":BLUE}),
TextMobject("\\tt DR=np.array([ 1,-1, 0])", tex_to_color_map={"=":RED, "array":BLUE}),
TextMobject("\\tt DL=np.array([-1,-1, 0])", tex_to_color_map={"=":RED, "array":BLUE})
).arrange_submobjects(DOWN)
self.add(text)
class Test3(Scene):
'''Coordinates can be given as an ndarray or as a list'''
def construct(self):
l = Line([0, 0, 0], [3, 3, 0])
self.add(l)
class Test4(Scene):
'''Usage of aligned_edge'''
def construct(self):
sq1 = Square().shift(LEFT * 2)
sq2 = Square().next_to(sq1.get_corner(DR), DOWN)
sq3 = Square().shift(RIGHT * 2)
sq4 = Square().next_to(sq3.get_corner(DR), DOWN, aligned_edge=LEFT)
self.add(sq1, sq2, sq3, sq4)
class Test5(Scene):
'''Forcing next_to alignment for the plus sign'''
def construct(self):
text = TextMobject("LOVE\\ DEATH\\ ", "$+$", "\\ ROBOTS", color=RED)
text[1].next_to(text[0], RIGHT)
text[2].next_to(text[1], RIGHT)
self.add(text)
class Test6(Scene):
'''The FocusOn and Flash animation effects'''
def construct(self):
title1 = TextMobject("FocusOn").scale(2).to_corner(UL)
self.add(title1)
dot = Dot(radius=0.5, color=BLUE)
self.play(ShowCreation(dot))
self.wait()
self.play(FocusOn(dot))
self.wait(2)
title2 = TextMobject("Flash").scale(2).to_corner(UL)
self.play(Transform(title1, title2))
self.wait()
self.play(Flash(dot, flash_radius=0.55))
self.wait(3)
class Test7(Scene):
'''Black text on a white background'''
def construct(self):
txt = TexMobject("0",
fill_color=BLACK,
fill_opacity=1.0,
stroke_color=BLACK,
stroke_opacity=1.0,
).scale(3)
self.add(txt)
class Test8(Scene):
'''Using a Rectangle or a Line to force the width of a Brace'''
def construct(self):
rec = Rectangle(width=4)
brac = Brace(rec, DOWN)
self.add(brac)
class Test9(ThreeDScene):
'''3D rotation of a cube'''
def construct(self):
self.set_to_default_angled_camera_orientation()
cube = Cube()
self.add(cube)
self.wait()
self.play(Rotating(cube, axis=UP, radians=PI / 6))
self.wait(2)
class Test10(Scene):
'''Gradient-colored text'''
def construct(self):
text = TextMobject("test").scale(2).set_color_by_gradient(BLUE, RED)
self.add(text)
class Test11(Scene):
'''LaTeX's cases environment works'''
def construct(self):
text = TexMobject(
r"""
\begin{cases}
u^3+v^3=-q\\
uv=-\frac{p}{3}\\
\end{cases}
"""
)
self.add(text)
class Test12(Scene):
def construct(self):
circle0 = Circle(color=WHITE,radius=2)
text0 = TextMobject("Gaussian \\\\ Elimination")
vec1 =Vector(1.4*LEFT).move_to(circle0.get_center()+2.8*LEFT)
circle1 = Circle(color=RED,radius=1.6).next_to(vec1, LEFT)
text1 = TextMobject("System of \\\\ linear equation").move_to(circle1.get_center()+ORIGIN).scale(0.8)
vgr1 = VGroup(text1, circle1)
self.add(circle0, text0)
self.add(vec1)
self.add(vgr1)
self.wait(2)
pos = Dot(fill_opacity=0).move_to(circle1.get_center())
def update_text(obj):
obj.move_to(pos)
vgr1.add_updater(update_text)
self.add(vgr1)
self.play(
Rotating(vec1, radians = 6 * PI, about_point = ORIGIN, axis = IN),
Rotating(pos , radians = 6 * PI, about_point = ORIGIN, axis = IN),
run_time=20
)
class Test13(Scene):
'''The Uncreate effect; note it is Uncreate, not UnCreate'''
def construct(self):
sq = Square()
self.add(sq)
self.wait()
self.play(Uncreate(sq))
self.wait()
class Test14(Scene):
def construct(self):
rec1 = Rectangle(height=2, width=6)
rec2 = Rectangle(height=1, width=1).shift(LEFT*2)
rec3 = Rectangle(height=1, width=1).shift(RIGHT*2)
rec4 = Rectangle(height=1, width=1)
recs = VGroup(rec1, rec2, rec3, rec4)
self.add(recs)
self.wait()
self.play(recs.shift, UP*2.5)
self.wait()
circle = Circle(radius=0.5).move_to(rec3)
self.play(Transform(rec3, circle))
self.wait()
class Test15(GraphScene):
'''The axes of a GraphScene can be faded out'''
def construct(self):
self.setup_axes(animate=True)
self.wait()
self.play(FadeOut(self.axes))
self.wait()
class Test16(Scene):
def construct(self):
objs = [
Square().shift(LEFT * 3),
Square(),
Square().shift(RIGHT * 3)
]
self.add(*objs)
self.wait()
self.play(
*[
ApplyMethod(obj.shift, UP)
for obj in objs
]
)
self.wait()
class Test17(Scene):
'''Aligning with index_of_submobject_to_align; note that get_center() is required'''
def construct(self):
vg1 = VGroup(
Circle(radius = 0.5).shift(LEFT*2),
Circle(radius = 0.5).shift(LEFT*1),
Circle(radius = 0.5),
Circle(radius = 0.5).shift(RIGHT*1),
Circle(radius = 0.5).shift(RIGHT*2),
)
vg2 = VGroup(
Square(side_length=1).shift(LEFT*1),
Square(side_length=1),
Square(side_length=1).shift(RIGHT*1),
)
vg2.next_to(vg1[3].get_center(), DOWN, index_of_submobject_to_align=1)
self.add(vg1, vg2)
class Test18(Scene):
'''Using tex[0] to break a TexMobject into individual characters'''
def construct(self):
tex = TexMobject("a^2+b^2=c^2")
self.add(tex)
debugTeX(self, tex[0])
class Test19(Scene):
'''Using AnimatedBoundary to change the color of a Line'''
def construct(self):
l = Line(LEFT * 3, RIGHT * 3)
self.add(l)
self.wait()
l2 = AnimatedBoundary(l, colors=[BLUE])
self.add(l2)
self.wait(3)
class Test20(Scene):
'''Using set_opacity to create a blinking effect'''
def construct(self):
text = TextMobject("颓废最不要脸")
self.add(text)
for i in range(20):
self.play(text.set_opacity, 0, run_time=0.2)
self.play(text.set_opacity, 1, run_time=0.2)
self.wait()
class Test21(Scene):
'''The default axis when flipping an arc'''
def construct(self):
grid = NumberPlane()
arc = Arc(0, PI / 2, color = BLUE)
arc2 = arc.copy().flip().set_color(YELLOW)
self.add(grid, arc, arc2)
class Test22(Scene):
def construct(self):
text = TextMobject("abcd")
self.add(text)
class Test23(Scene):
'''move_arc_center_to, plus playing animations with different run_times at the same time'''
def construct(self):
sq = Square(side_length=4)
ci = Arc(0, PI / 2, color=BLUE, radius=4).move_arc_center_to(sq.get_corner(DL))
self.wait()
self.play(ShowCreation(sq, run_time=2), ShowCreation(ci, run_time=4))
self.wait()
class Test24(Scene):
'''Surrounding each character with a rectangle'''
def construct(self):
text = TextMobject("abcdefgh")
rec = VGroup()
for i in text[0]:
rec.add(SurroundingRectangle(i, buff=0))
self.add(text, rec)
class Test25(Scene):
'''Using a LaTeX table'''
def construct(self):
tab = TextMobject(
r"""
\begin{table}[]
\begin{tabular}{|l|l|l|l|l|l|}
\hline
a & b & c & d & e & f \\ \hline
\end{tabular}
\end{table}
"""
)
self.add(tab)
debugTeX(self, tab[0])
class Test26(Scene):
'''Succession; in practice no different from several separate plays'''
def construct(self):
group = VGroup(
Circle(radius = 0.5).shift(LEFT*2),
Circle(radius = 0.5).shift(LEFT*1),
Circle(radius = 0.5),
Circle(radius = 0.5).shift(RIGHT*1),
Circle(radius = 0.5).shift(RIGHT*2),
).set_opacity(0)
self.wait()
self.play(
Succession(
*[
ApplyMethod(obj.set_opacity, 1)
for obj in group
]
)
)
self.wait()
class Test27(Scene):
'''The difference between UP and TOP when using to_corner'''
def construct(self):
text = TextMobject("to\_corner UP").to_corner(UP)
text2 = TextMobject("to\_corner TOP").to_corner(TOP) # 非标准用法
text3 = TextMobject("move\_to TOP").move_to(TOP).set_color(YELLOW)
self.add(text, text2, text3)
class Test28(Scene):
'''FadeOut every mobject; mobjects that were never added are not force-added and faded out'''
def construct(self):
sq = Square()
ci = Circle()
self.add(sq)
self.wait()
self.play(
*[
FadeOut(obj)
for obj in self.mobjects
]
)
self.wait()
class Test29(Scene):
'''Text does not wrap automatically'''
def construct(self):
text = Text("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", font="Consolas")
self.add(text)
class Test30(Scene):
'''An arc over characters'''
def construct(self):
text = TextMobject("\\overarc{AB}")
self.add(text)
class Test31(Scene):
def construct(self):
Gc = VGroup()
colors = color_gradient([BLACK, WHITE], 9)
Gc.add(Square(side_length=1).shift(LEFT*6).set_fill(colors[0], 1).set_color(colors[0]))
for i in range(1, 9):
Gc.add(Gc[-1].copy().set_fill(colors[i], 1).set_color(colors[i]).shift(RIGHT*1.2))
self.play(Transform(Gc[-2], Gc[-1], rate_func=linear))
self.wait()
class Test32(GraphScene):
'''get_graph must be called after setup_axes'''
def construct(self):
self.setup_axes(animate=True)
graph = self.get_graph(lambda x : x**2,
color = GREEN,
x_min = None,
x_max = None
)
self.play(
ShowCreation(graph),
run_time = 2
)
self.wait()
class Test33(Scene):
'''Using color brightness to fake transparency, so overlapping translucent colors don't cause a sudden brightness jump'''
def construct(self):
colors = color_gradient(["#6C6C00", YELLOW], 9)
sq = Square(side_length=1).shift(LEFT*6).set_fill(colors[0], 1).set_color(colors[0])
for i in range(1, 9):
self.play(
ApplyMethod(sq.shift, RIGHT * 1.2, rate_func=linear),
ApplyMethod(sq.set_color, colors[i]),
ApplyMethod(sq.set_fill, colors[i], 1)
)
self.wait()
class Test34(ThreeDScene):
'''The faces of a Cube'''
def construct(self):
self.set_to_default_angled_camera_orientation()
cube = Cube(fill_opacity=0, stroke_width=3).set_fill(opacity=0).set_color(WHITE)
cube[5].set_color(BLUE)
self.add(cube)
debugTeX(self, cube)
class Test35(GraphScene):
'''Using an updater to update a graph'''
def construct(self):
self.setup_axes()
line = self.get_graph(lambda x: x + 2)
val = ValueTracker(1)
line.add_updater(lambda m: m.become(self.get_graph(lambda x: val.get_value() * x + 2, color=BLUE)))
self.add(line)
self.play(val.increment_value, 4)
self.wait()
class Test36(ThreeDScene):
'''A paraboloid'''
def construct(self):
self.set_to_default_angled_camera_orientation()
颓废曲面 = ParametricSurface(
lambda u, v: [u, v, u ** 2 + v ** 2],
u_min=-1, u_max=1, v_min=-1, v_max=1
)
self.add(颓废曲面)
class Test37(Scene):
'''Align orientations before a Transform to make the animation smoother'''
def construct(self):
ci = Circle()
# sq = Square()
sq = Square().flip()
self.play(ShowCreation(ci))
self.play(Transform(ci, sq))
self.wait()
class Test38(Scene):
'''Coloring part of a radical expression'''
def construct(self):
text = TexMobject("\\sqrt{x^2+y^2+z^2}")
text[0][2:4].set_color(RED)
self.add(text)
debugTeX(self, text[0])
class Test39(Scene):
'''Coloring'''
def construct(self):
text4 = TexMobject(
r"ds=\vert d\vec r \vert=",
r"\sqrt{x^2+y^2+z^2}"
)
VGroup(
text4
).set_color(YELLOW)
VGroup(
text4[1][2:4]
).set_color(RED)
self.add(text4)
debugTeX(self, text4[1])
class Test40(Scene):
'''A single self.play cannot handle two ApplyMethods targeting the same mobject, but it works without wrapping them in ApplyMethod'''
def construct(self):
dot = Dot(color=BLUE)
up = Dot(color=YELLOW).to_edge(UP)
self.add(dot)
self.wait()
# self.play(
# ApplyMethod(dot.next_to, up, DOWN),
# ApplyMethod(dot.scale, 3)
# )
self.play(
dot.next_to, up, DOWN,
dot.scale, 3
)
self.wait()
class Test41(Scene):
'''What replace does'''
def construct(self):
sq = Square().scale(2)
ci = Circle().shift(RIGHT*3)
self.add(sq, ci)
self.play(sq.replace, ci)
self.wait()
class Test42(Scene):
'''The loop variable i cannot be used directly inside an updater'''
def construct(self):
ups = VGroup(
*[
Dot(color=BLUE).move_to([i, 1, 0])
for i in range(-3, 4)
]
)
downs = VGroup(
*[
Dot(color=YELLOW).move_to([i, -1, 0])
for i in range(-3, 4)
]
)
lines = VGroup(
*[
Line(ups[i], downs[i])
for i in range(0, 7)
]
)
lines.add_updater(
lambda m: m.become(
VGroup(
*[
Line(ups[i], downs[i])
for i in range(0, 7)
]
)
)
)
# for i in range(7):
# lines[i].add_updater(lambda m: m.put_start_and_end_on(ups[i].get_bottom(), downs[i].get_top()))
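        # The commented-out version fails because Python closures bind i late:
        # every updater would see the final value of i. Binding it as a default
        # argument (lambda m, n=i: ...) fixes this, as shown in Test112 below.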
self.add(ups, downs, lines)
self.wait()
self.play(
ups.shift, LEFT * 2
)
self.play(
downs.shift, RIGHT * 2
)
self.wait()
class Test43(Scene):
'''Same reasoning as Test40'''
def construct(self):
dot = Dot(color=BLUE)
self.add(dot)
self.wait()
self.play(
ApplyMethod(dot.scale, 3), # this one gets overridden
ApplyMethod(dot.set_color, YELLOW)
)
self.wait()
class Test44(ThreeDScene):
def construct(self):
axes = ThreeDAxes()
spheres = VGroup(
*[
Sphere(radius=i, opacity=0.5, resolution=(20, 40))
for i in np.arange(1, 3.1, 0.4)
]
)
self.set_to_default_angled_camera_orientation()
self.add(axes)
old = VGroup()
new = VGroup()
for i in range(len(spheres[0])):
old.add(spheres[randint(1, 5)][i].set_opacity(randint(1, 6) / 10))
new.add(spheres[0][i])
self.wait()
self.wait()
self.play(
FadeIn(old),
*[
Transform(i, j)
for i, j in zip(old, new)
],
run_time=6
)
self.wait()
class Test45(ThreeDScene):
def construct(self):
axes = ThreeDAxes()
self.set_to_default_angled_camera_orientation()
self.add(axes)
surface = ParametricSurface(
lambda y, z: [
-np.sqrt(
1 - 9 * y ** 2 / 4 + (320 * 2 ** (1 / 3) * z ** 3) / ((
99532800 * y ** 2 * z ** 2 + 884736000 * z ** 3 - \
1990656000 * y ** 2 * z ** 3 - 884736000 * z ** 5 + np.sqrt(
(-115964116992000000 * z ** 9 + (99532800 * y ** 2 * z ** 2 + \
884736000 * z ** 3 - 1990656000 * y ** 2 * z ** 3 - 884736000 * z ** 5) ** 2) ** 2)
) ** (1 / 3)) + (1 / 960 * 2 ** (1 / 3)) * (99532800 * y ** 2 * z ** 2 + \
884736000 * z ** 3 - 1990656000 * y ** 2 * z ** 3 - 884736000 * z ** 5 + np.sqrt(
(-115964116992000000 * z ** 9 + (99532800 * y ** 2 * z ** 2 + 884736000 * z ** 3 -\
1990656000 * y ** 2 * z ** 3 - 884736000 * z ** 5) ** 2)
)) ** (1 / 3)
),
y, z
]
)
self.add(surface)
class Test46(Scene):
'''Brace'''
def construct(self):
text = TextMobject("test")
brace = Brace(text, DOWN)
self.play(Write(brace))
self.play(Write(text))
class Test47(Scene):
'''The dancers stick figures from LaTeX; requires downloading the font package and modifying ctex_template'''
def construct(self):
Test = VGroup()
for i in range(51):
test = TextMobject("\\Pisymbol{dancers}{%d}" % i, stroke_width=1, fill_opacity=1, stroke_opacity=1).scale(200)
Test.add(test)
self.wait()
self.play(Write(Test[0]))
for i in range(1, 51):
self.wait(0.8)
self.play(Transform(Test[0], Test[i]))
self.wait(2)
class Test48(Scene):
'''plot_depth'''
CONFIG = {
"camera_config": {"use_plot_depth": True}
}
def construct(self):
sq = Square(stroke_width=5).set_plot_depth(1)
sq2 = Square(side_length=1, stroke_width=5).shift(RIGHT).set_color(BLUE).set_plot_depth(0)
self.add(sq, sq2)
self.wait()
self.play(sq2.set_plot_depth, 2)
self.wait()
class Test49(Scene):
'''Writing code with LaTeX's lstlisting; requires modifying ctex_template'''
def construct(self):
text = TextMobject("""
\\begin{lstlisting}
int main() {
}
\\end{lstlisting}
""")
self.add(text)
class Test50(ThreeDScene):
'''A conical wedge; renders painfully slowly'''
def construct(self):
axes = ThreeDAxes()
self.set_camera_orientation(phi=70 * DEGREES, theta=45 * DEGREES)
self.add(axes)
a = VGroup()
b = VGroup()
c = VGroup()
for i in np.arange(-1, 1.00001, 0.0005):
tri = Polygon([i, np.sqrt(1 - i ** 2), 0],
[i, -np.sqrt(1 - i ** 2), 0],
[i, 0, 2], stroke_width=0, fill_color=BLUE, fill_opacity=0.75)
a.add(tri)
cnt = 1
self.begin_ambient_camera_rotation(rate=0.5)
for tri in a:
if cnt % 2 == 0:
self.add(tri.set_fill(color=YELLOW, opacity=0.5))
self.wait(0.01)
tri.set_fill(color=BLUE, opacity=0.75)
else:
self.add(tri)
cnt += 1
self.wait(5)
class Test51(ThreeDScene):
'''From pyramids to an approximate cone'''
def construct(self):
axes = ThreeDAxes()
self.set_camera_orientation(phi=70 * DEGREES, theta=45 * DEGREES)
self.add(axes)
circle = Circle(radius=2)
polys = []
faces = []
for i in range(3, 16):
po = Polygon(
*[
UP * np.sin(j * 2 * PI / i) + RIGHT * np.cos(j * 2 * PI / i)
for j in range(i)
], stroke_width=1, stroke_color=BLUE, fill_color=BLUE, fill_opacity=0.75
).scale(2, about_point=ORIGIN)
polys.append(po)
verts = po.get_vertices()
faces_ = VGroup()
for j in range(i):
if j == i - 1:
face = Polygon(verts[j], verts[0], [0, 0, 3])
else:
face = Polygon(verts[j], verts[j + 1], [0, 0, 3])
face.set_stroke(width=1, color=BLUE)
face.set_fill(color=BLUE, opacity=0.75)
faces_.add(face)
faces.append(faces_)
self.play(ShowCreation(circle))
self.play(ShowCreation(polys[0]), ShowCreation(faces[0]))
self.wait()
self.begin_ambient_camera_rotation(rate=0.5)
self.wait()
for i in range(1, 13):
self.play(
Transform(polys[0], polys[i]),
Transform(faces[0], faces[i])
)
self.wait()
self.wait(2)
class Test52(SpecialThreeDScene):
'''A test of the Boxes class'''
CONFIG = {
"default_angled_camera_position": {
"phi": 70 * DEGREES,
"theta": -45 * DEGREES,
"distance": 50,
},
}
def construct(self):
self.set_camera_to_default_position()
axes = self.get_axes()
boxes = MyBoxes(fill_color=GRAY, resolution=(20, 20), bottom_size=(0.25, 0.25), gap=0.05)
self.var_phi = 0
func_01 = lambda x, y: np.sin(x ** 2 / 2.4 + y ** 2 / 2.4 + self.var_phi) * 1.
func_02 = lambda x, y: np.sin(x ** 2 / 2.4 + y ** 2 / 2.4 + self.var_phi) * 1. - 0.25
boxes.update_top_and_bottom_by_func(func_01, func_02)
boxes.update_color_by_func(func_01)
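        # update_boxes advances the phase by 1 degree per frame, so the sine
        # surface appears to ripple outward from the center.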
def update_boxes(b, dt):
b.update_top_and_bottom_by_func(func_01, func_02)
b.update_color_by_func(func_01)
self.var_phi += 1 * DEGREES
self.add(boxes)
boxes.add_updater(update_boxes)
# self.wait(2)
# boxes.remove_updater(update_boxes)
self.wait(12)
class Test53(ThreeDScene):
'''The index layout of MyBoxes'''
def construct(self):
axes = ThreeDAxes()
# self.set_camera_orientation(phi=70 * DEGREES, theta=225 * DEGREES)
self.add(axes)
boxes = MyBoxes(fill_color=GRAY, resolution=(9, 18), bottom_size=(0.5, 0.7), gap=0.2, box_height=0.5)
self.add(boxes)
debugTeX(self, boxes)
class Test54(ThreeDScene):
'''Testing the periodic table of elements'''
CONFIG = {
"camera_config": {
"background_color": WHITE,
"should_apply_shading": False,
}
}
def construct(self):
self.set_camera_orientation(phi=50 * DEGREES, theta=240 * DEGREES, distance=50)
boxes = ChemicalBoxes(fill_color=BLUE_E).add_label().set_block_color()
self.add(boxes)
self.begin_ambient_camera_rotation(rate=1)
# self.wait(10)
class Test55(Scene):
'''You cannot get the point on a circle in a given direction this way'''
def construct(self):
ci = Circle()
self.add(ci)
dot = Dot().move_to(ci.get_boundary_point(UP * 2 * np.sqrt(5) / 5 + RIGHT * np.sqrt(5) / 5))
self.add(dot)
class Test56(Scene):
'''An updater that takes dt'''
def construct(self):
dot = Dot().to_edge(UP)
dot.add_updater(lambda m, dt: m.shift(0.1 * DOWN))
self.add(dot)
self.wait(6)
class Test57(Scene):
'''Superscripts and subscripts in text'''
def construct(self):
text = TextMobject("正文A$_{\\text{下标B}}^{\\text{上标C}}$").scale(3)
self.add(text)
class Test58(Scene):
'''rate_func'''
def construct(self):
func = ParametricFunction(
lambda x: [x, smooth(x), 0],
t_min=0, t_max=1
).scale(3, about_point=ORIGIN)
self.add(func)
class Test59(Scene):
'''save_image'''
def construct(self):
sq = Square()
sq.save_image()
self.add(sq)
class Test60(Scene):
'''Aligning by the equals sign'''
def construct(self):
tex1 = TexMobject("A=\\frac{\\displaystyle\\sum^n_{i=0}}{x}")
tex2 = TexMobject("=", "\\frac{x}{\\displaystyle\\sum^n_{i=0}}")
tex2.next_to(tex1, RIGHT)
tex2.next_to(tex1[0][1].get_center(), RIGHT, index_of_submobject_to_align=0, coor_mask=np.array([0, 1, 1]))
self.add(tex1, tex2)
texs = [
"A=\\frac{\\displaystyle\\sum^n_{i=0}}{x}",
"=\\frac{x}{\\displaystyle\\sum^n_{i=0}}"
]
tex = TexMobject(*texs)
self.add(tex)
class Test61(Scene):
def construct(self):
for1 = TexMobject(r"G(x)=\displaystyle\sum_{p=0}^{\infty}{\left( \frac{S^{p}(n)}{p!}x^p\right)}").scale(0.7).to_edge(UP+LEFT)
for1_bg = SurroundingRectangle(for1, fill_opacity = .2)
for2 = TexMobject(r"G(x) = \left( \frac{e^{(n+1)x}-1}{x} \right) \left( \frac{x}{e^x-1} \right)")
forrs = [
r"\frac{e^{(n+1)x}-1}{x}", # for3
r"= \frac{ \left( \displaystyle\sum_{p=0}^{\infty}{\frac{{((n+1)x)}^p}{p!}} \right) -1}{x}}", #for4
r"=\frac{1+\left( \displaystyle\sum_{p=1}^{\infty}{\frac{{((n+1)x)}^p}{p!}} \right) -1}{x}}",#for5
r"=\displaystyle\sum_{p=1}^{\infty}{\frac{(n+1)^p}{p!}x^{p-1}}",#for6
r"=\displaystyle\sum_{p=0}^{\infty}{\frac{(n+1)^{p+1}}{(p+1)!}x^{p}}"#for7
]
forr = TexMobject(*forrs).scale(0.9)
self.add(forr)
class Test62(Scene):
'''Flipping a triangle about one of its edges'''
def construct(self):
tri = Triangle()
vert = tri.get_vertices()
tri.generate_target()
tri.target.flip(axis=vert[0]-vert[1], about_point=(vert[0]+vert[1])/2)
self.add(tri)
self.wait()
self.play(MoveToTarget(tri))
self.wait()
class Test63(Scene):
'''Differences between ways of applying a gradient to text'''
def construct(self):
vg = VGroup(
TextMobject("abcde").set_color([RED, BLUE, WHITE]),
TextMobject("abcde").set_color_by_gradient(RED, BLUE, WHITE),
TextMobject("abcde")
).arrange(DOWN)
vg[2].shuffle(True)
vg[2].set_color_by_gradient(RED, BLUE, WHITE)
self.add(vg)
class Test64(Scene):
'''A CubicBezier has only four points (anchors and handles), while a ParametricFunction is made of many Bezier curves and therefore many points'''
def construct(self):
# line = CubicBezier([np.array([ -3, -1.5, 0]), np.array([-3.6, 1.5, 0]), np.array([ 0, 1.5, 0]), np.array([ 3, -1.5, 0])])
line = ParametricFunction(
bezier([np.array([ -3, -1.5, 0]), np.array([-3.6, 1.5, 0]), np.array([ 0, 1.5, 0]), np.array([ 3, -1.5, 0])]),
t_min=0, t_max=1
)
self.add(line)
points = line.get_points()
debugTeX(self, points)
class Test65(Scene):
'''The direction of a gradient, set with sheen_direction'''
def construct(self):
sq = Square()
sq.set_color([RED, BLUE])
# sq.set_opacity([0, 1])
# sq.set_fill([RED, BLUE], [0, 1])
sq.set_sheen_direction(UP)
self.add(sq)
# self.wait()
# self.play(sq.flip)
# self.wait()
class Test66(Scene):
'''A rather silly use of digest_config'''
CONFIG = {
"stroke_width": 15,
}
def construct(self):
line = Line()
digest_config(line, self.CONFIG)
self.add(line)
class Test67(Scene):
'''The points of an arc: it is approximated by many Bezier curves'''
def construct(self):
arc = Arc().scale(3)
self.add(arc)
points = arc.get_points()
debugTeX(self, points, 0.3)
class Test68(Scene):
def construct(self):
tex = TexMobject("{\\sin\\alpha\\over\\sin\\gamma}={n_1\\over n_2}")
self.add(tex)
debugTeX(self, tex[0])
class Test69(ThreeDScene):
'''A 3D mobject cannot be Transformed into a 2D fixed_in_frame_mobjects object, but a similar effect can be achieved with transforms such as z_to_vector'''
def construct(self):
self.set_camera_orientation(phi=60*DEGREES, theta=45*DEGREES)
vec = [
np.cos(45*DEGREES) * np.sin(60*DEGREES),
np.sin(45*DEGREES) * np.sin(60*DEGREES),
np.cos(60*DEGREES)
]
n = z_to_vector(vec)
tex = TexMobject("a").apply_matrix(n).rotate(PI/2, vec)
# self.camera.add_fixed_in_frame_mobjects(tex)
# tex.to_corner(UL)
surface = Cube()
self.add(surface)
self.play(Transform(surface, tex), run_time=2)
self.wait()
class Test70(Scene):
'''The points of a TexMobject cannot be obtained via get_points'''
def construct(self):
tex = TexMobject("S").scale(2)
self.add(tex)
p = tex.get_points()
print(p)
class Test71(Scene):
def construct(self):
grid = NumberPlane()
vector = np.array([1, 2, 0])
matrix = np.identity(3) - np.outer(vector, vector)
self.add(grid, Dot([1, 2, 0], color=RED))
self.wait()
self.play(grid.apply_matrix, matrix, run_time=3)
self.wait()
class Test72(Scene):
'''A light source'''
def construct(self):
light = AmbientLight()
self.add(light)
class Test73(Scene):
'''running_start is written as a degree-six Bezier curve'''
def construct(self):
grid = NumberPlane().scale(3)
func = ParametricFunction(
lambda x: [x, running_start(x), 0],
t_min=0, t_max=1
).scale(3, about_point=ORIGIN)
func2 = ParametricFunction(
bezier([
np.array([0/6, 0, 0]),
np.array([1/6, 0, 0]),
np.array([2/6, -0.5, 0]),
np.array([3/6, -0.5, 0]),
np.array([4/6, 1, 0]),
np.array([5/6, 1, 0]),
np.array([6/6, 1, 0]),
]),
t_min=0, t_max=1, color=RED
).scale(3, about_point=ORIGIN)
self.add(grid, func, func2)
class Test74(Scene):
'''Kindergarten mini-exercise 1'''
CONFIG = {
"camera_config": {
"use_plot_depth": True,
},
}
def setup(self):
self.A = np.array([1, 0, 0])
self.B = np.array([-1, 0, 0])
self.C = np.array([-0.3, 1.3, 0])
self.main_tri = Polygon(
self.A, self.B, self.C,
color=BLUE, fill_color=BLUE, fill_opacity=0.8
)
label_a = TexMobject("a").scale(0.7).next_to((self.B+self.C)/2, UL, buff=0.08)
label_b = TexMobject("b").scale(0.7).next_to((self.A+self.C)/2, UR, buff=0.08)
label_c = TexMobject("c").scale(0.7).next_to((self.B+self.A)/2, DOWN, buff=0.08)
self.labels = VGroup(label_a, label_b, label_c).set_plot_depth(5)
sq_a = Polygon(self.B, self.C, np.array([-1.6, 2, 0]), np.array([-2.3, 0.7, 0]), color=WHITE)
sq_b = Polygon(self.C, self.A, np.array([2.3, 1.3, 0]), np.array([1, 2.6, 0]), color=WHITE)
sq_c = Polygon(self.A, self.B, np.array([-1, -2, 0]), np.array([1, -2, 0]), color=WHITE)
self.sq = VGroup(sq_a, sq_b, sq_c).set_plot_depth(-1)
tri_a = Polygon(self.A, np.array([1, -2, 0]), np.array([2.3, 1.3, 0]), color=RED, fill_color=RED, fill_opacity=0.8)
tri_b = Polygon(self.B, np.array([-2.3, 0.7, 0]), np.array([-1, -2, 0]), color=YELLOW, fill_color=YELLOW, fill_opacity=0.8)
tri_c = Polygon(self.C, np.array([1, 2.6, 0]), np.array([-1.6, 2, 0]), color=GREEN, fill_color=GREEN, fill_opacity=0.8)
self.tri = VGroup(tri_a, tri_b, tri_c)
equation = TexMobject("S_{\\ } = S_{\\ } = S_{\\ } = S_{\\ }").scale(1.5).to_corner(UR, buff=1.1)
tri_1 = self.main_tri.copy().set_stroke(width=0).set_fill(opacity=1).scale(0.2).next_to(equation[0][0], RIGHT+DOWN*3, buff=-0.08)
tri_2 = tri_a.copy().rotate(PI/2).set_stroke(width=0).set_fill(opacity=1).scale(0.2).next_to(equation[0][2], RIGHT+DOWN*3, buff=-0.08)
tri_3 = tri_b.copy().rotate(PI/2, axis=IN).set_stroke(width=0).set_fill(opacity=1).scale(0.2).next_to(equation[0][4], RIGHT+DOWN*3, buff=-0.08)
tri_4 = tri_c.copy().rotate(PI/4, axis=IN).set_stroke(width=0).set_fill(opacity=1).scale(0.2).next_to(equation[0][6], RIGHT+DOWN*3, buff=-0.08)
self.equation = VGroup(equation, tri_1, tri_2, tri_3, tri_4)
# self.add(self.main_tri, self.labels, self.sq, self.tri, equation, tri_1, tri_2, tri_3, tri_4)
def construct(self):
self.wait()
self.play(ShowCreation(self.main_tri))
self.wait()
self.play(FadeIn(self.labels))
self.wait(2)
self.play(*[ShowCreation(i.set_plot_depth(-5)) for i in self.sq], run_time=2)
self.wait()
self.play(*[ShowCreation(i) for i in self.tri], run_time=2)
self.wait()
self.play(
*[
WiggleOutThenIn(i)
for i in self.tri
], run_time=2
)
self.wait(2)
self.play(
FadeOut(self.sq),
Rotating(self.tri[0], radians=PI/2, about_point=self.A),
Rotating(self.tri[1], radians=PI/2, about_point=self.B),
Rotating(self.tri[2], radians=PI/2, about_point=self.C),
run_time=3
)
self.wait()
self.play(
self.main_tri.shift, LEFT*2.5+DOWN,
self.tri.shift, LEFT*2.5+DOWN,
self.labels.shift, LEFT*2.5+DOWN,
)
self.labels.set_plot_depth(6)
self.wait(2)
self.play(
WiggleOutThenIn(self.tri[0]),
WiggleOutThenIn(self.main_tri)
)
self.play(
FadeIn(self.equation[0][0][:3]),
TransformFromCopy(self.main_tri, self.equation[1]),
TransformFromCopy(self.tri[0], self.equation[2]),
run_time=2
)
self.wait(2)
self.play(
WiggleOutThenIn(self.tri[1]),
WiggleOutThenIn(self.main_tri)
)
equation_copy_1 = self.equation[1].copy()
equation_copy_2 = self.equation[1].copy()
self.play(
FadeIn(self.equation[0][0][3:5]),
TransformFromCopy(self.main_tri, equation_copy_1),
TransformFromCopy(self.tri[1], self.equation[3]),
run_time=2
)
self.wait(2)
self.play(
WiggleOutThenIn(self.tri[2]),
WiggleOutThenIn(self.main_tri)
)
self.play(
FadeIn(self.equation[0][0][5:]),
TransformFromCopy(self.main_tri, equation_copy_2),
TransformFromCopy(self.tri[2], self.equation[4]),
run_time=2
)
self.wait(3)
self.play(FadeOut(VGroup(self.equation[0][0][:2], self.equation[1], equation_copy_1, equation_copy_2)))
self.equation[0][0][:2].set_opacity(0)
self.equation[1].set_fill(opacity=0)
self.equation.generate_target()
self.equation.target.scale(1.3).shift(DOWN+LEFT)
self.play(MoveToTarget(self.equation))
self.wait(5)
class Test75(Scene):
'''A nonlinear transformation of the coordinate grid'''
def construct(self):
grid = ComplexPlane().prepare_for_nonlinear_transform(50)
self.add(grid)
self.wait()
self.play(
grid.apply_function,
lambda point: complex_to_R3(R3_to_complex(point)**2),
run_time=5
)
self.wait()
class Test76(Scene):
'''Reordering the vertices to make a five-pointed star'''
def construct(self):
poly = RegularPolygon(5)
verts = poly.get_vertices()
poly2 = Polygon(verts[0], verts[2], verts[4], verts[1], verts[3]).set_fill(BLUE, opacity=0.5)
self.add(poly2)
debugTeX(self, verts)
class Test77(Scene):
'''set_color on an ImageMobject replaces every pixel's rgb with the given color but keeps the alpha'''
def construct(self):
image = ImageMobject("GZTime.png").set_color(RED)
self.add(image)
class MyTransform(Animation):
'''A custom animation subclassing Animation, used in the next scene'''
CONFIG = {
"radians": PI/2,
"axis": OUT,
"about_point": None,
"remover": True,
}
def __init__(self, mobject, target, **kwargs):
digest_config(self, kwargs)
self.mobject = mobject.copy()
self.target = target
def clean_up_from_scene(self, scene):
if self.is_remover():
scene.remove(self.mobject)
scene.add(self.target)
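    # interpolate_mobject is called every frame with alpha going from 0 to 1:
    # it rotates a copy of the starting mobject and blends each submobject's
    # color toward the corresponding color of the target.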
def interpolate_mobject(self, alpha):
now = self.starting_mobject.copy()
now.rotate(
alpha * self.radians,
axis=self.axis,
about_point=self.about_point,
)
for i in range(3):
now[i].set_color(interpolate_color(self.starting_mobject[i].get_color(), self.target[i].get_color(), alpha))
self.mobject.become(now)
class Test78(Scene):
'''One animation scheme for the logo'''
def construct(self):
logo1 = VGroup(
Polygon(np.array([0, 0, 0]), np.array([1, 0, 0]), np.array([0, 2, 0])),
Polygon(np.array([1.5, 0, 0]), np.array([3, 3, 0]), np.array([0, 3, 0])),
Polygon(np.array([2, 0, 0]), np.array([3, 0, 0]), np.array([3, 2, 0])),
).set_stroke(width=0).center()
logo1[0].set_fill(WHITE, 1)
logo1[1].set_fill(BLUE_B, 1)
logo1[2].set_fill(BLUE_C, 1)
logo1.move_to(np.array([2.5, 1, 0]))
logo2 = logo1.copy().rotate(PI/2, about_point=ORIGIN)
logo3 = logo2.copy().rotate(PI/2, about_point=ORIGIN)
logo4 = logo3.copy().rotate(PI/2, about_point=ORIGIN)
logo = VGroup(logo1, logo2, logo3, logo4).scale(1/3)
logo[0][1].set_fill("#C59978", 1)
logo[0][2].set_fill("#8D5630", 1)
text = VGroup(
Text("Manim", font="Nexa Bold"),
Text("Kindergarten", font="Nexa Bold")
).arrange(DOWN, aligned_edge=LEFT, buff=0.3).set_height(2).next_to(logo, buff=0.8).shift(DOWN*0.2)
all_logo = VGroup(logo, text).center()
bg = Rectangle(height=10, width=10, fill_color=BLACK, fill_opacity=1, stroke_width=0)
bg.add_updater(lambda m: m.move_to(logo, aligned_edge=RIGHT))
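        # The black rectangle follows the logo and masks the text, so the text
        # appears to slide out from behind the logo when it is restored.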
text.save_state()
text.shift((text.get_right()[0]-bg.get_right()[0]+0.2)*LEFT)
logo.save_state()
logo.center().scale(1.5)
self.add(text, bg)
self.play(FadeIn(logo[0]))
self.wait()
for i in range(3):
self.play(MyTransform(logo[i], logo[i+1], about_point=logo.get_center()), run_time=0.25, rate_func=smooth)
self.wait(2)
self.play(
text.restore, logo.restore,
rate_func=smooth, run_time=1
)
self.wait()
class Test79(Scene):
'''Coloring letter by letter'''
def construct(self):
tex = TextMobject("text or object")
self.add(tex)
self.wait(0.5)
for letter in tex:
self.play(
LaggedStart(
*[
ApplyMethod(i.set_color, YELLOW)
for i in letter
],
run_time=2
)
)
self.wait()
class Test80(Scene):
'''The fine-grained effect of rate_func'''
def construct(self):
dot = Dot()
self.add(dot)
self.wait()
self.play(dot.shift, RIGHT*3, rate_func=func, run_time=2)
self.wait()
class Test81(Scene):
'''Color scheme for the logo on a white background'''
CONFIG = {
"camera_config": {
"background_color": WHITE,
},
}
def construct(self):
logo1 = VGroup(
Polygon(np.array([0, 0, 0]), np.array([1, 0, 0]), np.array([0, 2, 0])),
Polygon(np.array([1.5, 0, 0]), np.array([3, 3, 0]), np.array([0, 3, 0])),
Polygon(np.array([2, 0, 0]), np.array([3, 0, 0]), np.array([3, 2, 0])),
).set_stroke(width=0).center()
logo1[0].set_fill("#cccccc", 1)
logo1[1].set_fill(BLUE_D, 1)
logo1[2].set_fill(BLUE_E, 1)
logo1.move_to(np.array([2.5, 1, 0]))
logo2 = logo1.copy().rotate(PI/2, about_point=ORIGIN)
logo3 = logo2.copy().rotate(PI/2, about_point=ORIGIN)
logo4 = logo3.copy().rotate(PI/2, about_point=ORIGIN)
logo = VGroup(logo1, logo2, logo3, logo4).scale(0.7).center()
logo[0][1].set_fill("#C59978", 1)
logo[0][2].set_fill("#8D5630", 1)
self.add(logo)
class Test82(Scene):
def construct(self):
tex = TextMobject("ab")
self.add(tex)
class Test83(LogoGenerationTemplate):
'''3B1B's logo animation; not quite the effect I wanted'''
CONFIG = {
"random_seed": 2,
}
def get_logo_animations(self, logo):
layers = logo.spike_layers
for layer in layers:
random.shuffle(layer.submobjects)
for spike in layer:
spike.save_state()
spike.scale(0.5)
spike.apply_complex_function(np.log)
spike.rotate(-90 * DEGREES, about_point=ORIGIN)
spike.set_fill(opacity=0)
return [
FadeIn(
logo.iris_background,
rate_func=squish_rate_func(smooth, 0.25, 1),
run_time=3,
),
AnimationGroup(*[
LaggedStartMap(
Restore, layer,
run_time=3,
path_arc=180 * DEGREES,
rate_func=squish_rate_func(smooth, a / 3.0, (a + 0.9) / 3.0),
lag_ratio=0.8,
)
for layer, a in zip(layers, [0, 2, 1, 0])
]),
Animation(logo.pupil),
]
class Test84(Scene):
'''A nonlinear complex transformation of the coordinate system'''
def construct(self):
grid = ComplexPlane().prepare_for_nonlinear_transform(50)
self.add(grid)
self.wait()
self.play(grid.apply_complex_function, np.exp, run_time=3, rate_func=linear)
self.wait()
class Test85(Scene):
'''A polygon pieced together from Line + VGroup cannot be filled with color'''
def construct(self):
vg = VGroup(
Line(ORIGIN, RIGHT),
Line(RIGHT, UP),
Line(UP, ORIGIN)
).set_fill(BLUE, 1)
self.add(vg)
class Test86(Scene):
'''Details of PointCloudDot: a dot made up of individual pixel points'''
def construct(self):
test = PointCloudDot().scale(30)
self.add(test)
class Test87(Scene):
'''A polyline cannot be represented with Polygon, because Polygon forces the path to close'''
def construct(self):
lines = Polygon(ORIGIN, UP, RIGHT)
self.add(lines)
class Lines(VMobject):
'''A polyline class implemented with set_points_as_corners'''
def __init__(self, *points, **kwargs):
VMobject.__init__(self, **kwargs)
self.set_points_as_corners(points)
class Test88(Scene):
'''ShowCreation looks the same for the polyline class above and for a polyline built from VGroup + Line'''
def construct(self):
# lines = Lines(ORIGIN, UP, RIGHT)
lines = VGroup(
Line(ORIGIN, UP),
Line(UP, RIGHT)
)
self.play(ShowCreation(lines))
class Test89(Scene):
'''Testing PMobject for drawing points; stroke_width sets the point size'''
def construct(self):
points = PMobject(stroke_width=1080)
points.add_points([ORIGIN], color=BLUE)
self.add(points)
class Test90(Scene):
'''An assignment from mk; testing an envelope curve'''
CONFIG = {
"camera_config": {
"background_color": WHITE,
"use_plot_depth": True,
},
}
def construct(self):
circle = Circle(radius = 3, color = DARK_BLUE, plot_depth=3).flip()
center = Dot(color=GREEN)
A = Dot(np.array([-2, 0, 0]), color = RED)
alpha = ValueTracker(0.0001)
B = Dot(color=BLUE, radius=0.07, plot_depth=4)
B.add_updater(lambda m: m.move_to(circle.point_from_proportion(alpha.get_value())))
line1 = DashedLine(A.get_center(), B.get_center(), color=DARK_BROWN)
line1.add_updater(lambda m: m.put_start_and_end_on(A.get_center(), B.get_center()))
C = Dot(color=BLUE, radius=0.07, plot_depth=4)
C.add_updater(lambda m: m.move_to(circle.point_from_proportion(alpha.get_value())).flip(axis=B.get_center()-A.get_center(), about_point=ORIGIN))
line2 = Line(B.get_center(), C.get_center(), color=ORANGE, stroke_width=3)
line2.add_updater(lambda m: m.put_start_and_end_on(B.get_center(), C.get_center()))
trace = VGroup()
self.i = 0
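        # update_trace keeps a frozen copy of the chord every 4th call, so the
        # accumulated copies trace out the envelope (an ellipse, shown at the end).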
def update_trace(m):
self.i += 1
if self.i % 4 == 0:
m.add(line2.copy().clear_updaters())
self.wait(3)
self.play(ShowCreation(circle), ShowCreation(center))
self.wait()
self.play(ShowCreation(A))
alpha.set_value(0.2)
self.play(ShowCreation(B))
self.play(alpha.increment_value, 0.6, run_time=1.5)
self.play(alpha.increment_value, -0.6, run_time=1.6)
self.play(ShowCreation(line1))
self.wait()
ra = Right_angle(corner=B.get_center(), on_the_right=False, stroke_color=BLUE)
ra.move_corner_to(B.get_center())
ra.change_angle_to(line1.get_angle()+PI/2)
self.play(ShowCreation(C), ShowCreation(line2), ShowCreation(ra))
self.wait(2)
self.play(FadeOut(ra))
self.play(alpha.increment_value, 0.6, run_time=1.5)
self.play(alpha.increment_value, -0.7999, run_time=2, rate_func=linear)
self.wait()
self.add(trace)
line2.set_stroke(width=2)
self.wait(2)
trace.add_updater(update_trace)
alpha.set_value(0)
anim = ApplyMethod(alpha.increment_value, 1, run_time=8, rate_func=linear)
self.play(anim)
self.wait(2)
ellipse = Ellipse(width=6, height=2*np.sqrt(5), color=GREEN, plot_depth=10, run_time=2.5)
self.play(ShowCreationThenDestruction(ellipse))
self.wait(5)
class Test91(Scene):
'''A TexMobject gets split apart after coloring'''
def construct(self):
tex = TexMobject("abcdefghijk")
VGroup(tex[0][:2], tex[0][3:5]).set_color(RED)
self.add(tex)
self.wait()
tex2 = VGroup(tex[0][2], tex[0][5])
self.play(tex2.set_color, BLUE)
self.wait(2)
self.remove(*tex[0])
self.wait(2)
class Test92(Scene):
'''Testing shift with multiple arguments'''
def construct(self):
plane = NumberPlane()
dot = Dot().shift(RIGHT, UP, LEFT)
self.add(plane, dot)
class Test93(Scene):
'''Testing tangent lines; works for anything with a path, including text'''
def construct(self):
circle = Circle()
text = SingleStringTexMobject("j").scale(8)
tl = TangentLine(text[0], 0, length=5, stroke_width=1, color=BLUE)
value = ValueTracker(0)
tl.add_updater(
lambda m: m.become(
TangentLine(text[0], value.get_value(), length=5, stroke_width=1, color=BLUE)
)
)
self.add(text, tl)
self.wait()
self.play(value.increment_value, 1, run_time=10, rate_func=linear)
self.wait()
class Test94(ThreeDScene):
'''Moving the camera frame center in 3D, but there doesn't seem to be any animation'''
def construct(self):
self.set_to_default_angled_camera_orientation()
cube = Cube()
axes = ThreeDAxes()
self.add(axes, cube)
self.wait()
self.play(self.camera.frame_center.move_to, LEFT*2, run_time=3)
self.wait()
class Test95(Scene):
'''Used while fixing a Text bug; probably no longer works'''
def construct(self):
text = Text("abcdefghijklmnopqrstuvwkyz", font="庞门正道标题体", fill_opacity=0, debug=True).scale(1).set_stroke(width=5, opacity=1)
diff = VGroup(
Text("庞门正道标题体", font="庞门正道标题体", fill_opacity=0).scale(1).set_stroke(width=5, opacity=1),
Text("庞门正道标题体", font="庞门正道标题体", fill_opacity=0, debug=True).scale(1).set_stroke(width=5, opacity=1)
).arrange(DOWN)
# points = text[0].get_points()
# print(points)
self.add(diff)
# debugTeX(self, points, 0.2)
class Test96(Scene):
'''Used while fixing a Text bug; probably no longer works'''
def construct(self):
text = Text("a b", font="庞门正道标题体", debug=True).scale(3).shift(UP*2)
text3 = Text("abcd", font="庞门正道标题体", debug=True).scale(3).shift(DOWN*2)
text4 = Text("啦 啦 啦", font="庞门正道标题体", debug=True).scale(3).shift(UP*2)
dot = Dot(ORIGIN, color=BLUE)
self.add(dot)
self.wait()
self.play(Write(text))
self.wait(2)
self.play(Transform(text, text3))
self.wait(2)
self.play(Transform(text, text4))
self.wait(3)
class Test97(Scene):
'''Used while fixing a Text bug; probably no longer works'''
def construct(self):
text = VGroup(
Text("manim", font="庞门正道标题体", debug=True).set_stroke(width=15, opacity=0.5),
Text("manim", font="庞门正道标题体", debug=True, fill_opacity=0).set_stroke(width=5)
).scale(8).arrange(DOWN)
self.add(text)
for i in range(5):
points = text[1][i].get_points()
debugTeX(self, points)
class Test98(Scene):
'''Used while fixing a Text bug; probably no longer works'''
def construct(self):
text = VGroup(
Text("a b", font="庞门正道标题体", debug=False).scale(3).shift(UP*1.5),
Text("a b", font="庞门正道标题体").scale(3).shift(DOWN*1.5),
)
comment = VGroup(
Text("before:", font="Consolas").scale(2).next_to(text[0], LEFT, buff=1),
Text("after:", font="Consolas").scale(2).next_to(text[1], LEFT, buff=1),
)
self.add(text, comment)
for i in text:
debugTeX(self, i, 0.8)
class Test99(Scene):
'''Used while fixing a Text bug; probably no longer works'''
def construct(self):
title = Text("default size compare", font="Consolas", color=BLUE).scale(1.5).shift(UP*2)
text = VGroup(
VGroup(
Text("before", font="Consolas").scale(2),
TextMobject("before"),
).arrange(RIGHT),
VGroup(
Text("after", font="Consolas"),
TextMobject("after"),
).arrange(RIGHT),
).arrange(DOWN, buff=1)
self.add(text, title)
class Test100(Scene):
'''Testing ImageMobject with a gif: only the first frame is kept, no animation'''
def construct(self):
img = ImageMobject("Test96.gif")
self.add(img)
self.wait(5)
class Test101(Scene):
'''Experimenting with a dark background mask'''
CONFIG = {
"reverse_order": False,
}
def construct(self):
img = ImageMobject("latexlive.png", height=8)
self.add(img)
rects = VGroup(*[Rectangle() for x in range(2)])
rects.set_stroke(width=0)
rects.set_fill(GREY, 0.5)
rects.set_height(2.2, stretch=True)
rects.set_width(7.4, stretch=True)
rects[0].move_to(DOWN*0.1)
rects[1].set_height(1.5, stretch=True)
rects[1].set_width(3, stretch=True)
rects[1].move_to(DOWN*2.75)
inv_rects = VGroup()
for rect in rects:
fsr = FullScreenFadeRectangle()
fsr.append_points(rect.points[::-1])
inv_rects.add(fsr)
inv_rects.set_fill(BLACK, 0.7)
self.wait(2)
self.play(VFadeIn(inv_rects[0]))
self.wait(2)
self.play(Transform(inv_rects[0], inv_rects[1]))
self.wait(2)
self.play(VFadeOut(inv_rects[0]))
self.wait(2)
class Test102(Scene):
'''Transform between Images of the same size'''
def construct(self):
img1 = ImageMobject("latexlive.png", height=8)
img2 = ImageMobject("latexhelp.png", height=8)
self.add(img1)
self.wait(2)
self.play(
Transform(img1, img2), run_time=2
)
self.wait(2)
class Test103(Scene):
'''Testing the Code mobject'''
def construct(self):
helloworldcpp = Code(
"helloworldcpp.cpp",
tab_width=4,
insert_line_no=True,
style="autumn",
background_stroke_color=BLACK,
background="window",
language="cpp",
)
self.add(helloworldcpp)
class Test104(Scene):
'''Used while fixing a Text bug'''
def construct(self):
text1 = Text(" ab\ncd", font="Consolas", size=2)
text2 = Text("ef\ngh", font="Consolas", size=2)
self.add(text1)
self.wait()
self.play(Transform(text1, text2))
self.wait()
class Test105(Scene):
'''Used while fixing a Text bug'''
def construct(self):
text = Text(" ab\ncd\nef", font="Consolas", size=2)
text2 = Text("ab\n cd\nef", font="Consolas", size=2)
text[2].set_color(YELLOW)
self.add(text)
self.wait()
self.play(Transform(text, text2))
self.wait()
debugTeX(self, text)
class Test106(Scene):
'''https://github.com/3b1b/manim/pull/1072'''
CONFIG = {
"camera_config": {
"background_color": WHITE,
},
}
def construct(self):
plane = NumberPlane(axis_config={"stroke_color": BLACK})
plane.add_coordinates(number_config={"color": BLACK})
self.add(plane)
class Test107(Scene):
'''A quick one-off image'''
CONFIG = {
"camera_config": {
"background_color": WHITE,
},
}
def construct(self):
main = Text("粉丝问答", font="思源黑体 CN Heavy", color=BLACK).set_width(6)
comment = Text("(凑够9张图)", font="思源黑体 CN Light", color=BLUE_D)
comment.next_to(main, DOWN)
self.add(main, comment)
class Test108(Scene):
'''https://github.com/3b1b/manim/issues/1095'''
CONFIG = {
"v_coord_strings" : ["-1", "2"],
}
def construct(self):
rule = TexMobject(
"\\text{Transformed}\\vec{\\textbf{v}}",
" = %s"%self.v_coord_strings[0],
"(\\text{Transformed}\\hat{\\imath})",
"+%s"%self.v_coord_strings[1],
"(\\text{Transformed}\\hat{\\jmath})",
)
self.add(rule)
class Test109(Scene):
'''Generates the header image used in the README'''
CONFIG = {
"camera_config": {
"background_color": WHITE,
},
}
def construct(self):
logo = Logo(black_bg=False).set_height(2)
img = ImageMobject("Tony.png").set_height(2)
Group(logo, img).arrange(RIGHT, buff=1.5).center()
line = Line(UP, DOWN, stroke_width=8, color=BLACK).move_to(mid(logo.get_right(), img.get_left()))
line.set_length(1.4)
text = VGroup(
Text("Manim-Kindergarten", font="Orbitron", color=DARK_GRAY),
Text("鹤翔万里", font="庞门正道标题体", color=BLACK, size=2.3)
).arrange(DOWN, aligned_edge=LEFT, buff=0.1).next_to(img, buff=0.5)
text[0][0].set_color(logo.color_2[2])
text[0][6].set_color(logo.color_1[2])
self.add(logo, img, line, text)
Group(*self.mobjects).center()
class Test110(Scene):
"""给cigar的头图,效果不太好"""
CONFIG = {
"camera_config": {
"background_color": WHITE,
},
}
def construct(self):
logo = Logo(black_bg=False).set_height(2)
img = ImageMobject("cigar.png").set_height(2)
Group(logo, img).arrange(RIGHT, buff=1.5).center()
line = Line(UP, DOWN, stroke_width=8, color=BLACK).move_to(mid(logo.get_right(), img.get_left()))
line.set_length(1.4)
text = VGroup(
Text("Manim-Kindergarten", font="Orbitron", color=DARK_GRAY),
Text("cigar666", font="庞门正道标题体", color=BLACK, size=2.1)
).arrange(DOWN, aligned_edge=LEFT, buff=0.1).next_to(img, buff=0.5)
text[0][0].set_color(logo.color_2[2])
text[0][6].set_color(logo.color_1[2])
self.add(logo, img, line, text)
Group(*self.mobjects).center()
class Test111(Scene):
"""Fade和VFade的区别"""
def construct(self):
sq = VGroup(
Square(stroke_width=15, color=RED, opacity=0.5, fill_opacity=0.8),
Square(stroke_width=15, color=RED, opacity=0.5, fill_opacity=0.8),
).arrange(RIGHT, buff=1)
texts = VGroup(
Text("FadeIn", font="Consolas", size=1.3).next_to(sq[0], DOWN),
Text("VFadeIn", font="Consolas", size=1.3).next_to(sq[1], DOWN),
)
text = VGroup(
Text("FadeOut", font="Consolas", size=1.3).next_to(sq[0], DOWN),
Text("VFadeOut", font="Consolas", size=1.3).next_to(sq[1], DOWN),
)
self.add(texts)
self.wait()
self.play(
FadeIn(sq[0]),
VFadeIn(sq[1]),
run_time=3
)
self.wait()
self.play(Transform(texts[0], text[0]), Transform(texts[1], text[1]))
self.wait()
self.play(
FadeOut(sq[0]),
VFadeOut(sq[1]),
run_time=3
)
self.wait()
class Test112(Scene):
"""给VGroup用for循环施加updater,需要转换全局变量i为局部变量n"""
def construct(self):
ups = VGroup(
*[
Dot(color=BLUE).move_to([i, 1, 0])
for i in range(-3, 4)
]
)
downs = VGroup(
*[
Dot(color=YELLOW).move_to([i, -1, 0])
for i in range(-3, 4)
]
)
lines = VGroup(
*[
Line(ups[i], downs[i])
for i in range(0, 7)
]
)
for i in range(7):
lines[i].add_updater(lambda m, n=i: m.put_start_and_end_on(ups[n].get_bottom(), downs[n].get_top()))
self.add(ups, downs, lines)
self.wait()
self.play(
ups.shift, LEFT * 2
)
self.play(
downs.shift, RIGHT * 2
)
self.wait()
class Test113(Scene):
def construct(self):
svg = SVGMobject("afed61182cb6d368.svg")
self.add(svg)
class Test114(Scene):
"""字母也能做切线"""
def construct(self):
ratio = ValueTracker(0)
text = TexMobject("S", fill_opacity=0, stroke_width=2).set_height(7)
point = Dot().add_updater(
lambda m: m.move_to(text[0][0].point_from_proportion(ratio.get_value()))
)
self.add(text, point)
self.wait()
self.play(ratio.set_value, 1, run_time=3, rate_func=linear)
self.wait()
class Test115(Scene):
"""Write的具体细节"""
def construct(self):
text = TextMobject("+").set_height(5)
self.wait()
progress = NumberLine(x_min=0, x_max=1, unit_size=10, tick_frequency=0.5).center().to_edge(DOWN)
alpha = ValueTracker(0)
tick = Triangle(fill_opacity=1).scale(0.2).rotate(PI)
tick.add_updater(lambda m: m.move_to(progress.n2p(alpha.get_value()), aligned_edge=DOWN))
self.add(progress, tick)
self.play(Write(text, stroke_width=30), alpha.set_value, 1, run_time=5, rate_func=linear)
self.wait()
class Test116(Scene):
def construct(self):
test = ParametricFunction(
lambda t: np.array([
2*np.sin(3*t)*np.cos(t),
2*np.sin(3*t)*np.sin(t),
0
]),
t_min=0, t_max=2*PI,
)
debugTeX(self, test.points)
self.add(test)
class Test117(Scene):
def construct(self):
grid = NumberPlane(
center_point=LEFT*3,
x_max=15
)
self.add(grid)
class Test118(Scene):
"""好像是测试妈咪叔的latexlive写的"""
def construct(self):
text = TextMobject("\\begin{equation} x = a_0 \+ \\cfrac{1}{a_1 \+ \\cfrac{1}{a_2 \+ \\cfrac{1}{a_3 \+ \\cfrac{1}{a_4} } } }\\end{equation}")
self.add(text)
class Test119(Scene):
"""mk的logo"""
def construct(self):
logo = Logo().set_height(8)
self.add(logo)
class Test120(Scene):
"""MaintainPositionRelativeTo"""
def construct(self):
dot = Dot()
circle = Circle()
triangle = Triangle()
self.play(
Write(circle),
Write(dot),
Write(triangle),
run_time=3
)
self.play(
dot.shift, UP,
MaintainPositionRelativeTo(triangle, dot)
)
class Test121(Scene):
"""VectorizedPoint不会被显示"""
def construct(self):
v = VectorizedPoint([1, 1, 0], stroke_width=10, stroke_opacity=1, fill_opacity=0, color=YELLOW, artificial_width=10, artificial_height=10)
self.add(v)
class Test122(Scene):
"""StreamLines"""
def construct(self):
sl = StreamLines(
lambda p: rotate_vector(p / 3, 90 * DEGREES), color_by_magnitude=True, color_by_arc_length=False
)
self.add(sl)
self.wait()
self.play(ShowPassingFlashWithThinningStrokeWidth(sl))
self.wait()
class Test123(Scene):
"""和Test112类似"""
def construct(self):
heights = [2, 3, 5, 7, 9]
trackers = [ValueTracker(h) for h in heights]
num_tex = [DecimalNumber(t.get_value())\
.add_updater(lambda v, x=t: v.set_value(x.get_value()))\
for t in trackers
]
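        # The default argument x=t binds each tracker at definition time,
        # avoiding the late-binding closure problem (compare Test42 and Test112).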
for i in range(len(num_tex)):
tex = num_tex[i]
tex.shift(i*RIGHT)
self.play(Write(tex))
class Test124(Scene):
"""discord上有个人问的动画"""
def construct(self):
tex = TexMobject("f(n)+f(n)=2f(n)")
self.wait()
self.play(Write(tex[0][:9]))
self.play(Write(tex[0][9]))
self.wait()
self.play(
TransformFromCopy(tex[0][:4], tex[0][-4:]),
TransformFromCopy(tex[0][5:9], tex[0][-4:].copy()),
FadeInFrom(tex[0][-5], RIGHT)
)
self.wait()
class Test125(Scene):
"""同上"""
def construct(self):
tex = TexMobject("f(n)+f(n)")
two = TexMobject("2").next_to(tex, LEFT, buff=0.02)
self.wait()
self.play(Write(tex))
self.wait()
self.play(
Transform(tex[0][5:], tex[0][:4].copy()),
FadeOut(tex[0][4]),
Write(two)
)
self.wait()
class Test126(Scene):
"""给LaTeX改字体,失败了qwq"""
def construct(self):
text = TextMobject("测试字体test")
self.add(text)
class Test127(Scene):
"""和Axes有关的测试"""
def construct(self):
axes = Axes(
x_min=-14, x_max=14,
y_min=-8, y_max=8,
number_line_config={
"unit_size": 0.5,
"tick_frequency": 2,
}
)
axes.add_coordinates(
[-6, -4, -2, 2, 4, 6],
[-4, -2, 2, 4]
)
graph = VGroup()
graph.add(axes.get_graph(lambda x: -(np.exp(-x)-3), color=RED, x_min=-2.9))
graph.add(axes.get_graph(lambda x: x, color=PURPLE))
graph.add(axes.get_graph(lambda x: -np.log(3-x), color=BLUE, step_size=0.001))
self.add(axes, graph)
class Test128(Scene):
"""给action_renderer项目做的头图"""
CONFIG = {
"camera_config": {
"background_color": WHITE,
},
}
def construct(self):
logo = Logo(black_bg=False).set_height(2)
img = ImageMobject("action.png").set_height(2)
Group(logo, img).arrange(RIGHT, buff=1.3).center()
line = Line(UP, DOWN, stroke_width=8, color=BLACK).move_to(mid(logo.get_right(), img.get_left()))
line.set_length(1.4)
text = VGroup(
Text("Manim-Kindergarten", font="Orbitron", color=DARK_GRAY),
Text("manim_action_renderer", font="庞门正道标题体", color=BLACK, size=1.4)
).arrange(DOWN, aligned_edge=LEFT, buff=0.25).next_to(img, buff=0.5)
text[0][0].set_color(logo.color_2[2])
text[0][6].set_color(logo.color_1[2])
self.add(logo, img, line, text)
Group(*self.mobjects).center()
class Test129(Scene):
"""updater中特判点重合"""
def construct(self):
A = Dot(3 * RIGHT)
B = Dot(2 * RIGHT)
label_A = TexMobject('A').next_to(A, DOWN, buff=SMALL_BUFF)
label_B = TexMobject('B').next_to(B, DOWN, buff=SMALL_BUFF)
m = -5*RIGHT
C = A.copy()
l = Line(A.get_center(), C.get_center())
label_C = TexMobject('C').next_to(C, UP, buff=SMALL_BUFF)
label_C.add_updater(lambda m: m.next_to(C, UP, buff=SMALL_BUFF))
def up_loca(mbj):
if all(A.get_center() == C.get_center()):
pass
else:
mbj.put_start_and_end_on(A.get_center(), C.get_center())
l.add_updater(up_loca)
self.add(A, B, label_A, label_B, label_C, l)
self.play(C.move_to, m, run_time=5)
| 32.584906
| 170
| 0.53896
|
08cb4c8064d8360c27a8321c672012078a2b889f
| 4,013
|
py
|
Python
|
virtool/jobs/create_sample.py
|
jinxuchen/virtool
|
5a6e48fab986d77ad76d5583fa560798babe3f25
|
[
"MIT"
] | null | null | null |
virtool/jobs/create_sample.py
|
jinxuchen/virtool
|
5a6e48fab986d77ad76d5583fa560798babe3f25
|
[
"MIT"
] | null | null | null |
virtool/jobs/create_sample.py
|
jinxuchen/virtool
|
5a6e48fab986d77ad76d5583fa560798babe3f25
|
[
"MIT"
] | null | null | null |
import os
import shutil
import virtool.files.db
import virtool.samples.db
import virtool.jobs.fastqc
import virtool.jobs.job
import virtool.jobs.utils
import virtool.samples.utils
import virtool.utils
class Job(virtool.jobs.job.Job):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
#: The ordered list of :ref:`stage methods <stage-methods>` that are called by the job.
self._stage_list = [
self.make_sample_dir,
self.copy_files,
self.fastqc,
self.parse_fastqc,
self.clean_watch
]
def check_db(self):
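        """Load the parameters for this sample job from the database, settings, and task args."""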
self.params = virtool.jobs.utils.get_sample_params(
self.db,
self.settings,
self.task_args
)
def make_sample_dir(self):
"""
Make a data directory for the sample and a subdirectory for analyses. Read files, quality data from FastQC, and
analysis data will be stored here.
"""
try:
os.makedirs(self.params["sample_path"])
os.makedirs(self.params["analysis_path"])
os.makedirs(self.params["fastqc_path"])
except OSError:
# If the path already exists, remove it and try again.
shutil.rmtree(self.params["sample_path"])
self.make_sample_dir()
def copy_files(self):
"""
Copy the files from the files directory to the nascent sample directory.
"""
files = self.params["files"]
sample_id = self.params["sample_id"]
paths = [os.path.join(self.settings["data_path"], "files", file["id"]) for file in files]
sizes = virtool.jobs.utils.copy_files_to_sample(
paths,
self.params["sample_path"],
self.proc
)
raw = list()
for index, file in enumerate(files):
name = f"reads_{index + 1}.fq.gz"
raw.append({
"name": name,
"download_url": f"/download/samples/{sample_id}/{name}",
"size": sizes[index],
"from": file,
"raw": True
})
self.db.samples.update_one({"_id": sample_id}, {
"$set": {
"files": raw
}
})
def fastqc(self):
"""
Runs FastQC on the renamed, trimmed read files.
"""
read_paths = virtool.samples.utils.join_read_paths(self.params["sample_path"], self.params["paired"])
virtool.jobs.fastqc.run_fastqc(
self.run_subprocess,
self.proc,
read_paths,
self.params["fastqc_path"]
)
def parse_fastqc(self):
"""
Capture the desired data from the FastQC output. The data is added to the samples database
in the main run() method
"""
qc = virtool.jobs.fastqc.parse_fastqc(self.params["fastqc_path"], self.params["sample_path"])
self.db.samples.update_one({"_id": self.params["sample_id"]}, {
"$set": {
"quality": qc,
"imported": False
}
})
self.dispatch("samples", "update", [self.params["sample_id"]])
def clean_watch(self):
""" Remove the original read files from the files directory """
self.db.files.delete_many({"_id": {"$in": self.params["files"]}})
self.dispatch("files", "delete", self.params["files"])
def cleanup(self):
for file_id in self.params["files"]:
self.db.files.update_many({"_id": file_id}, {
"$set": {
"reserved": False
}
})
self.dispatch("files", "update", self.params["files"])
try:
shutil.rmtree(self.params["sample_path"])
except FileNotFoundError:
pass
self.db.samples.delete_one({"_id": self.params["sample_id"]})
self.dispatch("samples", "delete", [self.params["sample_id"]])
| 29.07971
| 119
| 0.552455
|
a9ab94c5cb1f497cd06a0d2b775b38b37eb47da0
| 2,549
|
py
|
Python
|
evaluation/gekko/statistics.py
|
mczero80/japonicus
|
d183f24a7e1d0e52052f4c6e5e82604d9e7700d3
|
[
"MIT"
] | 229
|
2018-01-05T13:32:52.000Z
|
2021-12-18T00:57:49.000Z
|
evaluation/gekko/statistics.py
|
mczero80/japonicus
|
d183f24a7e1d0e52052f4c6e5e82604d9e7700d3
|
[
"MIT"
] | 142
|
2018-01-04T23:39:28.000Z
|
2019-12-14T16:38:24.000Z
|
evaluation/gekko/statistics.py
|
mczero80/japonicus
|
d183f24a7e1d0e52052f4c6e5e82604d9e7700d3
|
[
"MIT"
] | 95
|
2018-01-06T05:35:23.000Z
|
2021-12-13T16:42:22.000Z
|
#!/bin/python
from deap import tools
import numpy as np
epochStatisticsNames = {
'avg': 'Average profit',
'std': 'Profit variation',
'min': 'Minimum profit',
'max': 'Maximum profit',
'size': 'Population size',
'maxsize': 'Max population size',
'avgTrades': 'Avg trade number',
'sharpe': 'Avg sharpe ratio',
'avgExposure': "Avg exposure time",
'nbElderDies': 'Elder dies count'
}
periodicStatisticsNames = {
'evaluationScore': "Evaluation Score",
'evaluationScoreOnSecondary': "Score on Secondary Dataset"
}
def compileStats(locale):
# --get proper evolution statistics;
Stats = locale.stats.compile(locale.population)
Stats['dateRange'] = ' '.join([DR.textDaterange()
for DR in locale.Dataset])\
if not locale.EPOCH else None
Stats['maxsize'] = locale.POP_SIZE
Stats['size'] = len(locale.population)
Stats['avgTrades'] = locale.extraStats['avgTrades']
Stats['avgExposure'] = locale.extraStats['avgExposure']
#Stats['nbElderDies'] = locale.extraStats['nbElderDies']
Stats['sharpe'] = np.mean([x.fitness.values[1] for x in locale.population])
Stats['evaluationScoreOnSecondary'] = locale.lastEvaluationOnSecondary
Stats['evaluationScore'] = locale.lastEvaluation
locale.lastEvaluationOnSecondary = None
locale.lastEvaluation = None
Stats['id'] = locale.EPOCH
locale.EvolutionStatistics.append(Stats)
locale.World.logger.write_evolution_logs(
locale.EPOCH, locale.EvolutionStatistics, locale.name
)
def showStatistics(locale):
# show information;
Stats = locale.EvolutionStatistics[locale.EPOCH]
print("EPOCH %i\t&%i" % (locale.EPOCH, locale.extraStats['nb_evaluated']))
statnames = ['max', 'avg', 'min',
'std', 'size', 'maxsize',
'avgTrades', 'sharpe', 'avgExposure',
# 'nbElderDies'
]
statisticsText = []
for s in range(len(statnames)):
SNAME = statnames[s]
SVAL = Stats[SNAME]
currentStatisticsText = "%s" % epochStatisticsNames[SNAME]
if not SVAL % 1:
currentStatisticsText += " %i" % SVAL
else:
currentStatisticsText += " %.3f" % SVAL
statisticsText.append(currentStatisticsText)
columnWidth = max([len(STXT) for STXT in statisticsText]) + 3
for j in range(0, len(statisticsText), 2):
print(''.join(word.ljust(columnWidth) for word in statisticsText[j:j+2]))
print()
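# Hedged sketch (illustration only): the same two-column layout used by
# showStatistics() above, applied to hypothetical stat strings.
def _two_column_demo():
    texts = ["Average profit 1.234", "Minimum profit -0.500", "Maximum profit 3.200"]
    # Pad every entry to the widest entry plus a small gutter, then print in pairs.
    width = max(len(t) for t in texts) + 3
    for j in range(0, len(texts), 2):
        print(''.join(word.ljust(width) for word in texts[j:j + 2]))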
| 34.445946
| 81
| 0.635151
|
f34623c6dadd4553f2cf87709c2d932fca3c52ef
| 13,521
|
py
|
Python
|
python/ray/serve/api.py
|
hekaisheng/ray
|
05fe6dc278126352e830a21b408027bd7a8cfd78
|
[
"Apache-2.0"
] | 1
|
2021-04-08T12:02:58.000Z
|
2021-04-08T12:02:58.000Z
|
python/ray/serve/api.py
|
hekaisheng/ray
|
05fe6dc278126352e830a21b408027bd7a8cfd78
|
[
"Apache-2.0"
] | null | null | null |
python/ray/serve/api.py
|
hekaisheng/ray
|
05fe6dc278126352e830a21b408027bd7a8cfd78
|
[
"Apache-2.0"
] | null | null | null |
from functools import wraps
import ray
from ray.serve.constants import (DEFAULT_HTTP_HOST, DEFAULT_HTTP_PORT,
SERVE_CONTROLLER_NAME, HTTP_PROXY_TIMEOUT)
from ray.serve.controller import ServeController
from ray.serve.handle import RayServeHandle
from ray.serve.utils import (block_until_http_ready, format_actor_name)
from ray.serve.exceptions import RayServeException
from ray.serve.config import BackendConfig, ReplicaConfig
from ray.actor import ActorHandle
from typing import Any, Callable, Dict, List, Optional, Type, Union
controller = None
def _get_controller() -> ActorHandle:
"""Used for internal purpose because using just import serve.global_state
will always reference the original None object.
"""
global controller
if controller is None:
raise RayServeException("Please run serve.init to initialize or "
"connect to existing ray serve cluster.")
return controller
def _ensure_connected(f: Callable) -> Callable:
@wraps(f)
def check(*args, **kwargs):
_get_controller()
return f(*args, **kwargs)
return check
def accept_batch(f: Callable) -> Callable:
"""Annotation to mark a serving function that batch is accepted.
This annotation need to be used to mark a function expect all arguments
to be passed into a list.
Example:
>>> @serve.accept_batch
def serving_func(flask_request):
assert isinstance(flask_request, list)
...
>>> class ServingActor:
@serve.accept_batch
def __call__(self, *, python_arg=None):
assert isinstance(python_arg, list)
"""
f._serve_accept_batch = True
return f
def init(name: Optional[str] = None,
http_host: str = DEFAULT_HTTP_HOST,
http_port: int = DEFAULT_HTTP_PORT,
_http_middlewares: List[Any] = []) -> None:
"""Initialize or connect to a serve cluster.
If the serve cluster is already initialized, this function will just return.
If `ray.init` has not been called in this process, it will be called with
no arguments. To specify kwargs to `ray.init`, it should be called
separately before calling `serve.init`.
Args:
name (str): A unique name for this serve instance. This allows
multiple serve instances to run on the same ray cluster. Must be
specified in all subsequent serve.init() calls.
http_host (str): Host for HTTP servers. Defaults to "0.0.0.0". Serve
starts one HTTP server per node in the Ray cluster.
http_port (int): Port for the HTTP server. Defaults to 8000.
"""
if name is not None and not isinstance(name, str):
raise TypeError("name must be a string.")
# Initialize ray if needed.
if not ray.is_initialized():
ray.init()
# Try to get serve controller if it exists
global controller
controller_name = format_actor_name(SERVE_CONTROLLER_NAME, name)
try:
controller = ray.get_actor(controller_name)
return
except ValueError:
pass
controller = ServeController.options(
name=controller_name,
lifetime="detached",
max_restarts=-1,
max_task_retries=-1,
).remote(name, http_host, http_port, _http_middlewares)
futures = []
for node_id in ray.state.node_ids():
future = block_until_http_ready.options(
num_cpus=0, resources={
node_id: 0.01
}).remote(
"http://{}:{}/-/routes".format(http_host, http_port),
timeout=HTTP_PROXY_TIMEOUT)
futures.append(future)
ray.get(futures)
@_ensure_connected
def shutdown() -> None:
"""Completely shut down the connected Serve instance.
Shuts down all processes and deletes all state associated with the
currently connected Serve instance (via serve.init).
"""
global controller
ray.get(controller.shutdown.remote())
ray.kill(controller, no_restart=True)
controller = None
@_ensure_connected
def create_endpoint(endpoint_name: str,
*,
backend: str = None,
route: Optional[str] = None,
methods: List[str] = ["GET"]) -> None:
"""Create a service endpoint given route_expression.
Args:
endpoint_name (str): A name to associate with the endpoint.
backend (str, required): The backend that will serve requests to
this endpoint. To change this or split traffic among backends, use
`serve.set_traffic`.
route (str, optional): A string beginning with "/". The HTTP server will
use the string to match the path.
methods(List[str], optional): The HTTP methods that are valid for this
endpoint.
"""
if backend is None:
raise TypeError("backend must be specified when creating "
"an endpoint.")
elif not isinstance(backend, str):
raise TypeError("backend must be a string, got {}.".format(
type(backend)))
if route is not None:
if not isinstance(route, str) or not route.startswith("/"):
raise TypeError("route must be a string starting with '/'.")
if not isinstance(methods, list):
raise TypeError(
"methods must be a list of strings, but got type {}".format(
type(methods)))
endpoints = list_endpoints()
if endpoint_name in endpoints:
methods_old = endpoints[endpoint_name]["methods"]
route_old = endpoints[endpoint_name]["route"]
if sorted(methods_old) == sorted(methods) and route_old == route:
raise ValueError(
"Route '{}' is already registered to endpoint '{}' "
"with methods '{}'. To set the backend for this "
"endpoint, please use serve.set_traffic().".format(
route, endpoint_name, methods))
upper_methods = []
for method in methods:
if not isinstance(method, str):
raise TypeError("methods must be a list of strings, but contained "
"an element of type {}".format(type(method)))
upper_methods.append(method.upper())
ray.get(
controller.create_endpoint.remote(endpoint_name, {backend: 1.0}, route,
upper_methods))
@_ensure_connected
def delete_endpoint(endpoint: str) -> None:
"""Delete the given endpoint.
Does not delete any associated backends.
"""
ray.get(controller.delete_endpoint.remote(endpoint))
@_ensure_connected
def list_endpoints() -> Dict[str, Dict[str, Any]]:
"""Returns a dictionary of all registered endpoints.
The dictionary keys are endpoint names and values are dictionaries
of the form: {"methods": List[str], "traffic": Dict[str, float]}.
"""
return ray.get(controller.get_all_endpoints.remote())
@_ensure_connected
def update_backend_config(backend_tag: str,
config_options: Dict[str, Any]) -> None:
"""Update a backend configuration for a backend tag.
Keys not specified in the passed dictionary will be left unchanged.
Args:
backend_tag(str): A registered backend.
config_options(dict): Backend config options to update.
Supported options:
- "num_replicas": number of worker processes to start up that
will handle requests to this backend.
- "max_batch_size": the maximum number of requests that will
be processed in one batch by this backend.
- "batch_wait_timeout": time in seconds that backend replicas
will wait for a full batch of requests before
processing a partial batch.
- "max_concurrent_queries": the maximum number of queries
that will be sent to a replica of this backend
without receiving a response.
"""
if not isinstance(config_options, dict):
raise ValueError("config_options must be a dictionary.")
ray.get(
controller.update_backend_config.remote(backend_tag, config_options))
@_ensure_connected
def get_backend_config(backend_tag: str):
"""Get the backend configuration for a backend tag.
Args:
backend_tag(str): A registered backend.
"""
return ray.get(controller.get_backend_config.remote(backend_tag))
@_ensure_connected
def create_backend(backend_tag: str,
func_or_class: Union[Callable, Type[Callable]],
*actor_init_args: Any,
ray_actor_options: Optional[Dict] = None,
config: Optional[Dict[str, Any]] = None) -> None:
"""Create a backend with the provided tag.
The backend will serve requests with func_or_class.
Args:
backend_tag (str): a unique tag assigned to identify this backend.
func_or_class (callable, class): a function or a class implementing
__call__.
actor_init_args (optional): the arguments to pass to the class
initialization method.
ray_actor_options (optional): options to be passed into the
@ray.remote decorator for the backend actor.
config (optional): configuration options for this backend.
Supported options:
- "num_replicas": number of worker processes to start up that will
handle requests to this backend.
- "max_batch_size": the maximum number of requests that will
be processed in one batch by this backend.
- "batch_wait_timeout": time in seconds that backend replicas
will wait for a full batch of requests before processing a
partial batch.
- "max_concurrent_queries": the maximum number of queries that will
be sent to a replica of this backend without receiving a
response.
"""
if backend_tag in list_backends():
raise ValueError(
"Cannot create backend. "
"Backend '{}' is already registered.".format(backend_tag))
if config is None:
config = {}
if not isinstance(config, dict):
raise TypeError("config must be a dictionary.")
replica_config = ReplicaConfig(
func_or_class, *actor_init_args, ray_actor_options=ray_actor_options)
backend_config = BackendConfig(config, replica_config.accepts_batches,
replica_config.is_blocking)
ray.get(
controller.create_backend.remote(backend_tag, backend_config,
replica_config))
@_ensure_connected
def list_backends() -> Dict[str, Dict[str, Any]]:
"""Returns a dictionary of all registered backends.
Dictionary maps backend tags to backend configs.
"""
return ray.get(controller.get_all_backends.remote())
@_ensure_connected
def delete_backend(backend_tag: str) -> None:
"""Delete the given backend.
The backend must not currently be used by any endpoints.
"""
ray.get(controller.delete_backend.remote(backend_tag))
@_ensure_connected
def set_traffic(endpoint_name: str,
traffic_policy_dictionary: Dict[str, float]) -> None:
"""Associate a service endpoint with traffic policy.
Example:
>>> serve.set_traffic("service-name", {
"backend:v1": 0.5,
"backend:v2": 0.5
})
Args:
endpoint_name (str): A registered service endpoint.
traffic_policy_dictionary (dict): a dictionary mapping backend names
to their traffic weights. The weights must sum to 1.
"""
ray.get(
controller.set_traffic.remote(endpoint_name,
traffic_policy_dictionary))
@_ensure_connected
def shadow_traffic(endpoint_name: str, backend_tag: str,
proportion: float) -> None:
"""Shadow traffic from an endpoint to a backend.
The specified proportion of requests will be duplicated and sent to the
backend. Responses of the duplicated traffic will be ignored.
The backend must not already be in use.
To stop shadowing traffic to a backend, call `shadow_traffic` with
proportion equal to 0.
Args:
endpoint_name (str): A registered service endpoint.
backend_tag (str): A registered backend.
proportion (float): The proportion of traffic from 0 to 1.
"""
if not isinstance(proportion, (float, int)) or not 0 <= proportion <= 1:
raise TypeError("proportion must be a float from 0 to 1.")
ray.get(
controller.shadow_traffic.remote(endpoint_name, backend_tag,
proportion))
@_ensure_connected
def get_handle(endpoint_name: str, missing_ok: bool = False) -> RayServeHandle:
"""Retrieve RayServeHandle for service endpoint to invoke it from Python.
Args:
endpoint_name (str): A registered service endpoint.
missing_ok (bool): If true, skip the check for the endpoint's existence.
This can be useful when the endpoint has not yet been registered.
Returns:
RayServeHandle
"""
if not missing_ok:
assert endpoint_name in ray.get(controller.get_all_endpoints.remote())
# TODO(edoakes): we should choose the router on the same node.
routers = ray.get(controller.get_routers.remote())
return RayServeHandle(
list(routers.values())[0],
endpoint_name,
)
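# Hedged usage sketch (not part of the original module): wires together the
# public functions defined above. The backend/endpoint names, the route and the
# ``echo`` handler are hypothetical placeholders, not part of the Ray Serve API.
def _example_usage():
    def echo(flask_request):
        return "hello"

    init()
    create_backend("my_backend", echo)
    create_endpoint("my_endpoint", backend="my_backend", route="/echo")
    # Traffic can later be split across backends; here it all goes to one.
    set_traffic("my_endpoint", {"my_backend": 1.0})
    # A handle allows the endpoint to be invoked from Python.
    return get_handle("my_endpoint")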
| 35.675462
| 79
| 0.647585
|
c7b7b939485e16388731e8ee8401ca172479b7f5
| 1,297
|
py
|
Python
|
ensysmod/model/energy_model_parameter.py
|
NOWUM/EnSysMod
|
18c8a2198db3510e667c1f0298d00a3dfcb0aab7
|
[
"MIT"
] | 1
|
2021-12-10T19:41:01.000Z
|
2021-12-10T19:41:01.000Z
|
ensysmod/model/energy_model_parameter.py
|
NOWUM/EnSysMod
|
18c8a2198db3510e667c1f0298d00a3dfcb0aab7
|
[
"MIT"
] | 83
|
2021-10-20T22:54:28.000Z
|
2022-03-24T19:07:06.000Z
|
ensysmod/model/energy_model_parameter.py
|
NOWUM/EnSysMod
|
18c8a2198db3510e667c1f0298d00a3dfcb0aab7
|
[
"MIT"
] | null | null | null |
import enum
from sqlalchemy import Column, Integer, ForeignKey, Float, UniqueConstraint, Enum
from sqlalchemy.orm import relationship
from ensysmod.database.base_class import Base
from ensysmod.database.ref_base_class import RefCRBase
class EnergyModelParameterAttribute(enum.Enum):
yearly_limit = 'yearly_limit'
class EnergyModelParameterOperation(enum.Enum):
add = 'add'
multiply = 'multiply'
set = 'set'
class EnergyModelParameter(RefCRBase, Base):
"""
An energy model parameter references a model and a component.
It can be used to override attributes of the referenced component.
"""
ref_model = Column(Integer, ForeignKey('energy_model.id'), nullable=False)
# The region reference is optional.
ref_region = Column(Integer, ForeignKey('region.id'), nullable=True)
attribute = Column(Enum(EnergyModelParameterAttribute), nullable=False)
operation = Column(Enum(EnergyModelParameterOperation), nullable=False)
value = Column(Float, nullable=False)
# relationships
component = relationship('EnergyComponent')
model = relationship('EnergyModel', back_populates='parameters')
# table constraints
__table_args__ = (
UniqueConstraint("ref_model", "attribute", name="_model_attribute_uc"),
)
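# Hedged sketch (not part of the original module): constructing a parameter row
# that overrides a component's yearly limit. The id value is a placeholder and
# the component/region references contributed by the RefCRBase mixin are
# omitted here because they are defined outside this file.
def _example_parameter() -> "EnergyModelParameter":
    return EnergyModelParameter(
        ref_model=1,
        attribute=EnergyModelParameterAttribute.yearly_limit,
        operation=EnergyModelParameterOperation.set,
        value=1000.0,
    )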
| 30.162791
| 81
| 0.744796
|
46ee8802a906a482bf4c5a6763d312084ea1718c
| 4,602
|
py
|
Python
|
explorer/utils.py
|
thecardcheat/django-sql-explorer
|
30676258bdbd1614d26bb89ca5f07c2cff2321c1
|
[
"MIT"
] | null | null | null |
explorer/utils.py
|
thecardcheat/django-sql-explorer
|
30676258bdbd1614d26bb89ca5f07c2cff2321c1
|
[
"MIT"
] | null | null | null |
explorer/utils.py
|
thecardcheat/django-sql-explorer
|
30676258bdbd1614d26bb89ca5f07c2cff2321c1
|
[
"MIT"
] | 1
|
2020-06-15T16:44:58.000Z
|
2020-06-15T16:44:58.000Z
|
import functools
import re
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth.views import LoginView
from django.contrib.auth import REDIRECT_FIELD_NAME
from six import text_type
import sqlparse
from explorer import app_settings
EXPLORER_PARAM_TOKEN = "$$"
def passes_blacklist(sql):
clean = functools.reduce(lambda sql, term: sql.upper().replace(term, ""), [t.upper() for t in app_settings.EXPLORER_SQL_WHITELIST], sql)
fails = [bl_word for bl_word in app_settings.EXPLORER_SQL_BLACKLIST if bl_word in clean.upper()]
return not any(fails), fails
def _format_field(field):
return field.get_attname_column()[1], field.get_internal_type()
def param(name):
return "%s%s%s" % (EXPLORER_PARAM_TOKEN, name, EXPLORER_PARAM_TOKEN)
def swap_params(sql, params):
p = params.items() if params else {}
for k, v in p:
regex = re.compile("\$\$%s(?:\:([^\$]+))?\$\$" % str(k).lower(), re.I)
sql = regex.sub(text_type(v), sql)
return sql
def extract_params(text):
regex = re.compile("\$\$([a-z0-9_]+)(?:\:([^\$]+))?\$\$")
params = re.findall(regex, text.lower())
return {p[0]: p[1] if len(p) > 1 else '' for p in params}
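# Hedged examples (not part of the original module), derived from the regexes
# above: $$name$$ and $$name:default$$ tokens are collected by extract_params()
# and substituted by swap_params(). The SQL text and values are hypothetical.
def _param_examples():
    sql = "SELECT * FROM users WHERE id = $$user_id$$ LIMIT $$limit:10$$"
    assert extract_params(sql) == {"user_id": "", "limit": "10"}
    assert swap_params(sql, {"user_id": 42, "limit": 5}) == \
        "SELECT * FROM users WHERE id = 42 LIMIT 5"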
def safe_login_prompt(request):
defaults = {
'template_name': 'admin/login.html',
'authentication_form': AuthenticationForm,
'extra_context': {
'title': 'Log in',
'app_path': request.get_full_path(),
REDIRECT_FIELD_NAME: request.get_full_path(),
},
}
return LoginView.as_view(**defaults)(request)
def shared_dict_update(target, source):
for k_d1 in target:
if k_d1 in source:
target[k_d1] = source[k_d1]
return target
def safe_cast(val, to_type, default=None):
try:
return to_type(val)
except ValueError:
return default
def get_int_from_request(request, name, default):
val = request.GET.get(name, default)
return safe_cast(val, int, default) if val else None
def get_params_from_request(request):
val = request.GET.get('params', None)
try:
d = {}
tuples = val.split('|')
for t in tuples:
res = t.split(':')
d[res[0]] = res[1]
return d
except Exception:
return None
def get_params_for_url(query):
if query.params:
return '|'.join(['%s:%s' % (p, v) for p, v in query.params.items()])
def url_get_rows(request):
return get_int_from_request(request, 'rows', app_settings.EXPLORER_DEFAULT_ROWS)
def url_get_query_id(request):
return get_int_from_request(request, 'query_id', None)
def url_get_log_id(request):
return get_int_from_request(request, 'querylog_id', None)
def url_get_show(request):
return bool(get_int_from_request(request, 'show', 1))
def url_get_fullscreen(request):
return bool(get_int_from_request(request, 'fullscreen', 0))
def url_get_params(request):
return get_params_from_request(request)
def allowed_query_pks(user_id):
return app_settings.EXPLORER_GET_USER_QUERY_VIEWS().get(user_id, [])
def user_can_see_query(request, **kwargs):
# In Django<1.10, is_anonymous was a method.
try:
is_anonymous = request.user.is_anonymous()
except TypeError:
is_anonymous = request.user.is_anonymous
if not is_anonymous and 'query_id' in kwargs:
return int(kwargs['query_id']) in allowed_query_pks(request.user.id)
return False
def fmt_sql(sql):
return sqlparse.format(sql, reindent=True, keyword_case='upper')
def noop_decorator(f):
return f
class InvalidExplorerConnectionException(Exception):
pass
def get_valid_connection(alias=None):
from explorer.connections import connections
if not alias:
return connections[app_settings.EXPLORER_DEFAULT_CONNECTION]
if alias not in connections:
raise InvalidExplorerConnectionException(
'Attempted to access connection %s, but that is not a registered Explorer connection.' % alias
)
return connections[alias]
def get_s3_bucket():
from boto.s3.connection import S3Connection
conn = S3Connection(app_settings.S3_ACCESS_KEY,
app_settings.S3_SECRET_KEY)
return conn.get_bucket(app_settings.S3_BUCKET)
def s3_upload(key, data):
from boto.s3.key import Key
bucket = get_s3_bucket()
k = Key(bucket)
k.key = key
k.set_contents_from_file(data, rewind=True)
k.set_acl('public-read')
k.set_metadata('Content-Type', 'text/csv')
return k.generate_url(expires_in=0, query_auth=False)
| 26.297143
| 140
| 0.680791
|
675666cce1eeca3da6459cea3e1ab142af48330c
| 3,765
|
py
|
Python
|
third_party/sqlalchemy_migrate_0_7_1/migrate/changeset/databases/firebird.py
|
bopopescu/build
|
4e95fd33456e552bfaf7d94f7d04b19273d1c534
|
[
"BSD-3-Clause"
] | null | null | null |
third_party/sqlalchemy_migrate_0_7_1/migrate/changeset/databases/firebird.py
|
bopopescu/build
|
4e95fd33456e552bfaf7d94f7d04b19273d1c534
|
[
"BSD-3-Clause"
] | null | null | null |
third_party/sqlalchemy_migrate_0_7_1/migrate/changeset/databases/firebird.py
|
bopopescu/build
|
4e95fd33456e552bfaf7d94f7d04b19273d1c534
|
[
"BSD-3-Clause"
] | 1
|
2020-07-23T11:05:06.000Z
|
2020-07-23T11:05:06.000Z
|
"""
Firebird database specific implementations of changeset classes.
"""
from sqlalchemy.databases import firebird as sa_base
from sqlalchemy.schema import PrimaryKeyConstraint
from migrate import exceptions
from migrate.changeset import ansisql, SQLA_06
if SQLA_06:
FBSchemaGenerator = sa_base.FBDDLCompiler
else:
FBSchemaGenerator = sa_base.FBSchemaGenerator
class FBColumnGenerator(FBSchemaGenerator, ansisql.ANSIColumnGenerator):
"""Firebird column generator implementation."""
class FBColumnDropper(ansisql.ANSIColumnDropper):
"""Firebird column dropper implementation."""
def visit_column(self, column):
"""Firebird supports 'DROP col' instead of 'DROP COLUMN col' syntax
Drop primary key and unique constraints if dropped column is referencing it."""
if column.primary_key:
if column.table.primary_key.columns.contains_column(column):
column.table.primary_key.drop()
# TODO: recreate primary key if it references more than this column
for index in column.table.indexes:
# "column in index.columns" causes problems as all
# column objects compare equal and return a SQL expression
if column.name in [col.name for col in index.columns]:
index.drop()
# TODO: recreate index if it references more than this column
for cons in column.table.constraints:
if isinstance(cons,PrimaryKeyConstraint):
# will be deleted only when the column it's on
# is deleted!
continue
if SQLA_06:
should_drop = column.name in cons.columns
else:
should_drop = cons.contains_column(column) and cons.name
if should_drop:
self.start_alter_table(column)
self.append("DROP CONSTRAINT ")
self.append(self.preparer.format_constraint(cons))
self.execute()
# TODO: recreate unique constraint if it references more than this column
self.start_alter_table(column)
self.append('DROP %s' % self.preparer.format_column(column))
self.execute()
class FBSchemaChanger(ansisql.ANSISchemaChanger):
"""Firebird schema changer implementation."""
def visit_table(self, table):
"""Rename table not supported"""
raise exceptions.NotSupportedError(
"Firebird does not support renaming tables.")
def _visit_column_name(self, table, column, delta):
self.start_alter_table(table)
col_name = self.preparer.quote(delta.current_name, table.quote)
new_name = self.preparer.format_column(delta.result_column)
self.append('ALTER COLUMN %s TO %s' % (col_name, new_name))
def _visit_column_nullable(self, table, column, delta):
"""Changing NULL is not supported"""
# TODO: http://www.firebirdfaq.org/faq103/
raise exceptions.NotSupportedError(
"Firebird does not support altering NULL bevahior.")
class FBConstraintGenerator(ansisql.ANSIConstraintGenerator):
"""Firebird constraint generator implementation."""
class FBConstraintDropper(ansisql.ANSIConstraintDropper):
"""Firebird constaint dropper implementation."""
def cascade_constraint(self, constraint):
"""Cascading constraints is not supported"""
raise exceptions.NotSupportedError(
"Firebird does not support cascading constraints")
class FBDialect(ansisql.ANSIDialect):
columngenerator = FBColumnGenerator
columndropper = FBColumnDropper
schemachanger = FBSchemaChanger
constraintgenerator = FBConstraintGenerator
constraintdropper = FBConstraintDropper
| 37.65
| 87
| 0.683931
|
b35cae32a94630e5b3d757e76d76dddf64a5eb61
| 9,369
|
py
|
Python
|
ad-insertion/frontend/manifest_dash.py
|
jhou5/Ad-Insertion-Sample
|
c944c7386f0645ba935c1f9f6ddb9c244a4167f1
|
[
"BSD-3-Clause"
] | null | null | null |
ad-insertion/frontend/manifest_dash.py
|
jhou5/Ad-Insertion-Sample
|
c944c7386f0645ba935c1f9f6ddb9c244a4167f1
|
[
"BSD-3-Clause"
] | 1
|
2019-05-03T23:14:50.000Z
|
2019-05-03T23:14:50.000Z
|
ad-insertion/frontend/manifest_dash.py
|
jhou5/Ad-Insertion-Sample
|
c944c7386f0645ba935c1f9f6ddb9c244a4167f1
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/python3
import xml.etree.ElementTree as ET
from copy import deepcopy
import re
import time
#PT0H1M59.89S
def _to_seconds(time):
s=0
m=re.search("([0-9.]+)H",time)
if m: s=s+float(m.group(1))*3600
m=re.search("([0-9.]+)M",time)
if m: s=s+float(m.group(1))*60
m=re.search("([0-9.]+)S",time)
if m: s=s+float(m.group(1))
return s
def _to_iso8601(s):
h=int(s/3600)
s=s-h*3600
m=int(s/60)
s=s-m*60
return 'PT{0:1g}H{1:1g}M{2:1g}S'.format(h,m,s)
def _to_stream(template, RepresentationID, Number=0):
template=template.replace("$RepresentationID$","{0}")
template=re.sub("\$Number\%([0-9]*)d\$",r"{1:\1d}",template)
return template.format(RepresentationID, Number)
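# Hedged examples (not part of the original module) for the helpers above; the
# template string is hypothetical but follows the $RepresentationID$ /
# $Number%05d$ convention used in the AD template further down.
def _helper_examples():
    assert abs(_to_seconds("PT0H1M59.89S") - 119.89) < 1e-6
    assert _to_iso8601(119.89) == "PT0H1M59.89S"
    assert _to_stream("chunk-stream$RepresentationID$-$Number%05d$.m4s", "0", 3) \
        == "chunk-stream0-00003.m4s"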
def _ns(tag):
return "{urn:mpeg:dash:schema:mpd:2011}"+tag;
ET.register_namespace('','urn:mpeg:dash:schema:mpd:2011')
_ad_template=ET.fromstring("""<?xml version="1.0" encoding="utf-8"?>
<MPD xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns="urn:mpeg:dash:schema:mpd:2011"
xmlns:xlink="http://www.w3.org/1999/xlink"
xsi:schemaLocation="urn:mpeg:DASH:schema:MPD:2011 http://standards.iso.org/ittf/PubliclyAvailableStandards/MPEG-DASH_schema_files/DASH-MPD.xsd"
profiles="urn:mpeg:dash:profile:isoff-live:2011"
type="static"
mediaPresentationDuration="PT5S"
minBufferTime="PT16.6S">
<ProgramInformation>
</ProgramInformation>
<Period id="0" start="PT0.0S">
<AdaptationSet id="0" contentType="video" segmentAlignment="true" bitstreamSwitching="true" lang="und">
<Representation id="0" mimeType="video/mp4" codecs="avc1.640028" bandwidth="2027988" width="1920" height="1080" frameRate="30000/1001">
<SegmentTemplate timescale="1000000" duration="5000000" initialization="init-stream0.m4s" media="chunk-stream0-$Number%05d$.m4s" startNumber="1">
</SegmentTemplate>
</Representation>
</AdaptationSet>
<AdaptationSet id="1" contentType="audio" segmentAlignment="true" bitstreamSwitching="true" lang="und">
<Representation id="1" mimeType="audio/mp4" codecs="mp4a.40.2" bandwidth="128000" audioSamplingRate="48000">
<AudioChannelConfiguration schemeIdUri="urn:mpeg:dash:23003:3:audio_channel_configuration:2011" value="2" />
<SegmentTemplate timescale="1000000" duration="5000000" initialization="init-stream1.m4s" media="chunk-stream1-$Number%05d$.m4s" startNumber="1">
</SegmentTemplate>
</Representation>
</AdaptationSet>
</Period>
</MPD>""")
def _ad_time(ad_spec, seq):
time=0
for i in range(seq):
time=time+ad_spec["duration"][i%len(ad_spec["duration"])]
return time
def _start_number(ad_spec, number, seq):
for i in range(seq):
number=number+ad_spec["interval"][i%len(ad_spec["interval"])]
return number
def _period_index(ad_spec, s):
i=0
while True:
interval=ad_spec["interval"][i%len(ad_spec["interval"])]
if s<interval: return i
s=s-interval
i=i+1
def parse_dash(stream_cp_url, mpd, ad_spec, ad_segment=5.0):
stream_cp_url="/".join(stream_cp_url.split("/")[:-1])
root=ET.fromstring(mpd)
mediaPresentationDuration = 0
if 'mediaPresentationDuration' in root.attrib:
mediaPresentationDuration=_to_seconds(root.attrib["mediaPresentationDuration"])
Period=root.find(_ns("Period"))
formats={
"video": list(Period.findall(_ns("AdaptationSet[@contentType='video']")+"/"+_ns("Representation"))),
"audio": list(Period.findall(_ns("AdaptationSet[@contentType='audio']")+"/"+_ns("Representation"))),
}
# scan all segs into structure
periods=[]
for AdaptationSet in Period.findall(_ns("AdaptationSet")):
sidx=0
for S in AdaptationSet.findall(_ns("Representation")+"/"+_ns("SegmentTemplate")+"/"+_ns("SegmentTimeline")+"/"+_ns("S")):
if "t" in S.attrib: t=int(S.attrib["t"])
d=int(S.attrib["d"])
r=int(S.attrib["r"]) if "r" in S.attrib else 0
for repeat in range(r+1):
pidx=_period_index(ad_spec, sidx)
if pidx>=len(periods): periods.append({})
if AdaptationSet not in periods[pidx]: periods[pidx][AdaptationSet]=[]
periods[pidx][AdaptationSet].append((t,d,sidx))
sidx=sidx+1
t=t+d
# create new periods with ADs
minfo={"segs":{}, "streams":{} }
manifest=ET.Element(root.tag, root.attrib)
for i in range(len(periods)):
Period1=ET.SubElement(manifest,_ns("Period"),{"id":str(i*2)})
duration_min=0
for AdaptationSet in periods[i]:
AdaptationSet1=ET.SubElement(Period1,_ns("AdaptationSet"),AdaptationSet.attrib)
Representation1=ET.SubElement(AdaptationSet1,_ns("Representation"),AdaptationSet.find(_ns("Representation")).attrib)
SegmentTemplate1=ET.SubElement(Representation1,_ns("SegmentTemplate"),AdaptationSet.find(_ns("Representation")+"/"+_ns("SegmentTemplate")).attrib)
timescale=float(SegmentTemplate1.attrib["timescale"])
SegmentTemplate1.attrib["presentationTimeOffset"]=str(periods[i][AdaptationSet][0][0])
SegmentTimeline1=ET.SubElement(SegmentTemplate1,_ns("SegmentTimeline"))
sidx=0
duration=0
for S in periods[i][AdaptationSet]:
S1=ET.SubElement(SegmentTimeline1,_ns("S"),{"t":str(S[0]),"d":str(S[1])})
duration=duration+S[1]
# schedule analytics
if AdaptationSet.attrib["contentType"]!="video": continue
# decipher streams
stream=_to_stream(SegmentTemplate1.attrib["media"],Representation1.attrib["id"],int(SegmentTemplate1.attrib["startNumber"])+S[2])
init_stream=_to_stream(SegmentTemplate1.attrib["initialization"],Representation1.attrib["id"],int(SegmentTemplate1.attrib["startNumber"])+S[2])
minfo["segs"][stream]={
"stream": stream_cp_url.split("/")[-1],
"bandwidth": int(Representation1.attrib["bandwidth"]),
"resolution": {
"width": int(Representation1.attrib["width"]),
"height": int(Representation1.attrib["height"]),
},
"seg_time": S[0]/timescale+_ad_time(ad_spec,i),
"seg_duration": S[1]/timescale,
"codec": Representation1.attrib["codecs"],
"streaming_type": "dash",
"initSeg": stream_cp_url+"/"+init_stream,
"analytics": stream_cp_url+"/"+stream,
"ad_duration": ad_spec["duration"][i%len(ad_spec["duration"])],
"ad_segment": ad_segment,
}
sidx=sidx+1
if sidx>=3: minfo["segs"][stream]["transcode"]=ad_spec["path"]+"/"+ad_spec["prefix"]+"/"+str(i)+"/"+Representation1.attrib["height"]+"p.mpd"
SegmentTemplate1.attrib["startNumber"]=str(_start_number(ad_spec, int(SegmentTemplate1.attrib["startNumber"]), i))
duration=float(duration)/timescale
if duration<duration_min or duration_min==0: duration_min=duration
Period1.attrib["duration"]=_to_iso8601(duration_min)
if(mediaPresentationDuration == 0):
mediaPresentationDuration += duration_min
# insert AD
if i==len(periods)-1: continue # do not insert AD at the last period
duration=ad_spec["duration"][i%len(ad_spec["duration"])]
mediaPresentationDuration += duration
Period2=ET.SubElement(manifest, _ns("Period"),{"id":str(i*2+1),"duration":_to_iso8601(duration)})
k=0
for j in range(len(formats["video"])):
for AdaptationSet in _ad_template.find(_ns("Period")):
AdaptationSet1=deepcopy(AdaptationSet)
contentType=AdaptationSet1.attrib["contentType"]
if not formats[contentType]: continue
AdaptationSet1.attrib["id"]=str(k)
Period2.append(AdaptationSet1)
k=k+1
Representation=AdaptationSet1.find(_ns("Representation"))
for f in ["id","bandwidth","width","height"]:
if f in formats[contentType][j].attrib:
Representation.attrib[f]=formats[contentType][j].attrib[f]
SegmentTemplate=Representation.find(_ns("SegmentTemplate"))
timescale=int(SegmentTemplate.attrib["timescale"])
SegmentTemplate.attrib["duration"]=str(int(ad_segment*timescale))
for f in ["initialization","media"]:
SegmentTemplate.attrib[f]=ad_spec["prefix"]+"/"+str(i)+"/"+formats["video"][j].attrib["height"]+"p-"+SegmentTemplate.attrib[f]
manifest.set('mediaPresentationDuration', _to_iso8601(mediaPresentationDuration))
minfo["manifest"]=ET.tostring(manifest,encoding='utf-8',method='xml')
minfo["content-type"]="application/xml"
return minfo
| 47.558376
| 177
| 0.607642
|
de33d4372dd0b40cbb5871debc1a44f93fe6c1eb
| 5,607
|
py
|
Python
|
layout_rpt.py
|
dstreev/hive_llap_calculator
|
5630d55a8bda182996388dc976ee97e2b6cb58b2
|
[
"Apache-2.0"
] | 15
|
2019-06-13T00:02:15.000Z
|
2021-02-08T03:13:23.000Z
|
layout_rpt.py
|
dstreev/hive_llap_calculator
|
5630d55a8bda182996388dc976ee97e2b6cb58b2
|
[
"Apache-2.0"
] | 21
|
2019-08-14T18:46:40.000Z
|
2021-02-18T20:56:46.000Z
|
layout_rpt.py
|
dstreev/hive_llap_calculator
|
5630d55a8bda182996388dc976ee97e2b6cb58b2
|
[
"Apache-2.0"
] | 4
|
2019-06-21T15:26:36.000Z
|
2020-07-10T13:22:14.000Z
|
#!/usr/bin/env python
# use this to parse the Ambari Layout Report that's generated with:
# http://${AMBARI_HOST_PORT}/api/v1/clusters/${CLUSTER_NAME}/hosts?fields=Hosts/host_name,host_components,Hosts/ip,Hosts/total_mem,Hosts/os_arch,Hosts/os_type,Hosts/rack_info,Hosts/cpu_count,Hosts/disk_info,metrics/disk,Hosts/ph_cpu_count
import optparse
import logging
import sys
import json
from common import pprinttable
logger = logging.getLogger('LLAPConfig')
def get_hostname( item ):
host_info = item["Hosts"]
return host_info["host_name"]
def is_component( item, componentName ):
components = item["host_components"]
for component in components:
for ckey, cvalue in component.items():
if ckey == "HostRoles":
for hkey, hvalue in cvalue.items():
if hkey == "component_name":
if hvalue == componentName:
return True
return False
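# Hedged sketch (illustration only): the minimal item shape is_component()
# expects, inferred from the loop above; the host name is a placeholder.
def _is_component_demo():
    item = {
        "Hosts": {"host_name": "node1.example.com"},
        "host_components": [{"HostRoles": {"component_name": "DATANODE"}}],
    }
    assert is_component(item, "DATANODE")
    assert not is_component(item, "NODEMANAGER")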
def get_info(layoutFile):
layout = json.loads(open(layoutFile).read())
items = layout['items']
hosttable, compute_count, other_count = gen_hosttable( items )
return hosttable, compute_count, other_count
def report(layoutFile):
layout = json.loads(open(layoutFile).read())
items = layout['items']
hosttable, compute_count, other_count = gen_hosttable( items )
rpt_hosttable(hosttable)
rpt_count_type('Compute', compute_count)
rpt_count_type('Other', other_count)
rpt_totals(hosttable)
def gen_hosttable( items ):
records = []
compute_count = {}
other_count = {}
for item in items:
record = []
host = item["Hosts"]
record.append(host["host_name"])
record.append(host["cpu_count"])
record.append(host["os_type"])
record.append(host["total_mem"] / (1024 * 1024))
record.append(host["rack_info"])
record.append(is_component(item, "DATANODE"))
record.append(is_component(item, "NODEMANAGER"))
records.append(record)
compute = is_component(item, "NODEMANAGER")
key = str(compute) + str(record[3]) + str(record[1])
memory = record[3]
cores = record[1]
if compute and key not in compute_count:
compute_count[key] = {'count': 1, 'memory': memory, 'cores': cores, }
elif compute:
compute_count[key]['count'] += 1
elif not compute and key not in other_count:
other_count[key] = {'count': 1, 'memory': memory, 'cores': cores, }
elif not compute:
other_count[key]['count'] += 1
# print key + str(memory) + str(cores)
return records, compute_count, other_count
def rpt_hosttable ( hosttable ):
# master = datanode & compute
fields = [[0, 'Host'], [1, 'CPU Count'], [2, 'OS'], [3, 'Memory'], [4, 'Rack'], [5, 'Data Node'], [6, 'Compute Node']]
pprinttable(hosttable, fields)
def rpt_count_type ( type, count_type ):
# master = datanode & compute
print(type)
fields = [[0, 'Count'], [1, 'Memory'], [2, 'Cores']]
count_type_rows = []
for key in count_type:
count_type_record = [count_type[key]['count'], count_type[key]['memory'], count_type[key]['cores']]
count_type_rows.append(count_type_record)
pprinttable(count_type_rows, fields)
def rpt_totals ( hosttable ):
totalFields = [[0,"Type"],[1,"Count"],[2, "OS"],[3,"CPU-Min"], [4,"CPU-Max"], [5,"Mem-Min"],[6,"Mem-Max"]]
totalType = []
datanodes = ["Data Nodes", 0, [], 10000, 0, 100000, 0]
for record in hosttable:
if record[5]:
datanodes[1] += 1
if (record[2] not in datanodes[2]):
datanodes[2].append(record[2])
# CPU Min
if record[1] < datanodes[3]:
datanodes[3] = record[1]
# CPU Max
if record[1] > datanodes[4]:
datanodes[4] = record[1]
# Mem Min
if record[3] < datanodes[5]:
datanodes[5] = record[3]
# Mem Max
if record[3] > datanodes[6]:
datanodes[6] = record[3]
totalType.append(datanodes)
computeNodes = ["Compute Nodes", 0, [], 10000, 0, 100000, 0]
for record in hosttable:
if record[6]:
computeNodes[1] += 1
if (record[2] not in computeNodes[2]):
computeNodes[2].append(record[2])
# CPU Min
if record[1] < computeNodes[3]:
computeNodes[3] = record[1]
# CPU Max
if record[1] > computeNodes[4]:
computeNodes[4] = record[1]
# Mem Min
if record[3] < computeNodes[5]:
computeNodes[5] = record[3]
# Mem Max
if record[3] > computeNodes[6]:
computeNodes[6] = record[3]
totalType.append(computeNodes)
pprinttable(totalType, totalFields)
def main():
# global ambari_integration
global cluster
# global version_note
# global ambari_accessor_api
parser = optparse.OptionParser(usage="usage: %prog [options]")
parser.add_option("-l", "--ambari-layout", dest="ambari_layout", help=".")
(options, args) = parser.parse_args()
logger.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
stdout_handler = logging.StreamHandler(sys.stdout)
stdout_handler.setLevel(logging.INFO)
stdout_handler.setFormatter(formatter)
logger.addHandler(stdout_handler)
if options.ambari_layout:
report(options.ambari_layout)
main()
| 32.224138
| 238
| 0.600856
|
c376cc4fedcf016ce4dcc8b91ce81970799a6e01
| 579
|
py
|
Python
|
disdata/migrations/0005_auto_20200708_2003.py
|
biswajitpatra/KB198_TechOverflow
|
cb1dc1c080db792c2a9d7723c67ca7ca0e8baae9
|
[
"MIT"
] | 2
|
2020-09-01T18:17:30.000Z
|
2020-09-03T09:04:53.000Z
|
disdata/migrations/0005_auto_20200708_2003.py
|
biswajitpatra/KB198_TechOverflow
|
cb1dc1c080db792c2a9d7723c67ca7ca0e8baae9
|
[
"MIT"
] | 3
|
2021-04-04T15:41:27.000Z
|
2021-06-10T18:24:08.000Z
|
disdata/migrations/0005_auto_20200708_2003.py
|
biswajitpatra/KB198_TechOverflow
|
cb1dc1c080db792c2a9d7723c67ca7ca0e8baae9
|
[
"MIT"
] | 2
|
2021-04-04T15:04:05.000Z
|
2021-11-15T02:58:47.000Z
|
# Generated by Django 3.0.7 on 2020-07-08 20:03
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('disdata', '0004_auto_20200708_1957'),
]
operations = [
migrations.AlterField(
model_name='report',
name='source',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
| 26.318182
| 110
| 0.677029
|
36b216d60d84aedcabe48545c354b1a5d9701192
| 4,382
|
py
|
Python
|
echobot/plugins/admin/title.py
|
jks15satoshi/echobot
|
b8f980b330123068f2e9edaa7fd143e70e0ac0fe
|
[
"MIT"
] | 9
|
2021-01-21T18:08:11.000Z
|
2021-04-29T13:40:24.000Z
|
echobot/plugins/admin/title.py
|
jks15satoshi/echobot
|
b8f980b330123068f2e9edaa7fd143e70e0ac0fe
|
[
"MIT"
] | 16
|
2021-01-22T11:41:11.000Z
|
2021-08-23T09:40:56.000Z
|
echobot/plugins/admin/title.py
|
jks15satoshi/echobot
|
b8f980b330123068f2e9edaa7fd143e70e0ac0fe
|
[
"MIT"
] | 1
|
2021-02-22T17:05:06.000Z
|
2021-02-22T17:05:06.000Z
|
"""群头衔申请"""
from typing import Optional
import jieba.posseg as posseg
from echobot.permission import is_owner as owner
from echobot.utils import confirm_intent
from echobot.utils.segment_parser import AtSegmentParser
from nonebot import require
from nonebot.adapters.cqhttp import GROUP, Bot, Event, exception
from nonebot.log import logger
from nonebot.typing import T_State
from .glob import admin, str_parser
title = admin.on_keyword({'头衔'}, permission=GROUP)
cd = require('nonebot_plugin_cooldown').cooldown
@title.handle()
async def first_receive(bot: Bot, event: Event, state: T_State) -> None:
if not await should_continue(bot, event):
await title.finish()
else:
segment = AtSegmentParser(str(event.message))
# Parse the at (mention) message segment
if (user_id := segment.get_data('qq')) and user_id.isdigit():
state['at_userid'] = int(user_id)
# Parse the command
message = segment.filter_segment(any_segments=True).split('头衔')
words = posseg.cut(message[0])
for word, flag in words:
if flag == 'v':
action = word
if action in ('申请', '设置', '设定', '应用', '修改', '更改', '变更'):
state['action'] = 'apply'
if (contents := message[1].strip()):
state['contents'] = contents
elif action in ('移除', '删除', '撤销', '取消'):
state['action'] = 'remove'
state['contents'] = ''
else:
await title.reject(str_parser.parse('admin.title.action_rejected'))
async def contents_parse(bot: Bot, event: Event, state: T_State) -> None:
state[state['_current_key']] = str(event.raw_message)
@title.got('contents', str_parser.parse('admin.title.contents_prompt'),
args_parser=contents_parse)
async def handle(bot: Bot, event: Event, state: T_State) -> None:
segment = AtSegmentParser(state.get('contents').strip())
action = state.get('action')
contents = segment.filter_segment(any_segments=True)
userid = (int(segment.get_data('qq')) if segment.get_data('qq')
else state.get('at_userid'))
# Check the user's permission and validate the at-mentioned member
if (userid and userid != event.user_id
and event.sender.role != 'admin'):
await title.reject(str_parser.parse('admin.title.permission_rejected'))
# Apply for the group title
elif action == 'apply':
if not contents:
await title.reject(str_parser.parse(
'admin.title.apply_invalid_rejected'))
elif confirm_intent(contents) == 'decline':
await title.finish(str_parser.parse('admin.title.apply_cancel'))
elif (length := len(bytes(contents, encoding='utf-8'))) > 18:
await title.reject(str_parser.parse(
'admin.title.apply_length_rejected', length=length))
else:
await set_title(bot, event, contents, user=userid)
await title.finish(str_parser.parse('admin.title.apply_success'))
# Remove the group title
elif action == 'remove':
await set_title(bot, event, '', user=userid)
await title.finish(str_parser.parse('admin.title.remove_success'))
async def set_title(bot: Bot, event: Event, contents: str, /,
user: Optional[int] = None) -> None:
group_id = event.group_id
user_id = user if user else event.user_id
try:
await bot.set_group_special_title(group_id=group_id, user_id=user_id,
special_title=contents)
if contents:
cd.set_event('admin.title', 86400, group=group_id, user=user_id)
except (exception.NetworkError, exception.ActionFailed) as err:
logger.error(err)
await bot.send(event, str_parser.parse('admin.title.on_err'))
async def should_continue(bot: Bot, event: Event) -> bool:
info = cd.get_event('admin.title', group=event.group_id,
user=event.user_id)
is_owner = await owner(bot, event)
is_cooled_down = not info.get('status')
if is_owner and is_cooled_down:
return True
elif is_owner:
if event.sender.role == 'admin':
return True
else:
msg = str_parser.parse('admin.title.on_cooldown',
time=cd.time_format(info.get('remaining'),
preset='zh'))
await bot.send(event, msg)
return False
| 36.214876
| 79
| 0.619808
|
6b5f57500e4329d5a6f89cf9c89a9d05126cba10
| 503
|
py
|
Python
|
src/compgrid/__main__.py
|
Ro-Data/compgrid
|
c848fb50aa432edc0666154a22bcf2ab128d37d5
|
[
"0BSD"
] | null | null | null |
src/compgrid/__main__.py
|
Ro-Data/compgrid
|
c848fb50aa432edc0666154a22bcf2ab128d37d5
|
[
"0BSD"
] | null | null | null |
src/compgrid/__main__.py
|
Ro-Data/compgrid
|
c848fb50aa432edc0666154a22bcf2ab128d37d5
|
[
"0BSD"
] | null | null | null |
from . import compgrid, snowflake
TARGETS = {
"email": compgrid.email,
"slack": compgrid.slack,
"slackimg": compgrid.slackimg,
}
if __name__ == "__main__":
import argparse
import pathlib
parser = argparse.ArgumentParser()
parser.add_argument("target", choices=TARGETS.keys())
parser.add_argument("config")
args = parser.parse_args()
connection = snowflake.get_engine()
target_function = TARGETS[args.target]
target_function(connection, args.config)
| 21.869565
| 57
| 0.693837
|
a05053b897ae382f14266f7bcb11051c780c8287
| 1,103
|
py
|
Python
|
src/ggrc/migrations/versions/20160901125710_173b800a28f3_update_event_enum_field.py
|
acidburn0zzz/ggrc-core
|
386781d08172102eb51030b65db8212974651628
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2016-11-06T05:21:24.000Z
|
2016-11-06T05:21:24.000Z
|
src/ggrc/migrations/versions/20160901125710_173b800a28f3_update_event_enum_field.py
|
acidburn0zzz/ggrc-core
|
386781d08172102eb51030b65db8212974651628
|
[
"ECL-2.0",
"Apache-2.0"
] | 2
|
2021-02-02T23:09:40.000Z
|
2021-02-08T21:00:48.000Z
|
src/ggrc/migrations/versions/20160901125710_173b800a28f3_update_event_enum_field.py
|
Acidburn0zzz/ggrc-core
|
386781d08172102eb51030b65db8212974651628
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""
Update event enum field
Create Date: 2016-09-01 12:57:10.984592
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '173b800a28f3'
down_revision = '31fbfc1bc608'
def upgrade():
"""Upgrade database schema and/or data, creating a new revision."""
op.alter_column(
'events', 'action',
type_=sa.Enum(u'POST', u'PUT', u'DELETE', u'IMPORT', u'BULK', u'GET'),
existing_type=sa.Enum(u'POST', u'PUT', u'DELETE', u'IMPORT', u'GET'),
nullable=False
)
def downgrade():
"""Downgrade database schema and/or data back to the previous revision."""
op.alter_column(
'events', 'action',
type_=sa.Enum(u'POST', u'PUT', u'DELETE', u'IMPORT', u'GET'),
existing_type=sa.Enum(u'POST', u'PUT', u'DELETE', u'IMPORT', u'BULK',
u'GET'),
nullable=False
)
| 26.902439
| 79
| 0.657298
|
96183c791f09f53640155f00cbb637bf11306a4e
| 58,417
|
py
|
Python
|
torchaudio/transforms.py
|
brianjo/audio
|
1d6b15ea0439d84a0804c0bb645456b985e26696
|
[
"BSD-2-Clause"
] | null | null | null |
torchaudio/transforms.py
|
brianjo/audio
|
1d6b15ea0439d84a0804c0bb645456b985e26696
|
[
"BSD-2-Clause"
] | 1
|
2020-10-14T09:45:32.000Z
|
2020-10-14T09:46:12.000Z
|
torchaudio/transforms.py
|
brianjo/audio
|
1d6b15ea0439d84a0804c0bb645456b985e26696
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
import math
import warnings
from typing import Callable, Optional
import torch
from torch import Tensor
from torchaudio import functional as F
from .functional.functional import (
_get_sinc_resample_kernel,
_apply_sinc_resample_kernel,
)
__all__ = [
'Spectrogram',
'GriffinLim',
'AmplitudeToDB',
'MelScale',
'InverseMelScale',
'MelSpectrogram',
'MFCC',
'LFCC',
'MuLawEncoding',
'MuLawDecoding',
'Resample',
'ComplexNorm',
'TimeStretch',
'Fade',
'FrequencyMasking',
'TimeMasking',
'SlidingWindowCmn',
'Vad',
'SpectralCentroid',
'Vol',
'ComputeDeltas',
'PitchShift',
]
class Spectrogram(torch.nn.Module):
r"""Create a spectrogram from a audio signal.
Args:
n_fft (int, optional): Size of FFT, creates ``n_fft // 2 + 1`` bins. (Default: ``400``)
win_length (int or None, optional): Window size. (Default: ``n_fft``)
hop_length (int or None, optional): Length of hop between STFT windows. (Default: ``win_length // 2``)
pad (int, optional): Two sided padding of signal. (Default: ``0``)
window_fn (Callable[..., Tensor], optional): A function to create a window tensor
that is applied/multiplied to each frame/window. (Default: ``torch.hann_window``)
power (float or None, optional): Exponent for the magnitude spectrogram,
(must be > 0) e.g., 1 for energy, 2 for power, etc.
If None, then the complex spectrum is returned instead. (Default: ``2``)
normalized (bool, optional): Whether to normalize by magnitude after stft. (Default: ``False``)
wkwargs (dict or None, optional): Arguments for window function. (Default: ``None``)
center (bool, optional): whether to pad :attr:`waveform` on both sides so
that the :math:`t`-th frame is centered at time :math:`t \times \text{hop\_length}`.
(Default: ``True``)
pad_mode (string, optional): controls the padding method used when
:attr:`center` is ``True``. (Default: ``"reflect"``)
onesided (bool, optional): controls whether to return half of results to
avoid redundancy (Default: ``True``)
return_complex (bool, optional):
Indicates whether the resulting complex-valued Tensor should be represented with
native complex dtype, such as `torch.cfloat` and `torch.cdouble`, or real dtype
mimicking complex value with an extra dimension for real and imaginary parts.
(See also ``torch.view_as_real``.)
This argument is only effective when ``power=None``. It is ignored for
cases where ``power`` is a number because, in those cases, the returned tensor is
a power spectrogram, which is a real-valued tensor.
Example
>>> waveform, sample_rate = torchaudio.load('test.wav', normalize=True)
>>> transform = torchaudio.transforms.Spectrogram(n_fft=800)
>>> spectrogram = transform(waveform)
"""
__constants__ = ['n_fft', 'win_length', 'hop_length', 'pad', 'power', 'normalized']
def __init__(self,
n_fft: int = 400,
win_length: Optional[int] = None,
hop_length: Optional[int] = None,
pad: int = 0,
window_fn: Callable[..., Tensor] = torch.hann_window,
power: Optional[float] = 2.,
normalized: bool = False,
wkwargs: Optional[dict] = None,
center: bool = True,
pad_mode: str = "reflect",
onesided: bool = True,
return_complex: bool = True) -> None:
super(Spectrogram, self).__init__()
self.n_fft = n_fft
# number of FFT bins. the returned STFT result will have n_fft // 2 + 1
# number of frequencies due to onesided=True in torch.stft
self.win_length = win_length if win_length is not None else n_fft
self.hop_length = hop_length if hop_length is not None else self.win_length // 2
window = window_fn(self.win_length) if wkwargs is None else window_fn(self.win_length, **wkwargs)
self.register_buffer('window', window)
self.pad = pad
self.power = power
self.normalized = normalized
self.center = center
self.pad_mode = pad_mode
self.onesided = onesided
self.return_complex = return_complex
def forward(self, waveform: Tensor) -> Tensor:
r"""
Args:
waveform (Tensor): Tensor of audio of dimension (..., time).
Returns:
Tensor: Dimension (..., freq, time), where freq is
``n_fft // 2 + 1`` where ``n_fft`` is the number of
Fourier bins, and time is the number of window hops (n_frame).
"""
return F.spectrogram(
waveform,
self.pad,
self.window,
self.n_fft,
self.hop_length,
self.win_length,
self.power,
self.normalized,
self.center,
self.pad_mode,
self.onesided,
self.return_complex,
)
class GriffinLim(torch.nn.Module):
r"""Compute waveform from a linear scale magnitude spectrogram using the Griffin-Lim transformation.
Implementation ported from
*librosa* [:footcite:`brian_mcfee-proc-scipy-2015`], *A fast Griffin-Lim algorithm* [:footcite:`6701851`]
and *Signal estimation from modified short-time Fourier transform* [:footcite:`1172092`].
Args:
n_fft (int, optional): Size of FFT, creates ``n_fft // 2 + 1`` bins. (Default: ``400``)
n_iter (int, optional): Number of iteration for phase recovery process. (Default: ``32``)
win_length (int or None, optional): Window size. (Default: ``n_fft``)
hop_length (int or None, optional): Length of hop between STFT windows. (Default: ``win_length // 2``)
window_fn (Callable[..., Tensor], optional): A function to create a window tensor
that is applied/multiplied to each frame/window. (Default: ``torch.hann_window``)
power (float, optional): Exponent for the magnitude spectrogram,
(must be > 0) e.g., 1 for energy, 2 for power, etc. (Default: ``2``)
wkwargs (dict or None, optional): Arguments for window function. (Default: ``None``)
momentum (float, optional): The momentum parameter for fast Griffin-Lim.
Setting this to 0 recovers the original Griffin-Lim method.
Values near 1 can lead to faster convergence, but above 1 may not converge. (Default: ``0.99``)
length (int, optional): Array length of the expected output. (Default: ``None``)
rand_init (bool, optional): Initializes phase randomly if True and to zero otherwise. (Default: ``True``)
"""
__constants__ = ['n_fft', 'n_iter', 'win_length', 'hop_length', 'power',
'length', 'momentum', 'rand_init']
def __init__(self,
n_fft: int = 400,
n_iter: int = 32,
win_length: Optional[int] = None,
hop_length: Optional[int] = None,
window_fn: Callable[..., Tensor] = torch.hann_window,
power: float = 2.,
wkwargs: Optional[dict] = None,
momentum: float = 0.99,
length: Optional[int] = None,
rand_init: bool = True) -> None:
super(GriffinLim, self).__init__()
assert momentum < 1, 'momentum={} > 1 can be unstable'.format(momentum)
assert momentum >= 0, 'momentum={} < 0'.format(momentum)
self.n_fft = n_fft
self.n_iter = n_iter
self.win_length = win_length if win_length is not None else n_fft
self.hop_length = hop_length if hop_length is not None else self.win_length // 2
window = window_fn(self.win_length) if wkwargs is None else window_fn(self.win_length, **wkwargs)
self.register_buffer('window', window)
self.length = length
self.power = power
self.momentum = momentum / (1 + momentum)
self.rand_init = rand_init
def forward(self, specgram: Tensor) -> Tensor:
r"""
Args:
specgram (Tensor):
A magnitude-only STFT spectrogram of dimension (..., freq, frames)
where freq is ``n_fft // 2 + 1``.
Returns:
Tensor: waveform of (..., time), where time equals the ``length`` parameter if given.
"""
return F.griffinlim(specgram, self.window, self.n_fft, self.hop_length, self.win_length, self.power,
self.n_iter, self.momentum, self.length, self.rand_init)
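# Hedged usage sketch (not part of the original module): the documented
# Spectrogram -> GriffinLim round trip, using only classes defined in this
# file. The n_fft value is arbitrary; it just has to match on both sides.
def _griffin_lim_example(waveform: Tensor) -> Tensor:
    # A power spectrogram (power=2, the default) matches GriffinLim's default power.
    specgram = Spectrogram(n_fft=400, power=2.)(waveform)
    # Recover a time-domain waveform from the magnitude-only spectrogram.
    return GriffinLim(n_fft=400)(specgram)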
class AmplitudeToDB(torch.nn.Module):
r"""Turn a tensor from the power/amplitude scale to the decibel scale.
This output depends on the maximum value in the input tensor, and so
may return different values for an audio clip split into snippets vs. a
full clip.
Args:
stype (str, optional): scale of input tensor ('power' or 'magnitude'). The
power being the elementwise square of the magnitude. (Default: ``'power'``)
top_db (float, optional): minimum negative cut-off in decibels. A reasonable number
is 80. (Default: ``None``)
"""
__constants__ = ['multiplier', 'amin', 'ref_value', 'db_multiplier']
def __init__(self, stype: str = 'power', top_db: Optional[float] = None) -> None:
super(AmplitudeToDB, self).__init__()
self.stype = stype
if top_db is not None and top_db < 0:
raise ValueError('top_db must be positive value')
self.top_db = top_db
self.multiplier = 10.0 if stype == 'power' else 20.0
self.amin = 1e-10
self.ref_value = 1.0
self.db_multiplier = math.log10(max(self.amin, self.ref_value))
def forward(self, x: Tensor) -> Tensor:
r"""Numerically stable implementation from Librosa.
https://librosa.org/doc/latest/generated/librosa.amplitude_to_db.html
Args:
x (Tensor): Input tensor before being converted to decibel scale.
Returns:
Tensor: Output tensor in decibel scale.
"""
return F.amplitude_to_DB(x, self.multiplier, self.amin, self.db_multiplier, self.top_db)
class MelScale(torch.nn.Module):
r"""Turn a normal STFT into a mel frequency STFT, using a conversion
matrix. This uses triangular filter banks.
The user can control which device the filter bank (`fb`) is on (e.g. fb.to(spec_f.device)).
Args:
n_mels (int, optional): Number of mel filterbanks. (Default: ``128``)
sample_rate (int, optional): Sample rate of audio signal. (Default: ``16000``)
f_min (float, optional): Minimum frequency. (Default: ``0.``)
f_max (float or None, optional): Maximum frequency. (Default: ``sample_rate // 2``)
n_stft (int, optional): Number of bins in STFT. See ``n_fft`` in :class:`Spectrogram`. (Default: ``201``)
norm (str or None, optional): If 'slaney', divide the triangular mel weights by the width of the mel band
(area normalization). (Default: ``None``)
mel_scale (str, optional): Scale to use: ``htk`` or ``slaney``. (Default: ``htk``)
"""
__constants__ = ['n_mels', 'sample_rate', 'f_min', 'f_max']
def __init__(self,
n_mels: int = 128,
sample_rate: int = 16000,
f_min: float = 0.,
f_max: Optional[float] = None,
n_stft: int = 201,
norm: Optional[str] = None,
mel_scale: str = "htk") -> None:
super(MelScale, self).__init__()
self.n_mels = n_mels
self.sample_rate = sample_rate
self.f_max = f_max if f_max is not None else float(sample_rate // 2)
self.f_min = f_min
self.norm = norm
self.mel_scale = mel_scale
assert f_min <= self.f_max, 'Require f_min: {} < f_max: {}'.format(f_min, self.f_max)
fb = F.melscale_fbanks(
n_stft, self.f_min, self.f_max, self.n_mels, self.sample_rate, self.norm,
self.mel_scale)
self.register_buffer('fb', fb)
def forward(self, specgram: Tensor) -> Tensor:
r"""
Args:
specgram (Tensor): A spectrogram STFT of dimension (..., freq, time).
Returns:
Tensor: Mel frequency spectrogram of size (..., ``n_mels``, time).
"""
# (..., time, freq) dot (freq, n_mels) -> (..., n_mels, time)
mel_specgram = torch.matmul(specgram.transpose(-1, -2), self.fb).transpose(-1, -2)
return mel_specgram
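# Hedged usage sketch (not part of the original module): chaining Spectrogram
# and MelScale as described above. n_stft must equal n_fft // 2 + 1, so with
# n_fft=400 the default n_stft=201 applies; the sample rate is a placeholder.
def _mel_scale_example(waveform: Tensor) -> Tensor:
    specgram = Spectrogram(n_fft=400)(waveform)
    return MelScale(n_mels=128, sample_rate=16000, n_stft=201)(specgram)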
class InverseMelScale(torch.nn.Module):
r"""Solve for a normal STFT from a mel frequency STFT, using a conversion
matrix. This uses triangular filter banks.
It minimizes the euclidean norm between the input mel-spectrogram and the product of
the estimated spectrogram and the filter banks using SGD.
Args:
n_stft (int): Number of bins in STFT. See ``n_fft`` in :class:`Spectrogram`.
n_mels (int, optional): Number of mel filterbanks. (Default: ``128``)
sample_rate (int, optional): Sample rate of audio signal. (Default: ``16000``)
f_min (float, optional): Minimum frequency. (Default: ``0.``)
f_max (float or None, optional): Maximum frequency. (Default: ``sample_rate // 2``)
max_iter (int, optional): Maximum number of optimization iterations. (Default: ``100000``)
tolerance_loss (float, optional): Value of loss to stop optimization at. (Default: ``1e-5``)
tolerance_change (float, optional): Difference in losses to stop optimization at. (Default: ``1e-8``)
sgdargs (dict or None, optional): Arguments for the SGD optimizer. (Default: ``None``)
norm (Optional[str]): If 'slaney', divide the triangular mel weights by the width of the mel band
(area normalization). (Default: ``None``)
mel_scale (str, optional): Scale to use: ``htk`` or ``slaney``. (Default: ``htk``)
"""
__constants__ = ['n_stft', 'n_mels', 'sample_rate', 'f_min', 'f_max', 'max_iter', 'tolerance_loss',
'tolerance_change', 'sgdargs']
def __init__(self,
n_stft: int,
n_mels: int = 128,
sample_rate: int = 16000,
f_min: float = 0.,
f_max: Optional[float] = None,
max_iter: int = 100000,
tolerance_loss: float = 1e-5,
tolerance_change: float = 1e-8,
sgdargs: Optional[dict] = None,
norm: Optional[str] = None,
mel_scale: str = "htk") -> None:
super(InverseMelScale, self).__init__()
self.n_mels = n_mels
self.sample_rate = sample_rate
self.f_max = f_max or float(sample_rate // 2)
self.f_min = f_min
self.max_iter = max_iter
self.tolerance_loss = tolerance_loss
self.tolerance_change = tolerance_change
self.sgdargs = sgdargs or {'lr': 0.1, 'momentum': 0.9}
        assert f_min <= self.f_max, 'Require f_min: {} <= f_max: {}'.format(f_min, self.f_max)
fb = F.melscale_fbanks(n_stft, self.f_min, self.f_max, self.n_mels, self.sample_rate,
norm, mel_scale)
self.register_buffer('fb', fb)
def forward(self, melspec: Tensor) -> Tensor:
r"""
Args:
melspec (Tensor): A Mel frequency spectrogram of dimension (..., ``n_mels``, time)
Returns:
Tensor: Linear scale spectrogram of size (..., freq, time)
"""
# pack batch
shape = melspec.size()
melspec = melspec.view(-1, shape[-2], shape[-1])
n_mels, time = shape[-2], shape[-1]
freq, _ = self.fb.size() # (freq, n_mels)
melspec = melspec.transpose(-1, -2)
assert self.n_mels == n_mels
specgram = torch.rand(melspec.size()[0], time, freq, requires_grad=True,
dtype=melspec.dtype, device=melspec.device)
optim = torch.optim.SGD([specgram], **self.sgdargs)
loss = float('inf')
for _ in range(self.max_iter):
optim.zero_grad()
diff = melspec - specgram.matmul(self.fb)
new_loss = diff.pow(2).sum(axis=-1).mean()
# take sum over mel-frequency then average over other dimensions
            # so that loss threshold is applied per unit timeframe
new_loss.backward()
optim.step()
specgram.data = specgram.data.clamp(min=0)
new_loss = new_loss.item()
if new_loss < self.tolerance_loss or abs(loss - new_loss) < self.tolerance_change:
break
loss = new_loss
specgram.requires_grad_(False)
specgram = specgram.clamp(min=0).transpose(-1, -2)
# unpack batch
specgram = specgram.view(shape[:-2] + (freq, time))
return specgram
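# Illustrative usage sketch: estimating a 201-bin linear spectrogram from a mel
# spectrogram. ``max_iter`` is kept small here only to keep the sketch fast; the
# default runs the SGD loop far longer.
def _example_inverse_mel_scale() -> Tensor:
    mel_specgram = torch.rand(1, 64, 10)                         # (channel, n_mels, time)
    inverse = InverseMelScale(n_stft=201, n_mels=64, sample_rate=16000, max_iter=100)
    return inverse(mel_specgram)                                 # (channel, 201, time)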
class MelSpectrogram(torch.nn.Module):
r"""Create MelSpectrogram for a raw audio signal. This is a composition of Spectrogram
and MelScale.
Sources
* https://gist.github.com/kastnerkyle/179d6e9a88202ab0a2fe
* https://timsainb.github.io/spectrograms-mfccs-and-inversion-in-python.html
* http://haythamfayek.com/2016/04/21/speech-processing-for-machine-learning.html
Args:
sample_rate (int, optional): Sample rate of audio signal. (Default: ``16000``)
n_fft (int, optional): Size of FFT, creates ``n_fft // 2 + 1`` bins. (Default: ``400``)
win_length (int or None, optional): Window size. (Default: ``n_fft``)
hop_length (int or None, optional): Length of hop between STFT windows. (Default: ``win_length // 2``)
f_min (float, optional): Minimum frequency. (Default: ``0.``)
f_max (float or None, optional): Maximum frequency. (Default: ``None``)
pad (int, optional): Two sided padding of signal. (Default: ``0``)
n_mels (int, optional): Number of mel filterbanks. (Default: ``128``)
window_fn (Callable[..., Tensor], optional): A function to create a window tensor
that is applied/multiplied to each frame/window. (Default: ``torch.hann_window``)
power (float, optional): Exponent for the magnitude spectrogram,
(must be > 0) e.g., 1 for energy, 2 for power, etc. (Default: ``2``)
normalized (bool, optional): Whether to normalize by magnitude after stft. (Default: ``False``)
wkwargs (Dict[..., ...] or None, optional): Arguments for window function. (Default: ``None``)
center (bool, optional): whether to pad :attr:`waveform` on both sides so
that the :math:`t`-th frame is centered at time :math:`t \times \text{hop\_length}`.
(Default: ``True``)
pad_mode (string, optional): controls the padding method used when
:attr:`center` is ``True``. (Default: ``"reflect"``)
onesided (bool, optional): controls whether to return half of results to
avoid redundancy. (Default: ``True``)
norm (Optional[str]): If 'slaney', divide the triangular mel weights by the width of the mel band
(area normalization). (Default: ``None``)
mel_scale (str, optional): Scale to use: ``htk`` or ``slaney``. (Default: ``htk``)
Example
>>> waveform, sample_rate = torchaudio.load('test.wav', normalize=True)
>>> transform = transforms.MelSpectrogram(sample_rate)
>>> mel_specgram = transform(waveform) # (channel, n_mels, time)
"""
__constants__ = ['sample_rate', 'n_fft', 'win_length', 'hop_length', 'pad', 'n_mels', 'f_min']
def __init__(self,
sample_rate: int = 16000,
n_fft: int = 400,
win_length: Optional[int] = None,
hop_length: Optional[int] = None,
f_min: float = 0.,
f_max: Optional[float] = None,
pad: int = 0,
n_mels: int = 128,
window_fn: Callable[..., Tensor] = torch.hann_window,
power: float = 2.,
normalized: bool = False,
wkwargs: Optional[dict] = None,
center: bool = True,
pad_mode: str = "reflect",
onesided: bool = True,
norm: Optional[str] = None,
mel_scale: str = "htk") -> None:
super(MelSpectrogram, self).__init__()
self.sample_rate = sample_rate
self.n_fft = n_fft
self.win_length = win_length if win_length is not None else n_fft
self.hop_length = hop_length if hop_length is not None else self.win_length // 2
self.pad = pad
self.power = power
self.normalized = normalized
self.n_mels = n_mels # number of mel frequency bins
self.f_max = f_max
self.f_min = f_min
self.spectrogram = Spectrogram(n_fft=self.n_fft, win_length=self.win_length,
hop_length=self.hop_length,
pad=self.pad, window_fn=window_fn, power=self.power,
normalized=self.normalized, wkwargs=wkwargs,
center=center, pad_mode=pad_mode, onesided=onesided)
self.mel_scale = MelScale(
self.n_mels,
self.sample_rate,
self.f_min,
self.f_max,
self.n_fft // 2 + 1,
norm,
mel_scale
)
def forward(self, waveform: Tensor) -> Tensor:
r"""
Args:
waveform (Tensor): Tensor of audio of dimension (..., time).
Returns:
Tensor: Mel frequency spectrogram of size (..., ``n_mels``, time).
"""
specgram = self.spectrogram(waveform)
mel_specgram = self.mel_scale(specgram)
return mel_specgram
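# Illustrative sketch of the composition mentioned in the docstring: calling the
# transform directly is the same as applying its ``spectrogram`` and ``mel_scale``
# sub-modules in sequence.
def _example_mel_spectrogram_composition() -> Tensor:
    waveform = torch.randn(1, 16000)
    transform = MelSpectrogram(sample_rate=16000, n_fft=400, n_mels=64)
    direct = transform(waveform)
    composed = transform.mel_scale(transform.spectrogram(waveform))
    assert torch.allclose(direct, composed)
    return direct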
class MFCC(torch.nn.Module):
r"""Create the Mel-frequency cepstrum coefficients from an audio signal.
By default, this calculates the MFCC on the DB-scaled Mel spectrogram.
This is not the textbook implementation, but is implemented here to
give consistency with librosa.
This output depends on the maximum value in the input spectrogram, and so
may return different values for an audio clip split into snippets vs. a
    full clip.
Args:
sample_rate (int, optional): Sample rate of audio signal. (Default: ``16000``)
n_mfcc (int, optional): Number of mfc coefficients to retain. (Default: ``40``)
dct_type (int, optional): type of DCT (discrete cosine transform) to use. (Default: ``2``)
norm (str, optional): norm to use. (Default: ``'ortho'``)
log_mels (bool, optional): whether to use log-mel spectrograms instead of db-scaled. (Default: ``False``)
melkwargs (dict or None, optional): arguments for MelSpectrogram. (Default: ``None``)
"""
__constants__ = ['sample_rate', 'n_mfcc', 'dct_type', 'top_db', 'log_mels']
def __init__(self,
sample_rate: int = 16000,
n_mfcc: int = 40,
dct_type: int = 2,
norm: str = 'ortho',
log_mels: bool = False,
melkwargs: Optional[dict] = None) -> None:
super(MFCC, self).__init__()
supported_dct_types = [2]
if dct_type not in supported_dct_types:
raise ValueError('DCT type not supported: {}'.format(dct_type))
self.sample_rate = sample_rate
self.n_mfcc = n_mfcc
self.dct_type = dct_type
self.norm = norm
self.top_db = 80.0
self.amplitude_to_DB = AmplitudeToDB('power', self.top_db)
melkwargs = melkwargs or {}
self.MelSpectrogram = MelSpectrogram(sample_rate=self.sample_rate, **melkwargs)
if self.n_mfcc > self.MelSpectrogram.n_mels:
raise ValueError('Cannot select more MFCC coefficients than # mel bins')
dct_mat = F.create_dct(self.n_mfcc, self.MelSpectrogram.n_mels, self.norm)
self.register_buffer('dct_mat', dct_mat)
self.log_mels = log_mels
def forward(self, waveform: Tensor) -> Tensor:
r"""
Args:
waveform (Tensor): Tensor of audio of dimension (..., time).
Returns:
Tensor: specgram_mel_db of size (..., ``n_mfcc``, time).
"""
mel_specgram = self.MelSpectrogram(waveform)
if self.log_mels:
log_offset = 1e-6
mel_specgram = torch.log(mel_specgram + log_offset)
else:
mel_specgram = self.amplitude_to_DB(mel_specgram)
        # (..., time, n_mels) dot (n_mels, n_mfcc) -> (..., n_mfcc, time)
mfcc = torch.matmul(mel_specgram.transpose(-1, -2), self.dct_mat).transpose(-1, -2)
return mfcc
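# Illustrative usage sketch: 13 MFCCs computed from a 64-band mel spectrogram; the
# ``melkwargs`` dict is forwarded to the underlying ``MelSpectrogram``.
def _example_mfcc() -> Tensor:
    waveform = torch.randn(1, 16000)
    mfcc = MFCC(sample_rate=16000, n_mfcc=13,
                melkwargs={'n_fft': 400, 'hop_length': 160, 'n_mels': 64})
    return mfcc(waveform)                                        # (channel, 13, time)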
class LFCC(torch.nn.Module):
r"""Create the linear-frequency cepstrum coefficients from an audio signal.
By default, this calculates the LFCC on the DB-scaled linear filtered spectrogram.
This is not the textbook implementation, but is implemented here to
give consistency with librosa.
This output depends on the maximum value in the input spectrogram, and so
may return different values for an audio clip split into snippets vs. a
    full clip.
Args:
sample_rate (int, optional): Sample rate of audio signal. (Default: ``16000``)
n_filter (int, optional): Number of linear filters to apply. (Default: ``128``)
n_lfcc (int, optional): Number of lfc coefficients to retain. (Default: ``40``)
f_min (float, optional): Minimum frequency. (Default: ``0.``)
f_max (float or None, optional): Maximum frequency. (Default: ``None``)
dct_type (int, optional): type of DCT (discrete cosine transform) to use. (Default: ``2``)
norm (str, optional): norm to use. (Default: ``'ortho'``)
log_lf (bool, optional): whether to use log-lf spectrograms instead of db-scaled. (Default: ``False``)
speckwargs (dict or None, optional): arguments for Spectrogram. (Default: ``None``)
"""
__constants__ = ['sample_rate', 'n_filter', 'n_lfcc', 'dct_type', 'top_db', 'log_lf']
def __init__(self,
sample_rate: int = 16000,
n_filter: int = 128,
f_min: float = 0.,
f_max: Optional[float] = None,
n_lfcc: int = 40,
dct_type: int = 2,
norm: str = 'ortho',
log_lf: bool = False,
speckwargs: Optional[dict] = None) -> None:
super(LFCC, self).__init__()
supported_dct_types = [2]
if dct_type not in supported_dct_types:
raise ValueError('DCT type not supported: {}'.format(dct_type))
self.sample_rate = sample_rate
self.f_min = f_min
self.f_max = f_max if f_max is not None else float(sample_rate // 2)
self.n_filter = n_filter
self.n_lfcc = n_lfcc
self.dct_type = dct_type
self.norm = norm
self.top_db = 80.0
self.amplitude_to_DB = AmplitudeToDB('power', self.top_db)
speckwargs = speckwargs or {}
self.Spectrogram = Spectrogram(**speckwargs)
if self.n_lfcc > self.Spectrogram.n_fft:
raise ValueError('Cannot select more LFCC coefficients than # fft bins')
filter_mat = F.linear_fbanks(
n_freqs=self.Spectrogram.n_fft // 2 + 1,
f_min=self.f_min,
f_max=self.f_max,
n_filter=self.n_filter,
sample_rate=self.sample_rate,
)
self.register_buffer("filter_mat", filter_mat)
dct_mat = F.create_dct(self.n_lfcc, self.n_filter, self.norm)
self.register_buffer('dct_mat', dct_mat)
self.log_lf = log_lf
def forward(self, waveform: Tensor) -> Tensor:
r"""
Args:
waveform (Tensor): Tensor of audio of dimension (..., time).
Returns:
Tensor: Linear Frequency Cepstral Coefficients of size (..., ``n_lfcc``, time).
"""
specgram = self.Spectrogram(waveform)
# (..., time, freq) dot (freq, n_filter) -> (..., n_filter, time)
specgram = torch.matmul(specgram.transpose(-1, -2), self.filter_mat).transpose(-1, -2)
if self.log_lf:
log_offset = 1e-6
specgram = torch.log(specgram + log_offset)
else:
specgram = self.amplitude_to_DB(specgram)
# (..., time, n_filter) dot (n_filter, n_lfcc) -> (..., n_lfcc, time)
lfcc = torch.matmul(specgram.transpose(-1, -2), self.dct_mat).transpose(-1, -2)
return lfcc
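# Illustrative usage sketch: 20 LFCCs computed over the default 128 linear filters;
# ``speckwargs`` is forwarded to the underlying ``Spectrogram``.
def _example_lfcc() -> Tensor:
    waveform = torch.randn(1, 16000)
    lfcc = LFCC(sample_rate=16000, n_lfcc=20,
                speckwargs={'n_fft': 400, 'hop_length': 160})
    return lfcc(waveform)                                        # (channel, 20, time)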
class MuLawEncoding(torch.nn.Module):
r"""Encode signal based on mu-law companding. For more info see the
`Wikipedia Entry <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_
This algorithm assumes the signal has been scaled to between -1 and 1 and
returns a signal encoded with values from 0 to quantization_channels - 1
Args:
quantization_channels (int, optional): Number of channels. (Default: ``256``)
Example
>>> waveform, sample_rate = torchaudio.load('test.wav', normalize=True)
>>> transform = torchaudio.transforms.MuLawEncoding(quantization_channels=512)
>>> mulawtrans = transform(waveform)
"""
__constants__ = ['quantization_channels']
def __init__(self, quantization_channels: int = 256) -> None:
super(MuLawEncoding, self).__init__()
self.quantization_channels = quantization_channels
def forward(self, x: Tensor) -> Tensor:
r"""
Args:
x (Tensor): A signal to be encoded.
Returns:
x_mu (Tensor): An encoded signal.
"""
return F.mu_law_encoding(x, self.quantization_channels)
class MuLawDecoding(torch.nn.Module):
r"""Decode mu-law encoded signal. For more info see the
`Wikipedia Entry <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_
This expects an input with values between 0 and quantization_channels - 1
and returns a signal scaled between -1 and 1.
Args:
quantization_channels (int, optional): Number of channels. (Default: ``256``)
"""
__constants__ = ['quantization_channels']
def __init__(self, quantization_channels: int = 256) -> None:
super(MuLawDecoding, self).__init__()
self.quantization_channels = quantization_channels
def forward(self, x_mu: Tensor) -> Tensor:
r"""
Args:
x_mu (Tensor): A mu-law encoded signal which needs to be decoded.
Returns:
Tensor: The signal decoded.
"""
return F.mu_law_decoding(x_mu, self.quantization_channels)
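# Illustrative round-trip sketch: mu-law companding quantizes the signal, so the
# decoded waveform is close to, but not exactly, the original.
def _example_mu_law_round_trip() -> Tensor:
    waveform = torch.rand(1, 16000) * 2 - 1                      # scaled to [-1, 1]
    encoded = MuLawEncoding(quantization_channels=256)(waveform)
    return MuLawDecoding(quantization_channels=256)(encoded)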
class Resample(torch.nn.Module):
r"""Resample a signal from one frequency to another. A resampling method can be given.
Note:
If resampling on waveforms of higher precision than float32, there may be a small loss of precision
because the kernel is cached once as float32. If high precision resampling is important for your application,
the functional form will retain higher precision, but run slower because it does not cache the kernel.
Alternatively, you could rewrite a transform that caches a higher precision kernel.
Args:
orig_freq (float, optional): The original frequency of the signal. (Default: ``16000``)
new_freq (float, optional): The desired frequency. (Default: ``16000``)
resampling_method (str, optional): The resampling method to use.
Options: [``sinc_interpolation``, ``kaiser_window``] (Default: ``'sinc_interpolation'``)
lowpass_filter_width (int, optional): Controls the sharpness of the filter, more == sharper
but less efficient. (Default: ``6``)
rolloff (float, optional): The roll-off frequency of the filter, as a fraction of the Nyquist.
            Lower values reduce aliasing, but also attenuate some of the highest frequencies. (Default: ``0.99``)
beta (float or None): The shape parameter used for kaiser window.
        dtype (torch.dtype, optional):
            Determines the precision in which the resampling kernel is pre-computed and cached. If not provided,
            the kernel is computed with ``torch.float64`` then cached as ``torch.float32``.
            If you need higher precision, provide ``torch.float64``, and the pre-computed kernel is computed and
            cached as ``torch.float64``. If you use resample with lower precision, then instead of providing this
            argument, please use ``Resample.to(dtype)``, so that the kernel generation is still
            carried out on ``torch.float64``.
Example
>>> waveform, sample_rate = torchaudio.load('test.wav', normalize=True)
>>> transform = transforms.Resample(sample_rate, sample_rate/10)
>>> waveform = transform(waveform)
"""
def __init__(
self,
orig_freq: float = 16000,
new_freq: float = 16000,
resampling_method: str = 'sinc_interpolation',
lowpass_filter_width: int = 6,
rolloff: float = 0.99,
beta: Optional[float] = None,
*,
dtype: Optional[torch.dtype] = None,
) -> None:
super().__init__()
self.orig_freq = orig_freq
self.new_freq = new_freq
self.gcd = math.gcd(int(self.orig_freq), int(self.new_freq))
self.resampling_method = resampling_method
self.lowpass_filter_width = lowpass_filter_width
self.rolloff = rolloff
self.beta = beta
if self.orig_freq != self.new_freq:
kernel, self.width = _get_sinc_resample_kernel(
self.orig_freq, self.new_freq, self.gcd,
self.lowpass_filter_width, self.rolloff,
self.resampling_method, beta, dtype=dtype)
self.register_buffer('kernel', kernel)
def forward(self, waveform: Tensor) -> Tensor:
r"""
Args:
waveform (Tensor): Tensor of audio of dimension (..., time).
Returns:
Tensor: Output signal of dimension (..., time).
"""
if self.orig_freq == self.new_freq:
return waveform
return _apply_sinc_resample_kernel(
waveform, self.orig_freq, self.new_freq, self.gcd,
self.kernel, self.width)
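# Illustrative sketch of the precision note above: passing ``dtype=torch.float64``
# keeps the cached kernel in double precision for float64 waveforms.
def _example_resample_precision() -> Tensor:
    waveform = torch.randn(1, 48000, dtype=torch.float64)
    resample = Resample(48000, 16000, dtype=torch.float64)
    return resample(waveform)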
class ComplexNorm(torch.nn.Module):
r"""Compute the norm of complex tensor input.
Args:
        power (float, optional): Power of the norm. (Default: ``1.0``)
Example
>>> complex_tensor = ... # Tensor shape of (…, complex=2)
>>> transform = transforms.ComplexNorm(power=2)
>>> complex_norm = transform(complex_tensor)
"""
__constants__ = ['power']
def __init__(self, power: float = 1.0) -> None:
warnings.warn(
'torchaudio.transforms.ComplexNorm has been deprecated '
            'and will be removed in a future release. '
'Please convert the input Tensor to complex type with `torch.view_as_complex` then '
'use `torch.abs` and `torch.angle`. '
'Please refer to https://github.com/pytorch/audio/issues/1337 '
"for more details about torchaudio's plan to migrate to native complex type."
)
super(ComplexNorm, self).__init__()
self.power = power
def forward(self, complex_tensor: Tensor) -> Tensor:
r"""
Args:
complex_tensor (Tensor): Tensor shape of `(..., complex=2)`.
Returns:
Tensor: norm of the input tensor, shape of `(..., )`.
"""
return F.complex_norm(complex_tensor, self.power)
class ComputeDeltas(torch.nn.Module):
r"""Compute delta coefficients of a tensor, usually a spectrogram.
See `torchaudio.functional.compute_deltas` for more details.
Args:
win_length (int): The window length used for computing delta. (Default: ``5``)
mode (str): Mode parameter passed to padding. (Default: ``'replicate'``)
"""
__constants__ = ['win_length']
def __init__(self, win_length: int = 5, mode: str = "replicate") -> None:
super(ComputeDeltas, self).__init__()
self.win_length = win_length
self.mode = mode
def forward(self, specgram: Tensor) -> Tensor:
r"""
Args:
specgram (Tensor): Tensor of audio of dimension (..., freq, time).
Returns:
Tensor: Tensor of deltas of dimension (..., freq, time).
"""
return F.compute_deltas(specgram, win_length=self.win_length, mode=self.mode)
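# Illustrative usage sketch: stacking a spectrogram with its delta and delta-delta
# (acceleration) coefficients along the frequency dimension.
def _example_compute_deltas() -> Tensor:
    specgram = torch.randn(1, 40, 100)                           # (channel, freq, time)
    compute_deltas = ComputeDeltas(win_length=5)
    delta = compute_deltas(specgram)
    delta2 = compute_deltas(delta)
    return torch.cat([specgram, delta, delta2], dim=1)           # (channel, 3 * freq, time)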
class TimeStretch(torch.nn.Module):
r"""Stretch stft in time without modifying pitch for a given rate.
Args:
hop_length (int or None, optional): Length of hop between STFT windows. (Default: ``win_length // 2``)
n_freq (int, optional): number of filter banks from stft. (Default: ``201``)
fixed_rate (float or None, optional): rate to speed up or slow down by.
If None is provided, rate must be passed to the forward method. (Default: ``None``)
"""
__constants__ = ['fixed_rate']
def __init__(self,
hop_length: Optional[int] = None,
n_freq: int = 201,
fixed_rate: Optional[float] = None) -> None:
super(TimeStretch, self).__init__()
self.fixed_rate = fixed_rate
n_fft = (n_freq - 1) * 2
hop_length = hop_length if hop_length is not None else n_fft // 2
self.register_buffer('phase_advance', torch.linspace(0, math.pi * hop_length, n_freq)[..., None])
def forward(self, complex_specgrams: Tensor, overriding_rate: Optional[float] = None) -> Tensor:
r"""
Args:
complex_specgrams (Tensor):
                Either a real tensor of dimension ``(..., freq, num_frame, complex=2)``
or a tensor of dimension ``(..., freq, num_frame)`` with complex dtype.
overriding_rate (float or None, optional): speed up to apply to this batch.
If no rate is passed, use ``self.fixed_rate``. (Default: ``None``)
Returns:
Tensor:
Stretched spectrogram. The resulting tensor is of the same dtype as the input
spectrogram, but the number of frames is changed to ``ceil(num_frame / rate)``.
"""
if overriding_rate is None:
if self.fixed_rate is None:
raise ValueError(
"If no fixed_rate is specified, must pass a valid rate to the forward method.")
rate = self.fixed_rate
else:
rate = overriding_rate
return F.phase_vocoder(complex_specgrams, rate, self.phase_advance)
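# Illustrative usage sketch: stretching a complex STFT by a fixed rate of 1.2, which
# shortens the spectrogram to roughly ``ceil(num_frame / 1.2)`` frames without
# changing the pitch. A random complex tensor stands in for a real STFT here.
def _example_time_stretch() -> Tensor:
    n_freq = 201
    complex_specgram = torch.randn(1, n_freq, 100, dtype=torch.cfloat)
    stretch = TimeStretch(n_freq=n_freq, fixed_rate=1.2)
    return stretch(complex_specgram)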
class Fade(torch.nn.Module):
r"""Add a fade in and/or fade out to an waveform.
Args:
fade_in_len (int, optional): Length of fade-in (time frames). (Default: ``0``)
fade_out_len (int, optional): Length of fade-out (time frames). (Default: ``0``)
fade_shape (str, optional): Shape of fade. Must be one of: "quarter_sine",
"half_sine", "linear", "logarithmic", "exponential". (Default: ``"linear"``)
"""
def __init__(self,
fade_in_len: int = 0,
fade_out_len: int = 0,
fade_shape: str = "linear") -> None:
super(Fade, self).__init__()
self.fade_in_len = fade_in_len
self.fade_out_len = fade_out_len
self.fade_shape = fade_shape
def forward(self, waveform: Tensor) -> Tensor:
r"""
Args:
waveform (Tensor): Tensor of audio of dimension (..., time).
Returns:
Tensor: Tensor of audio of dimension (..., time).
"""
waveform_length = waveform.size()[-1]
device = waveform.device
return self._fade_in(waveform_length).to(device) * \
self._fade_out(waveform_length).to(device) * waveform
def _fade_in(self, waveform_length: int) -> Tensor:
fade = torch.linspace(0, 1, self.fade_in_len)
ones = torch.ones(waveform_length - self.fade_in_len)
if self.fade_shape == "linear":
fade = fade
if self.fade_shape == "exponential":
fade = torch.pow(2, (fade - 1)) * fade
if self.fade_shape == "logarithmic":
fade = torch.log10(.1 + fade) + 1
if self.fade_shape == "quarter_sine":
fade = torch.sin(fade * math.pi / 2)
if self.fade_shape == "half_sine":
fade = torch.sin(fade * math.pi - math.pi / 2) / 2 + 0.5
return torch.cat((fade, ones)).clamp_(0, 1)
def _fade_out(self, waveform_length: int) -> Tensor:
fade = torch.linspace(0, 1, self.fade_out_len)
ones = torch.ones(waveform_length - self.fade_out_len)
if self.fade_shape == "linear":
fade = - fade + 1
if self.fade_shape == "exponential":
fade = torch.pow(2, - fade) * (1 - fade)
if self.fade_shape == "logarithmic":
fade = torch.log10(1.1 - fade) + 1
if self.fade_shape == "quarter_sine":
fade = torch.sin(fade * math.pi / 2 + math.pi / 2)
if self.fade_shape == "half_sine":
fade = torch.sin(fade * math.pi + math.pi / 2) / 2 + 0.5
return torch.cat((ones, fade)).clamp_(0, 1)
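# Illustrative usage sketch: a 50 ms half-sine fade-in and a 100 ms fade-out on a
# one-second, 16 kHz waveform (lengths are given in samples).
def _example_fade() -> Tensor:
    waveform = torch.randn(1, 16000)
    fade = Fade(fade_in_len=800, fade_out_len=1600, fade_shape='half_sine')
    return fade(waveform)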
class _AxisMasking(torch.nn.Module):
r"""Apply masking to a spectrogram.
Args:
mask_param (int): Maximum possible length of the mask.
axis (int): What dimension the mask is applied on.
iid_masks (bool): Applies iid masks to each of the examples in the batch dimension.
This option is applicable only when the input tensor is 4D.
"""
__constants__ = ['mask_param', 'axis', 'iid_masks']
def __init__(self, mask_param: int, axis: int, iid_masks: bool) -> None:
super(_AxisMasking, self).__init__()
self.mask_param = mask_param
self.axis = axis
self.iid_masks = iid_masks
def forward(self, specgram: Tensor, mask_value: float = 0.) -> Tensor:
r"""
Args:
specgram (Tensor): Tensor of dimension (..., freq, time).
mask_value (float): Value to assign to the masked columns.
Returns:
Tensor: Masked spectrogram of dimensions (..., freq, time).
"""
# if iid_masks flag marked and specgram has a batch dimension
if self.iid_masks and specgram.dim() == 4:
return F.mask_along_axis_iid(specgram, self.mask_param, mask_value, self.axis + 1)
else:
return F.mask_along_axis(specgram, self.mask_param, mask_value, self.axis)
class FrequencyMasking(_AxisMasking):
r"""Apply masking to a spectrogram in the frequency domain.
Args:
freq_mask_param (int): maximum possible length of the mask.
Indices uniformly sampled from [0, freq_mask_param).
iid_masks (bool, optional): whether to apply different masks to each
example/channel in the batch. (Default: ``False``)
This option is applicable only when the input tensor is 4D.
"""
def __init__(self, freq_mask_param: int, iid_masks: bool = False) -> None:
super(FrequencyMasking, self).__init__(freq_mask_param, 1, iid_masks)
class TimeMasking(_AxisMasking):
r"""Apply masking to a spectrogram in the time domain.
Args:
time_mask_param (int): maximum possible length of the mask.
Indices uniformly sampled from [0, time_mask_param).
iid_masks (bool, optional): whether to apply different masks to each
example/channel in the batch. (Default: ``False``)
This option is applicable only when the input tensor is 4D.
"""
def __init__(self, time_mask_param: int, iid_masks: bool = False) -> None:
super(TimeMasking, self).__init__(time_mask_param, 2, iid_masks)
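# Illustrative SpecAugment-style sketch: with a 4D batch and ``iid_masks=True`` each
# example receives its own frequency and time mask.
def _example_spec_augment() -> Tensor:
    specgram = torch.randn(8, 1, 80, 100)                        # (batch, channel, freq, time)
    freq_mask = FrequencyMasking(freq_mask_param=15, iid_masks=True)
    time_mask = TimeMasking(time_mask_param=35, iid_masks=True)
    return time_mask(freq_mask(specgram))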
class Vol(torch.nn.Module):
r"""Add a volume to an waveform.
Args:
gain (float): Interpreted according to the given gain_type:
If ``gain_type`` = ``amplitude``, ``gain`` is a positive amplitude ratio.
If ``gain_type`` = ``power``, ``gain`` is a power (voltage squared).
If ``gain_type`` = ``db``, ``gain`` is in decibels.
gain_type (str, optional): Type of gain. One of: ``amplitude``, ``power``, ``db`` (Default: ``amplitude``)
"""
def __init__(self, gain: float, gain_type: str = 'amplitude'):
super(Vol, self).__init__()
self.gain = gain
self.gain_type = gain_type
if gain_type in ['amplitude', 'power'] and gain < 0:
raise ValueError("If gain_type = amplitude or power, gain must be positive.")
def forward(self, waveform: Tensor) -> Tensor:
r"""
Args:
waveform (Tensor): Tensor of audio of dimension (..., time).
Returns:
Tensor: Tensor of audio of dimension (..., time).
"""
if self.gain_type == "amplitude":
waveform = waveform * self.gain
if self.gain_type == "db":
waveform = F.gain(waveform, self.gain)
if self.gain_type == "power":
waveform = F.gain(waveform, 10 * math.log10(self.gain))
return torch.clamp(waveform, -1, 1)
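# Illustrative usage sketch: boosting a quiet waveform by 6 dB; the result is clamped
# to [-1, 1] by the transform.
def _example_vol() -> Tensor:
    waveform = torch.randn(1, 16000) * 0.1
    return Vol(gain=6.0, gain_type='db')(waveform)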
class SlidingWindowCmn(torch.nn.Module):
r"""
Apply sliding-window cepstral mean (and optionally variance) normalization per utterance.
Args:
cmn_window (int, optional): Window in frames for running average CMN computation (int, default = 600)
min_cmn_window (int, optional): Minimum CMN window used at start of decoding (adds latency only at start).
Only applicable if center == false, ignored if center==true (int, default = 100)
center (bool, optional): If true, use a window centered on the current frame
(to the extent possible, modulo end effects). If false, window is to the left. (bool, default = false)
norm_vars (bool, optional): If true, normalize variance to one. (bool, default = false)
"""
def __init__(self,
cmn_window: int = 600,
min_cmn_window: int = 100,
center: bool = False,
norm_vars: bool = False) -> None:
super().__init__()
self.cmn_window = cmn_window
self.min_cmn_window = min_cmn_window
self.center = center
self.norm_vars = norm_vars
def forward(self, waveform: Tensor) -> Tensor:
r"""
Args:
waveform (Tensor): Tensor of audio of dimension (..., time).
Returns:
Tensor: Tensor of audio of dimension (..., time).
"""
cmn_waveform = F.sliding_window_cmn(
waveform, self.cmn_window, self.min_cmn_window, self.center, self.norm_vars)
return cmn_waveform
class Vad(torch.nn.Module):
r"""Voice Activity Detector. Similar to SoX implementation.
Attempts to trim silence and quiet background sounds from the ends of recordings of speech.
The algorithm currently uses a simple cepstral power measurement to detect voice,
so may be fooled by other things, especially music.
The effect can trim only from the front of the audio,
so in order to trim from the back, the reverse effect must also be used.
Args:
sample_rate (int): Sample rate of audio signal.
trigger_level (float, optional): The measurement level used to trigger activity detection.
            This may need to be changed depending on the noise level, signal level,
and other characteristics of the input audio. (Default: 7.0)
trigger_time (float, optional): The time constant (in seconds)
used to help ignore short bursts of sound. (Default: 0.25)
search_time (float, optional): The amount of audio (in seconds)
to search for quieter/shorter bursts of audio to include prior
to the detected trigger point. (Default: 1.0)
allowed_gap (float, optional): The allowed gap (in seconds) between
            quieter/shorter bursts of audio to include prior
to the detected trigger point. (Default: 0.25)
pre_trigger_time (float, optional): The amount of audio (in seconds) to preserve
before the trigger point and any found quieter/shorter bursts. (Default: 0.0)
        boot_time (float, optional): The algorithm (internally) uses adaptive noise
            estimation/reduction in order to detect the start of the wanted audio.
            This option sets the time for the initial noise estimate. (Default: 0.35)
        noise_up_time (float, optional): Time constant used by the adaptive noise estimator
            for when the noise level is increasing. (Default: 0.1)
        noise_down_time (float, optional): Time constant used by the adaptive noise estimator
            for when the noise level is decreasing. (Default: 0.01)
        noise_reduction_amount (float, optional): Amount of noise reduction to use in
            the detection algorithm (e.g. 0, 0.5, ...). (Default: 1.35)
        measure_freq (float, optional): Frequency of the algorithm’s
            processing/measurements. (Default: 20.0)
        measure_duration (float, optional): Measurement duration.
            (Default: Twice the measurement period; i.e. with overlap.)
        measure_smooth_time (float, optional): Time constant used to smooth
            spectral measurements. (Default: 0.4)
        hp_filter_freq (float, optional): "Brick-wall" frequency of high-pass filter applied
            at the input to the detector algorithm. (Default: 50.0)
        lp_filter_freq (float, optional): "Brick-wall" frequency of low-pass filter applied
            at the input to the detector algorithm. (Default: 6000.0)
        hp_lifter_freq (float, optional): "Brick-wall" frequency of high-pass lifter used
            in the detector algorithm. (Default: 150.0)
        lp_lifter_freq (float, optional): "Brick-wall" frequency of low-pass lifter used
            in the detector algorithm. (Default: 2000.0)
Reference:
- http://sox.sourceforge.net/sox.html
"""
def __init__(self,
sample_rate: int,
trigger_level: float = 7.0,
trigger_time: float = 0.25,
search_time: float = 1.0,
allowed_gap: float = 0.25,
pre_trigger_time: float = 0.0,
boot_time: float = .35,
noise_up_time: float = .1,
noise_down_time: float = .01,
noise_reduction_amount: float = 1.35,
measure_freq: float = 20.0,
measure_duration: Optional[float] = None,
measure_smooth_time: float = .4,
hp_filter_freq: float = 50.,
lp_filter_freq: float = 6000.,
hp_lifter_freq: float = 150.,
lp_lifter_freq: float = 2000.) -> None:
super().__init__()
self.sample_rate = sample_rate
self.trigger_level = trigger_level
self.trigger_time = trigger_time
self.search_time = search_time
self.allowed_gap = allowed_gap
self.pre_trigger_time = pre_trigger_time
self.boot_time = boot_time
self.noise_up_time = noise_up_time
self.noise_down_time = noise_down_time
self.noise_reduction_amount = noise_reduction_amount
self.measure_freq = measure_freq
self.measure_duration = measure_duration
self.measure_smooth_time = measure_smooth_time
self.hp_filter_freq = hp_filter_freq
self.lp_filter_freq = lp_filter_freq
self.hp_lifter_freq = hp_lifter_freq
self.lp_lifter_freq = lp_lifter_freq
def forward(self, waveform: Tensor) -> Tensor:
r"""
Args:
waveform (Tensor): Tensor of audio of dimension `(channels, time)` or `(time)`
Tensor of shape `(channels, time)` is treated as a multi-channel recording
of the same event and the resulting output will be trimmed to the earliest
voice activity in any channel.
"""
return F.vad(
waveform=waveform,
sample_rate=self.sample_rate,
trigger_level=self.trigger_level,
trigger_time=self.trigger_time,
search_time=self.search_time,
allowed_gap=self.allowed_gap,
pre_trigger_time=self.pre_trigger_time,
boot_time=self.boot_time,
noise_up_time=self.noise_up_time,
noise_down_time=self.noise_down_time,
noise_reduction_amount=self.noise_reduction_amount,
measure_freq=self.measure_freq,
measure_duration=self.measure_duration,
measure_smooth_time=self.measure_smooth_time,
hp_filter_freq=self.hp_filter_freq,
lp_filter_freq=self.lp_filter_freq,
hp_lifter_freq=self.hp_lifter_freq,
lp_lifter_freq=self.lp_lifter_freq,
)
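# Illustrative sketch of the note in the docstring: the effect only trims leading
# silence, so the waveform is reversed, trimmed again, and reversed back to also
# remove trailing silence.
def _example_vad_trim_both_ends() -> Tensor:
    waveform = torch.randn(1, 16000)
    vad = Vad(sample_rate=16000)
    trimmed_front = vad(waveform)
    return torch.flip(vad(torch.flip(trimmed_front, [1])), [1])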
class SpectralCentroid(torch.nn.Module):
r"""Compute the spectral centroid for each channel along the time axis.
The spectral centroid is defined as the weighted average of the
frequency values, weighted by their magnitude.
Args:
sample_rate (int): Sample rate of audio signal.
n_fft (int, optional): Size of FFT, creates ``n_fft // 2 + 1`` bins. (Default: ``400``)
win_length (int or None, optional): Window size. (Default: ``n_fft``)
hop_length (int or None, optional): Length of hop between STFT windows. (Default: ``win_length // 2``)
pad (int, optional): Two sided padding of signal. (Default: ``0``)
window_fn (Callable[..., Tensor], optional): A function to create a window tensor
that is applied/multiplied to each frame/window. (Default: ``torch.hann_window``)
wkwargs (dict or None, optional): Arguments for window function. (Default: ``None``)
Example
>>> waveform, sample_rate = torchaudio.load('test.wav', normalize=True)
>>> transform = transforms.SpectralCentroid(sample_rate)
>>> spectral_centroid = transform(waveform) # (channel, time)
"""
__constants__ = ['sample_rate', 'n_fft', 'win_length', 'hop_length', 'pad']
def __init__(self,
sample_rate: int,
n_fft: int = 400,
win_length: Optional[int] = None,
hop_length: Optional[int] = None,
pad: int = 0,
window_fn: Callable[..., Tensor] = torch.hann_window,
wkwargs: Optional[dict] = None) -> None:
super(SpectralCentroid, self).__init__()
self.sample_rate = sample_rate
self.n_fft = n_fft
self.win_length = win_length if win_length is not None else n_fft
self.hop_length = hop_length if hop_length is not None else self.win_length // 2
window = window_fn(self.win_length) if wkwargs is None else window_fn(self.win_length, **wkwargs)
self.register_buffer('window', window)
self.pad = pad
def forward(self, waveform: Tensor) -> Tensor:
r"""
Args:
waveform (Tensor): Tensor of audio of dimension (..., time).
Returns:
Tensor: Spectral Centroid of size (..., time).
"""
return F.spectral_centroid(waveform, self.sample_rate, self.pad, self.window, self.n_fft, self.hop_length,
self.win_length)
class PitchShift(torch.nn.Module):
r"""Shift the pitch of a waveform by ``n_steps`` steps.
Args:
waveform (Tensor): The input waveform of shape `(..., time)`.
sample_rate (float): Sample rate of `waveform`.
n_steps (int): The (fractional) steps to shift `waveform`.
bins_per_octave (int, optional): The number of steps per octave (Default : ``12``).
n_fft (int, optional): Size of FFT, creates ``n_fft // 2 + 1`` bins (Default: ``512``).
win_length (int or None, optional): Window size. If None, then ``n_fft`` is used. (Default: ``None``).
hop_length (int or None, optional): Length of hop between STFT windows. If None, then ``win_length // 4``
is used (Default: ``None``).
window (Tensor or None, optional): Window tensor that is applied/multiplied to each frame/window.
If None, then ``torch.hann_window(win_length)`` is used (Default: ``None``).
Example
>>> waveform, sample_rate = torchaudio.load('test.wav', normalize=True)
>>> transform = transforms.PitchShift(sample_rate, 4)
>>> waveform_shift = transform(waveform) # (channel, time)
"""
__constants__ = ['sample_rate', 'n_steps', 'bins_per_octave', 'n_fft', 'win_length', 'hop_length']
def __init__(self,
sample_rate: int,
n_steps: int,
bins_per_octave: int = 12,
n_fft: int = 512,
win_length: Optional[int] = None,
hop_length: Optional[int] = None,
window_fn: Callable[..., Tensor] = torch.hann_window,
wkwargs: Optional[dict] = None) -> None:
super(PitchShift, self).__init__()
self.n_steps = n_steps
self.bins_per_octave = bins_per_octave
self.sample_rate = sample_rate
self.n_fft = n_fft
self.win_length = win_length if win_length is not None else n_fft
self.hop_length = hop_length if hop_length is not None else self.win_length // 4
window = window_fn(self.win_length) if wkwargs is None else window_fn(self.win_length, **wkwargs)
self.register_buffer('window', window)
def forward(self, waveform: Tensor) -> Tensor:
r"""
Args:
waveform (Tensor): Tensor of audio of dimension (..., time).
Returns:
Tensor: The pitch-shifted audio of shape `(..., time)`.
"""
return F.pitch_shift(waveform, self.sample_rate, self.n_steps, self.bins_per_octave, self.n_fft,
self.win_length, self.hop_length, self.window)
| 43.594776
| 117
| 0.610678
|
24ba42fb04c4f9c013d2e725e681622dacba5787
| 5,074
|
py
|
Python
|
django/contrib/comments/views/moderation.py
|
fizista/django
|
16f3a6a4c7bab11644d11c2be029374e5095cb56
|
[
"BSD-3-Clause"
] | 1
|
2019-02-10T19:33:27.000Z
|
2019-02-10T19:33:27.000Z
|
django/contrib/comments/views/moderation.py
|
fizista/django
|
16f3a6a4c7bab11644d11c2be029374e5095cb56
|
[
"BSD-3-Clause"
] | null | null | null |
django/contrib/comments/views/moderation.py
|
fizista/django
|
16f3a6a4c7bab11644d11c2be029374e5095cb56
|
[
"BSD-3-Clause"
] | 1
|
2020-05-03T20:42:29.000Z
|
2020-05-03T20:42:29.000Z
|
from django import template
from django.conf import settings
from django.contrib import comments
from django.contrib.auth.decorators import login_required, permission_required
from django.contrib.comments import signals
from django.contrib.comments.views.utils import next_redirect, confirmation_view
from django.shortcuts import get_object_or_404, render_to_response
from django.views.decorators.csrf import csrf_protect
@csrf_protect
@login_required
def flag(request, comment_id, next=None):
"""
Flags a comment. Confirmation on GET, action on POST.
Templates: :template:`comments/flag.html`,
Context:
comment
the flagged `comments.comment` object
"""
comment = get_object_or_404(comments.get_model(), pk=comment_id, site__pk=settings.SITE_ID)
# Flag on POST
if request.method == 'POST':
perform_flag(request, comment)
return next_redirect(request, fallback=next or 'comments-flag-done',
c=comment.pk)
# Render a form on GET
else:
return render_to_response('comments/flag.html',
{'comment': comment, "next": next},
template.RequestContext(request)
)
@csrf_protect
@permission_required("comments.can_moderate")
def delete(request, comment_id, next=None):
"""
Deletes a comment. Confirmation on GET, action on POST. Requires the "can
moderate comments" permission.
Templates: :template:`comments/delete.html`,
Context:
comment
the flagged `comments.comment` object
"""
comment = get_object_or_404(comments.get_model(), pk=comment_id, site__pk=settings.SITE_ID)
# Delete on POST
if request.method == 'POST':
# Flag the comment as deleted instead of actually deleting it.
perform_delete(request, comment)
return next_redirect(request, fallback=next or 'comments-delete-done',
c=comment.pk)
# Render a form on GET
else:
return render_to_response('comments/delete.html',
{'comment': comment, "next": next},
template.RequestContext(request)
)
@csrf_protect
@permission_required("comments.can_moderate")
def approve(request, comment_id, next=None):
"""
Approve a comment (that is, mark it as public and non-removed). Confirmation
on GET, action on POST. Requires the "can moderate comments" permission.
Templates: :template:`comments/approve.html`,
Context:
comment
the `comments.comment` object for approval
"""
comment = get_object_or_404(comments.get_model(), pk=comment_id, site__pk=settings.SITE_ID)
    # Approve on POST
if request.method == 'POST':
# Flag the comment as approved.
perform_approve(request, comment)
return next_redirect(request, fallback=next or 'comments-approve-done',
c=comment.pk)
# Render a form on GET
else:
return render_to_response('comments/approve.html',
{'comment': comment, "next": next},
template.RequestContext(request)
)
# The following functions actually perform the various flag/approve/delete
# actions. They've been broken out into separate functions so that they
# may be called from admin actions.
def perform_flag(request, comment):
"""
Actually perform the flagging of a comment from a request.
"""
flag, created = comments.models.CommentFlag.objects.get_or_create(
comment=comment,
user=request.user,
flag=comments.models.CommentFlag.SUGGEST_REMOVAL
)
signals.comment_was_flagged.send(
sender=comment.__class__,
comment=comment,
flag=flag,
created=created,
request=request,
)
def perform_delete(request, comment):
flag, created = comments.models.CommentFlag.objects.get_or_create(
comment=comment,
user=request.user,
flag=comments.models.CommentFlag.MODERATOR_DELETION
)
comment.is_removed = True
comment.save()
signals.comment_was_flagged.send(
sender=comment.__class__,
comment=comment,
flag=flag,
created=created,
request=request,
)
def perform_approve(request, comment):
flag, created = comments.models.CommentFlag.objects.get_or_create(
comment=comment,
user=request.user,
flag=comments.models.CommentFlag.MODERATOR_APPROVAL,
)
comment.is_removed = False
comment.is_public = True
comment.save()
signals.comment_was_flagged.send(
sender=comment.__class__,
comment=comment,
flag=flag,
created=created,
request=request,
)
# Confirmation views.
flag_done = confirmation_view(
template="comments/flagged.html",
doc='Displays a "comment was flagged" success page.'
)
delete_done = confirmation_view(
template="comments/deleted.html",
doc='Displays a "comment was deleted" success page.'
)
approve_done = confirmation_view(
template="comments/approved.html",
doc='Displays a "comment was approved" success page.'
)
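# Illustrative wiring sketch (an assumption, not copied from django.contrib.comments.urls):
# the ``next_redirect`` fallbacks above expect URL names such as 'comments-flag-done',
# 'comments-delete-done' and 'comments-approve-done' to resolve to the confirmation views.
def _example_moderation_urlpatterns():
    from django.conf.urls import patterns, url
    return patterns('django.contrib.comments.views.moderation',
        url(r'^flag/(\d+)/$', 'flag', name='comments-flag'),
        url(r'^flagged/$', 'flag_done', name='comments-flag-done'),
        url(r'^delete/(\d+)/$', 'delete', name='comments-delete'),
        url(r'^deleted/$', 'delete_done', name='comments-delete-done'),
        url(r'^approve/(\d+)/$', 'approve', name='comments-approve'),
        url(r'^approved/$', 'approve_done', name='comments-approve-done'),
    )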
| 30.939024
| 95
| 0.684667
|
3a0aa67ef0e46228765eb8549d2d3f59625a0392
| 4,292
|
py
|
Python
|
etc/results.py
|
sublee/etc
|
f2be64604da5af0d7739cfacf36f55712f0fc5cb
|
[
"BSD-3-Clause"
] | 4
|
2016-05-02T07:37:55.000Z
|
2019-12-24T03:25:26.000Z
|
etc/results.py
|
sublee/etc
|
f2be64604da5af0d7739cfacf36f55712f0fc5cb
|
[
"BSD-3-Clause"
] | null | null | null |
etc/results.py
|
sublee/etc
|
f2be64604da5af0d7739cfacf36f55712f0fc5cb
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
etc.results
~~~~~~~~~~~
"""
from __future__ import absolute_import
import six
from etc.helpers import gen_repr, registry
__all__ = ['ComparedThenDeleted', 'ComparedThenSwapped', 'Created', 'Deleted',
'Directory', 'EtcdResult', 'Expired', 'Got', 'Node', 'Set',
'Updated', 'Value']
def __eq__(self, other):
"""Common `__eq__` implementation for classes which has `__slots__`."""
if self.__class__ is not other.__class__:
return False
return all(getattr(self, attr) == getattr(other, attr)
for attr in self.__slots__)
class Node(object):
__slots__ = ('key', 'modified_index', 'created_index', 'ttl', 'expiration')
def __init__(self, key, modified_index=None, created_index=None,
ttl=None, expiration=None):
self.key = key
self.modified_index = modified_index
self.created_index = created_index
self.ttl = ttl
self.expiration = expiration
@property
def index(self):
"""Alias for `modified_index`."""
return self.modified_index
@property
def expires_at(self):
"""Alias for `expiration`."""
return self.expiration
__eq__ = __eq__
class Value(Node):
"""An etcd value Node."""
__slots__ = Node.__slots__ + ('value',)
def __init__(self, key, value, *args, **kwargs):
super(Value, self).__init__(key, *args, **kwargs)
self.value = value
def __repr__(self):
options = [('modified_index', self.modified_index),
('created_index', self.created_index),
('ttl', self.ttl),
('expiration', self.expiration)]
return gen_repr(self.__class__, u'{0}={1}',
self.key, self.value, options=options)
class Directory(Node):
"""An etcd directory Node."""
__slots__ = Node.__slots__ + ('nodes',)
def __init__(self, key, nodes=(), *args, **kwargs):
super(Directory, self).__init__(key, *args, **kwargs)
self.nodes = nodes
@property
def values(self):
return [node.value for node in self.nodes]
def __repr__(self):
key = self.key
if not key.endswith(u'/'):
key += u'/'
        return gen_repr(self.__class__, u'{0}[{1}]',
                        key, len(self.nodes), short=True)
class EtcdResult(six.with_metaclass(registry('action'))):
"""A successful etcd result.
    Don't use this class directly. There are specific subclasses to be used
instead.
"""
__slots__ = ('node', 'prev_node',
'etcd_index', 'raft_index', 'raft_term')
action = NotImplemented
def __init__(self, node, prev_node=None,
etcd_index=None, raft_index=None, raft_term=None):
self.node = node
self.prev_node = prev_node
self.etcd_index = etcd_index
self.raft_index = raft_index
self.raft_term = raft_term
# Node accessors.
key = property(lambda x: x.node.key)
modified_index = property(lambda x: x.node.modified_index)
created_index = property(lambda x: x.node.created_index)
ttl = property(lambda x: x.node.ttl)
expiration = property(lambda x: x.node.expiration)
index = property(lambda x: x.node.index)
expires_at = property(lambda x: x.node.expires_at)
value = property(lambda x: x.node.value)
nodes = property(lambda x: x.node.nodes)
values = property(lambda x: x.node.values)
def __repr__(self):
return gen_repr(self.__class__, u'{0}', self.node, options=[
('prev_node', self.prev_node),
('etcd_index', self.etcd_index),
('raft_index', self.raft_index),
('raft_term', self.raft_term),
])
__eq__ = __eq__
def def_(name, action=NotImplemented, base=EtcdResult):
return type(name, (base,), {'action': action})
Got = def_('Got', 'get')
Set = def_('Set', 'set')
Deleted = def_('Deleted', 'delete')
Updated = def_('Updated', 'update', Set)
Created = def_('Created', 'create', Set)
Expired = def_('Expired', 'expire', Deleted)
ComparedThenSwapped = def_('ComparedThenSwapped', 'compareAndSwap', Set)
ComparedThenDeleted = def_('ComparedThenDeleted', 'compareAndDelete', Deleted)
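# Illustrative usage sketch: building a typical "get" result for a value node and
# reading it back through the accessor properties on EtcdResult.
def _example_got_result():
    node = Value(u'/config/name', u'etc', modified_index=7, created_index=7)
    result = Got(node, etcd_index=7)
    return result.key, result.value, result.index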
| 29
| 79
| 0.6137
|
f4791de4dc6ba437cbee536401a78b1114b243b6
| 5,340
|
py
|
Python
|
adi_study_watch/nrf5_sdk_15.2.0/adi_study_watch/cli/m2m2/inc/python/ppg_application_interface_def.py
|
ArrowElectronics/Vital-Signs-Monitoring
|
ba43fe9a116d94170561433910fd7bffba5726e7
|
[
"Unlicense"
] | 5
|
2021-06-13T17:11:19.000Z
|
2021-12-01T18:20:38.000Z
|
adi_study_watch/nrf5_sdk_15.2.0/adi_study_watch/cli/m2m2/inc/python/ppg_application_interface_def.py
|
ArrowElectronics/Vital-Signs-Monitoring
|
ba43fe9a116d94170561433910fd7bffba5726e7
|
[
"Unlicense"
] | null | null | null |
adi_study_watch/nrf5_sdk_15.2.0/adi_study_watch/cli/m2m2/inc/python/ppg_application_interface_def.py
|
ArrowElectronics/Vital-Signs-Monitoring
|
ba43fe9a116d94170561433910fd7bffba5726e7
|
[
"Unlicense"
] | 1
|
2022-01-08T15:01:44.000Z
|
2022-01-08T15:01:44.000Z
|
from ctypes import *
from common_application_interface_def import *
from common_sensor_interface_def import *
from dcb_interface_def import *
from m2m2_core_def import *
class M2M2_PPG_APP_CMD_ENUM_t(c_ubyte):
_M2M2_PPG_APP_CMD_LOWEST = 0x40
M2M2_PPG_APP_CMD_GET_LAST_STATES_REQ = 0x42
M2M2_PPG_APP_CMD_GET_LAST_STATES_RESP = 0x43
M2M2_PPG_APP_CMD_GET_STATES_INFO_REQ = 0x44
M2M2_PPG_APP_CMD_GET_STATES_INFO_RESP = 0x45
M2M2_PPG_APP_CMD_GET_ALGO_VENDOR_VERSION_REQ = 0x46
M2M2_PPG_APP_CMD_GET_ALGO_VENDOR_VERSION_RESP = 0x47
M2M2_PPG_APP_CMD_SYNC_DATA_REQ = 0x48
M2M2_PPG_APP_CMD_SYNC_DATA_RESP = 0x49
M2M2_PPG_APP_CMD_DEBUG_DATA_REQ = 0x4A
M2M2_PPG_APP_CMD_DEBUG_DATA_RESP = 0x4B
M2M2_PPG_APP_CMD_GET_CTRVALUE_REQ = 0x4C
M2M2_PPG_APP_CMD_GET_CTRVALUE_RESP = 0x4D
M2M2_PPG_APP_CMD_GET_SMETRICS_REQ = 0x4E
M2M2_PPG_APP_CMD_GET_SMETRICS_RESP = 0x4F
class M2M2_PPG_APP_SYNC_ENUM_t(c_ubyte):
M2M2_PPG_APP_SOFTWARE_SYNC = 0x0
M2M2_PPG_APP_HARDWARE_SYNC = 0x3
class M2M2_SENSOR_PPG_SYNC_NSAMPLES_ENUM_t(c_ubyte):
M2M2_SENSOR_PPG_SYNC_NSAMPLES = 0x3
class M2M2_SENSOR_HRV_NSAMPLES_ENUM_t(c_ubyte):
M2M2_SENSOR_HRV_NSAMPLES = 0x4
class M2M2_SENSOR_PPG_SYNC_DATA_TYPES_ENUM_t(c_ubyte):
M2M2_SENSOR_PPG_SYNC_DATA_TYPES_NO_SYNC = 0x0
M2M2_SENSOR_PPG_SYNC_DATA_TYPES_HW_SYNC1 = 0x1
M2M2_SENSOR_PPG_SYNC_DATA_TYPES_HW_SYNC2 = 0x2
M2M2_SENSOR_PPG_SYNC_DATA_TYPES_HW_SYNC3 = 0x3
M2M2_SENSOR_PPG_SYNC_DATA_TYPES_SW_SYNC = 0x4
class M2M2_SENSOR_PPG_LCFG_ID_ENUM_t(c_ubyte):
M2M2_SENSOR_PPG_LCFG_ID_ADPD4000 = 0x28
M2M2_SENSOR_PPG_LCFG_ID_ADPD107 = 0x6B
M2M2_SENSOR_PPG_LCFG_ID_ADPD108 = 0x6C
M2M2_SENSOR_PPG_LCFG_ID_ADPD185 = 0xB9
M2M2_SENSOR_PPG_LCFG_ID_ADPD188 = 0xBC
class ppg_app_lib_state_t(Structure):
_pack_ = 1
_fields_ = [
("command", c_ubyte),
("status", c_ubyte),
("states", c_ubyte * 10),
]
class ppg_app_ctrValue_t(Structure):
_pack_ = 1
_fields_ = [
("command", c_ubyte),
("status", c_ubyte),
("ctrValue", c_ushort),
]
class ppg_app_signal_metrics_t(Structure):
_pack_ = 1
_fields_ = [
("command", c_ubyte),
("status", c_ubyte),
("metrics", c_ushort * 3),
]
class ppg_app_lcfg_op_t(Structure):
_pack_ = 1
_fields_ = [
("field", c_ubyte),
("value", c_ulong),
]
def ppg_app_lcfg_op_hdr_t(array_size):
class ppg_app_lcfg_op_hdr_t_internal(Structure):
_pack_ = 1
_fields_ = [
("command", c_ubyte),
("status", c_ubyte),
("num_ops", c_ubyte),
("ops", ppg_app_lcfg_op_t * array_size),
]
return ppg_app_lcfg_op_hdr_t_internal()
class ppg_app_hr_debug_stream_t(Structure):
_pack_ = 1
_fields_ = [
("command", c_ubyte),
("status", c_ubyte),
("sequence_num", c_ushort),
("timestamp", c_ulong),
("adpdlibstate", c_ushort),
("hr", c_ushort),
("confidence", c_ushort),
("hr_type", c_ushort),
("rr_interval", c_ushort),
("debugInfo", c_ushort * 10),
]
class ppg_app_state_info_t(Structure):
_pack_ = 1
_fields_ = [
("command", c_ubyte),
("status", c_ubyte),
("state", c_ubyte),
("info", c_ubyte * 20),
]
class ppg_app_cmd_t(Structure):
_pack_ = 1
_fields_ = [
("command", c_ubyte),
("status", c_ubyte),
("commandVal", c_ushort),
]
class ppg_app_set_lcfg_req_t(Structure):
_pack_ = 1
_fields_ = [
("command", c_ubyte),
("status", c_ubyte),
("lcfgid", c_ubyte),
]
class ppg_app_set_lcfg_resp_t(Structure):
_pack_ = 1
_fields_ = [
("command", c_ubyte),
("status", c_ubyte),
]
class ppg_app_dynamic_agc_stream_t(Structure):
_pack_ = 1
_fields_ = [
("command", c_ubyte),
("status", c_ubyte),
("sequence_num", c_ushort),
("timestamp", c_ulong),
("mts", c_ushort * 6),
("setting", c_ushort * 10),
]
class hrv_data_set_t(Structure):
_pack_ = 1
_fields_ = [
("timestamp", c_ushort),
("rr_interval", c_short),
("is_gap", c_ushort),
("rmssd", c_ushort),
]
class ppg_app_hrv_info_t(Structure):
_pack_ = 1
_fields_ = [
("command", c_ubyte),
("status", c_ubyte),
("sequence_num", c_ushort),
("timestamp", c_ulong),
("first_rr_interval", c_short),
("first_is_gap", c_ushort),
("first_rmssd", c_ushort),
("hrv_data", hrv_data_set_t * 3),
]
class m2m2_ppg_lcfg_data_t(Structure):
_pack_ = 1
_fields_ = [
("command", c_ubyte),
("status", c_ubyte),
("size", c_ubyte),
("lcfgdata", c_long * 56),
]
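# Illustrative usage sketch: the packed (``_pack_ = 1``) structures above can be
# serialized directly into raw command payload bytes in native byte order.
def _example_pack_ppg_cmd():
    cmd = ppg_app_cmd_t()
    cmd.command = M2M2_PPG_APP_CMD_ENUM_t.M2M2_PPG_APP_CMD_SYNC_DATA_REQ
    cmd.status = 0
    cmd.commandVal = 0x3
    return bytes(bytearray(cmd))   # 4 bytes: command, status, 16-bit commandVal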
| 29.021739
| 56
| 0.591199
|
8340984ac4236eb711586f512c20608bf4e8704e
| 14,111
|
py
|
Python
|
project/apps/birth_registration/migrations/0030_auto_20150921_1750.py
|
kostik/vrs
|
a347c2d901e1a6b60a85480c9d2b247157881fce
|
[
"BSD-3-Clause"
] | 1
|
2016-11-09T18:57:23.000Z
|
2016-11-09T18:57:23.000Z
|
project/apps/birth_registration/migrations/0030_auto_20150921_1750.py
|
kostik/vrs
|
a347c2d901e1a6b60a85480c9d2b247157881fce
|
[
"BSD-3-Clause"
] | null | null | null |
project/apps/birth_registration/migrations/0030_auto_20150921_1750.py
|
kostik/vrs
|
a347c2d901e1a6b60a85480c9d2b247157881fce
|
[
"BSD-3-Clause"
] | 4
|
2016-09-30T08:24:09.000Z
|
2019-02-28T14:09:19.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import birth_registration.fields
import birth_registration.validators
class Migration(migrations.Migration):
dependencies = [
('birth_registration', '0029_auto_20150921_1714'),
]
operations = [
migrations.AlterField(
model_name='f201',
name='Father_name',
field=birth_registration.fields.Char100Field(max_length=100, null=True, verbose_name="Father's name", blank=True),
),
migrations.AlterField(
model_name='f201',
name='Mother_name',
field=birth_registration.fields.Char100Field(max_length=100, null=True, verbose_name="Mother's name", blank=True),
),
migrations.AlterField(
model_name='f201',
name='UPCR',
field=birth_registration.fields.TownshipField(blank=True, help_text='Township or town', null=True, verbose_name='Usual place of residence', choices=[('01 - Myitkyina', [(1, '01 - Myitkyina'), (2, '02 - Waingmaw'), (3, '03 - Ingyan Yan'), (4, '04 - Moekaung'), (5, '05 - Moehnyin'), (6, '06 - Phakant'), (7, '07 - Karmine*'), (8, '08 - Ta Naing'), (9, '09 - Chibway'), (10, '10 - Sautlaw')]), ('02 - Bamaw', [(11, '01 - Bamaw'), (12, '02 - Shwegu'), (13, '03 - Moemauk'), (14, '04 - Mansi')]), ('03 - Puta O', [(15, '01 - Puta O'), (16, '02 - Swanprabum'), (17, '03 - Machanbaw'), (18, '04 - Khaunglanphoo'), (19, '05 - Naungmon')]), ('04 - Loikaw', [(20, '01 - Loikaw'), (21, '02 - Dimawsoe'), (22, '03 - Phrusoe'), (23, '04 - Shartaw')]), ('05 - Bawlakhei', [(24, '01 - Bawlakhei'), (25, '02 - Pharsaung'), (26, '03 - Meiseit')]), ('06 - Pha An', [(27, '01 - Pha An'), (28, '02 - Hlaingbweit'), (29, '03 - Pharpon'), (30, '04 - Thandaung'), (31, '05 - Thandaungkyee')]), ('07 - Myawaddy', [(32, '01 - Myawaddy')]), ('08 - Kawtkaraik', [(33, '01 - Kawtkaraik'), (34, '02 - Kyarinseikkyi'), (35, '03 - Phayathonesu*'), (36, '04 - Kyoandoe')]), ('09 - Phalan', [(37, '01 - Phalan'), (38, '02 - Hakha'), (39, '03 - Htantalan'), (40, '04 - Teetain'), (41, '05 - Tunzan')]), ('10 - Mintatt', [(42, '01 - Mintatt'), (43, '02 - Matupi'), (44, '03 - Kanpetlet'), (45, '04 - Paletwa')]), ('11 - Sagaing', [(46, '01 - Sagaing'), (47, '02 - Myinmu'), (48, '03 - Myaung')]), ('12 - Shwebo', [(49, '01 - Shwebo'), (50, '02 - Khin Oo'), (51, '03 - Wetlet'), (52, '04 - Kantbalu'), (53, '05 - Kyunhla'), (54, '06 - Yay Oo'), (55, '07 - Dipeiyin'), (56, '08 - Tantsei')]), ('13 - Monywar', [(57, '01 - Monywar'), (58, '02 - Butalin'), (59, '03 - Ahyartaw'), (60, '04 - Chaung Oo'), (61, '05 - Yinmarbin'), (62, '06 - Kani'), (63, '07 - Salingyee'), (64, '08 - Palei')]), ('14 - Kathar', [(65, '01 - Kathar'), (66, '02 - Indaw'), (67, '03 - Hteechaink'), (68, '04 - Bamauk'), (69, '05 - Kawlin'), (70, '06 - Wuntho'), (71, '07 - Pinleibu')]), ('15 - Kalay', [(72, '01 - Kalay'), (73, '02 - Kalaywa'), (74, '03 - Minkin')]), ('16 - Tamu', [(75, '01 - Tamu')]), ('17 - Mawlaik', [(76, '01 - Mawlaik'), (77, '02 - Phaungpyin')]), ('18 - Khantee', [(78, '01 - Khantee'), (79, '02 - Hoamalin'), (80, '03 - Layshee'), (81, '04 - Lahei'), (82, '05 - Nanyon')]), ('19 - Dawei', [(83, '01 - Dawei'), (84, '02 - Launglon'), (85, '03 - Thayetchaung'), (86, '04 - Yayphyu')]), ('20 - Myeik', [(87, '01 - Myeik'), (88, '02 - Kyunsu'), (89, '03 - Pulaw'), (90, '04 - Tanintharyi')]), ('21 - Kautthaung', [(91, '01 - Kautthaung'), (92, '02 - Boatpyin')]), ('22 - Bago', [(93, '01 - Bago'), (94, '02 - Thanatpin'), (95, '03 - Kawa'), (96, '04 - Waw'), (97, '05 - Nyaunglaybin'), (98, '06 - Madauk*'), (99, '07 - Pyuntanzar*'), (100, '08 - Kyauktaga'), (101, '09 - Peinweikone*'), (102, '10 - Daik Oo'), (103, '11 - Shwekyin')]), ('23 - Pyay', [(104, '01 - Pyay'), (105, '02 - Pauk Khaung'), (106, '03 - Padaung'), (107, '04 - Paungtei'), (108, '05 - Theikone'), (109, '06 - Shwetaung')]), ('24 - Tharyarwaddy', [(110, '01 - Tharyarwaddy'), (111, '02 - Thonesei*'), (112, '03 - Letpandan'), (113, '04 - Minhla'), (114, '05 - Oakpho'), (115, '06 - Zeekone'), (116, '07 - Nattalin'), (117, '08 - Moenyo'), (118, '09 - Kyoetbinkaut')]), ('25 - Taungoo', [(119, '01 - Taungoo'), (120, '02 - Yaytarshay'), (121, '03 - Kyaukyee'), (122, '04 - Phyu'), (123, '05 - Oaktwin'), (124, '06 - Htandabin')]), ('26 - Magway', [(125, '01 - Magway'), (126, '02 - Yenanchaung'), (127, '03 - Chauk'), (128, 
'04 - Taungtwingyee'), (129, '05 - Myoethit'), (130, '06 - Natmauk')]), ('27 - Minbu', [(131, '01 - Minbu'), (132, '02 - Saku*'), (133, '03 - Pwintphyu'), (134, '04 - Ngaphei'), (135, '05 - Salin'), (136, '06 - Sinphyukyun*'), (137, '07 - Saytoattaya')]), ('28 - Thayet', [(138, '01 - Thayet'), (139, '02 - Minhla'), (140, '03 - Mintone'), (141, '04 - Kanma'), (142, '05 - Aunglan'), (143, '06 - Sinpaungwei')]), ('29 - Pakokku', [(144, '01 - Pakokku'), (145, '02 - Yesagyo'), (146, '03 - Myaing'), (147, '04 - Pauk'), (148, '05 - Saikphyu')]), ('30 - Gantgaw', [(149, '01 - Gantgaw'), (150, '02 - Hteelin'), (151, '03 - Saw')]), ('31 - Mandalay', [(152, '01 - Aungmyaytharzan'), (153, '02 - Chanayetharzan'), (154, '03 - MahaAungmyay'), (155, '04 - Chanmyatharsi'), (156, '05 - Pyigyeetakhun'), (157, '06 - Amarapura'), (158, '07 - Myitnge*'), (159, '08 - Patheingyee')]), ('32 - Pyin Oo Lwin', [(160, '01 - Pyin Oo Lwin'), (161, '02 - Madayar'), (162, '03 - Sintkuu'), (163, '04 - Moegauk'), (164, '05 - Thabaikkyin')]), ('33 - Kyaukse', [(165, '01 - Kyaukse'), (166, '02 - Sintkai'), (167, '03 - Myitthar'), (168, '04 - Tadaoo')]), ('34 - Myingyan', [(169, '01 - Myingyan'), (170, '02 - Taungthar'), (171, '03 - Nahtoegyee'), (172, '04 - Kyaukbadaung'), (173, '05 - Nganzun')]), ('35 - Nyaung Oo', [(174, '01 - Nyaung Oo'), (175, '02 - Bagan*'), (176, '03 - Ngatharauk*')]), ('36 - Yameithinn', [(177, '01 - Yameithinn'), (178, '02 - Pyawbwei'), (179, '03 - Tatkone'), (180, '04 - Pyinmana'), (181, '05 - Leiway')]), ('37 - Meikhtila', [(182, '01 - Meikhtila'), (183, '02 - Mahlaing'), (184, '03 - Tharzi'), (185, '04 - Wantwin')]), ('38 - Mawlamyaing', [(186, '01 - Mawlamyaing'), (187, '02 - Kyaikmayaw'), (188, '03 - Chaungzon'), (189, '04 - Thanphyuzayat'), (190, '05 - Kyaikkhami*'), (191, '06 - Mudon'), (192, '07 - Yay')]), ('39 - Thahton', [(193, '01 - Thahton'), (194, '02 - Paung'), (195, '03 - Kyaikhto'), (196, '04 - Beelin')]), ('40 - Sittwe', [(197, '01 - Sittwe'), (198, '02 - Poannakyun'), (199, '03 - Myauk Oo'), (200, '04 - Kyauktaw'), (201, '05 - Minbya'), (202, '06 - Pauktaw'), (203, '07 - Myaybon')]), ('41 - Maungdaw', [(204, '01 - Maungdaw')]), ('42 - Buthidaung', [(205, '01 - Buthidaung'), (206, '02 - Rathedaung')]), ('43 - Kyaukphyu', [(207, '01 - Kyaukphyu'), (208, '02 - Man Aung'), (209, '03 - Ranbyei'), (210, '04 - Ann')]), ('44 - Thandwe', [(211, '01 - Thandwe'), (212, '02 - Taungkauk'), (213, '03 - Gwa')]), ('45 - Ahshaytbine (East)', [(214, '01 - Thingankyun'), (215, '02 - Yankin'), (216, '03 - Taung Okkalapa'), (217, '04 - Myauk Okkalapa'), (218, '05 - Tharketa'), (219, '06 - Dawbon'), (220, '07 - Tarmwe'), (221, '08 - Pazuntaung'), (222, '09 - Botahtaung'), (223, '10 - Dagon Myothit Taung (Sounth)'), (224, '11 - Dagon Myothi Myauk (North)'), (225, '12 - Dagon Myothit Ahshayt (East)'), (226, '13 - Dagon Myothit (Seikkan)'), (227, '14 - Mingalataungnyunt')]), ('46 - Ahnoutpine (West)', [(228, '01 - Kyauktada'), (229, '02 - Panbedan'), (230, '03 - Lanmadaw'), (231, '04 - Lathar'), (232, '05 - Ahlon'), (233, '06 - Kyeemyindine'), (234, '07 - Sanchaung'), (235, '08 - Hlaing'), (236, '09 - Kamaryut'), (237, '10 - Mayangone'), (238, '11 - Dagon'), (239, '12 - Bahan'), (240, '13 - Seikkan')]), ('47 - Taungbine (South)', [(241, '01 - Thanhlyin'), (242, '02 - Kyauktan'), (243, '03 - Thonekhwa'), (244, '04 - Khayan'), (245, '05 - Tonte'), (246, '06 - Kauthmu'), (247, '07 - Kunchangone'), (248, '08 - Dala'), (249, '09 - Seikkyee'), (250, '10 - Khanaungto'), (251, '11 - Kokoe Island')]), ('48 - 
Myaukpine (North)', [(252, '01 - Insein'), (253, '02 - Mingalardon'), (254, '03 - Htaunkkyant*'), (255, '04 - Hmawbi'), (256, '05 - Hlegu'), (257, '06 - Tiakkyee'), (258, '07 - Oakkan*'), (259, '08 - Htantabin'), (260, '09 - Shwepyithar'), (261, '10 - Hlaingtharyar'), (262, '11 - Ahphyauk*')]), ('49 - Taungyi', [(263, '01 - Taungyi'), (264, '02 - Ayetharyar*'), (265, '03 - Hopone'), (266, '04 - Nyaungshwe'), (267, '05 - Sisaing'), (268, '06 - Kalaw'), (269, '07 - Aungban*'), (270, '08 - Pindaya'), (271, '09 - Ywarngan'), (272, '10 - Yatsauk'), (273, '11 - Pinlaung'), (274, '12 - Phekhoan')]), ('50 - Loilin', [(275, '01 - Loilin'), (276, '02 - Pinlon'), (277, '03 - Leichar'), (278, '04 - Nantsam(South)'), (279, '05 - Kunhein'), (280, '06 - Moenei'), (281, '07 - Linkhay'), (282, '08 - Maukmei'), (283, '09 - Minepan'), (284, '10 - Kyaythee'), (285, '11 - Minekaing'), (286, '12 - Mineshu')]), ('51 - Lahsio', [(287, '01 - Lashio'), (288, '02 - Theinni'), (289, '03 - Mineyei'), (290, '04 - Tantyan'), (291, '05 - Minephant'), (292, '06 - Panyang'), (293, '07 - Narphan'), (294, '08 - Panwaing'), (295, '09 - Minemaw'), (296, '10 - Pansan (Pankhan)')]), ('52 - Muse', [(297, '01 - Muse'), (298, '02 - Nantkhan'), (299, '03 - Kutkhaing'), (300, '04 - Monkoe'), (301, '05 - Kyukoak')]), ('53 - Kyaukmei', [(302, '01 - Kyaukmei'), (303, '02 - Naungcho'), (304, '03 - Thibaw'), (305, '04 - Namtu'), (306, '05 - Nantsam(North)'), (307, '06 - Moemaik'), (308, '07 - Mabain'), (309, '08 - Mantoan')]), ('54 - Kunloan', [(310, '01 - Kunloan'), (311, '02 - Hopan')]), ('55 - Laukkaing', [(312, '01 - Laukkaing'), (313, '02 - Chinshwehaw*'), (314, '03 - Koankyan')]), ('56 - Kyaington', [(315, '01 - Kyaington'), (316, '02 - Minekhat'), (317, '03 - Mineyang'), (318, '04 - Minelar'), (319, '05 - Metman')]), ('57 - Minesatt', [(320, '01 - Minesatt'), (321, '02 - Minepyinn'), (322, '03 - Minetoan')]), ('58 - Tachilaik', [(323, '01 - Tachilaik')]), ('59 - Minephyat', [(324, '01 - Minephyat'), (325, '02 - Mineyaung')]), ('60 - Pathein', [(326, '01 - Pathein'), (327, '02 - Kangyidaunt'), (328, '03 - Tharpaung'), (329, '04 - Ngaputaw'), (330, '05 - Kyoanpyaw'), (331, '06 - Yaykyi'), (332, '07 - Ngathaingchaung'), (333, '08 - Kyaungkone'), (334, '09 - Haigyikyun')]), ('61 - Hinthada', [(335, '01 - Hinthada'), (336, '02 - Zalun'), (337, '03 - Laymyethnar'), (338, '04 - Myan Aung'), (339, '05 - Ka Naung'), (340, '06 - Kyankhin'), (341, '07 - Ingapu')]), ('62 - Myaungmya', [(342, '01 - Myaungmya'), (343, '02 - Ainmei'), (344, '03 - Laputta'), (345, '04 - Warkhema'), (346, '05 - Mawlamyaingkyun')]), ('63 - Ma U Bin', [(347, '01 - Ma U Bin'), (348, '02 - Pantanaw'), (349, '03 - Nyaungtone'), (350, '04 - Danubyu')]), ('64 - Phyarpon', [(351, '01 - Phyarpon'), (352, '02 - Bogalay'), (353, '03 - Kyaiklatt'), (354, '04 - Daydayei')]), ('65 - Zayarthiri', [(355, '01 - Zayarthiri')]), ('66 - Dakhenathiri', [(356, '01 - Dakhenathiri')]), ('67 - Oktayathiri', [(357, '01 - Oktayathiri')]), ('68 - Potebathiri', [(358, '01 - Potebathiri')]), ('69 - Zabuthiri', [(359, '01 - Zabuthiri')]), ('70 - Tatkon', [(360, '01 - Tatkon')]), ('71 - Pyinmana', [(361, '01 - Pyinmana')]), ('72 - Lewe', [(362, '01 - Lewe')])]),
),
migrations.AlterField(
model_name='f201',
name='UPRS',
field=birth_registration.fields.StateDivisionField(validators=[birth_registration.validators.validate_2digits], choices=[(1, '01 - Kachin'), (2, '02 - Kayh'), (3, '03 - Kayin'), (4, '04 - Chin'), (5, '05 - Sagaing'), (6, '06 - Tanintharyi'), (7, '07 - Bago'), (8, '08 - Magway'), (9, '09 - Mandalay'), (10, '10 - Mon'), (11, '11 - Rakhine'), (12, '12 - Yangon'), (13, '13 - Shan'), (14, '14 - Ayyarwaddy'), (15, '15 - NayPyiTaw')], blank=True, help_text=' State Division', null=True, verbose_name='Usual place of residence'),
),
migrations.AlterField(
model_name='f201',
name='UPRS1',
field=birth_registration.fields.DistrictField(validators=[birth_registration.validators.validate_2digits], choices=[('01 - Kachin', [(1, '01 - Myitkyina'), (2, '02 - Bamaw'), (3, '03 - Puta O')]), ('02 - Kayh', [(4, '04 - Loikaw'), (5, '05 - Bawlakhei')]), ('03 - Kayin', [(6, '06 - Pha An'), (7, '07 - Myawaddy'), (8, '08 - Kawtkaraik')]), ('04 - Chin', [(9, '09 - Phalan'), (10, '10 - Mintatt')]), ('05 - Sagaing', [(11, '11 - Sagaing'), (12, '12 - Shwebo'), (13, '13 - Monywar'), (14, '14 - Kathar'), (15, '15 - Kalay'), (16, '16 - Tamu'), (17, '17 - Mawlaik'), (18, '18 - Khantee')]), ('06 - Tanintharyi', [(19, '19 - Dawei'), (20, '20 - Myeik'), (21, '21 - Kautthaung')]), ('07 - Bago', [(22, '22 - Bago'), (23, '23 - Pyay'), (24, '24 - Tharyarwaddy'), (25, '25 - Taungoo')]), ('08 - Magway', [(26, '26 - Magway'), (27, '27 - Minbu'), (28, '28 - Thayet'), (29, '29 - Pakokku'), (30, '30 - Gantgaw')]), ('09 - Mandalay', [(31, '31 - Mandalay'), (32, '32 - Pyin Oo Lwin'), (33, '33 - Kyaukse'), (34, '34 - Myingyan'), (35, '35 - Nyaung Oo'), (36, '36 - Yameithinn'), (37, '37 - Meikhtila')]), ('10 - Mon', [(38, '38 - Mawlamyaing'), (39, '39 - Thahton')]), ('11 - Rakhine', [(40, '40 - Sittwe'), (41, '41 - Maungdaw'), (42, '42 - Buthidaung'), (43, '43 - Kyaukphyu'), (44, '44 - Thandwe')]), ('12 - Yangon', [(45, '45 - Ahshaytbine (East)'), (46, '46 - Ahnoutpine (West)'), (47, '47 - Taungbine (South)'), (48, '48 - Myaukpine (North)')]), ('13 - Shan', [(49, '49 - Taungyi'), (50, '50 - Loilin'), (51, '51 - Lahsio'), (52, '52 - Muse'), (53, '53 - Kyaukmei'), (54, '54 - Kunloan'), (55, '55 - Laukkaing'), (56, '56 - Kyaington'), (57, '57 - Minesatt'), (58, '58 - Tachilaik'), (59, '59 - Minephyat')]), ('14 - Ayyarwaddy', [(60, '60 - Pathein'), (61, '61 - Hinthada'), (62, '62 - Myaungmya'), (63, '63 - Ma U Bin'), (64, '64 - Phyarpon')]), ('15 - NayPyiTaw', [(65, '65 - Zayarthiri'), (66, '66 - Dakhenathiri'), (67, '67 - Oktayathiri'), (68, '68 - Potebathiri'), (69, '69 - Zabuthiri'), (70, '70 - Tatkon'), (71, '71 - Pyinmana'), (72, '72 - Lewe')])], blank=True, help_text='District', null=True, verbose_name='Usual place of residence'),
),
]
| 335.97619
| 10,325
| 0.534406
|
0aa274baae2cbfa6ecb08ccff1d7cfa9712c2151
| 6,193
|
py
|
Python
|
crontab_module/crons/get_audience_network.py
|
openmaker-eu/watchtower
|
af4d3e92b4cf0bf93c10e288a8b8ea97079da86d
|
[
"MIT"
] | 2
|
2017-05-16T10:57:29.000Z
|
2017-12-14T11:33:18.000Z
|
crontab_module/crons/get_audience_network.py
|
openmaker-eu/watchtower
|
af4d3e92b4cf0bf93c10e288a8b8ea97079da86d
|
[
"MIT"
] | 9
|
2018-11-29T07:44:15.000Z
|
2021-12-13T19:54:18.000Z
|
crontab_module/crons/get_audience_network.py
|
openmaker-eu/watchtower
|
af4d3e92b4cf0bf93c10e288a8b8ea97079da86d
|
[
"MIT"
] | 1
|
2019-03-17T13:58:18.000Z
|
2019-03-17T13:58:18.000Z
|
# Author: Kemal Berk Kocabagli
import sys
from decouple import config  # read settings such as ROOT_DIR from the environment/.env file
sys.path.insert(0, config("ROOT_DIR"))
from application.utils.basic import *
import time  # explicit import for the timeit decorator below (may also be provided by the star import)
import tweepy # Twitter API helper package
from tweepy import OAuthHandler
import pymongo # for pymongo functions
from datetime import datetime # to print the date & time in the output log whenever this script is run OR for time related checks
from application.Connections import Connection
consumer_key = config("TWITTER_CONSUMER_KEY") # API key
consumer_secret = config("TWITTER_CONSUMER_SECRET") # API secret
access_token = config("TWITTER_ACCESS_TOKEN")
access_secret = config("TWITTER_ACCESS_SECRET")
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_secret)
api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)
# Decorator for measuring execution time of functions
def timeit(method):
def timed(*args, **kw):
start = time.time()
result = method(*args, **kw)
end = time.time()
print("... {} seconds".format(end - start))
return result
return timed
@timeit
def construct_audience_members(topicID, location):
regx = location_regex.getLocationRegex(location)
# upsert many
operations = []
for audience_member in Connection.Instance().audienceDB[str(topicID)].find({'location':regx},{'id':1,'location':1}):
operations.append(
pymongo.UpdateOne(
{'id': audience_member['id']},
{
'$addToSet':{
'topics': topicID,
'locations': audience_member['location'],
},
'$setOnInsert' : {
'last_processed' : None,
'last_cursor' : None,
'finished_last_time' : False,
'followers':[], # initialize followers list empty,
}
},
upsert=True)
)
#print(operations[:10])
try:
Connection.Instance().audience_networks_DB['all_audience_members'].bulk_write(operations, ordered=False)
except Exception as e:
print("Exception in bulk_write." + str(e))
def construct_all_audience_members ():
Connection.Instance().audience_networks_DB['all_audience_members'].create_index("id",unique=True)
locations = ['italy', 'slovakia', 'spain', 'uk', 'tr'] # relevant locations
with Connection.Instance().get_cursor() as cur:
sql = (
"SELECT topic_id, topic_name "
"FROM topics "
)
cur.execute(sql)
topics = cur.fetchall() # list of all topics
print("There are {} many topic-location pairs.".format(len(topics) * len(locations)))
index = 0
for topicID,topicName in topics:
for location in locations:
print("{}) {}-{}".format(index + 1 , topicName, location) , end='', flush=True)
index += 1
construct_audience_members(topicID, location)
def is_processable(member, threshold_in_day):
    # Skip members that were already processed within the last `threshold_in_day` days.
    # TODO: decide whether followers should be re-fetched regardless of this threshold.
if not member:
return False
if member["last_processed"] and (datetime.today()- member['last_processed']).days < threshold_in_day:
return False
return True
@timeit
def get_network_twitter_profiles(MIN_FOLLOWERS_COUNT, MAX_FOLLOWERS_COUNT):
network_member_ids = Connection.Instance().audience_networks_DB['all_audience_members'].distinct('id')
network_members = list(Connection.Instance().audienceDB['all_audience'].find({'id': {'$in': network_member_ids}, 'followers_count':{'$gt':MIN_FOLLOWERS_COUNT,'$lt':MAX_FOLLOWERS_COUNT}}, {'_id':0, 'id':1, 'followers_count':1}))
return network_members
def get_start_cursor(member):
if member["last_cursor"]:
cursor = member['last_cursor']
else:
cursor = -1
tweepy_cursor = tweepy.Cursor(api.followers_ids, id=member["id"], cursor=cursor)
# if this member is totally processed last time, move one cursor if possible
if member["finished_last_time"] and tweepy_cursor.iterator.next_cursor != 0:
cursor = tweepy_cursor.iterator.next_cursor
tweepy_cursor = tweepy.Cursor(api.followers_ids, id=member["id"], cursor=cursor)
return (cursor, tweepy_cursor)
def process_member(member):
print("Processing user : {}".format(member["id"]))
last_cursor, cursor = get_start_cursor(member)
requests = []
try:
for page in cursor.pages():
if (cursor.iterator.next_cursor != 0):
last_cursor = cursor.iterator.next_cursor
requests.append(pymongo.UpdateOne(
{"id" : member["id"]},
{
"$addToSet" : {"followers" : {"$each" : page}},
"$set" : {
"last_cursor" : last_cursor,
"last_processed" : datetime.today(),
"finished_last_time" : (cursor.iterator.next_cursor == 0)
}
}
))
except (tweepy.TweepError , tweepy.error.TweepError) as twperr:
print(twperr) # in case of errors due to protected accounts
try:
if (len(requests)!=0):
Connection.Instance().audience_networks_DB['all_audience_members'].bulk_write(requests,ordered=False)
except Exception as e:
print("Exception in bulk_write:" + str(e))
@timeit
def get_followers_of_network_members(MIN_FOLLOWERS_COUNT, MAX_FOLLOWERS_COUNT):
print("Getting twitter profiles of the members in the network")
network_twitter_profiles = get_network_twitter_profiles(MIN_FOLLOWERS_COUNT, MAX_FOLLOWERS_COUNT)
print("There are {} many members that satisfy given follower count criteria".format(len(network_twitter_profiles)))
for twitter_profile in network_twitter_profiles:
member = Connection.Instance().audience_networks_DB['all_audience_members'].find_one({'id':twitter_profile["id"]})
if is_processable(member,1):
process_member(member)
def main():
construct_all_audience_members()
if __name__ == "__main__":
main()
| 37.762195
| 231
| 0.653641
|
e1dbdd4f3d3791312c3ab8161b0e4e99e9139361
| 4,104
|
py
|
Python
|
dnacentersdk/models/validators/v1_3_0/jsd_948ea8194348bc0b.py
|
daxm/dnacentersdk
|
5baa0cb151fb9e72cf7af1ae29e7541d89c3f06b
|
[
"MIT"
] | null | null | null |
dnacentersdk/models/validators/v1_3_0/jsd_948ea8194348bc0b.py
|
daxm/dnacentersdk
|
5baa0cb151fb9e72cf7af1ae29e7541d89c3f06b
|
[
"MIT"
] | null | null | null |
dnacentersdk/models/validators/v1_3_0/jsd_948ea8194348bc0b.py
|
daxm/dnacentersdk
|
5baa0cb151fb9e72cf7af1ae29e7541d89c3f06b
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""DNA Center Create CLI credentials data model.
Copyright (c) 2019 Cisco and/or its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import fastjsonschema
import json
from dnacentersdk.exceptions import MalformedRequest
from builtins import *
class JSONSchemaValidator948EA8194348Bc0B(object):
"""Create CLI credentials request schema definition."""
def __init__(self):
super(JSONSchemaValidator948EA8194348Bc0B, self).__init__()
self._validator = fastjsonschema.compile(json.loads(
'''{
"items": {
"properties": {
"comments": {
"description":
"",
"type": [
"string",
"null"
]
},
"credentialType": {
"description":
"",
"enum": [
"GLOBAL",
"APP",
null
],
"type": [
"string",
"null"
]
},
"description":
{
"description":
"",
"type": [
"string",
"null"
]
},
"enablePassword": {
"description":
"",
"type": [
"string",
"null"
]
},
"id": {
"description":
"",
"type": [
"string",
"null"
]
},
"instanceTenantId": {
"description":
"",
"type": [
"string",
"null"
]
},
"instanceUuid": {
"description":
"",
"type": [
"string",
"null"
]
},
"password": {
"description":
"",
"type": [
"string",
"null"
]
},
"username": {
"description":
"",
"type": [
"string",
"null"
]
}
},
"type": [
"object",
"null"
]
},
"type": "array"
}'''.replace("\n" + ' ' * 16, '')
))
def validate(self, request):
try:
self._validator(request)
except fastjsonschema.exceptions.JsonSchemaException as e:
raise MalformedRequest(
'{} is invalid. Reason: {}'.format(request, e.message)
)
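# --- Editor's note: illustrative sketch, not part of the generated module ---
# Minimal usage with a hypothetical payload; validate() raises MalformedRequest
# if the payload does not satisfy the schema above:
#   validator = JSONSchemaValidator948EA8194348Bc0B()
#   validator.validate([{"credentialType": "GLOBAL", "username": "admin", "password": "secret"}])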
| 28.699301
| 78
| 0.442982
|
8c4a5c502afac2dabcdc6624b86db300b6d192ab
| 17,413
|
py
|
Python
|
cbi_toolbox/parallel/mpi.py
|
idiap/cbi_toolbox
|
01e1f4ebc0a4156cfc5bbf65da9d932906b304d8
|
[
"BSD-3-Clause"
] | 3
|
2021-04-21T12:19:54.000Z
|
2021-11-25T10:31:17.000Z
|
cbi_toolbox/parallel/mpi.py
|
idiap/cbi_toolbox
|
01e1f4ebc0a4156cfc5bbf65da9d932906b304d8
|
[
"BSD-3-Clause"
] | null | null | null |
cbi_toolbox/parallel/mpi.py
|
idiap/cbi_toolbox
|
01e1f4ebc0a4156cfc5bbf65da9d932906b304d8
|
[
"BSD-3-Clause"
] | null | null | null |
"""
The mpi module allows operations to be distributed across MPI communicators.
It is an optional feature of cbi_toolbox that requires a working implementation
of MPI.
"""
# Copyright (c) 2020 Idiap Research Institute, http://www.idiap.ch/
# Written by François Marelli <francois.marelli@idiap.ch>
#
# This file is part of CBI Toolbox.
#
# CBI Toolbox is free software: you can redistribute it and/or modify
# it under the terms of the 3-Clause BSD License.
#
# CBI Toolbox is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# 3-Clause BSD License for more details.
#
# You should have received a copy of the 3-Clause BSD License along
# with CBI Toolbox. If not, see https://opensource.org/licenses/BSD-3-Clause.
#
# SPDX-License-Identifier: BSD-3-Clause
import mpi4py.MPI as MPI
import numpy as np
import numpy.lib.format as npformat
from cbi_toolbox import utils
from . import distribute_bin, distribute_bin_all
_MPI_dtypes = {'float64': MPI.DOUBLE}
def get_size(mpi_comm=MPI.COMM_WORLD):
"""
Get the process count in the communicator.
Parameters
----------
mpi_comm : mpi4py.MPI.Comm, optional
The MPI communicator, by default MPI.COMM_WORLD.
Returns
-------
int
The size of the MPI communicator.
"""
return mpi_comm.Get_size()
def is_root_process(mpi_comm=MPI.COMM_WORLD):
"""
Check if the current process is root.
Parameters
----------
mpi_comm : mpi4py.MPI.Comm, optional
The MPI communicator, by default MPI.COMM_WORLD.
Returns
-------
bool
True if the current process is the root of the communicator.
"""
return mpi_comm.Get_rank() == 0
def get_rank(mpi_comm=MPI.COMM_WORLD):
"""
Get this process number in the communicator.
Parameters
----------
mpi_comm : mpi4py.MPI.Comm, optional
The communicator, by default MPI.COMM_WORLD.
Returns
-------
int
The rank of the process.
"""
return mpi_comm.Get_rank()
def wait_all(mpi_comm=MPI.COMM_WORLD):
"""
    Wait for all processes to reach this line (MPI barrier).
    This is a thin convenience wrapper around mpi_comm.Barrier().
Parameters
----------
mpi_comm : mpi4py.MPI.Comm, optional
The communicator, by default MPI.COMM_WORLD.
"""
mpi_comm.Barrier()
def distribute_mpi(dimension, mpi_comm=MPI.COMM_WORLD):
"""
Computes the start index and bin size to evenly split array-like data into
multiple bins on an MPI communicator.
Parameters
----------
dimension : int
The size of the array to distribute.
mpi_comm : mpi4py.MPI.Comm, optional
The communicator, by default MPI.COMM_WORLD.
Returns
-------
(int, int)
The start index of this bin, and its size.
The distributed data should be array[start:start + bin_size].
"""
return distribute_bin(dimension, mpi_comm.Get_rank(), mpi_comm.Get_size())
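# --- Editor's note: illustrative sketch, not part of the original module ---
# Typical use inside an MPI job: each rank works on its own slice of the data.
# `data` is a hypothetical array-like object; the bin sizes come from distribute_bin.
#   start, size = distribute_mpi(len(data), mpi_comm)
#   local_chunk = data[start:start + size]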
def distribute_mpi_all(dimension, mpi_comm=MPI.COMM_WORLD):
"""
Computes the start indexes and bin sizes of all splits to distribute
computations across an MPI communicator.
Parameters
----------
dimension : int
the size of the array to be distributed
mpi_comm : mpi4py.MPI.Comm, optional
the communicator, by default MPI.COMM_WORLD
Returns
-------
([int], [int])
The list of start indexes and the list of bin sizes to distribute data.
"""
return distribute_bin_all(dimension, mpi_comm.Get_size())
def to_mpi_datatype(np_datatype):
"""
Returns the MPI datatype corresponding to the numpy dtype provided.
Parameters
----------
np_datatype : numpy.dtype or str
The numpy datatype, or name.
Returns
-------
mpi4py.MPI.Datatype
The corresponding MPI datatype.
Raises
------
NotImplementedError
If the numpy datatype is not listed in the conversion table.
"""
if isinstance(np_datatype, np.dtype):
dtype = np_datatype.name
else:
dtype = np_datatype
try:
return _MPI_dtypes[dtype]
except KeyError:
raise NotImplementedError(
'Type not in conversion table: {}'.format(dtype))
def create_slice_view(axis, n_slices, array=None, shape=None, dtype=None):
"""
Create a MPI vector datatype to access given slices of a non distributed
array. If the array is not provided, its shape and dtype must be
specified.
Parameters
----------
axis : int
The axis on which to slice.
n_slices : int
How many contiguous slices to take.
array : numpy.ndarray, optional
The array to slice, by default None (then shape and dtype must be given).
    shape : tuple(int), optional
        The shape of the array to slice, by default None.
dtype : numpy.dtype or str, optional
The datatype of the array, by default None.
Returns
-------
mpi4py.MPI.Datatype
The strided datatype allowing to access slices in the array.
Raises
------
ValueError
If array, shape and dtype are all None.
"""
if array is not None:
shape = array.shape
dtype = array.dtype
elif shape is None or dtype is None:
raise ValueError("array, or shape and dtype must be not None")
axis = utils.positive_index(axis, len(shape))
base_type = to_mpi_datatype(dtype)
stride = np.prod(shape[axis:], dtype=int)
block = np.prod(shape[axis + 1:], dtype=int) * n_slices
count = np.prod(shape[:axis], dtype=int)
extent = block * base_type.extent
return base_type.Create_vector(count, block, stride).Create_resized(0, extent)
def compute_vector_extent(axis, array=None, shape=None, dtype=None):
"""
Compute the extent in bytes of a sliced view of a given array.
Parameters
----------
axis : int
Axis on which the slices are taken.
array : numpy.ndarray, optional
The array to slice, by default None (then shape and dtype must be given).
    shape : tuple(int), optional
        The shape of the array to slice, by default None.
dtype : numpy.dtype or str, optional
The datatype of the array, by default None.
Returns
-------
int
The extent of the slices underlying data.
Raises
------
ValueError
If array, shape and dtype are all None.
"""
if array is not None:
shape = array.shape
dtype = array.dtype
elif shape is None or dtype is None:
raise ValueError("array, or shape and dtype must be not None")
ndims = len(shape)
axis = utils.positive_index(axis, ndims)
base_type = to_mpi_datatype(dtype)
return np.prod(shape[axis + 1:], dtype=int) * base_type.extent
def create_vector_type(src_axis, tgt_axis, array=None, shape=None, dtype=None,
block_size=1):
"""
Create a MPI vector datatype to communicate a distributed array and split it
along a different axis.
Parameters
----------
src_axis : int
The original axis on which the array is distributed.
tgt_axis : int
The axis on which the array is to be distributed.
array : numpy.ndarray, optional
The array to slice, by default None (then shape and dtype must be given).
    shape : tuple(int), optional
        The shape of the array to slice, by default None.
dtype : numpy.dtype or str, optional
The datatype of the array, by default None.
block_size : int, optional
The size of the distributed bin, by default 1.
Returns
-------
mpi4py.MPI.Datatype
The vector datatype used for transmission/reception of the data.
Raises
------
ValueError
If array, shape and dtype are all None.
ValueError
If the source and destination axes are the same.
NotImplementedError
If the array has more than 4 axes (should work, but tests needed).
ValueError
If the block size is bigger than the source axis.
"""
if array is not None:
shape = array.shape
dtype = array.dtype
elif shape is None or dtype is None:
raise ValueError("array, or shape and dtype must be not None")
ndims = len(shape)
src_axis = utils.positive_index(src_axis, ndims)
tgt_axis = utils.positive_index(tgt_axis, ndims)
if src_axis == tgt_axis:
raise ValueError(
"Source and target are identical, no communication should be "
"performed")
if len(shape) > 4:
raise NotImplementedError(
"This has never been tested for arrays with more than 4 axes.\n"
"It will probably work, but please run a test before"
"(and if works, tell me!)")
if block_size > shape[src_axis]:
raise ValueError(
"Block size cannot be bigger than the dimension of the source axis")
base_type = to_mpi_datatype(dtype)
min_axis = min(src_axis, tgt_axis)
max_axis = max(src_axis, tgt_axis)
i_count = np.prod(shape[min_axis + 1:max_axis], dtype=int)
i_block = np.prod(shape[max_axis + 1:], dtype=int)
i_stride = np.prod(shape[max_axis:], dtype=int)
i_extent = np.prod(shape[src_axis + 1:], dtype=int) * base_type.extent
# only happens if the array is empty, avoid division by zero warnings
if i_extent == 0:
i_extent = 1
inner_stride = base_type.Create_vector(
i_count, i_block, i_stride).Create_resized(0, i_extent)
o_count = np.prod(shape[:min_axis], dtype=int)
o_block = block_size
o_stride = (np.prod(shape[min_axis:], dtype=int)
* base_type.extent) // i_extent
o_extent = np.prod(shape[tgt_axis + 1:], dtype=int) * base_type.extent
outer_stride = inner_stride.Create_vector(
o_count, o_block, o_stride).Create_resized(0, o_extent)
return outer_stride
def gather_full_shape(array, axis, mpi_comm=MPI.COMM_WORLD):
"""
Gather the full shape of an array distributed across an MPI communicator
along a given axis.
Parameters
----------
array : numpy.ndarray
The distributed array.
axis : int
The axis on which the array is distributed.
mpi_comm : mpi4py.MPI.Comm, optional
The communicator, by default MPI.COMM_WORLD.
Raises
------
NotImplementedError
This is not implemented yet.
"""
raise NotImplementedError
def load(file_name, axis, mpi_comm=MPI.COMM_WORLD):
"""
Load a numpy array across parallel jobs in the MPI communicator.
The array is sliced along the chosen dimension, with minimal bandwidth.
Parameters
----------
file_name : str
The numpy array file to load.
axis : int
The axis on which to distribute the array.
mpi_comm : mpi4py.MPI.Comm, optional
The MPI communicator used to distribute, by default MPI.COMM_WORLD.
Returns
-------
(numpy.ndarray, tuple(int))
The distributed array, and the size of the full array.
Raises
------
ValueError
If the numpy version used to save the file is not supported.
NotImplementedError
If the array is saved in Fortran order.
"""
header = None
if is_root_process(mpi_comm):
with open(file_name, 'rb') as fp:
version, _ = npformat.read_magic(fp)
if version == 1:
header = npformat.read_array_header_1_0(fp)
elif version == 2:
header = npformat.read_array_header_2_0(fp)
else:
raise ValueError(
"Invalid numpy format version: {}".format(version))
header = *header, fp.tell()
header = mpi_comm.bcast(header, root=0)
full_shape, fortran, dtype, header_offset = header
if fortran:
raise NotImplementedError(
"Fortran-ordered (column-major) arrays are not supported")
ndims = len(full_shape)
axis = utils.positive_index(axis, ndims)
i_start, bin_size = distribute_mpi(full_shape[axis], mpi_comm)
l_shape = list(full_shape)
l_shape[axis] = bin_size
l_array = np.empty(l_shape, dtype=dtype)
slice_type = create_slice_view(
axis, bin_size, shape=full_shape, dtype=dtype)
slice_type.Commit()
single_slice_extent = slice_type.extent
if bin_size != 0:
single_slice_extent /= bin_size
displacement = header_offset + i_start * single_slice_extent
base_type = to_mpi_datatype(l_array.dtype)
fh = MPI.File.Open(mpi_comm, file_name, MPI.MODE_RDONLY)
fh.Set_view(displacement, filetype=slice_type)
fh.Read_all([l_array, l_array.size, base_type])
fh.Close()
slice_type.Free()
return l_array, full_shape
def save(file_name, array, axis, full_shape=None, mpi_comm=MPI.COMM_WORLD):
"""
Save a numpy array from parallel jobs in the MPI communicator.
The array is gathered along the chosen dimension.
Parameters
----------
file_name : str
        The numpy array file to write.
array : numpy.ndarray
The distributed array.
axis : int
The axis on which to distribute the array.
full_shape : tuple(int), optional
The size of the full array, by default None.
mpi_comm : mpi4py.MPI.Comm, optional
The MPI communicator used to distribute, by default MPI.COMM_WORLD.
"""
if full_shape is None:
full_shape = gather_full_shape(array, axis, mpi_comm)
axis = utils.positive_index(axis, len(full_shape))
header_offset = None
if is_root_process(mpi_comm):
header_dict = {'shape': full_shape,
'fortran_order': False,
'descr': npformat.dtype_to_descr(array.dtype)}
with open(file_name, 'wb') as fp:
try:
npformat.write_array_header_1_0(fp, header_dict)
except ValueError:
npformat.write_array_header_2_0(fp, header_dict)
header_offset = fp.tell()
header_offset = mpi_comm.bcast(header_offset, root=0)
i_start, bin_size = distribute_mpi(full_shape[axis], mpi_comm)
slice_type = create_slice_view(
axis, bin_size, shape=full_shape, dtype=array.dtype)
slice_type.Commit()
single_slice_extent = slice_type.extent
if bin_size != 0:
single_slice_extent /= bin_size
displacement = header_offset + i_start * single_slice_extent
base_type = to_mpi_datatype(array.dtype)
fh = MPI.File.Open(mpi_comm, file_name, MPI.MODE_WRONLY | MPI.MODE_APPEND)
fh.Set_view(displacement, filetype=slice_type)
fh.Write_all([array, array.size, base_type])
fh.Close()
slice_type.Free()
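# --- Editor's note: illustrative sketch, not part of the original module ---
# Round trip with load() and save(), assuming a hypothetical 'volume.npy'
# distributed along axis 0 across the communicator:
#   local, full_shape = load('volume.npy', axis=0)
#   # ... process the local slab ...
#   save('result.npy', local, axis=0, full_shape=full_shape)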
def redistribute(array, src_axis, tgt_axis, full_shape=None,
mpi_comm=MPI.COMM_WORLD):
"""
Redistribute an array along a different dimension.
Parameters
----------
array : numpy.ndarray
The distributed array.
src_axis : int
The original axis on which the array is distributed.
tgt_axis : int
The axis on which the array is to be distributed.
full_shape : tuple(int), optional
The full shape of the array, by default None.
mpi_comm : mpi4py.MPI.Comm, optional
The MPI communicator used to distribute, by default MPI.COMM_WORLD.
Returns
-------
np.ndarray
The array distributed along the new axis.
"""
if full_shape is None:
full_shape = gather_full_shape(array, src_axis, mpi_comm)
ndims = len(full_shape)
src_axis = utils.positive_index(src_axis, ndims)
tgt_axis = utils.positive_index(tgt_axis, ndims)
if src_axis == tgt_axis:
return array
rank = mpi_comm.Get_rank()
size = mpi_comm.Get_size()
src_starts, src_bins = distribute_mpi_all(full_shape[src_axis], mpi_comm)
tgt_starts, tgt_bins = distribute_mpi_all(full_shape[tgt_axis], mpi_comm)
src_has_data = np.atleast_1d(src_bins)
src_has_data[src_has_data > 0] = 1
tgt_has_data = np.atleast_1d(tgt_bins)
tgt_has_data[tgt_has_data > 0] = 1
n_shape = list(full_shape)
n_shape[tgt_axis] = tgt_bins[rank]
n_array = np.empty(n_shape, dtype=array.dtype)
send_datatypes = []
recv_datatypes = []
for ji in range(size):
send_datatypes.append(create_vector_type(
src_axis, tgt_axis, array, block_size=src_bins[rank]))
recv_datatypes.append(create_vector_type(
src_axis, tgt_axis, n_array, block_size=src_bins[ji]))
send_extent = compute_vector_extent(tgt_axis, array)
recv_extent = compute_vector_extent(src_axis, n_array)
send_counts = np.multiply(tgt_bins, src_has_data[rank])
send_displs = np.multiply(tgt_starts, send_extent)
sendbuf = [array, send_counts, send_displs, send_datatypes]
recv_counts = np.multiply(src_has_data, tgt_bins[rank])
recv_displs = np.multiply(src_starts, recv_extent)
recvbuf = [n_array, recv_counts, recv_displs, recv_datatypes]
for ji in range(size):
send_datatypes[ji].Commit()
recv_datatypes[ji].Commit()
mpi_comm.Alltoallw(sendbuf, recvbuf)
for ji in range(size):
send_datatypes[ji].Free()
recv_datatypes[ji].Free()
return n_array
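# --- Editor's note: illustrative sketch, not part of the original module ---
# Switching the distribution axis of an already distributed array, e.g. from
# slices along axis 0 to slices along axis 1 (full_shape as returned by load):
#   local = redistribute(local, src_axis=0, tgt_axis=1, full_shape=full_shape)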
| 28.87728
| 82
| 0.65721
|
e9dbafcdbb2ba6426befcd0979083233f5688117
| 111
|
py
|
Python
|
api/app/routes/index.py
|
BD103/Reflux
|
a00b8ec51249f2d59aa5164d46237899c69ff2a7
|
[
"MIT"
] | 30
|
2021-01-27T22:55:56.000Z
|
2021-12-26T10:54:48.000Z
|
api/app/routes/index.py
|
BD103/Reflux
|
a00b8ec51249f2d59aa5164d46237899c69ff2a7
|
[
"MIT"
] | 12
|
2021-01-26T14:48:59.000Z
|
2021-12-24T17:37:48.000Z
|
api/app/routes/index.py
|
BD103/Reflux
|
a00b8ec51249f2d59aa5164d46237899c69ff2a7
|
[
"MIT"
] | 7
|
2021-01-29T13:39:53.000Z
|
2022-01-15T10:17:41.000Z
|
import flask
from app import app
@app.route("/")
def index_route():
return flask.jsonify({"status": 200})
| 15.857143
| 41
| 0.684685
|
7cf600ec0f47e7f373da654d3e4c88668a2da1c8
| 8,008
|
py
|
Python
|
test/widgets/test_cmus.py
|
1kyu/qtile
|
da93518b2ab924e803552decdb364d64de46088d
|
[
"MIT"
] | 1
|
2021-11-15T04:03:59.000Z
|
2021-11-15T04:03:59.000Z
|
test/widgets/test_cmus.py
|
1kyu/qtile
|
da93518b2ab924e803552decdb364d64de46088d
|
[
"MIT"
] | null | null | null |
test/widgets/test_cmus.py
|
1kyu/qtile
|
da93518b2ab924e803552decdb364d64de46088d
|
[
"MIT"
] | 1
|
2020-04-27T22:20:11.000Z
|
2020-04-27T22:20:11.000Z
|
# Copyright (c) 2021 elParaguayo
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Widget specific tests
import subprocess
import pytest
import libqtile.config
from libqtile.bar import Bar
from libqtile.widget import cmus
class MockCmusRemoteProcess:
CalledProcessError = None
EXTRA = [
"set aaa_mode all",
"set continue true",
"set play_library true",
"set play_sorted false",
"set replaygain disabled",
"set replaygain_limit true",
"set replaygain_preamp 0.000000",
"set repeat false",
"set repeat_current false",
"set shuffle false",
"set softvol false",
"set vol_left 100",
"set vol_right 100"
]
info = {}
is_error = False
index = 0
@classmethod
def reset(cls):
cls.info = [
[
"status playing",
"file /playing/file/rickroll.mp3",
"duration 222",
"position 14",
"tag artist Rick Astley",
"tag album Whenever You Need Somebody",
"tag title Never Gonna Give You Up"
],
[
"status playing",
"file http://playing/file/sweetcaroline.mp3",
"duration 222",
"position 14",
"tag artist Neil Diamond",
"tag album Greatest Hits",
"tag title Sweet Caroline"
],
[
"status stopped",
"file http://streaming.source/tomjones.m3u",
"duration -1",
"position -9",
"tag title It's Not Unusual",
"stream tomjones"
],
[
"status playing",
"file /playing/file/always.mp3",
"duration 222",
"position 14",
"tag artist Above & Beyond",
"tag album Anjunabeats 14",
"tag title Always - Tinlicker Extended Mix"
],
]
cls.index = 0
cls.is_error = False
@classmethod
def call_process(cls, cmd):
if cls.is_error:
raise subprocess.CalledProcessError(
-1,
cmd=cmd,
output="Couldn't connect to cmus."
)
if cmd[1:] == ["-C", "status"]:
track = cls.info[cls.index]
track.extend(cls.EXTRA)
output = "\n".join(track)
return output
elif cmd[1] == "-p":
cls.info[cls.index][0] = "status playing"
elif cmd[1] == "-u":
if cls.info[cls.index][0] == "status playing":
cls.info[cls.index][0] = "status paused"
elif cls.info[cls.index][0] == "status paused":
cls.info[cls.index][0] = "status playing"
elif cmd[1] == "-n":
cls.index = (cls.index + 1) % len(cls.info)
elif cmd[1] == "-r":
cls.index = (cls.index - 1) % len(cls.info)
@classmethod
def Popen(cls, cmd): # noqa: N802
cls.call_process(cmd)
def no_op(*args, **kwargs):
pass
@pytest.fixture
def patched_cmus(monkeypatch):
MockCmusRemoteProcess.reset()
monkeypatch.setattr("libqtile.widget.cmus.subprocess", MockCmusRemoteProcess)
monkeypatch.setattr("libqtile.widget.cmus.subprocess.CalledProcessError", subprocess.CalledProcessError)
monkeypatch.setattr("libqtile.widget.cmus.base.ThreadPoolText.call_process", MockCmusRemoteProcess.call_process)
return cmus
def test_cmus(fake_qtile, patched_cmus, fake_window):
widget = patched_cmus.Cmus()
fakebar = Bar([widget], 24)
fakebar.window = fake_window
fakebar.width = 10
fakebar.height = 10
fakebar.draw = no_op
widget._configure(fake_qtile, fakebar)
text = widget.poll()
assert text == "♫ Rick Astley - Never Gonna Give You Up"
assert widget.layout.colour == widget.play_color
widget.play()
text = widget.poll()
assert text == "♫ Rick Astley - Never Gonna Give You Up"
assert widget.layout.colour == widget.noplay_color
def test_cmus_play_stopped(fake_qtile, patched_cmus, fake_window):
widget = patched_cmus.Cmus()
# Set track to a stopped item
MockCmusRemoteProcess.index = 2
fakebar = Bar([widget], 24)
fakebar.window = fake_window
fakebar.width = 10
fakebar.height = 10
fakebar.draw = no_op
widget._configure(fake_qtile, fakebar)
text = widget.poll()
# It's stopped so colour should reflect this
assert text == "♫ tomjones"
assert widget.layout.colour == widget.noplay_color
widget.play()
text = widget.poll()
assert text == "♫ tomjones"
assert widget.layout.colour == widget.play_color
def test_cmus_buttons(minimal_conf_noscreen, manager_nospawn, patched_cmus):
widget = patched_cmus.Cmus(update_interval=30)
config = minimal_conf_noscreen
config.screens = [
libqtile.config.Screen(
top=libqtile.bar.Bar([widget], 10)
)
]
manager_nospawn.start(config)
topbar = manager_nospawn.c.bar["top"]
cmuswidget = manager_nospawn.c.widget["cmus"]
assert cmuswidget.info()["text"] == "♫ Rick Astley - Never Gonna Give You Up"
# Play next track
# Non-local file source so widget just displays title
topbar.fake_button_press(0, "top", 0, 0, button=4)
cmuswidget.eval("self.update(self.poll())")
assert cmuswidget.info()["text"] == "♫ Sweet Caroline"
# Play next track
# Stream source so widget just displays stream info
topbar.fake_button_press(0, "top", 0, 0, button=4)
cmuswidget.eval("self.update(self.poll())")
assert cmuswidget.info()["text"] == "♫ tomjones"
# Play previous track
# Non-local file source so widget just displays title
topbar.fake_button_press(0, "top", 0, 0, button=5)
cmuswidget.eval("self.update(self.poll())")
assert cmuswidget.info()["text"] == "♫ Sweet Caroline"
def test_cmus_error_handling(fake_qtile, patched_cmus, fake_window):
widget = patched_cmus.Cmus()
MockCmusRemoteProcess.is_error = True
fakebar = Bar([widget], 24)
fakebar.window = fake_window
fakebar.width = 10
fakebar.height = 10
fakebar.draw = no_op
widget._configure(fake_qtile, fakebar)
text = widget.poll()
# Widget does nothing with error message so text is blank
# TODO: update widget to show error?
assert text == ""
def test_escape_text(fake_qtile, patched_cmus, fake_window):
widget = patched_cmus.Cmus()
    # Set track to an item with an ampersand in the artist name
MockCmusRemoteProcess.index = 3
fakebar = Bar([widget], 24)
fakebar.window = fake_window
fakebar.width = 10
fakebar.height = 10
fakebar.draw = no_op
widget._configure(fake_qtile, fakebar)
text = widget.poll()
    # The ampersand in the artist name should appear correctly in the widget text
assert text == "♫ Above & Beyond - Always - Tinlicker Extended Mix"
| 32.032
| 116
| 0.622378
|
6a15c3ce0d6e3fcadf9ce916fc80384f6bdfc4fc
| 478
|
py
|
Python
|
accounts/views.py
|
lobo-death/uvfriendly-web
|
af69b2ceafcb3bad5e2f28840846bf97f1b85748
|
[
"CC0-1.0"
] | null | null | null |
accounts/views.py
|
lobo-death/uvfriendly-web
|
af69b2ceafcb3bad5e2f28840846bf97f1b85748
|
[
"CC0-1.0"
] | null | null | null |
accounts/views.py
|
lobo-death/uvfriendly-web
|
af69b2ceafcb3bad5e2f28840846bf97f1b85748
|
[
"CC0-1.0"
] | null | null | null |
from django.contrib.auth.forms import UserCreationForm
from django.urls import reverse_lazy
from django.views import generic
from django.shortcuts import render
template_name = 'accounts'
def forgot_password(request):
return render(request, 'forgot-password.html')
def login(request):
return render(request, 'login.html')
class SignUp(generic.CreateView):
form_class = UserCreationForm
success_url = reverse_lazy('login')
template_name = 'register.html'
| 26.555556
| 54
| 0.778243
|
d3ebe854b305638644258300223827afa4e6b6a1
| 11,316
|
py
|
Python
|
pch2csd/app.py
|
holotape/pch2csd
|
9fab9dcc8d9fe9c4ff272461685d4dfc586d38a9
|
[
"MIT"
] | null | null | null |
pch2csd/app.py
|
holotape/pch2csd
|
9fab9dcc8d9fe9c4ff272461685d4dfc586d38a9
|
[
"MIT"
] | null | null | null |
pch2csd/app.py
|
holotape/pch2csd
|
9fab9dcc8d9fe9c4ff272461685d4dfc586d38a9
|
[
"MIT"
] | null | null | null |
import argparse
import os
import sys
from io import StringIO
from tabulate import tabulate
from pch2csd import __version__, __homepage__
from pch2csd.csdgen import ZakSpace, Csd, UdoTemplate, UdoTemplateValidation, Udo
from pch2csd.parse import parse_pch2
from pch2csd.patch import Patch, Location
from pch2csd.resources import get_template_module_path, ProjectData
def get_test_resource(path: str) -> str:
return os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', 'tests', 'resources', path))
def _all_modules_implemented(patch: Patch):
not_implemented = [x.type_name for x in patch.modules
if not os.path.isfile(get_template_module_path(x.type))]
if len(not_implemented) > 0:
print('The patch file contains some modules that has not been implemented yet:')
print(', '.join(not_implemented))
print('Please, consider contributing these modules, following our tutorial:')
print('https://github.com/gleb812/pch2csd/wiki/How-to-add-new-modules')
return False
return True
def validate_udo(type_id: int, io=sys.stdout, print_action=True):
if print_action:
print("checking module type '{id}' ({id}.txt)".format(id=type_id),
file=io)
pch2_files = [get_test_resource(s) for s in ['test_all_modules_1.pch2',
'test_all_modules_2.pch2']]
data, mod, patch = ProjectData(), None, None
for p in map(lambda x: parse_pch2(data, x), pch2_files):
for m in p.modules:
if m.type == type_id:
mod, patch = m, p
break
if mod is not None:
if print_action:
print('module name: {}'.format(mod.type_name), file=io)
udo = UdoTemplate(mod)
v = UdoTemplateValidation(data, udo)
v.print_errors(io)
return v.is_valid(with_todos=True)
else:
print("error: unknown module type '{}'".format(type_id), file=io)
return False
def print_module(fn_pch2: str, mod_id: int, loc: Location):
if not fn_pch2.lower().endswith('.pch2'):
print("error: patch file should have extension '.pch2'")
exit(-1)
data = ProjectData()
path = os.path.abspath(os.path.expanduser(fn_pch2))
p = parse_pch2(data, path)
m = p.find_module(mod_id, loc)
if m is None:
print('error: cannot find module with id {} in the {} location'.format(mod_id, loc.short_str()))
exit(-1)
udo = Udo(p, m)
params_midi = p.find_mod_params(loc, mod_id)
params_mapped = udo.get_params()
assert params_midi.num_params == len(params_mapped)
tbl = [['Type', 'Raw', 'Mapped']]
for raw, mapped in zip(params_midi.values, params_mapped):
tbl.append(['Parameter', str(raw), str(mapped)])
for mode in m.modes:
tbl.append(['Mode', str(mode), ''])
print('Patch: {}'.format(fn_pch2))
print('Details of the module:\n{}'.format(m))
print()
print(tabulate(tbl, headers='firstrow', tablefmt='simple'))
def print_pch2(fn: str):
if not fn.lower().endswith('.pch2'):
print("error: patch file should have extension '.pch2'")
exit(-1)
data = ProjectData()
path = os.path.abspath(os.path.expanduser(fn))
patch = parse_pch2(data, path)
mod_table = [['Name', 'ID', 'Type', 'Parameters', 'Modes', 'Area']]
for m in patch.modules:
p = patch.find_mod_params(m.location, m.id)
mod_table.append([m.type_name,
m.id,
m.type,
str(p.values),
str(m.modes),
m.location.short_str()])
cab_table = [['From', '', 'To', 'Color', 'Type', 'Area']]
for c in patch.cables:
mf_name = patch.find_module(c.module_from, c.loc).type_name
mt_name = patch.find_module(c.module_to, c.loc).type_name
pin1, pin2 = c.type.short_str().split('-')
cab_table.append([
'{}(id={}, {}={})'.format(mf_name, c.module_from, pin1, c.jack_from),
'->',
'{}(id={}, {}={})'.format(mt_name, c.module_to, pin2, c.jack_to),
c.color.short_str(),
c.type.short_str(),
c.loc.short_str()])
print('Patch file: {}\n'.format(os.path.basename(path)))
print('Modules')
print(tabulate(mod_table, headers='firstrow', tablefmt='simple'))
print('\nCables')
print(tabulate(cab_table, headers='firstrow', tablefmt='simple'))
def convert_pch2(fn: str):
if not fn.lower().endswith('.pch2'):
print("error: the patch file should have extension '.pch2'")
exit(-1)
data = ProjectData()
path = os.path.abspath(os.path.expanduser(fn))
p = parse_pch2(data, path)
zak = ZakSpace()
try:
udos = zak.connect_patch(p)
except ValueError as e:
print('error: {}'.format(e))
exit(-1)
csd = Csd(p, zak, udos)
dirname = os.path.dirname(path)
csd_save_path = os.path.join(dirname, os.path.basename(path) + '.csd')
with open(csd_save_path, 'w') as f:
f.write(csd.get_code())
def gen_udo_status_doc():
tpl_url = 'https://github.com/gleb812/pch2csd/blob/master/pch2csd/resources/templates/modules/{}.txt'
data = ProjectData()
with open('Module-implementation-status.md', 'w') as md:
md.write('This file is automatically generated.\n\n')
md.write('| Template | Module name | Status |\n')
md.write('|----------|-------------|--------|\n')
for p in [parse_pch2(data, get_test_resource(pch2file)) for pch2file
in ['test_all_modules_1.pch2', 'test_all_modules_2.pch2']]:
for m in p.modules:
status = StringIO()
validate_udo(m.type, status, print_action=False)
md.write('| [{}]({}) | {} | {} |\n'.format(
'{}.txt'.format(m.type),
tpl_url.format(m.type),
m.type_name,
'<br>'.join(status.getvalue().splitlines())))
def main():
arg_parser = argparse.ArgumentParser(
prog='pch2csd',
description='convert Clavia Nord Modular G2 patches to the Csound code',
epilog='Version {}, homepage: {}'.format(__version__, __homepage__))
arg_parser.add_argument('arg', metavar='arg', nargs='?', default='patch.pch2',
help='a pch2 file path or an UDO numerical ID')
arg_parser.add_argument('-d', '--debug', action='store_const', const=True,
help='print a stack trace in case of error')
group = arg_parser.add_mutually_exclusive_group()
group.add_argument('-p', '--print', action='store_const', const=True,
help='parse the patch file and print its content')
    group.add_argument('-m', '--mod-print', nargs=2, metavar=('module_id', '{voice,fx}'),
                       help='print extensive information about the module in '
                            'the file {arg}. You should provide two values: '
                            'an integer module id and an area.')
group.add_argument('-c', '--check-udo', action='store_const', const=True,
help="validate the UDO template file (overrides '-p')")
group.add_argument('-v', '--version', action='version',
version='%(prog)s ' + __version__)
group.add_argument('-e', action='store_const', const=True,
help='show the elephant and exit')
args = arg_parser.parse_args()
if args.check_udo:
try:
type_id = int(args.arg)
validate_udo(type_id)
except ValueError:
print("you should pass the integer as the 'arg' parameter when using '--check-udo'")
elif args.mod_print:
print_module(args.arg, int(args.mod_print[0]), Location.from_str(args.mod_print[1]))
elif args.print:
print_pch2(args.arg)
elif args.e:
show_elephant()
else:
if args.arg == 'gen_udo_status_doc':
gen_udo_status_doc()
else:
try:
convert_pch2(args.arg)
except Exception as e:
print(e)
if args.debug:
import traceback
_, _, tb = sys.exc_info()
print()
print('-----------')
traceback.print_tb(tb, file=sys.stdout)
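# --- Editor's note: illustrative sketch, not part of the original module ---
# Example invocations of the CLI defined above (file names are hypothetical):
#   pch2csd mypatch.pch2          # convert the patch; writes mypatch.pch2.csd next to it
#   pch2csd -p mypatch.pch2       # print the modules and cables of the patch
#   pch2csd -c <module_type_id>   # validate the UDO template for a module type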
def show_elephant():
print('///////////////////osyyo///////////////////////////////////////////////////////')
print('//////////////+oshmNMMMmNmhyso+//////////////////+++++////////////////////+o///')
print('///////////+oshydNMMMMMMMMMMMNh++++++++++ossssyysssyshhys+//////////////+hNmhys')
print('/////////+oydmmNNNNNNNNNNNMMNNdhyyyyyyyhhddy+++::/ooossyyhs+///////////omMMMNNN')
print('///////+oyyhhhdhhhhhhdmdmmddhyshhyysys++ossys+--+syyyyyysoo++/////////+hmmmmmdy')
print('///+++++++++ooooooosossssoo+++syyyssyyss+-..`.ydmmddyo+/+++/++++++++++shhhhhyys')
print('+++ oooyhyyhyyyhhdso+/:sddyo+//++/////++++++++++ooosssss')
print('+++ Clavia Nord Modular G2 sshhhyyyyyys+-+hho/ys+///++/////:+++++++++++++++++++')
print('+++ Patch Converter ooossosyyy+:``.--`.//+/+/://+/o+++++++++++++++++++++')
print('+++ oo+oysysso/:-.``````.-:/+/-/+syso+++++++++++++++++++')
print('++oooooooooooooooooooooooooooosssysoosys+-``` ``-:////://oosooooooooo++++++++++')
print('ooooooooooooosssssooosssssssssshyyso+shdh.` `-/:-:-:--/++ooooooooooooooooyso')
print('ssssssssyyyyyyyyyyyyyyyyyssssooooso+++yhh- .:/--````-::-/oooooooooooosyhhdd')
print('ossosssssssssssssssssssssssss/++++/--/+hs` `.`-...````-..`oooooooosssyssssyyy')
print('ooooooosssssssssssssssyysssss/////-` sNm ` .` ``` /oooosoosyhdhysooyhd')
print('oooooosssssssssssssshdyysssym/:::-`/.:mmo ` :sssssyyyyyysoosyyyyy')
print('osssssssssssssssyyhdy+++shmNs-.```.Ny`` +ssssyyyhhyyyyyssssoo')
print('sssshddhysyssyyyhdds/oyhsdysh-. omm- -. :.+yssssyyyyysyhyyysyh')
print('yhhhdhhhhhyyyyyhhhh/.:ysydmNh. .hmmy `:-` `` `yo-ohyyyyyyyyyyyysssss')
print('syyyyyyyyyyhddhddmmy.`.:/++o/` `yhhdh. ..` ```` ohhyyyyyyyyyyyyyyyyyyss')
print('hysyyyyhhhyhhhhhyyhhy/` `-:. `/yyhhhyo `.` `` +yyyyyyyyyyyyysyssssssss')
print('hyyhhyyhhdhhhhyyyyyyyyyo+///+syyyyyyyhy- ..``:yo` :hhyyyyysyyyssyysssssssss')
print('yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyo .-.``syy- syyyyyyssssssssssssssssss')
print('ssssyyyyyyyyyyyyyyyysssssosssoooooooooos-`--.`-sss. `ssssooooooooooooooooooooo')
print('sssyyysssssssssssssooooooooossssssssssss+-:-.`oysy -yyyyyyyyyyyyyyyyyysyysyyy')
print('yyyyyyyyyyyyyhhhhyhhhhhdhhdddddddhdddddd/.:-``yddy :ddddhhdddddddddhhhhhhdhhh')
print('hhddddddddddddddddddddddmmmmmmmdddmmdddh/.:.../hd+ .ddddddddddddhhhyhhhhhhhhh')
print('hhhhhhhhhhhhdddhdddddddddhhhhhhhhdhhhhhh-.o+/--hy. -osyhhhddddhhhhyyyyyyyyys')
print('dddhhhhhhhhhhhhdddhdhhhhhhhhhhyyyssyo//:`-hyys:` ```.-::/+osyyysyyyyyyyyyhyyhys')
print('hhhyyhhhhhdhddhhhhyyysosoysosooo//:----.`+yysssssossooyyysyhhhhyhhyyyyyyhhyyyyy')
if __name__ == '__main__':
main()
| 45.813765
| 105
| 0.58351
|
22cc25fbf63fb6f4acec09e62a7550166b4e7eab
| 63
|
py
|
Python
|
count.py
|
jtr4fh/cs3240-labdemo
|
f909e93439610e760e289cea032d2fd6ddbbf19e
|
[
"MIT"
] | null | null | null |
count.py
|
jtr4fh/cs3240-labdemo
|
f909e93439610e760e289cea032d2fd6ddbbf19e
|
[
"MIT"
] | null | null | null |
count.py
|
jtr4fh/cs3240-labdemo
|
f909e93439610e760e289cea032d2fd6ddbbf19e
|
[
"MIT"
] | null | null | null |
def countdown():
print(3)
print(2)
print(1)
print('PARTY!')
| 12.6
| 16
| 0.634921
|
3846cc3507730ee89781e439dbeccf981b0af744
| 147
|
py
|
Python
|
tests/classification/FourClass_100/ws_FourClass_100_LogisticRegression_sqlite_code_gen.py
|
antoinecarme/sklearn2sql_heroku
|
d680db10683daa419324461eeea851dd8b103ad5
|
[
"BSD-3-Clause"
] | 1
|
2019-07-09T14:45:18.000Z
|
2019-07-09T14:45:18.000Z
|
tests/classification/FourClass_100/ws_FourClass_100_LogisticRegression_sqlite_code_gen.py
|
antoinecarme/sklearn2sql_heroku
|
d680db10683daa419324461eeea851dd8b103ad5
|
[
"BSD-3-Clause"
] | 5
|
2017-11-13T13:35:37.000Z
|
2021-11-11T12:57:20.000Z
|
tests/classification/FourClass_100/ws_FourClass_100_LogisticRegression_sqlite_code_gen.py
|
antoinecarme/sklearn2sql_heroku
|
d680db10683daa419324461eeea851dd8b103ad5
|
[
"BSD-3-Clause"
] | 1
|
2021-09-19T15:05:33.000Z
|
2021-09-19T15:05:33.000Z
|
from sklearn2sql_heroku.tests.classification import generic as class_gen
class_gen.test_model("LogisticRegression" , "FourClass_100" , "sqlite")
| 29.4
| 72
| 0.823129
|
5bcad05c8f92b1c3d9e9fa44388f7d05e513ed5c
| 398
|
py
|
Python
|
Scripts/Discord Nitro Generator.py
|
Wtf-Is-This-x1337/Useful-Scripts
|
0f51ad216849e3f1dd05b1699d9f1c168b05a5d5
|
[
"Unlicense"
] | 1
|
2021-02-03T17:48:15.000Z
|
2021-02-03T17:48:15.000Z
|
Scripts/Discord Nitro Generator.py
|
Wtf-Is-This-x1337/Useful-Scripts
|
0f51ad216849e3f1dd05b1699d9f1c168b05a5d5
|
[
"Unlicense"
] | null | null | null |
Scripts/Discord Nitro Generator.py
|
Wtf-Is-This-x1337/Useful-Scripts
|
0f51ad216849e3f1dd05b1699d9f1c168b05a5d5
|
[
"Unlicense"
] | null | null | null |
import random, string
amount = int(input('! (all the codes are unchecked) How many Nitro codes do you want to be generated?: '))
with open('Codes.txt', 'a+') as codes_file:
    for _ in range(amount):
        code = "https://discord.gift/" + ''.join(random.choices(string.ascii_letters + string.digits, k=16))
        codes_file.write(f'{code}\n')
        print(f'[GENERATED] {code}')
| 36.181818
| 107
| 0.613065
|
a55c2b964f4ad73639e884d274d84577c231c158
| 5,627
|
py
|
Python
|
sdk/python/pulumi_azure_native/botservice/v20210301/list_channel_with_keys.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/botservice/v20210301/list_channel_with_keys.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/botservice/v20210301/list_channel_with_keys.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'ListChannelWithKeysResult',
'AwaitableListChannelWithKeysResult',
'list_channel_with_keys',
]
@pulumi.output_type
class ListChannelWithKeysResult:
"""
Bot channel resource definition
"""
def __init__(__self__, etag=None, id=None, kind=None, location=None, name=None, properties=None, sku=None, tags=None, type=None):
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if kind and not isinstance(kind, str):
raise TypeError("Expected argument 'kind' to be a str")
pulumi.set(__self__, "kind", kind)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if properties and not isinstance(properties, dict):
raise TypeError("Expected argument 'properties' to be a dict")
pulumi.set(__self__, "properties", properties)
if sku and not isinstance(sku, dict):
raise TypeError("Expected argument 'sku' to be a dict")
pulumi.set(__self__, "sku", sku)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
Entity Tag
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> str:
"""
Specifies the resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def kind(self) -> Optional[str]:
"""
Required. Gets or sets the Kind of the resource.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Specifies the location of the resource.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Specifies the name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> Any:
"""
The set of properties specific to bot channel resource
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter
def sku(self) -> Optional['outputs.SkuResponse']:
"""
Gets or sets the SKU of the resource.
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Contains resource tags defined as key/value pairs.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Specifies the type of the resource.
"""
return pulumi.get(self, "type")
class AwaitableListChannelWithKeysResult(ListChannelWithKeysResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return ListChannelWithKeysResult(
etag=self.etag,
id=self.id,
kind=self.kind,
location=self.location,
name=self.name,
properties=self.properties,
sku=self.sku,
tags=self.tags,
type=self.type)
def list_channel_with_keys(channel_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
resource_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListChannelWithKeysResult:
"""
Bot channel resource definition
:param str channel_name: The name of the Channel resource.
:param str resource_group_name: The name of the Bot resource group in the user subscription.
:param str resource_name: The name of the Bot resource.
"""
__args__ = dict()
__args__['channelName'] = channel_name
__args__['resourceGroupName'] = resource_group_name
__args__['resourceName'] = resource_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:botservice/v20210301:listChannelWithKeys', __args__, opts=opts, typ=ListChannelWithKeysResult).value
return AwaitableListChannelWithKeysResult(
etag=__ret__.etag,
id=__ret__.id,
kind=__ret__.kind,
location=__ret__.location,
name=__ret__.name,
properties=__ret__.properties,
sku=__ret__.sku,
tags=__ret__.tags,
type=__ret__.type)
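# --- Editor's note: illustrative sketch, not part of the generated module ---
# Example call with hypothetical resource names; requires a configured Azure
# environment in the surrounding Pulumi program:
#   keys = list_channel_with_keys(channel_name="MsTeamsChannel",
#                                 resource_group_name="my-rg",
#                                 resource_name="my-bot")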
| 32.33908
| 150
| 0.615426
|
96d2e399ec14c8c01a8c0eaf6c0f045006803fce
| 2,900
|
py
|
Python
|
examples/versioned_rows/versioned_rows.py
|
petit87/sqlalchemy
|
67d674bd63ca36ac32b23f96e2b19e9dac6b0863
|
[
"MIT"
] | 1
|
2020-07-21T16:06:40.000Z
|
2020-07-21T16:06:40.000Z
|
examples/versioned_rows/versioned_rows.py
|
petit87/sqlalchemy
|
67d674bd63ca36ac32b23f96e2b19e9dac6b0863
|
[
"MIT"
] | 4
|
2020-04-23T19:00:28.000Z
|
2021-09-28T18:14:58.000Z
|
examples/versioned_rows/versioned_rows.py
|
petit87/sqlalchemy
|
67d674bd63ca36ac32b23f96e2b19e9dac6b0863
|
[
"MIT"
] | 1
|
2022-02-28T20:16:29.000Z
|
2022-02-28T20:16:29.000Z
|
"""Illustrates a method to intercept changes on objects, turning
an UPDATE statement on a single row into an INSERT statement, so that a new
row is inserted with the new data, keeping the old row intact.
"""
from sqlalchemy import Column
from sqlalchemy import create_engine
from sqlalchemy import event
from sqlalchemy import ForeignKey
from sqlalchemy import Integer
from sqlalchemy import String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import attributes
from sqlalchemy.orm import backref
from sqlalchemy.orm import make_transient
from sqlalchemy.orm import relationship
from sqlalchemy.orm import Session
from sqlalchemy.orm import sessionmaker
class Versioned:
def new_version(self, session):
# make us transient (removes persistent
# identity).
make_transient(self)
# set 'id' to None.
# a new PK will be generated on INSERT.
self.id = None
@event.listens_for(Session, "before_flush")
def before_flush(session, flush_context, instances):
for instance in session.dirty:
if not isinstance(instance, Versioned):
continue
if not session.is_modified(instance):
continue
if not attributes.instance_state(instance).has_identity:
continue
# make it transient
instance.new_version(session)
# re-add
session.add(instance)
Base = declarative_base()
engine = create_engine("sqlite://", echo=True)
Session = sessionmaker(engine)
# example 1, simple versioning
class Example(Versioned, Base):
__tablename__ = "example"
id = Column(Integer, primary_key=True)
data = Column(String)
Base.metadata.create_all(engine)
session = Session()
e1 = Example(data="e1")
session.add(e1)
session.commit()
e1.data = "e2"
session.commit()
assert session.query(Example.id, Example.data).order_by(Example.id).all() == (
[(1, "e1"), (2, "e2")]
)
# example 2, versioning with a parent
class Parent(Base):
__tablename__ = "parent"
id = Column(Integer, primary_key=True)
child_id = Column(Integer, ForeignKey("child.id"))
child = relationship("Child", backref=backref("parent", uselist=False))
class Child(Versioned, Base):
__tablename__ = "child"
id = Column(Integer, primary_key=True)
data = Column(String)
def new_version(self, session):
# expire parent's reference to us
session.expire(self.parent, ["child"])
# create new version
Versioned.new_version(self, session)
# re-add ourselves to the parent
self.parent.child = self
Base.metadata.create_all(engine)
session = Session()
p1 = Parent(child=Child(data="c1"))
session.add(p1)
session.commit()
p1.child.data = "c2"
session.commit()
assert p1.child_id == 2
assert session.query(Child.id, Child.data).order_by(Child.id).all() == (
[(1, "c1"), (2, "c2")]
)
| 24.166667
| 78
| 0.697586
|
89a2b4dae766e12e0ae1757a1fa9046b37cf3af5
| 502
|
py
|
Python
|
sampleapp/migrations/0004_job_title.py
|
ptevans/django-thresher
|
3a82d7e3828c2537eb9a56bd2e9fffa554f8fbd3
|
[
"MIT"
] | null | null | null |
sampleapp/migrations/0004_job_title.py
|
ptevans/django-thresher
|
3a82d7e3828c2537eb9a56bd2e9fffa554f8fbd3
|
[
"MIT"
] | null | null | null |
sampleapp/migrations/0004_job_title.py
|
ptevans/django-thresher
|
3a82d7e3828c2537eb9a56bd2e9fffa554f8fbd3
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-02-26 01:53
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sampleapp', '0003_auto_20180226_0138'),
]
operations = [
migrations.AddField(
model_name='job',
name='title',
field=models.CharField(default='Worker', max_length=100),
preserve_default=False,
),
]
| 22.818182
| 69
| 0.615538
|
cf41a6aa2751c841adda537d92a9f589edc64e77
| 16,770
|
py
|
Python
|
deps/protobuf/objectivec/DevTools/pddm_tests.py
|
dlminvestments/coremltools
|
cf6db67bab18346e132124783d46a32b8a7f52c6
|
[
"BSD-3-Clause"
] | null | null | null |
deps/protobuf/objectivec/DevTools/pddm_tests.py
|
dlminvestments/coremltools
|
cf6db67bab18346e132124783d46a32b8a7f52c6
|
[
"BSD-3-Clause"
] | 75
|
2020-11-24T05:37:45.000Z
|
2022-02-25T15:14:23.000Z
|
deps/protobuf/objectivec/DevTools/pddm_tests.py
|
dlminvestments/coremltools
|
cf6db67bab18346e132124783d46a32b8a7f52c6
|
[
"BSD-3-Clause"
] | null | null | null |
#! /usr/bin/python
#
# Protocol Buffers - Google's data interchange format
# Copyright 2015 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for pddm.py."""
import io
import unittest
import pddm
class TestParsingMacros(unittest.TestCase):
def testParseEmpty(self):
f = io.StringIO(u'')
result = pddm.MacroCollection(f)
self.assertEqual(len(result._macros), 0)
def testParseOne(self):
f = io.StringIO(u"""PDDM-DEFINE foo( )
body""")
result = pddm.MacroCollection(f)
self.assertEqual(len(result._macros), 1)
macro = result._macros.get('foo')
self.assertIsNotNone(macro)
self.assertEqual(macro.name, 'foo')
self.assertEqual(macro.args, tuple())
self.assertEqual(macro.body, 'body')
def testParseGeneral(self):
# Tests multiple defines, spaces in all places, etc.
f = io.StringIO(u"""
PDDM-DEFINE noArgs( )
body1
body2
PDDM-DEFINE-END
PDDM-DEFINE oneArg(foo)
body3
PDDM-DEFINE twoArgs( bar_ , baz )
body4
body5""")
result = pddm.MacroCollection(f)
self.assertEqual(len(result._macros), 3)
macro = result._macros.get('noArgs')
self.assertIsNotNone(macro)
self.assertEqual(macro.name, 'noArgs')
self.assertEqual(macro.args, tuple())
self.assertEqual(macro.body, 'body1\nbody2\n')
macro = result._macros.get('oneArg')
self.assertIsNotNone(macro)
self.assertEqual(macro.name, 'oneArg')
self.assertEqual(macro.args, ('foo',))
self.assertEqual(macro.body, 'body3')
macro = result._macros.get('twoArgs')
self.assertIsNotNone(macro)
self.assertEqual(macro.name, 'twoArgs')
self.assertEqual(macro.args, ('bar_', 'baz'))
self.assertEqual(macro.body, 'body4\nbody5')
# Add into existing collection
f = io.StringIO(u"""
PDDM-DEFINE another(a,b,c)
body1
body2""")
result.ParseInput(f)
self.assertEqual(len(result._macros), 4)
macro = result._macros.get('another')
self.assertIsNotNone(macro)
self.assertEqual(macro.name, 'another')
self.assertEqual(macro.args, ('a', 'b', 'c'))
self.assertEqual(macro.body, 'body1\nbody2')
def testParseDirectiveIssues(self):
test_list = [
# Unknown directive
(u'PDDM-DEFINE foo()\nbody\nPDDM-DEFINED foo\nbaz',
'Hit a line with an unknown directive: '),
# End without begin
(u'PDDM-DEFINE foo()\nbody\nPDDM-DEFINE-END\nPDDM-DEFINE-END\n',
'Got DEFINE-END directive without an active macro: '),
# Line not in macro block
(u'PDDM-DEFINE foo()\nbody\nPDDM-DEFINE-END\nmumble\n',
'Hit a line that wasn\'t a directive and no open macro definition: '),
# Redefine macro
(u'PDDM-DEFINE foo()\nbody\nPDDM-DEFINE foo(a)\nmumble\n',
'Attempt to redefine macro: '),
]
for idx, (input_str, expected_prefix) in enumerate(test_list, 1):
f = io.StringIO(input_str)
try:
result = pddm.MacroCollection(f)
self.fail('Should throw exception, entry %d' % idx)
except pddm.PDDMError as e:
self.assertTrue(e.message.startswith(expected_prefix),
'Entry %d failed: %r' % (idx, e))
def testParseBeginIssues(self):
test_list = [
# 1. No name
(u'PDDM-DEFINE\nmumble',
'Failed to parse macro definition: '),
# 2. No name (with spaces)
(u'PDDM-DEFINE \nmumble',
'Failed to parse macro definition: '),
# 3. No open paren
(u'PDDM-DEFINE foo\nmumble',
'Failed to parse macro definition: '),
# 4. No close paren
(u'PDDM-DEFINE foo(\nmumble',
'Failed to parse macro definition: '),
# 5. No close paren (with args)
(u'PDDM-DEFINE foo(a, b\nmumble',
'Failed to parse macro definition: '),
# 6. No name before args
(u'PDDM-DEFINE (a, b)\nmumble',
'Failed to parse macro definition: '),
# 7. No name before args
(u'PDDM-DEFINE foo bar(a, b)\nmumble',
'Failed to parse macro definition: '),
# 8. Empty arg name
(u'PDDM-DEFINE foo(a, ,b)\nmumble',
'Empty arg name in macro definition: '),
(u'PDDM-DEFINE foo(a,,b)\nmumble',
'Empty arg name in macro definition: '),
# 10. Duplicate name
(u'PDDM-DEFINE foo(a,b,a,c)\nmumble',
'Arg name "a" used more than once in macro definition: '),
# 11. Invalid arg name
(u'PDDM-DEFINE foo(a b,c)\nmumble',
'Invalid arg name "a b" in macro definition: '),
(u'PDDM-DEFINE foo(a.b,c)\nmumble',
'Invalid arg name "a.b" in macro definition: '),
(u'PDDM-DEFINE foo(a-b,c)\nmumble',
'Invalid arg name "a-b" in macro definition: '),
(u'PDDM-DEFINE foo(a,b,c.)\nmumble',
'Invalid arg name "c." in macro definition: '),
# 15. Extra stuff after the name
(u'PDDM-DEFINE foo(a,c) foo\nmumble',
'Failed to parse macro definition: '),
(u'PDDM-DEFINE foo(a,c) foo)\nmumble',
'Failed to parse macro definition: '),
]
for idx, (input_str, expected_prefix) in enumerate(test_list, 1):
f = io.StringIO(input_str)
try:
result = pddm.MacroCollection(f)
self.fail('Should throw exception, entry %d' % idx)
except pddm.PDDMError as e:
self.assertTrue(e.message.startswith(expected_prefix),
'Entry %d failed: %r' % (idx, e))
class TestExpandingMacros(unittest.TestCase):
def testExpandBasics(self):
f = io.StringIO(u"""
PDDM-DEFINE noArgs( )
body1
body2
PDDM-DEFINE-END
PDDM-DEFINE oneArg(a)
body3 a
PDDM-DEFINE-END
PDDM-DEFINE twoArgs(b,c)
body4 b c
body5
PDDM-DEFINE-END
""")
mc = pddm.MacroCollection(f)
test_list = [
(u'noArgs()',
'body1\nbody2\n'),
(u'oneArg(wee)',
'body3 wee\n'),
(u'twoArgs(having some, fun)',
'body4 having some fun\nbody5'),
# One arg, pass empty.
(u'oneArg()',
'body3 \n'),
# Two args, gets empty in each slot.
(u'twoArgs(, empty)',
'body4 empty\nbody5'),
(u'twoArgs(empty, )',
'body4 empty \nbody5'),
(u'twoArgs(, )',
'body4 \nbody5'),
]
for idx, (input_str, expected) in enumerate(test_list, 1):
result = mc.Expand(input_str)
self.assertEqual(result, expected,
'Entry %d --\n Result: %r\n Expected: %r' %
(idx, result, expected))
def testExpandArgOptions(self):
f = io.StringIO(u"""
PDDM-DEFINE bar(a)
a-a$S-a$l-a$L-a$u-a$U
PDDM-DEFINE-END
""")
mc = pddm.MacroCollection(f)
self.assertEqual(mc.Expand('bar(xYz)'), 'xYz- -xYz-xyz-XYz-XYZ')
self.assertEqual(mc.Expand('bar(MnoP)'), 'MnoP- -mnoP-mnop-MnoP-MNOP')
# Test empty
self.assertEqual(mc.Expand('bar()'), '-----')
def testExpandSimpleMacroErrors(self):
f = io.StringIO(u"""
PDDM-DEFINE foo(a, b)
<a-z>
PDDM-DEFINE baz(a)
a - a$z
""")
mc = pddm.MacroCollection(f)
test_list = [
# 1. Unknown macro
(u'bar()',
'No macro named "bar".'),
(u'bar(a)',
'No macro named "bar".'),
# 3. Arg mismatch
(u'foo()',
'Expected 2 args, got: "foo()".'),
(u'foo(a b)',
'Expected 2 args, got: "foo(a b)".'),
(u'foo(a,b,c)',
'Expected 2 args, got: "foo(a,b,c)".'),
# 6. Unknown option in expansion
(u'baz(mumble)',
'Unknown arg option "a$z" while expanding "baz(mumble)".'),
]
for idx, (input_str, expected_err) in enumerate(test_list, 1):
try:
result = mc.Expand(input_str)
self.fail('Should throw exception, entry %d' % idx)
except pddm.PDDMError as e:
self.assertEqual(e.message, expected_err,
'Entry %d failed: %r' % (idx, e))
def testExpandReferences(self):
f = io.StringIO(u"""
PDDM-DEFINE StartIt()
foo(abc, def)
foo(ghi, jkl)
PDDM-DEFINE foo(a, b)
bar(a, int)
bar(b, NSString *)
PDDM-DEFINE bar(n, t)
- (t)n;
- (void)set##n$u##:(t)value;
""")
mc = pddm.MacroCollection(f)
expected = """- (int)abc;
- (void)setAbc:(int)value;
- (NSString *)def;
- (void)setDef:(NSString *)value;
- (int)ghi;
- (void)setGhi:(int)value;
- (NSString *)jkl;
- (void)setJkl:(NSString *)value;
"""
self.assertEqual(mc.Expand('StartIt()'), expected)
def testCatchRecursion(self):
f = io.StringIO(u"""
PDDM-DEFINE foo(a, b)
bar(1, a)
bar(2, b)
PDDM-DEFINE bar(x, y)
foo(x, y)
""")
mc = pddm.MacroCollection(f)
try:
result = mc.Expand('foo(A,B)')
      self.fail('Should throw exception')
except pddm.PDDMError as e:
self.assertEqual(e.message,
'Found macro recusion, invoking "foo(1, A)":\n...while expanding "bar(1, A)".\n...while expanding "foo(A,B)".')
class TestParsingSource(unittest.TestCase):
def testBasicParse(self):
test_list = [
# 1. no directives
(u'a\nb\nc',
(3,) ),
# 2. One define
(u'a\n//%PDDM-DEFINE foo()\n//%body\nc',
(1, 2, 1) ),
# 3. Two defines
(u'a\n//%PDDM-DEFINE foo()\n//%body\n//%PDDM-DEFINE bar()\n//%body2\nc',
(1, 4, 1) ),
# 4. Two defines with ends
(u'a\n//%PDDM-DEFINE foo()\n//%body\n//%PDDM-DEFINE-END\n'
u'//%PDDM-DEFINE bar()\n//%body2\n//%PDDM-DEFINE-END\nc',
(1, 6, 1) ),
# 5. One expand, one define (that runs to end of file)
(u'a\n//%PDDM-EXPAND foo()\nbody\n//%PDDM-EXPAND-END\n'
u'//%PDDM-DEFINE bar()\n//%body2\n',
(1, 1, 2) ),
# 6. One define ended with an expand.
(u'a\nb\n//%PDDM-DEFINE bar()\n//%body2\n'
u'//%PDDM-EXPAND bar()\nbody2\n//%PDDM-EXPAND-END\n',
(2, 2, 1) ),
# 7. Two expands (one end), one define.
(u'a\n//%PDDM-EXPAND foo(1)\nbody\n//%PDDM-EXPAND foo(2)\nbody2\n//%PDDM-EXPAND-END\n'
u'//%PDDM-DEFINE foo()\n//%body2\n',
(1, 2, 2) ),
]
for idx, (input_str, line_counts) in enumerate(test_list, 1):
f = io.StringIO(input_str)
sf = pddm.SourceFile(f)
sf._ParseFile()
self.assertEqual(len(sf._sections), len(line_counts),
'Entry %d -- %d != %d' %
(idx, len(sf._sections), len(line_counts)))
for idx2, (sec, expected) in enumerate(zip(sf._sections, line_counts), 1):
self.assertEqual(sec.num_lines_captured, expected,
'Entry %d, section %d -- %d != %d' %
(idx, idx2, sec.num_lines_captured, expected))
def testErrors(self):
test_list = [
# 1. Directive within expansion
(u'//%PDDM-EXPAND a()\n//%PDDM-BOGUS',
'Ran into directive ("//%PDDM-BOGUS", line 2) while in "//%PDDM-EXPAND a()".'),
(u'//%PDDM-EXPAND a()\n//%PDDM-DEFINE a()\n//%body\n',
'Ran into directive ("//%PDDM-DEFINE", line 2) while in "//%PDDM-EXPAND a()".'),
# 3. Expansion ran off end of file
(u'//%PDDM-EXPAND a()\na\nb\n',
'Hit the end of the file while in "//%PDDM-EXPAND a()".'),
# 4. Directive within define
(u'//%PDDM-DEFINE a()\n//%body\n//%PDDM-BOGUS',
'Ran into directive ("//%PDDM-BOGUS", line 3) while in "//%PDDM-DEFINE a()".'),
(u'//%PDDM-DEFINE a()\n//%body\n//%PDDM-EXPAND-END a()',
'Ran into directive ("//%PDDM-EXPAND-END", line 3) while in "//%PDDM-DEFINE a()".'),
# 6. Directives that shouldn't start sections
(u'a\n//%PDDM-DEFINE-END a()\n//a\n',
'Unexpected line 2: "//%PDDM-DEFINE-END a()".'),
(u'a\n//%PDDM-EXPAND-END a()\n//a\n',
'Unexpected line 2: "//%PDDM-EXPAND-END a()".'),
(u'//%PDDM-BOGUS\n//a\n',
'Unexpected line 1: "//%PDDM-BOGUS".'),
]
for idx, (input_str, expected_err) in enumerate(test_list, 1):
f = io.StringIO(input_str)
try:
pddm.SourceFile(f)._ParseFile()
self.fail('Should throw exception, entry %d' % idx)
except pddm.PDDMError as e:
self.assertEqual(e.message, expected_err,
'Entry %d failed: %r' % (idx, e))
class TestProcessingSource(unittest.TestCase):
def testBasics(self):
input_str = u"""
//%PDDM-IMPORT-DEFINES ImportFile
foo
//%PDDM-EXPAND mumble(abc)
//%PDDM-EXPAND-END
bar
//%PDDM-EXPAND mumble(def)
//%PDDM-EXPAND mumble(ghi)
//%PDDM-EXPAND-END
baz
//%PDDM-DEFINE mumble(a_)
//%a_: getName(a_)
"""
input_str2 = u"""
//%PDDM-DEFINE getName(x_)
//%do##x_$u##(int x_);
"""
expected = u"""
//%PDDM-IMPORT-DEFINES ImportFile
foo
//%PDDM-EXPAND mumble(abc)
// This block of code is generated, do not edit it directly.
abc: doAbc(int abc);
//%PDDM-EXPAND-END mumble(abc)
bar
//%PDDM-EXPAND mumble(def)
// This block of code is generated, do not edit it directly.
def: doDef(int def);
//%PDDM-EXPAND mumble(ghi)
// This block of code is generated, do not edit it directly.
ghi: doGhi(int ghi);
//%PDDM-EXPAND-END (2 expansions)
baz
//%PDDM-DEFINE mumble(a_)
//%a_: getName(a_)
"""
expected_stripped = u"""
//%PDDM-IMPORT-DEFINES ImportFile
foo
//%PDDM-EXPAND mumble(abc)
//%PDDM-EXPAND-END mumble(abc)
bar
//%PDDM-EXPAND mumble(def)
//%PDDM-EXPAND mumble(ghi)
//%PDDM-EXPAND-END (2 expansions)
baz
//%PDDM-DEFINE mumble(a_)
//%a_: getName(a_)
"""
def _Resolver(name):
self.assertEqual(name, 'ImportFile')
return io.StringIO(input_str2)
f = io.StringIO(input_str)
sf = pddm.SourceFile(f, _Resolver)
sf.ProcessContent()
self.assertEqual(sf.processed_content, expected)
# Feed it through and nothing should change.
f2 = io.StringIO(sf.processed_content)
sf2 = pddm.SourceFile(f2, _Resolver)
sf2.ProcessContent()
self.assertEqual(sf2.processed_content, expected)
self.assertEqual(sf2.processed_content, sf.processed_content)
# Test stripping (with the original input and expanded version).
f2 = io.StringIO(input_str)
sf2 = pddm.SourceFile(f2)
sf2.ProcessContent(strip_expansion=True)
self.assertEqual(sf2.processed_content, expected_stripped)
f2 = io.StringIO(sf.processed_content)
sf2 = pddm.SourceFile(f2, _Resolver)
sf2.ProcessContent(strip_expansion=True)
self.assertEqual(sf2.processed_content, expected_stripped)
def testProcessFileWithMacroParseError(self):
input_str = u"""
foo
//%PDDM-DEFINE mumble(a_)
//%body
//%PDDM-DEFINE mumble(x_)
//%body2
"""
f = io.StringIO(input_str)
sf = pddm.SourceFile(f)
try:
sf.ProcessContent()
      self.fail('Should throw exception')
except pddm.PDDMError as e:
self.assertEqual(e.message,
'Attempt to redefine macro: "PDDM-DEFINE mumble(x_)"\n'
'...while parsing section that started:\n'
' Line 3: //%PDDM-DEFINE mumble(a_)')
def testProcessFileWithExpandError(self):
input_str = u"""
foo
//%PDDM-DEFINE mumble(a_)
//%body
//%PDDM-EXPAND foobar(x_)
//%PDDM-EXPAND-END
"""
f = io.StringIO(input_str)
sf = pddm.SourceFile(f)
try:
sf.ProcessContent()
      self.fail('Should throw exception')
except pddm.PDDMError as e:
self.assertEqual(e.message,
'No macro named "foobar".\n'
'...while expanding "foobar(x_)" from the section that'
' started:\n Line 5: //%PDDM-EXPAND foobar(x_)')
if __name__ == '__main__':
unittest.main()
| 32.5
| 134
| 0.620215
|
c28d2c158a84a3cc09db3d92a668b046d12ad578
| 15,047
|
py
|
Python
|
grafanalib/elasticsearch.py
|
locan11/grafanalib
|
002b819ae307674d03e60bacdc3abc7092faa35b
|
[
"Apache-2.0"
] | null | null | null |
grafanalib/elasticsearch.py
|
locan11/grafanalib
|
002b819ae307674d03e60bacdc3abc7092faa35b
|
[
"Apache-2.0"
] | null | null | null |
grafanalib/elasticsearch.py
|
locan11/grafanalib
|
002b819ae307674d03e60bacdc3abc7092faa35b
|
[
"Apache-2.0"
] | null | null | null |
"""Helpers to create Elasticsearch-specific Grafana queries."""
import attr
import itertools
from attr.validators import instance_of
from grafanalib.core import AlertCondition
DATE_HISTOGRAM_DEFAULT_FIELD = 'time_iso8601'
ORDER_ASC = 'asc'
ORDER_DESC = 'desc'
@attr.s
class CountMetricAgg(object):
"""An aggregator that counts the number of values.
https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-valuecount-aggregation.html
It's the default aggregator for elasticsearch queries.
:param hide: show/hide the metric in the final panel display
:param id: id of the metric
:param inline: script to apply to the data, using '_value'
"""
id = attr.ib(default=0, validator=instance_of(int))
hide = attr.ib(default=False, validator=instance_of(bool))
inline = attr.ib(default="", validator=instance_of(str))
def to_json_data(self):
self.settings = {}
if self.inline:
self.settings['script'] = {'inline': self.inline}
return {
'id': str(self.id),
'hide': self.hide,
'type': 'count',
'field': 'select field',
'inlineScript': self.inline,
'settings': self.settings,
}
@attr.s
class MaxMetricAgg(object):
"""An aggregator that provides the max. value among the values.
https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-max-aggregation.html
:param field: name of elasticsearch field to provide the maximum for
:param hide: show/hide the metric in the final panel display
:param id: id of the metric
:param inline: script to apply to the data, using '_value'
"""
field = attr.ib(default="", validator=instance_of(str))
id = attr.ib(default=0, validator=instance_of(int))
hide = attr.ib(default=False, validator=instance_of(bool))
inline = attr.ib(default="", validator=instance_of(str))
def to_json_data(self):
self.settings = {}
if self.inline:
self.settings['script'] = {'inline': self.inline}
return {
'id': str(self.id),
'hide': self.hide,
'type': 'max',
'field': self.field,
'inlineScript': self.inline,
'settings': self.settings,
}
@attr.s
class CardinalityMetricAgg(object):
"""An aggregator that provides the cardinality. value among the values.
https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-cardinality-aggregation.html
    :param field: name of elasticsearch field to provide the cardinality for
:param id: id of the metric
:param hide: show/hide the metric in the final panel display
:param inline: script to apply to the data, using '_value'
"""
field = attr.ib(default="", validator=instance_of(str))
id = attr.ib(default=0, validator=instance_of(int))
hide = attr.ib(default=False, validator=instance_of(bool))
inline = attr.ib(default="", validator=instance_of(str))
def to_json_data(self):
self.settings = {}
if self.inline:
self.settings['script'] = {'inline': self.inline}
return {
'id': str(self.id),
'hide': self.hide,
'type': 'cardinality',
'field': self.field,
'inlineScript': self.inline,
'settings': self.settings,
}
@attr.s
class AverageMetricAgg(object):
"""An aggregator that provides the average. value among the values.
https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-avg-aggregation.html
    :param field: name of elasticsearch field to provide the average for
:param id: id of the metric
:param hide: show/hide the metric in the final panel display
:param inline: script to apply to the data, using '_value'
"""
field = attr.ib(default="", validator=instance_of(str))
id = attr.ib(default=0, validator=instance_of(int))
hide = attr.ib(default=False, validator=instance_of(bool))
inline = attr.ib(default="", validator=instance_of(str))
def to_json_data(self):
self.settings = {}
if self.inline:
self.settings['script'] = {'inline': self.inline}
return {
'id': str(self.id),
'hide': self.hide,
'type': 'avg',
'field': self.field,
'inlineScript': self.inline,
'settings': self.settings,
'meta': {}
}
@attr.s
class DerivativeMetricAgg(object):
"""An aggregator that takes the derivative of another metric aggregator.
https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-derivative-aggregation.html
:param field: id of elasticsearch metric aggregator to provide the derivative of
:param hide: show/hide the metric in the final panel display
:param id: id of the metric
:param pipelineAgg: pipeline aggregator id
:param unit: derivative units
"""
field = attr.ib(default="", validator=instance_of(str))
hide = attr.ib(default=False, validator=instance_of(bool))
id = attr.ib(default=0, validator=instance_of(int))
pipelineAgg = attr.ib(default=1, validator=instance_of(int))
unit = attr.ib(default="", validator=instance_of(str))
def to_json_data(self):
settings = {}
if self.unit != "":
settings['unit'] = self.unit
return {
'id': str(self.id),
'pipelineAgg': str(self.pipelineAgg),
'hide': self.hide,
'type': 'derivative',
'field': self.field,
'settings': settings,
}
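# Illustrative sketch (an assumption, not part of the library): a DerivativeMetricAgg
# whose pipelineAgg points at the id of a MaxMetricAgg, to graph the per-interval rate
# of a monotonically increasing counter. Field names and the '1s' unit are placeholders.
def _example_derivative_agg():
    return [MaxMetricAgg(field='bytes_total', id=1),
            DerivativeMetricAgg(pipelineAgg=1, id=2, unit='1s')]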
@attr.s
class SumMetricAgg(object):
"""An aggregator that provides the sum of the values.
https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-sum-aggregation.html
:param field: name of elasticsearch field to provide the sum over
:param hide: show/hide the metric in the final panel display
:param id: id of the metric
:param inline: script to apply to the data, using '_value'
"""
field = attr.ib(default="", validator=instance_of(str))
id = attr.ib(default=0, validator=instance_of(int))
hide = attr.ib(default=False, validator=instance_of(bool))
inline = attr.ib(default="", validator=instance_of(str))
def to_json_data(self):
self.settings = {}
if self.inline:
self.settings['script'] = {'inline': self.inline}
return {
'id': str(self.id),
'hide': self.hide,
'type': 'sum',
'field': self.field,
'inlineScript': self.inline,
'settings': self.settings,
}
@attr.s
class DateHistogramGroupBy(object):
"""A bucket aggregator that groups results by date.
https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-datehistogram-aggregation.html
:param id: ascending unique number per GroupBy clause
:param field: name of the elasticsearch field to group by
:param interval: interval to group by
:param minDocCount: min. amount of records in the timespan to return a
result
"""
id = attr.ib(default=0, validator=instance_of(int))
field = attr.ib(
default=DATE_HISTOGRAM_DEFAULT_FIELD,
validator=instance_of(str),
)
interval = attr.ib(default='auto', validator=instance_of(str))
minDocCount = attr.ib(default=0, validator=instance_of(int))
def to_json_data(self):
return {
'field': self.field,
'id': str(self.id),
'settings': {
'interval': self.interval,
'min_doc_count': self.minDocCount,
'trimEdges': 0,
},
'type': 'date_histogram',
}
@attr.s
class BucketScriptAgg(object):
"""An aggregator that applies a bucket script to the results of previous aggregations.
https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-bucket-script-aggregation.html
:param fields: dictionary of field names mapped to aggregation IDs to be used in the bucket script
e.g. { "field1":1 }, which allows the output of aggregate ID 1 to be referenced as
params.field1 in the bucket script
:param script: script to apply to the data using the variables specified in 'fields'
:param id: id of the aggregator
:param hide: show/hide the metric in the final panel display
"""
fields = attr.ib(default={}, validator=instance_of(dict))
id = attr.ib(default=0, validator=instance_of(int))
hide = attr.ib(default=False, validator=instance_of(bool))
script = attr.ib(default="", validator=instance_of(str))
def to_json_data(self):
pipelineVars = []
for field in self.fields:
pipelineVars.append({
'name': str(field),
'pipelineAgg': str(self.fields[field])
})
return {
'field': 'select field',
'type': 'bucket_script',
'id': str(self.id),
'hide': self.hide,
'pipelineVariables': pipelineVars,
'settings': {
'script': self.script
},
}
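# Illustrative sketch (an assumption, not part of the library): a BucketScriptAgg that
# divides the output of metric aggregator 1 by aggregator 2, referencing them inside
# the script as params.errors and params.total. The ids and field names are placeholders.
def _example_bucket_script_agg():
    return BucketScriptAgg(
        fields={'errors': 1, 'total': 2},
        script='params.errors / params.total',
        id=3)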
@attr.s
class Filter(object):
""" A Filter for a FilterGroupBy aggregator.
https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-filter-aggregation.html
:param label: label for the metric that is shown in the graph
:param query: the query to filter by
"""
label = attr.ib(default="", validator=instance_of(str))
query = attr.ib(default="", validator=instance_of(str))
def to_json_data(self):
return {
'label': self.label,
'query': self.query,
}
@attr.s
class FiltersGroupBy(object):
""" A bucket aggregator that groups records by a filter expression.
https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-filter-aggregation.html
:param id: ascending unique number per GroupBy clause
:param filters: list of Filter objects
"""
id = attr.ib(default=0, validator=instance_of(int))
filters = attr.ib(default=attr.Factory(list))
def to_json_data(self):
return {
'id': str(self.id),
'settings': {
'filters': self.filters,
},
'type': 'filters',
}
@attr.s
class TermsGroupBy(object):
""" A multi-bucket aggregator based on field values.
https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-terms-aggregation.html
:param id: ascending unique number per GroupBy clause
:param field: name of the field to group by
:param minDocCount: min. amount of matching records to return a result
:param order: ORDER_ASC or ORDER_DESC
    :param orderBy: how to order the buckets: by Term value ('_term'), by Doc Count
        ('_count'), or by a metric aggregator's id given as a string (e.g. "2")
:param size: how many buckets are returned
"""
field = attr.ib(validator=instance_of(str))
id = attr.ib(default=0, validator=instance_of(int))
minDocCount = attr.ib(default=1, validator=instance_of(int))
order = attr.ib(default=ORDER_DESC, validator=instance_of(str))
orderBy = attr.ib(default='_term', validator=instance_of(str))
size = attr.ib(default=0, validator=instance_of(int))
def to_json_data(self):
return {
'id': str(self.id),
'type': 'terms',
'field': self.field,
'settings': {
'min_doc_count': self.minDocCount,
'order': self.order,
'orderBy': self.orderBy,
'size': self.size,
},
}
@attr.s
class ElasticsearchTarget(object):
"""Generates Elasticsearch target JSON structure.
Grafana docs on using Elasticsearch:
http://docs.grafana.org/features/datasources/elasticsearch/
Elasticsearch docs on querying or reading data:
https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html
:param alias: legend alias
:param bucketAggs: Bucket aggregators
:param metricAggs: Metric Aggregators
:param query: query
:param refId: target reference id
:param timeField: name of the elasticsearch time field
"""
alias = attr.ib(default=None)
bucketAggs = attr.ib(
default=attr.Factory(lambda: [DateHistogramGroupBy()]),
)
metricAggs = attr.ib(default=attr.Factory(lambda: [CountMetricAgg()]))
query = attr.ib(default="", validator=instance_of(str))
refId = attr.ib(default="", validator=instance_of(str))
timeField = attr.ib(default="@timestamp", validator=instance_of(str))
def _map_bucket_aggs(self, f):
return attr.evolve(self, bucketAggs=list(map(f, self.bucketAggs)))
def auto_bucket_agg_ids(self):
"""Give unique IDs all bucketAggs without ID.
Returns a new ``ElasticsearchTarget`` that is the same as this one,
except all of the bucketAggs have their ``id`` property set. Any panels
which had an ``id`` property set will keep that property, all others
will have auto-generated IDs provided for them.
        If the bucketAggs don't have unique IDs associated with them, the
generated graph will be broken.
"""
ids = set([agg.id for agg in self.bucketAggs if agg.id])
auto_ids = (i for i in itertools.count(1) if i not in ids)
def set_id(agg):
if agg.id:
return agg
return attr.evolve(agg, id=next(auto_ids))
return self._map_bucket_aggs(set_id)
def to_json_data(self):
return {
'alias': self.alias,
'bucketAggs': self.bucketAggs,
'metrics': self.metricAggs,
'query': self.query,
'refId': self.refId,
'timeField': self.timeField,
}
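# Illustrative sketch (an assumption, not part of the library): building a target that
# counts documents per 'status' term over time, then letting auto_bucket_agg_ids()
# assign ids to the bucket aggregators created without one. Query and field names are
# placeholders.
def _example_elasticsearch_target():
    target = ElasticsearchTarget(
        query='app:web AND level:error',
        bucketAggs=[TermsGroupBy(field='status'), DateHistogramGroupBy()],
        metricAggs=[CountMetricAgg()],
        refId='A')
    return target.auto_bucket_agg_ids()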
@attr.s
class ElasticsearchAlertCondition(AlertCondition):
"""
    Override alert condition to support Elasticsearch targets.
See AlertCondition for more information.
:param Target target: Metric the alert condition is based on.
:param Evaluator evaluator: How we decide whether we should alert on the
metric. e.g. ``GreaterThan(5)`` means the metric must be greater than 5
to trigger the condition. See ``GreaterThan``, ``LowerThan``,
``WithinRange``, ``OutsideRange``, ``NoValue``.
:param TimeRange timeRange: How long the condition must be true for before
we alert.
:param operator: One of ``OP_AND`` or ``OP_OR``. How this condition
combines with other conditions.
:param reducerType: RTYPE_*
:param type: CTYPE_*
"""
target = attr.ib(validator=instance_of(ElasticsearchTarget))
| 34.670507
| 127
| 0.639131
|
059e0f450521da36cb63c0b8fe990f3e468a744c
| 2,979
|
py
|
Python
|
nevow/taglibrary/tabbedPane.py
|
wthie/nevow
|
e630de8f640f27df85c38bc37ecdaf4e7b931afc
|
[
"MIT"
] | 49
|
2015-03-18T15:29:16.000Z
|
2021-11-17T12:30:51.000Z
|
nevow/taglibrary/tabbedPane.py
|
wthie/nevow
|
e630de8f640f27df85c38bc37ecdaf4e7b931afc
|
[
"MIT"
] | 62
|
2015-01-21T08:48:08.000Z
|
2021-04-02T17:31:29.000Z
|
nevow/taglibrary/tabbedPane.py
|
wthie/nevow
|
e630de8f640f27df85c38bc37ecdaf4e7b931afc
|
[
"MIT"
] | 30
|
2015-02-26T09:35:39.000Z
|
2021-07-24T12:45:04.000Z
|
from nevow import tags as t, static, util, loaders, athena, inevow
class tabbedPaneGlue:
"""
Record which holds information about the Javascript & CSS requirements
of L{TabbedPane} and L{TabbedPaneFragment}.
@type stylesheetPath: C{str}
@ivar stylesheetPath: Filesystem path of the tabbed pane stylesheet.
@type fileCSS: L{static.File}
@ivar fileCSS: Resource which serves L{stylesheetPath}.
@type inlineCSS: L{t.style}
@ivar inlineCSS: <style> tag containing the tabbedpane CSS inline.
"""
stylesheetPath = util.resource_filename('nevow', 'css/Nevow/TagLibrary/TabbedPane.css')
fileCSS = static.File(stylesheetPath, 'text/css')
inlineCSS = t.style(type_='text/css')[ t.xml(file(stylesheetPath).read()) ]
class TabbedPaneFragment(athena.LiveFragment):
jsClass = u'Nevow.TagLibrary.TabbedPane.TabbedPane'
cssModule = u'Nevow.TagLibrary.TabbedPane'
docFactory = loaders.xmlstr("""
<div class="nevow-tabbedpane"
xmlns:nevow="http://nevow.com/ns/nevow/0.1"
xmlns:athena="http://divmod.org/ns/athena/0.7"
nevow:render="liveFragment"
style="opacity: .3">
<ul class="nevow-tabbedpane-tabs" id="tab-container">
<nevow:invisible nevow:render="tabs" />
</ul>
<li nevow:pattern="tab"
><athena:handler event="onclick"
handler="dom_tabClicked" /><nevow:attr name="class"><nevow:slot
name="class" /></nevow:attr><nevow:slot name="tab-name" /></li>
<div nevow:pattern="page">
<nevow:attr name="class"><nevow:slot name="class" /></nevow:attr>
<nevow:slot name="page-content" />
</div>
<div id="pane-container"><nevow:invisible nevow:render="pages" /></div>
</div>""".replace('\n', ''))
def __init__(self, pages, selected=0, name='default'):
self.pages = pages
self.selected = selected
self.name = name
super(TabbedPaneFragment, self).__init__()
def getInitialArguments(self):
return (unicode(self.pages[self.selected][0], 'utf-8'),)
def render_tabs(self, ctx, data):
tabPattern = inevow.IQ(self.docFactory).patternGenerator('tab')
for (i, (name, content)) in enumerate(self.pages):
if self.selected == i:
cls = 'nevow-tabbedpane-selected-tab'
else:
cls = 'nevow-tabbedpane-tab'
yield tabPattern.fillSlots(
'tab-name', name).fillSlots(
'class', cls)
def render_pages(self, ctx, data):
pagePattern = inevow.IQ(self.docFactory).patternGenerator('page')
for (i, (name, content)) in enumerate(self.pages):
if self.selected == i:
cls = 'nevow-tabbedpane-selected-pane'
else:
cls = 'nevow-tabbedpane-pane'
yield pagePattern.fillSlots(
'page-content', content).fillSlots(
'class', cls)
__all__ = [ "tabbedPaneGlue", "TabbedPaneFragment" ]
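# Illustrative sketch (an assumption, not part of the module): constructing a fragment
# with two tabs; in a real page the fragment would be attached to a live page as usual.
# The tab names and stan content below are placeholders.
def _exampleTabbedPane():
    return TabbedPaneFragment([
        ('First', t.p['first pane content']),
        ('Second', t.p['second pane content']),
    ])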
| 35.047059
| 91
| 0.627056
|
ea502194a3b41abbc1bfb3f8a8b660e376fd83ee
| 268
|
py
|
Python
|
api/app/util/request.py
|
Test01DezWebSite/covidex
|
ce5afe7185a2c3cb02aaf23aca8b185c832a88f5
|
[
"Apache-2.0"
] | 1
|
2020-05-18T10:09:35.000Z
|
2020-05-18T10:09:35.000Z
|
api/app/util/request.py
|
Test01DezWebSite/covidex
|
ce5afe7185a2c3cb02aaf23aca8b185c832a88f5
|
[
"Apache-2.0"
] | null | null | null |
api/app/util/request.py
|
Test01DezWebSite/covidex
|
ce5afe7185a2c3cb02aaf23aca8b185c832a88f5
|
[
"Apache-2.0"
] | null | null | null |
from fastapi import Request
def get_request_ip(request: Request):
forwarded_header = 'X-Forwarded-For'
request_ip = request.client.host
if forwarded_header in request.headers:
request_ip = request.headers[forwarded_header]
return request_ip
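# Illustrative sketch (an assumption, not part of the app): wiring the helper into a
# FastAPI route so a handler can report the caller's IP, honouring X-Forwarded-For.
# The route path and router name are placeholders.
def _example_router():
    from fastapi import APIRouter
    router = APIRouter()
    @router.get('/whoami')
    def whoami(request: Request):
        return {'ip': get_request_ip(request)}
    return router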
| 24.363636
| 54
| 0.75
|
243a46dc71994534bc3d49e7e45ba5adfcca3119
| 1,134
|
py
|
Python
|
lib/matplotlib/tests/test_table.py
|
adrn/matplotlib
|
7a9f2347a3b1c1efaef6e547930661a547c20ef2
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
lib/matplotlib/tests/test_table.py
|
adrn/matplotlib
|
7a9f2347a3b1c1efaef6e547930661a547c20ef2
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
lib/matplotlib/tests/test_table.py
|
adrn/matplotlib
|
7a9f2347a3b1c1efaef6e547930661a547c20ef2
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
from __future__ import absolute_import, division, print_function, unicode_literals
import six
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.testing.decorators import image_comparison
@image_comparison(baseline_images=['table_zorder'],
extensions=['png'],
remove_text=True)
def test_zorder():
data = [[ 66386, 174296,],
[ 58230, 381139,]]
colLabels = ('Freeze', 'Wind')
rowLabels = ['%d year' % x for x in (100, 50)]
cellText = []
yoff = np.array([0.0] * len(colLabels))
for row in reversed(data):
yoff += row
cellText.append(['%1.1f' % (x/1000.0) for x in yoff])
t = np.linspace(0, 2*np.pi, 100)
plt.plot(t, np.cos(t), lw=4, zorder=2)
plt.table(cellText=cellText,
rowLabels=rowLabels,
colLabels=colLabels,
loc='center',
zorder=-2,
)
plt.table(cellText=cellText,
rowLabels=rowLabels,
colLabels=colLabels,
loc='upper center',
zorder=4,
)
plt.yticks([])
| 25.772727
| 82
| 0.560847
|
0709381537b1d79edefff323acd6fb3fa557bc25
| 3,333
|
py
|
Python
|
pyconnectomist/tests/test_wrappers.py
|
neurospin/pyconnectomist
|
81881fd88d94b3c2bd401602783261a64e818a05
|
[
"CECILL-B"
] | 1
|
2018-05-24T13:02:41.000Z
|
2018-05-24T13:02:41.000Z
|
pyconnectomist/tests/test_wrappers.py
|
neurospin/pyconnectomist
|
81881fd88d94b3c2bd401602783261a64e818a05
|
[
"CECILL-B"
] | null | null | null |
pyconnectomist/tests/test_wrappers.py
|
neurospin/pyconnectomist
|
81881fd88d94b3c2bd401602783261a64e818a05
|
[
"CECILL-B"
] | 2
|
2018-06-25T14:20:34.000Z
|
2022-03-02T14:43:38.000Z
|
##########################################################################
# NSAp - Copyright (C) CEA, 2016
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
##########################################################################
# System import
import unittest
import sys
import os
# COMPATIBILITY: since python 3.3 mock is included in unittest module
python_version = sys.version_info
if python_version[:2] <= (3, 3):
import mock
from mock import patch
mock_builtin = "__builtin__"
else:
import unittest.mock as mock
from unittest.mock import patch
mock_builtin = "builtins"
# pyConnectomist import
from pyconnectomist.wrappers import ConnectomistWrapper
class ConnectomistWrappers(unittest.TestCase):
""" Test the Connectomist wrappers:
'pyconnectomist.wrappers.ConnectomistWrapper'
"""
@mock.patch("os.path")
def test_badfileerror_raise(self, mock_path):
""" Bad configuration file -> raise ValueError.
"""
# Set the mocked functions returned values
mock_path.isfile.side_effect = [False]
# Test execution
self.assertRaises(ValueError,
ConnectomistWrapper._connectomist_version_check,
"/my/path/mock_conf")
@mock.patch("{0}.ValueError".format(mock_builtin))
@mock.patch("{0}.open".format(mock_builtin))
@mock.patch("os.path")
def test_noreleaseerror_raise(self, mock_path, mock_open, mock_error):
""" No PTK release found -> raise ValueError.
"""
# Set the mocked functions returned values
mock_path.isfile.side_effect = [True]
mock_context_manager = mock.Mock()
mock_open.return_value = mock_context_manager
mock_file = mock.Mock()
mock_file.read.return_value = "WRONG"
mock_enter = mock.Mock()
mock_enter.return_value = mock_file
mock_exit = mock.Mock()
setattr(mock_context_manager, "__enter__", mock_enter)
setattr(mock_context_manager, "__exit__", mock_exit)
# Test execution
ConnectomistWrapper._connectomist_version_check("/my/path/mock_conf")
self.assertEqual(len(mock_error.call_args_list), 1)
@mock.patch("warnings.warn")
@mock.patch("{0}.open".format(mock_builtin))
@mock.patch("os.path")
def test_normal_execution(self, mock_path, mock_open, mock_warn):
""" Test the normal behaviour of the function.
"""
# Set the mocked functions returned values
mock_path.isfile.side_effect = [True]
mock_context_manager = mock.Mock()
mock_open.return_value = mock_context_manager
mock_file = mock.Mock()
mock_file.read.return_value = "PTK_RELEASE=4.0\n"
mock_enter = mock.Mock()
mock_enter.return_value = mock_file
mock_exit = mock.Mock()
setattr(mock_context_manager, "__enter__", mock_enter)
setattr(mock_context_manager, "__exit__", mock_exit)
# Test execution
ConnectomistWrapper._connectomist_version_check("/my/path/mock_conf")
self.assertEqual(len(mock_warn.call_args_list), 1)
if __name__ == "__main__":
unittest.main()
| 36.626374
| 77
| 0.649865
|
a285a3d45e0f8d8b4b47b43187b2799fae6e9eb7
| 1,038
|
py
|
Python
|
pydron/test_picklesupport.py
|
DelphianCalamity/pydron
|
1518dc71b5cf64fde563b864db2a4de74e092c8e
|
[
"MIT"
] | 5
|
2020-04-06T15:20:56.000Z
|
2022-01-05T23:11:13.000Z
|
pydron/test_picklesupport.py
|
mahmoudimus/pydron
|
a7b484dec8bcc2730ba9bd76bc63bf3362c05e4d
|
[
"MIT"
] | null | null | null |
pydron/test_picklesupport.py
|
mahmoudimus/pydron
|
a7b484dec8bcc2730ba9bd76bc63bf3362c05e4d
|
[
"MIT"
] | 2
|
2020-11-27T20:21:34.000Z
|
2021-02-26T23:02:11.000Z
|
# Copyright (C) 2015 Stefan C. Mueller
import unittest
import datetime
import pickle
import pydron.picklesupport
class Dummy(object):
def __init__(self):
self.value = 1
def f(self):
return 42 + self.value
class TestPickleSupport(unittest.TestCase):
def test_module(self):
dump = pickle.dumps(unittest, pickle.HIGHEST_PROTOCOL)
load = pickle.loads(dump)
self.assertIs(load, unittest)
def test_builtin_function(self):
dump = pickle.dumps(map, pickle.HIGHEST_PROTOCOL)
load = pickle.loads(dump)
self.assertIs(load, map)
def test_builtin_method(self):
delta = datetime.timedelta(seconds=10)
func = delta.total_seconds
dump = pickle.dumps(func, pickle.HIGHEST_PROTOCOL)
load = pickle.loads(dump)
self.assertEqual(10, load())
def test_method(self):
x = Dummy()
dump = pickle.dumps(x.f, pickle.HIGHEST_PROTOCOL)
load = pickle.loads(dump)
self.assertEqual(43, load())
| 28.054054
| 62
| 0.647399
|
98e955afb14680b3e1758ee0286635da84160e1e
| 20,766
|
py
|
Python
|
malmo/MalmoEnv/malmoenv/core.py
|
SmaleZ/vcl_diayn
|
b2c47a681675b405d2011bc4a43c3914f3af4ecc
|
[
"MIT"
] | null | null | null |
malmo/MalmoEnv/malmoenv/core.py
|
SmaleZ/vcl_diayn
|
b2c47a681675b405d2011bc4a43c3914f3af4ecc
|
[
"MIT"
] | null | null | null |
malmo/MalmoEnv/malmoenv/core.py
|
SmaleZ/vcl_diayn
|
b2c47a681675b405d2011bc4a43c3914f3af4ecc
|
[
"MIT"
] | null | null | null |
# ------------------------------------------------------------------------------------------------
# Copyright (c) 2018 Microsoft Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
# associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute,
# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# ------------------------------------------------------------------------------------------------
from lxml import etree
import struct
import socket
import time
import random
import numpy as np
from malmoenv import comms
from malmoenv.commands import CommandParser
import uuid
import gym.spaces
from malmoenv.comms import retry
from malmoenv.version import malmo_version
class StringActionSpace(gym.spaces.Discrete):
"""Malmo actions as their strings."""
def __init__(self):
gym.spaces.Discrete.__init__(self, 1)
def __getitem__(self, action):
return action
class ActionSpace(gym.spaces.Box):
"""Malmo actions as gym action space"""
def __init__(self, actions):
self.actions = actions
# gym.spaces.Discrete.__init__(self, len(self.actions))
# gym.spaces.Box.__init__(self, -1, 1, shape=(3, ))
gym.spaces.Box.__init__(self, low=0, high=5, shape=(1,))
def sample(self):
return random.randint(1, len(self.actions)) - 1
def __getitem__(self, action):
#print("the actions input is:")
#print(action)
curr_action = 0
        # map the continuous action value onto the discrete action list
        # TODO: normalize the action space to [-1, 1]
if 0 <= action < 1.0:
curr_action = 0
elif 1.0 <= action < 2.0:
curr_action = 1
elif 2.0 <= action < 3.0:
curr_action = 2
elif 3.0 <= action < 4.0:
curr_action = 3
else:
curr_action = random.randint(1, len(self.actions)) - 1
pass
# curr_action = 4
#print("the next action is:")
#print(self.actions[curr_action])
return self.actions[curr_action]
def __len__(self):
return len(self.actions)
class VisualObservationSpace(gym.spaces.Box):
"""Space for visual observations: width x height x depth as a flat array.
Where depth is 3 or 4 if encoding scene depth.
"""
def __init__(self, width, height, depth):
gym.spaces.Box.__init__(self,
low=np.iinfo(np.uint8).min, high=np.iinfo(np.uint8).max,
shape=(height, width, depth), dtype=np.uint8)
class EnvException(Exception):
def __init__(self, message):
super(EnvException, self).__init__(message)
class MissionInitException(Exception):
def __init__(self, message):
super(MissionInitException, self).__init__(message)
MAX_WAIT = 60 * 3
class Env:
"""Malmo "Env" open ai gym compatible environment API"""
def __init__(self, reshape=False):
self.action_space = None
self.observation_space = None
self.metadata = {'render.modes': ['rgb_array']}
self.xml = None
self.integratedServerPort = 0
self.role = 0
self.agent_count = 0
self.resets = 0
self.ns = '{http://ProjectMalmo.microsoft.com}'
self.client_socket = None
self.server = 'localhost' # The mission server
self.port = 9000 # The mission server port
self.server2 = self.server # optional server for agent (role <> 0)
self.port2 = self.port + self.role # optional server port for agent
self.resync_period = 0
self.turn_key = ""
self.exp_uid = ""
self.done = True
self.step_options = None
self.width = 0
self.height = 0
self.depth = 0
self.reshape = reshape
self.last_obs = None
self.reward_range = None
def init(self, xml, port, server=None,
server2=None, port2=None,
role=0, exp_uid=None, episode=0,
action_filter=None, resync=0, step_options=0, action_space=None, reshape=False):
""""Initialize a Malmo environment.
xml - the mission xml.
port - the MalmoEnv service's port.
server - the MalmoEnv service address. Default is localhost.
server2 - the MalmoEnv service address for given role if not 0.
port2 - the MalmoEnv service port for given role if not 0.
role - the agent role (0..N-1) for missions with N agents. Defaults to 0.
exp_uid - the experiment's unique identifier. Generated if not given.
episode - the "reset" start count for experiment re-starts. Defaults to 0.
action_filter - an optional list of valid actions to filter by. Defaults to simple commands.
step_options - encodes withTurnKey and withInfo in step messages. Defaults to info included,
turn if required.
"""
if action_filter is None:
action_filter = {"move", "turn", "use", "attack"}
if not xml.startswith('<Mission'):
i = xml.index("<Mission")
if i == -1:
raise EnvException("Mission xml must contain <Mission> tag.")
xml = xml[i:]
self.xml = etree.fromstring(xml)
self.role = role
if exp_uid is None:
self.exp_uid = str(uuid.uuid4())
else:
self.exp_uid = exp_uid
command_parser = CommandParser(action_filter)
commands = command_parser.get_commands_from_xml(self.xml, self.role)
actions = command_parser.get_actions(commands)
# print("role " + str(self.role) + " actions " + str(actions)
print("test")
print(actions)
if action_space:
self.action_space = action_space
else:
self.action_space = ActionSpace(actions)
self.port = port
if server is not None:
self.server = server
if server2 is not None:
self.server2 = server2
else:
self.server2 = self.server
if port2 is not None:
self.port2 = port2
else:
self.port2 = self.port + self.role
self.agent_count = len(self.xml.findall(self.ns + 'AgentSection'))
turn_based = self.xml.find('.//' + self.ns + 'TurnBasedCommands') is not None
if turn_based:
self.turn_key = 'AKWozEre'
else:
self.turn_key = ""
if step_options is None:
self.step_options = 0 if not turn_based else 2
else:
self.step_options = step_options
self.done = True
# print("agent count " + str(self.agent_count) + " turn based " + turn_based)
self.resync_period = resync
self.resets = episode
e = etree.fromstring("""<MissionInit xmlns="http://ProjectMalmo.microsoft.com"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
SchemaVersion="" PlatformVersion=""" + '\"' + malmo_version + '\"' +
""">
<ExperimentUID></ExperimentUID>
<ClientRole>0</ClientRole>
<ClientAgentConnection>
<ClientIPAddress>127.0.0.1</ClientIPAddress>
<ClientMissionControlPort>0</ClientMissionControlPort>
<ClientCommandsPort>0</ClientCommandsPort>
<AgentIPAddress>127.0.0.1</AgentIPAddress>
<AgentMissionControlPort>0</AgentMissionControlPort>
<AgentVideoPort>0</AgentVideoPort>
<AgentDepthPort>0</AgentDepthPort>
<AgentLuminancePort>0</AgentLuminancePort>
<AgentObservationsPort>0</AgentObservationsPort>
<AgentRewardsPort>0</AgentRewardsPort>
<AgentColourMapPort>0</AgentColourMapPort>
</ClientAgentConnection>
</MissionInit>""")
e.insert(0, self.xml)
self.xml = e
self.xml.find(self.ns + 'ClientRole').text = str(self.role)
self.xml.find(self.ns + 'ExperimentUID').text = self.exp_uid
if self.role != 0 and self.agent_count > 1:
e = etree.Element(self.ns + 'MinecraftServerConnection',
attrib={'address': self.server,
'port': str(0)
})
self.xml.insert(2, e)
self.reshape = reshape
video_producers = self.xml.findall('.//' + self.ns + 'VideoProducer')
assert len(video_producers) == self.agent_count
video_producer = video_producers[self.role]
self.width = int(video_producer.find(self.ns + 'Width').text)
self.height = int(video_producer.find(self.ns + 'Height').text)
want_depth = video_producer.attrib["want_depth"]
self.depth = 4 if want_depth is not None and (want_depth == "true" or want_depth == "1") else 3
# print(str(self.width) + "x" + str(self.height) + "x" + str(self.depth))
self.observation_space = VisualObservationSpace(self.width, self.height, self.depth)
# print(etree.tostring(self.xml))
self.reward_range = (-float("inf"), float("inf"))
@staticmethod
def _hello(sock):
comms.send_message(sock, ("<MalmoEnv" + malmo_version + "/>").encode())
def reset(self):
"""gym api reset"""
if self.resync_period > 0 and (self.resets + 1) % self.resync_period == 0:
self.exit_resync()
while not self.done:
self.done = self._quit_episode()
if not self.done:
time.sleep(0.1)
return self._start_up()
@retry
def _start_up(self):
self.last_obs = None
self.resets += 1
if self.role != 0:
self._find_server()
if not self.client_socket:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# print("connect " + self.server2 + ":" + str(self.port2))
sock.connect((self.server2, self.port2))
self._hello(sock)
self.client_socket = sock # Now retries will use connected socket.
self._init_mission()
self.done = False
return self._peek_obs()
def _peek_obs(self):
obs = None
start_time = time.time()
while not self.done and (obs is None or len(obs) == 0):
peek_message = "<Peek/>"
comms.send_message(self.client_socket, peek_message.encode())
obs = comms.recv_message(self.client_socket)
reply = comms.recv_message(self.client_socket)
done, = struct.unpack('!b', reply)
self.done = done == 1
if obs is None or len(obs) == 0:
if time.time() - start_time > MAX_WAIT:
self.client_socket.close()
self.client_socket = None
raise MissionInitException('too long waiting for first observation')
time.sleep(0.1)
obs = np.frombuffer(obs, dtype=np.uint8)
if obs is None or len(obs) == 0 or obs.size == 0:
if self.reshape:
obs = np.zeros((self.height, self.width, self.depth), dtype=np.uint8)
else:
obs = np.zeros(self.height * self.width * self.depth, dtype=np.uint8)
elif self.reshape:
obs = obs.reshape((self.height, self.width, self.depth)).astype(np.uint8)
self.last_obs = obs
return obs
def _quit_episode(self):
comms.send_message(self.client_socket, "<Quit/>".encode())
reply = comms.recv_message(self.client_socket)
ok, = struct.unpack('!I', reply)
return ok != 0
def render(self, mode=None):
"""gym api render"""
if self.last_obs is None:
if self.reshape:
self.last_obs = np.zeros((self.height, self.width, self.depth), dtype=np.uint8)
else:
self.last_obs = np.zeros(self.height * self.width * self.depth, dtype=np.uint8)
return np.flipud(self.last_obs)
def seed(self):
pass
def step(self, action):
"""gym api step"""
obs = None
reward = None
info = None
turn = True
withturnkey = self.step_options < 2
withinfo = self.step_options == 0 or self.step_options == 2
while not self.done and \
((obs is None or len(obs) == 0) or
(withinfo and info is None) or turn):
step_message = "<Step" + str(self.step_options) + ">" + \
self.action_space[action] + \
"</Step" + str(self.step_options) + " >"
comms.send_message(self.client_socket, step_message.encode())
if withturnkey:
comms.send_message(self.client_socket, self.turn_key.encode())
obs = comms.recv_message(self.client_socket)
reply = comms.recv_message(self.client_socket)
reward, done, sent = struct.unpack('!dbb', reply)
self.done = done == 1
if withinfo:
info = comms.recv_message(self.client_socket).decode('utf-8')
turn_key = comms.recv_message(self.client_socket).decode('utf-8') if withturnkey else ""
# print("[" + str(self.role) + "] TK " + turn_key + " self.TK " + str(self.turn_key))
if turn_key != "":
if sent != 0:
turn = False
# Done turns if: turn = self.turn_key == turn_key
self.turn_key = turn_key
else:
turn = sent == 0
if (obs is None or len(obs) == 0) or turn:
time.sleep(0.1)
obs = np.frombuffer(obs, dtype=np.uint8)
#print("obs before reshap : ")
#print(obs.shape)
if self.reshape:
if obs.size == 0:
obs = np.zeros((self.height, self.width, self.depth), dtype=np.uint8)
else:
obs = obs.reshape((self.height, self.width, self.depth)).astype(np.uint8)
self.last_obs = obs
#print("obs after reshap : ")
# print(obs.shape)
if self.done:
print("has done")
else:
print("continue")
# print(info)
# return obs, reward, self.done, info
return obs, reward, self.done, {}
def close(self):
"""gym api close"""
try:
# Purge last token from head node with <Close> message.
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((self.server, self.port))
self._hello(sock)
comms.send_message(sock, ("<Close>" + self._get_token() + "</Close>").encode())
reply = comms.recv_message(sock)
ok, = struct.unpack('!I', reply)
assert ok
sock.close()
except Exception as e:
self._log_error(e)
if self.client_socket:
self.client_socket.close()
self.client_socket = None
def reinit(self):
"""Use carefully to reset the episode count to 0."""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((self.server, self.port))
self._hello(sock)
comms.send_message(sock, ("<Init>" + self._get_token() + "</Init>").encode())
reply = comms.recv_message(sock)
sock.close()
ok, = struct.unpack('!I', reply)
return ok != 0
def status(self, head):
"""Get status from server.
        head - Ping the head node if True.
"""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if head:
sock.connect((self.server, self.port))
else:
sock.connect((self.server2, self.port2))
self._hello(sock)
comms.send_message(sock, "<Status/>".encode())
status = comms.recv_message(sock).decode('utf-8')
sock.close()
return status
def exit(self):
"""Use carefully to cause the Minecraft service to exit (and hopefully restart).
Likely to throw communication errors so wrap in exception handler.
"""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((self.server2, self.port2))
self._hello(sock)
comms.send_message(sock, ("<Exit>" + self._get_token() + "</Exit>").encode())
reply = comms.recv_message(sock)
sock.close()
ok, = struct.unpack('!I', reply)
return ok != 0
def resync(self):
"""make sure we can ping the head and assigned node.
Possibly after an env.exit()"""
success = 0
for head in [True, False]:
for _ in range(30):
try:
self.status(head)
success += 1
break
except Exception as e:
self._log_error(e)
time.sleep(10)
if success != 2:
raise EnvException("Failed to contact service" + (" head" if success == 0 else ""))
def exit_resync(self):
"""Exit the current Minecraft and wait for new one to replace it."""
print("********** exit & resync **********")
try:
if self.client_socket:
self.client_socket.close()
self.client_socket = None
try:
self.exit()
except Exception as e:
self._log_error(e)
print("Pause for exit(s) ...")
time.sleep(60)
except (socket.error, ConnectionError):
pass
self.resync()
def _log_error(self, exn):
pass # Keeping pylint happy
def _find_server(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((self.server, self.port))
self._hello(sock)
start_time = time.time()
port = 0
while port == 0:
comms.send_message(sock, ("<Find>" + self._get_token() + "</Find>").encode())
reply = comms.recv_message(sock)
port, = struct.unpack('!I', reply)
if port == 0:
if time.time() - start_time > MAX_WAIT:
if self.client_socket:
self.client_socket.close()
self.client_socket = None
raise MissionInitException('too long finding mission to join')
time.sleep(1)
sock.close()
# print("Found mission integrated server port " + str(port))
self.integratedServerPort = port
e = self.xml.find(self.ns + 'MinecraftServerConnection')
if e is not None:
e.attrib['port'] = str(self.integratedServerPort)
def _init_mission(self):
ok = 0
while ok != 1:
xml = etree.tostring(self.xml)
token = (self._get_token() + ":" + str(self.agent_count)).encode()
# print(xml.decode())
comms.send_message(self.client_socket, xml)
comms.send_message(self.client_socket, token)
reply = comms.recv_message(self.client_socket)
ok, = struct.unpack('!I', reply)
self.turn_key = comms.recv_message(self.client_socket).decode('utf-8')
if ok != 1:
time.sleep(1)
def _get_token(self):
return self.exp_uid + ":" + str(self.role) + ":" + str(self.resets)
def make():
return Env()
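# Illustrative driving loop (not part of the original module): a minimal sketch of how
# this gym-style Env is typically used, assuming the mission XML and connection setup
# handled by the reset/init code earlier in this file. The action index 0 is a placeholder.
#
#   env = make()
#   obs = env.reset()
#   done = False
#   while not done:
#       obs, reward, done, info = env.step(0)
#   env.close()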
| 38.742537
| 104
| 0.558943
|
dcc8ca9f110c22976bf2e69ea393e13ab9d5c9c0
| 2,202
|
py
|
Python
|
cove_cli.py
|
iainelder/aws-org-inventory
|
4e433699695d6b740a0995d70705d30792d803df
|
[
"MIT"
] | 1
|
2021-12-24T12:43:11.000Z
|
2021-12-24T12:43:11.000Z
|
cove_cli.py
|
iainelder/aws-org-inventory
|
4e433699695d6b740a0995d70705d30792d803df
|
[
"MIT"
] | 6
|
2021-11-01T15:03:20.000Z
|
2022-03-19T22:42:16.000Z
|
cove_cli.py
|
iainelder/aws-org-inventory
|
4e433699695d6b740a0995d70705d30792d803df
|
[
"MIT"
] | null | null | null |
import json
import sys
from argparse import ArgumentParser, Namespace
from datetime import datetime
from inspect import getfullargspec
from typing import Any, Callable, Dict, TextIO, get_args
from boto3 import Session
from botocove import CoveOutput, cove # type: ignore[import]
from typing_inspect import is_generic_type, is_optional_type # type: ignore[import]
class Encoder(json.JSONEncoder):
def default(self, obj: Any) -> Any:
if isinstance(obj, Exception):
return repr(obj)
if isinstance(obj, datetime):
return obj.isoformat()
return json.JSONEncoder.default(self, obj)
def main() -> None:
cove_args = get_cove_arg_parser().parse_args()
cove_func = make_cove_func(sys.stdin.read(), cove_args)
result = cove_func()
dump_json(result, sys.stdout)
# Inspired by sqlite_utils' _compile_code.
def make_cove_func(body: str, cove_args: Namespace) -> Callable[..., CoveOutput]:
body = body if body else "pass"
header = "def func(session: Session):"
func_source = "\n".join([header] + [f" {line}" for line in body.splitlines()])
code = compile(source=func_source, filename="<string>", mode="exec")
locals: Dict[str, Any] = {}
exec(code, None, locals)
func = locals["func"]
# TODO: Fix cove typing.
return cove( # type: ignore[no-any-return]
func, **{k: v for k, v in vars(cove_args).items() if v is not None}
)
def dump_json(result: CoveOutput, file: TextIO) -> None:
json.dump(result, file, indent=4, cls=Encoder)
def get_cove_arg_parser() -> ArgumentParser:
spec = getfullargspec(cove)
parser = ArgumentParser()
for arg in spec.kwonlyargs:
hint = spec.annotations[arg]
if is_optional_type(hint):
type_arg = get_args(hint)[0]
if is_generic_type(type_arg):
type_arg = get_args(type_arg)[0]
nargs = "*"
else:
nargs = "?"
else:
type_arg = hint
print(f"{arg=} {hint=} {type_arg=}", file=sys.stderr)
parser.add_argument(f"--{arg}", type=type_arg, nargs=nargs)
return parser
if __name__ == "__main__":
main()
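# Illustrative invocation sketch (an assumption, not from the original file): the body of
# the per-account function is read from stdin and wrapped by botocove, so a typical run
# could look like
#
#   echo "return session.client('sts').get_caller_identity()" | python cove_cli.py
#
# Any extra --<kwarg> flags accepted on the command line are generated dynamically from
# cove()'s keyword-only arguments by get_cove_arg_parser().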
| 28.973684
| 85
| 0.642598
|
83cb6554eccc51ed9baa5ce2d4d53b192a95ab94
| 9,115
|
py
|
Python
|
python/ingestion/standardized_columns.py
|
vanshkumar/health-equity-tracker
|
e3be05ee665f1c6aecab761527f6faf8a5183ea6
|
[
"MIT"
] | null | null | null |
python/ingestion/standardized_columns.py
|
vanshkumar/health-equity-tracker
|
e3be05ee665f1c6aecab761527f6faf8a5183ea6
|
[
"MIT"
] | null | null | null |
python/ingestion/standardized_columns.py
|
vanshkumar/health-equity-tracker
|
e3be05ee665f1c6aecab761527f6faf8a5183ea6
|
[
"MIT"
] | null | null | null |
from enum import Enum, unique
from collections import namedtuple
import pandas
# The name of the column for a unique string id for the race category. Should be
# semi-human readable. See Race enum below for values.
RACE_CATEGORY_ID_COL = "race_category_id"
# The name of the column that displays the fully-qualified race name, including
# whether Hispanic/Latino are included.
RACE_OR_HISPANIC_COL = "race_and_ethnicity"
# The name of the column for whether a person is Hispanic/Latino or not.
HISPANIC_COL = "hispanic_or_latino"
# The name of the column for whether the race category includes Hispanic/Latino
# people.
RACE_INCLUDES_HISPANIC_COL = "race_includes_hispanic"
# The name of the column that displays the basic race name, not specifying
# whether Hispanic/Latino people are included.
RACE_COL = "race"
AGE_COL = "age"
SEX_COL = "sex"
STATE_FIPS_COL = "state_fips"
STATE_NAME_COL = "state_name"
STATE_POSTAL_COL = "state_postal"  # State 2-letter postal abbreviation.
COUNTY_FIPS_COL = "county_fips"
COUNTY_NAME_COL = "county_name"
POPULATION_COL = "population"
INCOME_COL = "income"
TOTAL_VALUE = "Total"
ALL_VALUE = "All"
# Standardized column names for Covid cases, hospitalizations, and deaths.
COVID_CASES = "cases"
COVID_HOSP_Y = "hosp_y"
COVID_HOSP_N = "hosp_n"
COVID_HOSP_UNKNOWN = "hosp_unknown"
COVID_DEATH_Y = "death_y"
COVID_DEATH_N = "death_n"
COVID_DEATH_UNKNOWN = "death_unknown"
# Standard Health Insurance Population Cols
TOTAL_HEALTH_INSURANCE_COL = "total_health_insurance"
WITH_HEALTH_INSURANCE_COL = "with_health_insurance"
WITHOUT_HEALTH_INSURANCE_COL = "without_health_insurance"
ABOVE_POVERTY_COL = "above_poverty_line"
BELOW_POVERTY_COL = "below_poverty_line"
COPD_PCT = "copd_pct"
DIABETES_PCT = "diabetes_pct"
RaceTuple = namedtuple("RaceTuple", [
"race_category_id",
"race",
"race_includes_hispanic",
"race_and_ethnicity"
])
@unique
class Race(Enum):
# These categories are one format of standard categories used in ACS data,
# where categories are mutually exclusive but each one includes
# Hispanic/Latino. The sum of these values is equal to the total population.
# OTHER_STANDARD is defined as anyone who does not fall into one of the
# other categories in this format.
AIAN = ("AIAN", "American Indian and Alaska Native", True)
ASIAN = ("ASIAN", "Asian", True)
BLACK = ("BLACK", "Black or African American", True)
NHPI = ("NHPI", "Native Hawaiian and Pacific Islander", True)
MULTI = ("MULTI", "Two or more races", True)
WHITE = ("WHITE", "White", True)
OTHER_STANDARD = ("OTHER_STANDARD", "Some other race", True)
# These categories are another format of standard categories used in ACS
# data, where categories are mutually exclusive and exclude Hispanic/Latino.
# Hispanic/Latino is its own category. Each one of these is a strict subset
# of its counterpart in the above format. OTHER_STANDARD_NH is defined as
# anyone who does not fall into one of the other categories in this format.
# Where possible, this format is preferred.
AIAN_NH = ("AIAN_NH", "American Indian and Alaska Native", False)
ASIAN_NH = ("ASIAN_NH", "Asian", False)
BLACK_NH = ("BLACK_NH", "Black or African American", False)
NHPI_NH = ("NHPI_NH", "Native Hawaiian and Pacific Islander", False)
MULTI_NH = ("MULTI_NH", "Two or more races", False)
WHITE_NH = ("WHITE_NH", "White", False)
HISP = ("HISP", "Hispanic or Latino", True)
OTHER_STANDARD_NH = ("OTHER_STANDARD_NH", "Some other race", False)
# Below are special values that have slightly different characteristics.
# Hispanic vs Non-Hispanic can be defined differently across datasets.
# Sometimes Hispanic/Latino is treated as mutually exclusive with other
# racial categories, so when a person is categorized as Hispanic or Latino
# they are excluded from the data for any other racial category they belong
# to. Other times, a person may be reported as both Hispanic/Latino and as
# another race. In some datasets, these are reported entirely independently
# so we do not know the relationship between Hispanic/Latino and race.
# ETHNICITY_UNKNOWN refers to data that does not know whether the person is
# Hispanic or Latino. (Some datasets use "ethnicity" to refer to whether
# someone is Hispanic or Latino)
NH = ("NH", "Not Hispanic or Latino", False)
ETHNICITY_UNKNOWN = ("ETHNICITY_UNKNOWN", "Unknown ethnicity", None)
# OTHER_STANDARD and OTHER_STANDARD_NH define "other" in a specific way (see
# above). Some datasets may group additional races into "other"
# when reporting (for example "other" may sometimes include AIAN or NHPI).
# These datasets should use OTHER_NONSTANDARD/OTHER_NONSTANDARD_NH to
# prevent joining with the incorrect population data.
# TODO: The frontend uses the race_and_ethnicity column as a unique
# identifier in some places. Until we migrate to using race_category_id,
# we add a * for the non-standard other so it doesn't accidentally get
# joined with the standard other on the frontend.
OTHER_NONSTANDARD = ("OTHER_NONSTANDARD", "Some other race*", True)
OTHER_NONSTANDARD_NH = ("OTHER_NONSTANDARD_NH", "Some other race*", False)
# Categories that are combinations of other categories
API = ("API", "Asian and Pacific Islander", True)
API_NH = ("API_NH", "Asian and Pacific Islander", False)
# Combines AIAN and NHPI
INDIGENOUS = ("INDIGENOUS", "Indigenous", True)
# Combines AIAN_NH and NHPI_NH
INDIGENOUS_NH = ("INDIGENOUS_NH", "Indigenous", False)
MULTI_OR_OTHER_STANDARD = (
"MULTI_OR_OTHER_STANDARD",
"Two or more races & Some other race",
True)
MULTI_OR_OTHER_STANDARD_NH = (
"MULTI_OR_OTHER_STANDARD_NH",
"Two or more races & Some other race",
False)
# When the race is unknown. Different from ETHNICITY_UNKNOWN, which
# specifically refers to whether Hispanic/Latino is unknown.
UNKNOWN = ("UNKNOWN", "Unknown race", True)
UNKNOWN_NH = ("UNKNOWN_NH", "Unknown race", False)
# The total across races. This must always be included when the other race
# values do not sum to 100%
TOTAL = ("TOTAL", TOTAL_VALUE, None)
ALL = ("ALL", ALL_VALUE, None)
# We set the enum value to the first arg, which is the race category id, or
# a unique code that can be used to reference that race. Race category ids
# should be set to the same value as the enum name.
# Arguments are set as optional to accommodate a mypy bug with enums:
# https://github.com/python/mypy/issues/1021.
def __new__(cls, value, race=None, includes_hispanic=None):
obj = object.__new__(cls)
obj._value_ = value
obj._race = race
obj._includes_hispanic = includes_hispanic
return obj
@property
def race_category_id(self) -> str:
"""The race category id; uniquely identifies this enum member."""
return self.value
@property
def race(self):
"""The basic display name for this race."""
return self._race
@property
def includes_hispanic(self):
"""Whether this race includes Hispanic or Latino."""
return self._includes_hispanic
@property
def race_and_ethnicity(self) -> str:
"""The fully-qualified dispaly name that specifies both race and whether
the category includes Hispanic or Latino."""
if (self.includes_hispanic is True or
self.includes_hispanic is None or
self.race in ["Hispanic or Latino", "Not Hispanic or Latino"]):
return self.race
else:
return self.race + " (Non-Hispanic)"
@staticmethod
def get_col_names() -> list:
"""The list of column names for putting the race attributes into a
table. Columns are returned in the same order as `as_tuple()`."""
return list(RaceTuple._fields)
@staticmethod
def from_category_id(category_id: str):
"""Gets an instance of this Enum from the provided race category id."""
# Instances of an enum can be constructed from their value, and since we
# set the enum value to the category id, we can construct an instance
# without providing other params.
# pylint: disable=no-value-for-parameter
return Race(category_id)
def as_tuple(self) -> RaceTuple:
"""The race attributes, in the same order as `get_col_names()`."""
return RaceTuple(self.race_category_id, self.race,
self.includes_hispanic, self.race_and_ethnicity)
def add_race_columns_from_category_id(df):
"""Adds all race-related columns to the dataframe using the race category id
to determine these values."""
df["race_tuple"] = df.apply(
lambda r: Race.from_category_id(r[RACE_CATEGORY_ID_COL]).as_tuple(),
axis=1)
df[Race.get_col_names()] = pandas.DataFrame(
df["race_tuple"].tolist(), index=df.index)
df.drop("race_tuple", axis=1, inplace=True)
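# Minimal illustrative demo (not part of the original module): expand a frame keyed by
# race_category_id into the full set of race columns defined above.
if __name__ == "__main__":
    demo = pandas.DataFrame({
        RACE_CATEGORY_ID_COL: ["BLACK_NH", "HISP", "ALL"],
        POPULATION_COL: [100, 200, 300],
    })
    add_race_columns_from_category_id(demo)
    print(demo[[RACE_CATEGORY_ID_COL, RACE_OR_HISPANIC_COL, POPULATION_COL]])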
| 41.811927
| 80
| 0.707296
|
f76f1fb954ada0f955275d43f302adf3b1516bfd
| 8,875
|
py
|
Python
|
openbb_terminal/cryptocurrency/overview/coinpaprika_view.py
|
JerBouma/OpenBBTerminal
|
0c60d70cb29b0a6e4db41d6dd0d34f79a6169b27
|
[
"MIT"
] | null | null | null |
openbb_terminal/cryptocurrency/overview/coinpaprika_view.py
|
JerBouma/OpenBBTerminal
|
0c60d70cb29b0a6e4db41d6dd0d34f79a6169b27
|
[
"MIT"
] | null | null | null |
openbb_terminal/cryptocurrency/overview/coinpaprika_view.py
|
JerBouma/OpenBBTerminal
|
0c60d70cb29b0a6e4db41d6dd0d34f79a6169b27
|
[
"MIT"
] | null | null | null |
"""CoinPaprika view"""
__docformat__ = "numpy"
import logging
import os
from pandas.plotting import register_matplotlib_converters
import openbb_terminal.cryptocurrency.overview.coinpaprika_model as paprika
from openbb_terminal.cryptocurrency.dataframe_helpers import (
lambda_long_number_format_with_type_check,
)
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import export_data, print_rich_table
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
register_matplotlib_converters()
# pylint: disable=inconsistent-return-statements
# pylint: disable=C0302, too-many-lines
CURRENCIES = [
"BTC",
"ETH",
"USD",
"EUR",
"PLN",
"KRW",
"GBP",
"CAD",
"JPY",
"RUB",
"TRY",
"NZD",
"AUD",
"CHF",
"UAH",
"HKD",
"SGD",
"NGN",
"PHP",
"MXN",
"BRL",
"THB",
"CLP",
"CNY",
"CZK",
"DKK",
"HUF",
"IDR",
"ILS",
"INR",
"MYR",
"NOK",
"PKR",
"SEK",
"TWD",
"ZAR",
"VND",
"BOB",
"COP",
"PEN",
"ARS",
"ISK",
]
# see https://github.com/OpenBB-finance/OpenBBTerminal/pull/562#issuecomment-887842888
# EXCHANGES = paprika.get_list_of_exchanges()
@log_start_end(log=logger)
def display_global_market(export: str) -> None:
"""Return data frame with most important global crypto statistics like:
market_cap_usd, volume_24h_usd, bitcoin_dominance_percentage, cryptocurrencies_number,
market_cap_ath_value, market_cap_ath_date, volume_24h_ath_value, volume_24h_ath_date,
market_cap_change_24h, volume_24h_change_24h, last_updated [Source: CoinPaprika]
Parameters
----------
export : str
Export dataframe data to csv,json,xlsx file
"""
df = paprika.get_global_market()
df_data = df.copy()
df["Value"] = df["Value"].apply( # pylint:disable=unsupported-assignment-operation
lambda x: lambda_long_number_format_with_type_check(x)
)
print_rich_table(
df, headers=list(df.columns), show_index=False, title="Global Crypto Statistics"
)
export_data(
export,
os.path.dirname(os.path.abspath(__file__)),
"global",
df_data,
)
@log_start_end(log=logger)
def display_all_coins_market_info(
currency: str, sortby: str, descend: bool, top: int, export: str
) -> None:
"""Displays basic market information for all coins from CoinPaprika API. [Source: CoinPaprika]
Parameters
----------
currency: str
Quoted currency
top: int
Number of records to display
sortby: str
Key by which to sort data
descend: bool
Flag to sort data descending
export : str
Export dataframe data to csv,json,xlsx file
"""
df = paprika.get_coins_market_info(quotes=currency).sort_values(
by=sortby, ascending=descend
)
df_data = df.copy()
if df.empty:
console.print("No data found", "\n")
return
cols = [col for col in df.columns if col != "rank"]
df[cols] = df[cols].applymap(lambda x: lambda_long_number_format_with_type_check(x))
console.print(f"\nDisplaying data vs {currency}")
print_rich_table(
df.head(top),
headers=list(df.columns),
show_index=False,
title="Basic Market Information",
)
export_data(
export,
os.path.dirname(os.path.abspath(__file__)),
"markets",
df_data,
)
@log_start_end(log=logger)
def display_all_coins_info(
currency: str, sortby: str, descend: bool, top: int, export: str
) -> None:
"""Displays basic coin information for all coins from CoinPaprika API. [Source: CoinPaprika]
Parameters
----------
currency: str
Quoted currency
top: int
Number of records to display
sortby: str
Key by which to sort data
descend: bool
Flag to sort data descending
export : str
Export dataframe data to csv,json,xlsx file
"""
df = paprika.get_coins_info(quotes=currency).sort_values(
by=sortby, ascending=descend
)
df_data = df.copy()
if df.empty:
console.print("Not data found", "\n")
return
cols = [col for col in df.columns if col != "rank"]
df[cols] = df[cols].applymap(lambda x: lambda_long_number_format_with_type_check(x))
console.print(f"\nDisplaying data vs {currency}")
print_rich_table(
df.head(top),
headers=list(df.columns),
show_index=False,
title="Basic Coin Information",
)
export_data(
export,
os.path.dirname(os.path.abspath(__file__)),
"info",
df_data,
)
@log_start_end(log=logger)
def display_all_exchanges(
currency: str, sortby: str, descend: bool, top: int, export: str
) -> None:
"""List exchanges from CoinPaprika API. [Source: CoinPaprika]
Parameters
----------
currency: str
Quoted currency
top: int
Number of records to display
sortby: str
Key by which to sort data
descend: bool
Flag to sort data descending
export : str
Export dataframe data to csv,json,xlsx file
"""
df = paprika.get_list_of_exchanges(quotes=currency).sort_values(
by=sortby, ascending=descend
)
df_data = df.copy()
if df.empty:
console.print("No data found", "\n")
return
cols = [col for col in df.columns if col != "rank"]
df[cols] = df[cols].applymap(lambda x: lambda_long_number_format_with_type_check(x))
console.print(f"\nDisplaying data vs {currency}")
print_rich_table(
df.head(top), headers=list(df.columns), show_index=False, title="List Exchanges"
)
export_data(
export,
os.path.dirname(os.path.abspath(__file__)),
"exchanges",
df_data,
)
@log_start_end(log=logger)
def display_exchange_markets(
exchange: str, sortby: str, descend: bool, top: int, links: bool, export: str
) -> None:
"""Get all markets for given exchange [Source: CoinPaprika]
Parameters
----------
exchange: str
Exchange identifier e.g Binance
top: int
Number of records to display
sortby: str
Key by which to sort data
descend: bool
Flag to sort data descending
links: bool
Flag to display urls
export : str
Export dataframe data to csv,json,xlsx file
"""
df = paprika.get_exchanges_market(exchange_id=exchange)
df_data = df.copy()
if df.empty:
console.print("No data found", "\n")
return
df = df.sort_values(by=sortby, ascending=descend)
if links is True:
df = df[["exchange_id", "pair", "trust_score", "market_url"]]
else:
df.drop("market_url", axis=1, inplace=True)
print_rich_table(
df.head(top),
headers=list(df.columns),
show_index=False,
title="Exchange Markets",
)
export_data(
export,
os.path.dirname(os.path.abspath(__file__)),
"exmarkets",
df_data,
)
@log_start_end(log=logger)
def display_all_platforms(export: str) -> None:
"""List all smart contract platforms like ethereum, solana, cosmos, polkadot, kusama. [Source: CoinPaprika]
Parameters
----------
export : str
Export dataframe data to csv,json,xlsx file
"""
df = paprika.get_all_contract_platforms()
print_rich_table(
df, headers=list(df.columns), show_index=False, title="Smart Contract Platforms"
)
export_data(
export,
os.path.dirname(os.path.abspath(__file__)),
"platforms",
df,
)
@log_start_end(log=logger)
def display_contracts(
platform: str, sortby: str, descend: bool, top: int, export: str
) -> None:
"""Gets all contract addresses for given platform. [Source: CoinPaprika]
Parameters
----------
platform: str
Blockchain platform like eth-ethereum
top: int
Number of records to display
sortby: str
Key by which to sort data
descend: bool
Flag to sort data descending
export : str
Export dataframe data to csv,json,xlsx file
"""
df = paprika.get_contract_platform(platform)
if df.empty:
console.print(f"Nothing found for platform: {platform}", "\n")
return
df = df.sort_values(by=sortby, ascending=descend)
print_rich_table(
df.head(top),
headers=list(df.columns),
show_index=False,
title="Contract Addresses",
)
export_data(
export,
os.path.dirname(os.path.abspath(__file__)),
"contracts",
df,
)
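# Illustrative usage sketch (assumption): these helpers are normally driven by the
# OpenBB crypto/ov controller, but they can also be called directly once network access
# to CoinPaprika is available, e.g.
#
#   display_global_market(export="")
#   display_all_platforms(export="")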
| 23.729947
| 111
| 0.629296
|
822e9b586a1bab13972c7713ade7befedb9466b9
| 1,180
|
py
|
Python
|
python/product_annoter/mapper/votable_merger.py
|
loumir/modelinstanceinvot-code
|
246bd04b9eab558813ef09ad69a2a2f3ed7d2d92
|
[
"Unlicense"
] | null | null | null |
python/product_annoter/mapper/votable_merger.py
|
loumir/modelinstanceinvot-code
|
246bd04b9eab558813ef09ad69a2a2f3ed7d2d92
|
[
"Unlicense"
] | 2
|
2022-03-09T18:39:21.000Z
|
2022-03-11T16:27:45.000Z
|
python/product_annoter/mapper/votable_merger.py
|
loumir/modelinstanceinvot-code
|
246bd04b9eab558813ef09ad69a2a2f3ed7d2d92
|
[
"Unlicense"
] | 1
|
2021-07-09T12:30:57.000Z
|
2021-07-09T12:30:57.000Z
|
'''
Created on 16 avr. 2020
@author: laurentmichel
'''
from product_annoter.mapper import logger
class VOTableMerger(object):
'''
classdocs
'''
def __init__(self, raw_votable_path, mapping_block_path, output_path):
'''
Constructor
'''
self.raw_votable_path = raw_votable_path
self.mapping_block_path = mapping_block_path
self.output_path = output_path
def _get_mapping(self):
with open(self.mapping_block_path, 'r') as mapping:
return mapping.read()
def insert_mapping(self):
logger.info("save annotated VOTable in %s", self.output_path)
with open(self.output_path, 'w') as output_votable:
with open(self.raw_votable_path, 'r') as raw_votable:
prelude = False
for line in raw_votable:
output_votable.write(line)
if line.startswith("<VOTABLE") is True:
prelude = True
if prelude is True and ">" in line:
output_votable.write(self._get_mapping())
prelude = False
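# Minimal runnable sketch (not part of the original module): merge a dummy mapping block
# into a tiny VOTable and print the annotated result. The file contents below are
# placeholders chosen only to exercise insert_mapping().
if __name__ == "__main__":
    import os
    import tempfile
    tmp = tempfile.mkdtemp()
    raw, mapping, out = (os.path.join(tmp, name)
                         for name in ("raw.xml", "mapping.xml", "annotated.xml"))
    with open(raw, "w") as f:
        f.write('<VOTABLE version="1.4">\n  <RESOURCE/>\n</VOTABLE>\n')
    with open(mapping, "w") as f:
        f.write("<VODML><!-- mapping block --></VODML>\n")
    VOTableMerger(raw, mapping, out).insert_mapping()
    with open(out) as f:
        print(f.read())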
| 30.25641
| 74
| 0.570339
|
359e10528bbce247d42f091c6aa0f7830af3bf57
| 3,067
|
py
|
Python
|
evaluate.py
|
seanjh/DSRecommendationSystems
|
a08630b0113d35227f56c89e1d1b9394516f6ff6
|
[
"Apache-2.0"
] | 1
|
2017-04-13T20:15:43.000Z
|
2017-04-13T20:15:43.000Z
|
evaluate.py
|
seanjh/DSRecommendationSystems
|
a08630b0113d35227f56c89e1d1b9394516f6ff6
|
[
"Apache-2.0"
] | null | null | null |
evaluate.py
|
seanjh/DSRecommendationSystems
|
a08630b0113d35227f56c89e1d1b9394516f6ff6
|
[
"Apache-2.0"
] | null | null | null |
import os
import math
import config
import configspark
import ml_parse
from pyspark.mllib.recommendation import ALS
RANKS = [10, 20, 30, 40, 50]
LAMBDA_VALUES = [0.01, 0.1, 1.0, 10.0]
ITERATIONS = 10
sc = configspark.SPARK_CONTEXT
def report_mse_results(outfile, rank, lambda_value, mse, rmse):
print("\nRank=%2d, Lambda=%4.3f\n\tMSE=%0f, RMSE=%0f\n" % (
rank, lambda_value, mse, rmse))
outfile.write("%d,%f,%f,%f\n" % (rank, lambda_value, mse, rmse))
def evaluate_parameters(train, validation, ranks, iterations, lambda_values,
implicit):
print("\n")
if implicit:
print("Training with implicit feedback")
trainFunc = ALS.trainImplicit
else:
print("Training with explicit feedback")
trainFunc = ALS.train
print("\n")
for rank in ranks:
for lambda_val in lambda_values:
model = trainFunc(train, rank, iterations, lambda_val,
nonnegative=True)
mse, rmse = evaluate_model(model, validation)
yield {
"rank": rank,
"lambda": lambda_val,
"mse": mse,
"rmse": rmse,
"model": model
}
# Evaluate the model on test data
def evaluate_model(model, validation):
users_products = validation.map(lambda row: ml_parse.user_product(row))
users_products_ratings = validation.map(ml_parse.user_product_rating)
predictions = (
model
.predictAll(users_products)
.map(ml_parse.user_product_rating))
# RDD of [((user, movie), (real_rating, predicted_rating)), ...]
ratesAndPreds = users_products_ratings.join(predictions).values()
print("\nRatings and predictions (sample 10):\n%s" % ratesAndPreds.take(10))
mse = ratesAndPreds.map(lambda result: (result[0] - result[1]) ** 2).mean()
return mse, math.sqrt(mse)
def evaluate(train, validation, results_filename, implicit=False):
min_rmse = None
best_result = None
best_model = None
with open(results_filename, "w") as outfile:
# CSV header
outfile.write("%s\n" % ",".join(["rank", "lambda", "mse", "rmse"]))
for result in evaluate_parameters(train, validation, RANKS,
ITERATIONS, LAMBDA_VALUES, implicit):
report_mse_results(
outfile,
result.get("rank"),
result.get("lambda"),
result.get("mse"),
result.get("rmse")
)
if best_result is None or result.get("rmse") < min_rmse:
best_result = result
min_rmse = result.get("rmse")
return best_result
def load_best_params(filename):
if not os.path.exists(filename):
raise RuntimeError("Cannot locate best ALS parameters file %s"
% filename)
with open(filename) as infile:
lines = [line for line in infile]
parts = lines[1].strip().split(",")
return parts[0], parts[1]
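# Illustrative driver sketch (assumption, not from the original file): a typical caller
# would split a ratings RDD and run the grid search over RANKS and LAMBDA_VALUES, e.g.
#
#   train, validation = ratings.randomSplit([0.8, 0.2], seed=0)
#   best = evaluate(train, validation, "als_results.csv", implicit=False)
#   print(best["rank"], best["lambda"], best["rmse"])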
| 30.67
| 80
| 0.59374
|
f7684a51897ca7b66db658b118e9ab886946c37f
| 3,165
|
py
|
Python
|
src/olympia/files/tests/test_decorators.py
|
leplatrem/addons-server
|
8b5ebda6f33194aa9fce12c0453574e7f850e6ad
|
[
"BSD-3-Clause"
] | 1
|
2020-04-07T07:21:25.000Z
|
2020-04-07T07:21:25.000Z
|
src/olympia/files/tests/test_decorators.py
|
leplatrem/addons-server
|
8b5ebda6f33194aa9fce12c0453574e7f850e6ad
|
[
"BSD-3-Clause"
] | null | null | null |
src/olympia/files/tests/test_decorators.py
|
leplatrem/addons-server
|
8b5ebda6f33194aa9fce12c0453574e7f850e6ad
|
[
"BSD-3-Clause"
] | 2
|
2018-03-04T00:11:22.000Z
|
2019-12-14T09:45:55.000Z
|
from django import http
from django.core.exceptions import PermissionDenied, ObjectDoesNotExist
import pytest
from mock import Mock, patch
from olympia import amo
from olympia.amo.tests import TestCase
from olympia.access import acl
from olympia.files.decorators import allowed
class AllowedTest(TestCase):
def setUp(self):
super(AllowedTest, self).setUp()
self.request = Mock()
self.file = Mock()
@patch.object(acl, 'check_addons_reviewer', lambda x: False)
@patch.object(acl, 'check_unlisted_addons_reviewer', lambda x: False)
@patch.object(acl, 'check_addon_ownership', lambda *args, **kwargs: True)
def test_owner_allowed(self):
assert allowed(self.request, self.file)
@patch.object(acl, 'check_addons_reviewer', lambda x: True)
@patch.object(acl, 'check_unlisted_addons_reviewer', lambda x: False)
def test_reviewer_allowed(self):
assert allowed(self.request, self.file)
@patch.object(acl, 'check_addons_reviewer', lambda x: False)
@patch.object(acl, 'check_unlisted_addons_reviewer', lambda x: False)
@patch.object(acl, 'check_addon_ownership', lambda *args, **kwargs: False)
def test_viewer_unallowed(self):
self.assertRaises(PermissionDenied, allowed, self.request, self.file)
def test_addon_not_found(self):
class MockVersion:
@property
def addon(self):
raise ObjectDoesNotExist
self.file.version = MockVersion()
self.assertRaises(http.Http404, allowed, self.request, self.file)
def get_unlisted_addon_file(self):
addon = amo.tests.addon_factory(is_listed=False)
return addon, addon.versions.get().files.get()
@patch.object(acl, 'check_addons_reviewer', lambda x: False)
@patch.object(acl, 'check_unlisted_addons_reviewer', lambda x: False)
@patch.object(acl, 'check_addon_ownership', lambda *args, **kwargs: False)
def test_unlisted_viewer_unallowed(self):
addon, file_ = self.get_unlisted_addon_file()
with pytest.raises(http.Http404):
allowed(self.request, file_)
@patch.object(acl, 'check_addons_reviewer', lambda x: True)
@patch.object(acl, 'check_unlisted_addons_reviewer', lambda x: False)
@patch.object(acl, 'check_addon_ownership', lambda *args, **kwargs: False)
def test_unlisted_reviewer_unallowed(self):
addon, file_ = self.get_unlisted_addon_file()
with pytest.raises(http.Http404):
allowed(self.request, file_)
@patch.object(acl, 'check_addons_reviewer', lambda x: True)
@patch.object(acl, 'check_unlisted_addons_reviewer', lambda x: True)
def test_unlisted_admin_reviewer_allowed(self):
addon, file_ = self.get_unlisted_addon_file()
assert allowed(self.request, file_)
@patch.object(acl, 'check_addons_reviewer', lambda x: False)
@patch.object(acl, 'check_unlisted_addons_reviewer', lambda x: False)
@patch.object(acl, 'check_addon_ownership', lambda *args, **kwargs: True)
def test_unlisted_owner_allowed(self):
addon, file_ = self.get_unlisted_addon_file()
assert allowed(self.request, file_)
| 41.103896
| 78
| 0.711532
|
ec6177b844824b535c33d169371e0ffac3b07db2
| 570
|
py
|
Python
|
olist_ecommerce/config.py
|
mara/mara-olist-ecommerce-data
|
663660edff1b4cd711a172027081915771628b9f
|
[
"MIT"
] | 1
|
2021-03-12T04:47:05.000Z
|
2021-03-12T04:47:05.000Z
|
olist_ecommerce/config.py
|
mara/mara-olist-ecommerce-data
|
663660edff1b4cd711a172027081915771628b9f
|
[
"MIT"
] | null | null | null |
olist_ecommerce/config.py
|
mara/mara-olist-ecommerce-data
|
663660edff1b4cd711a172027081915771628b9f
|
[
"MIT"
] | null | null | null |
"""Configuration of loading olist e-commerce dataset to Postgres"""
import pathlib
def data_dir() -> str:
"""The directory where the csv files are located"""
return pathlib.Path(__file__).parent.parent/'data'
def db_name() -> str:
"""Database name"""
return 'olist_ecommerce'
def db_user() -> str:
"""Database user"""
return 'root'
def db_password() -> str:
"""Database password"""
return ''
def db_host() -> str:
"""Database host"""
return 'localhost'
def db_port() -> str:
"""Database port"""
return '5432'
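# Illustrative sketch (not part of the original module): the accessors above are
# typically combined into a single connection URL; the postgresql:// scheme here is an
# assumption about how downstream code consumes them.
if __name__ == "__main__":
    print(f"postgresql://{db_user()}:{db_password()}@{db_host()}:{db_port()}/{db_name()}")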
| 16.764706
| 67
| 0.621053
|
760f4e45c0081323f7c0be0a88b943045eed3a42
| 2,682
|
py
|
Python
|
experiments/adversarial_training/cluster_cifar10.py
|
srakrnxKU/adversarial-project
|
b75b45d4e24461b804db5ab2b4b28217ed09908f
|
[
"MIT"
] | 3
|
2020-08-07T12:13:14.000Z
|
2020-12-11T02:26:17.000Z
|
experiments/adversarial_training/cluster_cifar10.py
|
srakrnxKU/adversarial-project
|
b75b45d4e24461b804db5ab2b4b28217ed09908f
|
[
"MIT"
] | 5
|
2021-04-30T21:07:02.000Z
|
2022-01-25T11:50:36.000Z
|
experiments/adversarial_training/cluster_cifar10.py
|
srakrnxKU/adversarial-project
|
b75b45d4e24461b804db5ab2b4b28217ed09908f
|
[
"MIT"
] | null | null | null |
# %%
import logging
import os
import sys
import torch
from clustre.adversarial_training import cluster_training
from clustre.helpers.datasets import cifar10_testloader, cifar10_trainloader
from clustre.helpers.metrics import (
classification_report,
classification_report_fgsm,
classification_report_pgd,
)
from clustre.models import cifar10_cnn, cifar10_wide_resnet34_10
from clustre.models.state_dicts import (
cifar10_cnn_state,
cifar10_wide_resnet34_10_state,
)
from torch import nn, optim
# %%
DEVICE = "cuda:0"
LOG_FILENAME = os.path.abspath(__file__)[:-3] + "_log.txt"
SCRIPT_PATH = os.path.dirname(__file__)
FORMAT = "%(message)s"
logging.basicConfig(filename=LOG_FILENAME, level=logging.INFO, format=FORMAT)
log = logging.getLogger()
# %%
models = {
# "CIFAR-10 CNN": [
# cifar10_cnn,
# cifar10_cnn_state,
# cifar10_trainloader,
# cifar10_testloader,
# ],
"CIFAR-10 Wide ResNet34-10": [
cifar10_wide_resnet34_10,
cifar10_wide_resnet34_10_state,
cifar10_trainloader,
cifar10_testloader,
],
}
global_param = {"n_init": 3, "n_epoches": 40}
# %%
for model_name, (model, state, trainloader, testloader) in models.items():
for cluster_with in ["original_data", "fgsm_perturb"]:
for n_clusters in [500, 1000, 3000, 5000, 10000]:
model.load_state_dict(state)
logging.info(f"Training {model_name}")
logging.info(
"n_cluster = {}, cluster_with = {}".format(
n_clusters, cluster_with
)
)
new_model = cluster_training(
model,
trainloader,
device=DEVICE,
log=log,
n_clusters=n_clusters,
cluster_with=cluster_with,
**global_param,
)
torch.save(
new_model.state_dict(),
os.path.join(
SCRIPT_PATH,
f"Cluster {model_name} {cluster_with} {n_clusters}.model",
),
)
logging.info(f"Unattacked {model_name}")
logging.info(
classification_report(new_model, testloader, device=DEVICE)
)
logging.info(f"FGSM attacked {model_name}")
logging.info(
classification_report_fgsm(
new_model, testloader, device=DEVICE
)
)
logging.info(f"PGD attacked {model_name}")
logging.info(
classification_report_pgd(new_model, testloader, device=DEVICE)
)
| 29.472527
| 79
| 0.592095
|
98f485b591a0b705a9d8146f0ce629e0eef3c925
| 926
|
py
|
Python
|
I_PROYECTO_DJANGO_ICIT/colegio/estudiantes/migrations/0002_apoderados.py
|
fernandopacco71/PROGRAMACION-WEB-II-ICIT-REPOSITORIO
|
edf53e534c7c3d561981813c5cf0af061ea5c601
|
[
"MIT"
] | 2
|
2021-09-03T00:02:38.000Z
|
2021-09-09T15:59:42.000Z
|
I_PROYECTO_DJANGO_ICIT/colegio/estudiantes/migrations/0002_apoderados.py
|
fernandopacco71/PROGRAMACION-WEB-II-ICIT-REPOSITORIO
|
edf53e534c7c3d561981813c5cf0af061ea5c601
|
[
"MIT"
] | null | null | null |
I_PROYECTO_DJANGO_ICIT/colegio/estudiantes/migrations/0002_apoderados.py
|
fernandopacco71/PROGRAMACION-WEB-II-ICIT-REPOSITORIO
|
edf53e534c7c3d561981813c5cf0af061ea5c601
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.6 on 2021-08-25 15:11
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('estudiantes', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Apoderados',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('idApoderados', models.IntegerField(unique=True)),
('nombreApoderados', models.TextField(default='', max_length=60)),
('apellidosApoderados', models.TextField(default='', max_length=60)),
('dniApoderados', models.CharField(max_length=8, unique=True)),
('domicilioApoderados', models.TextField(default='', max_length=100)),
('sexoApoderados', models.CharField(max_length=1)),
],
),
]
| 35.615385
| 117
| 0.596112
|
46562259897ef343769e60cee48f6dbfe76c8f05
| 10,307
|
py
|
Python
|
register.py
|
chripell/yaaca
|
9048ca5dc458f9a7dde9ca745f057f7499b19972
|
[
"Apache-2.0"
] | 1
|
2020-09-23T19:56:17.000Z
|
2020-09-23T19:56:17.000Z
|
register.py
|
chripell/yaaca
|
9048ca5dc458f9a7dde9ca745f057f7499b19972
|
[
"Apache-2.0"
] | null | null | null |
register.py
|
chripell/yaaca
|
9048ca5dc458f9a7dde9ca745f057f7499b19972
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# register.py
# outputs: [x] [y] offset for every image
import sys
import os
import multiprocessing
sys.path.append(os.path.join(os.path.dirname(__file__), "astrolove"))
sys.path.append("/usr/lib/astrolove")
import numpy as np
import astrolib as AL
import scipy.signal
import scipy.ndimage.interpolation
import imreg
from optparse import OptionParser
parser = OptionParser(usage = "usage: %prog [opts] [files or @file_list ...]")
parser.add_option("--method", type = "int", default = 0, help = "0 FFT, 1 FFT Canny, 2 geometric, 3 imreg, 100 none")
parser.add_option("--filter", type = "int", default = 0, help = "0 none 1 median 2 wavelet only ROI: 101 median 102 wavelet")
parser.add_option("--filter-par", type = "int", default = 0, help = "parameter for filter")
parser.add_option("--zoom", type = "int", default = 0, help = "zoom if > 0")
parser.add_option("--dark", type = "string", default = "N", help = "dark frame to subtract")
parser.add_option("--imtype", type = "int", default = 0, help = AL.imtype_help)
parser.add_option("--flat", type = "string", default = "N", help = "npy file with flat to divide with")
parser.add_option("--crop-x", type = "int", default = 0, help = "crop x")
parser.add_option("--crop-y", type = "int", default = 0, help = "crop y")
parser.add_option("--crop-w", type = "int", default = -1, help = "crop w")
parser.add_option("--crop-h", type = "int", default = -1, help = "crop h")
parser.add_option("--roi-x", type = "int", default = -1, help = "roi x")
parser.add_option("--roi-y", type = "int", default = -1, help = "roi y")
parser.add_option("--roi-w", type = "int", default = -1, help = "roi w")
parser.add_option("--roi-h", type = "int", default = -1, help = "roi h")
parser.add_option("--out-dir", type = "string", default = "./", help = "output dir, default to current")
parser.add_option("--defect", type = "string", default = "", help = "defect list: x,y one per line")
parser.add_option("--defect-col", type = "string", default = "", help = "defect column list: one per line")
parser.add_option("--debayer-pattern", type = "int", default = 2, help = "0:none 1:bggr 2:grbg")
parser.add_option("--debayer-method", type = "int", default = 3, help = "0:median 1:mean 2:super_pixel 3:opencv")
parser.add_option("--save-npy", type = "int", default = 0, help = "if non zero save npy of image as well")
parser.add_option("--cores", type = "int", default = 1, help = "number of workers to use")
(options, args) = parser.parse_args()
debayer_method = options.debayer_method
debayer_pattern = options.debayer_pattern
method = options.method
filter = options.filter
filter_par = options.filter_par
zoom = options.zoom
dark = options.dark
im_mode = options.imtype
if options.flat != 'N' :
flat = np.load(options.flat)
else:
flat = None
crop_x = options.crop_x
crop_y = options.crop_y
crop_w = options.crop_w
crop_h = options.crop_h
roi_x = options.roi_x
roi_y = options.roi_y
roi_w = options.roi_w
roi_h = options.roi_h
dir = options.out_dir
save_npy = options.save_npy
cores = options.cores
def bin_ndarray(ndarray, new_shape, operation='sum'):
"""
Bins an ndarray in all axes based on the target shape, by summing or
averaging.
Number of output dimensions must match number of input dimensions.
Example
-------
>>> m = np.arange(0,100,1).reshape((10,10))
>>> n = bin_ndarray(m, new_shape=(5,5), operation='sum')
>>> print(n)
[[ 22 30 38 46 54]
[102 110 118 126 134]
[182 190 198 206 214]
[262 270 278 286 294]
[342 350 358 366 374]]
"""
if not operation.lower() in ['sum', 'mean', 'average', 'avg']:
raise ValueError("Operation {} not supported.".format(operation))
if ndarray.ndim != len(new_shape):
raise ValueError("Shape mismatch: {} -> {}".format(ndarray.shape,
new_shape))
compression_pairs = [(d, c//d) for d, c in zip(new_shape,
ndarray.shape)]
flattened = [l for p in compression_pairs for l in p]
ndarray = ndarray.reshape(flattened)
for i in range(len(new_shape)):
if operation.lower() == "sum":
ndarray = ndarray.sum(-1*(i+1))
elif operation.lower() in ["mean", "average", "avg"]:
ndarray = ndarray.mean(-1*(i+1))
return ndarray
def prev_pow(x):
p = 1
while p < x:
p *= 2
return p // 2
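# For example, prev_pow(1000) == 512: the helper returns half of the smallest power of
# two that is >= x, and is used below to pick power-of-two ROI dimensions for registration.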
def prepare_image(n):
global crop_w, crop_h, roi_w, roi_h, roi_x, roi_y
imSP = None
imRGB = AL.load_pic(n, im_mode)
if crop_w == -1 :
crop_w = imRGB[0].shape[0]
if crop_h == -1 :
crop_h = imRGB[0].shape[1]
if roi_w == -1 :
roi_w = prev_pow(crop_w // 2)
if roi_h == -1 :
roi_h = prev_pow(crop_h // 2)
if roi_x == -1 :
roi_x = (imRGB[0].shape[0] - roi_w) // 2
if roi_y == -1 :
roi_y = (imRGB[0].shape[1] - roi_h) // 2
imRGB = [x.astype(AL.myfloat) for x in imRGB]
if dark != "N":
imRGB = [x - y for x,y in zip(imRGB, darkf)]
imRGB = [x.clip(0, 65535) for x in imRGB]
for defect in defects:
x = defect[0]
y = defect[1]
if x > 0 and x < (crop_w - 1) and y > 0 and y < (crop_h - 1):
for im in imRGB:
im[x,y] = (im[x-1,y] + im[x+1,y] + im[x,y-1] + im[x,y+1]) / 4
for x in defect_cols:
if x > 0 and x < (crop_w - 1) :
for im in imRGB:
im[x,:] = (im[x-1,:] + im[x+1,:]) / 2.0
imRAW = imRGB[0].astype(np.uint16)
if im_mode == 7 or im_mode == 8 or im_mode == 16:
imRGB = AL.demosaic(imRAW, debayer_pattern, debayer_method)
imRGB = [x.astype(AL.myfloat) for x in imRGB]
if not flat is None :
if imRGB[0].shape != flat.shape:
fl = bin_ndarray(flat, imRGB[0].shape, operation='mean')
imRGB = [x / fl for x in imRGB]
else:
imRGB = [x / flat for x in imRGB]
imRGB = [x[crop_x : (crop_x + crop_w), crop_y : (crop_y + crop_h)] for x in imRGB]
if im_mode == 16:
imSP1 = AL.demosaic(imRAW, debayer_pattern, AL.DEBAYER_SUPER_PIXEL)
imSP = [np.repeat(np.repeat(x, 2, axis=0), 2, axis=1) for x in imSP1]
imSP = [x.astype(AL.myfloat) for x in imSP]
if not flat is None :
if imSP[0].shape != flat.shape:
fl = bin_ndarray(flat, imSP[0].shape, operation='mean')
imSP = [x / fl for x in imSP]
else:
imSP = [x / flat for x in imSP]
imSP = [x[crop_x : (crop_x + crop_w), crop_y : (crop_y + crop_h)] for x in imSP]
if filter == 1 :
imRGB = [scipy.signal.medfilt2d(x, kernel_size = int(filter_par)) for x in imRGB]
elif filter == 2 :
imRGB = [AL.waveletDenoise(x, filter_par) for x in imRGB]
if zoom > 0 :
imRGB = [scipy.ndimage.interpolation.zoom(x, zoom) for x in imRGB]
if len(imRGB) == 1 :
imL = imRGB[0]
else:
imL = 0.299 * imRGB[0] + 0.587 * imRGB[1] + 0.114 * imRGB[2]
if zoom > 1 :
nim = imL[(roi_x - crop_x)*zoom : (roi_x - crop_x + roi_w)*zoom, (roi_y - crop_y)*zoom : (roi_y - crop_y + roi_h)*zoom]
else:
nim = imL[(roi_x - crop_x) : (roi_x - crop_x + roi_w), (roi_y - crop_y) : (roi_y - crop_y + roi_h)]
if filter == 101 :
nim = scipy.signal.medfilt2d(nim, kernel_size = int(filter_par))
elif filter == 102 :
nim = AL.waveletDenoise(nim, filter_par)
if debug :
AL.save_pic(dir + "roi_area_%s"%(os.path.basename(n)), 1, [nim])
if method == 1 :
nim = AL.canny(nim, sigma = 3)
return imRGB, nim, imL, imSP
def get_ref(nim):
if method == 0 or method == 1 :
ref = np.fft.fft2(nim)
elif method == 2:
yref,xref = AL.geometric_median(nim,threshold=0.8*np.max(nim))
ref = (yref,xref)
elif method == 3:
ref = nim
else:
ref = None
return ref
def save_image(idx, imout, mode, prefix = ""):
fname = dir + prefix + "registered_%05d"%(idx)
AL.save_pic(fname, mode, imout)
if save_npy != 0:
if len(imout) == 3:
nim = np.dstack(imout)
np.save(fname, nim)
else:
np.save(fname, imout[0])
def process_image(ii):
(idx, n, ref) = ii
(imRGB, nim, imL, imSP) = prepare_image(n)
angle = 0
success = 1
if method == 0 or method == 1 :
fft = np.fft.fft2(nim,s=ref.shape)
xshift,yshift = AL.registration_dft(ref, fft)
elif method == 2:
(yref,xref) = ref
my,mx = AL.geometric_median(nim,threshold=0.8*np.max(nim))
yshift,xshift = int(yref-my), int(xref-mx)
elif method == 3:
tran = imreg.translation(ref, nim)
xshift = int(round(tran['tvec'][0]))
yshift = int(round(tran['tvec'][1]))
angle = tran['angle']
success = tran['success']
else:
xshift = 0
yshift = 0
print(("%s: %d,%d %f %d" % (n, xshift, yshift, angle, success)))
imout = [np.roll(np.roll(x, xshift, axis=0), yshift, axis=1) for x in imRGB]
save_image(idx + 1, imout, im_mode)
if im_mode == 16:
imout = np.roll(np.roll(imL, xshift, axis=0), yshift, axis=1)
save_image(idx + 1, [imout], 1, "bw_")
imout = [np.roll(np.roll(x, xshift, axis=0), yshift, axis=1) for x in imSP]
save_image(idx + 1, imout, 0, "sp_")
if __name__ == "__main__":
defects = []
if options.defect != "" :
with open(options.defect) as fp:
for line in fp:
defects.append([int(x) for x in line.split(",")])
defect_cols = []
if options.defect_col != "" :
with open(options.defect_col) as fp:
for line in fp:
defect_cols.append(int(line))
if dark != "N" :
darkf = AL.load_pic(dark, im_mode)
ref = None
debug = False
all = AL.expand_args(args)
base, nim, baseL, baseSP = prepare_image(all[0])
ref = get_ref(nim)
save_image(0, base, im_mode)
if im_mode == 16:
save_image(0, [baseL], 1, "bw_")
save_image(0, baseSP, 0, "sp_")
todo = [(idx, im, ref) for idx, im in enumerate(all[1:])]
with multiprocessing.Pool(cores) as pool:
pool.map(process_image, todo)
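# Illustrative invocation sketch (assumption): register every frame listed in a file
# against the first one, writing the shifted frames into out/, e.g.
#
#   python register.py --method 0 --imtype 0 --out-dir out/ @file_list.txt
#
# The @file_list.txt form is expanded by AL.expand_args as indicated in the usage string.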
| 38.602996
| 127
| 0.582905
|
43b9334c530cccb48ad6908bc13c1fbae678dd59
| 10,515
|
py
|
Python
|
train_ee.py
|
jaykay233/-
|
9e9d78cde55d9f1a7a0a431f6438b673b86ab510
|
[
"Apache-2.0"
] | 10
|
2020-08-01T16:47:32.000Z
|
2022-03-30T04:05:53.000Z
|
train_ee.py
|
jaykay233/-
|
9e9d78cde55d9f1a7a0a431f6438b673b86ab510
|
[
"Apache-2.0"
] | 2
|
2020-07-14T07:05:48.000Z
|
2021-11-03T13:20:15.000Z
|
train_ee.py
|
jaykay233/-
|
9e9d78cde55d9f1a7a0a431f6438b673b86ab510
|
[
"Apache-2.0"
] | 2
|
2021-03-02T07:29:15.000Z
|
2021-11-08T12:45:40.000Z
|
import pytorch_lightning as pl
import torch
from pytorch_lightning import Trainer
from torchcrf import CRF
from data_process import role2label, trigger2label,label2role,label2trigger
from data_process import train_loader, dev_loader, test_loader
import json
NUM_TAGS = len(role2label)
NUM_RELATIONS = len(trigger2label)
root_path = 'chinese_roberta/'
from transformers import BertModel
class DiceLoss(torch.nn.Module):
def __init__(self):
super(DiceLoss, self).__init__()
def forward(self, input, target):
N = target.size(0)
smooth = 1
input_flat = input.view(N, -1)
target_flat = target.view(N, -1)
intersection = input_flat * target_flat
loss = 2 * (intersection.sum(1) + smooth) / (input_flat.sum(1) + target_flat.sum(1) + smooth)
loss = 1 - loss.sum() / N
return loss
class MulticlassDiceLoss(torch.nn.Module):
"""
requires one hot encoded target. Applies DiceLoss on each class iteratively.
requires input.shape[0:1] and target.shape[0:1] to be (N, C) where N is
batch size and C is number of classes
"""
def __init__(self):
super(MulticlassDiceLoss, self).__init__()
def forward(self, input, target, weights=None):
C = target.shape[1]
# if weights is None:
# weights = torch.ones(C) #uniform weights for all classes
dice = DiceLoss()
totalLoss = 0
for i in range(C):
diceLoss = dice(input[:, i], target[:, i])
if weights is not None:
diceLoss *= weights[i]
totalLoss += diceLoss
return totalLoss
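# For example (illustrative only): with predicted probabilities of shape (N, C, ...) and a
# one-hot target of the same shape, MulticlassDiceLoss()(probs, one_hot) sums the
# per-class Dice losses; in training_step below it compares the predicted relation tensor
# with the gold relation matrix.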
def viterbi_decode(emissions, transitions, length, num_tags=NUM_TAGS, k=5):
    # Viterbi over the (already softmax-ed) emission probabilities and the CRF
    # transition scores; probs holds accumulated log-scores, backptrs the best
    # previous tag for each (position, tag).
    backptrs = torch.zeros(length, num_tags, dtype=torch.long)
    probs = torch.zeros(length, num_tags)
    for i in range(length):
        if i == 0:
            for j in range(num_tags):
                backptrs[0][j] = -1
                probs[0][j] = torch.log(emissions[0][j])
        else:
            for j in range(num_tags):
                res = []
                for t in range(num_tags):
                    # probs is already in log-space and torchcrf transitions are raw
                    # log-scores, so neither is logged again here.
                    score = probs[i - 1][t] + transitions[t][j] + torch.log(emissions[i][j])
                    res.append((t, score))
                res.sort(key=lambda x: x[1], reverse=True)
                backptrs[i][j] = res[0][0]
                probs[i][j] = res[0][1]
    seq_res = []
    for j in range(num_tags):
        # Follow the backpointers from the last position, then reverse the tag path.
        score = probs[length - 1][j]
        tag = j
        res = []
        for last in range(length - 1, -1, -1):
            res.append(label2role[tag])
            tag = int(backptrs[last][tag])
        res.reverse()
        seq_res.append((score, res))
    seq_res.sort(key=lambda x: x[0], reverse=True)
    return seq_res[:k]
class JointModel(pl.LightningModule):
def __init__(self, root_path, num_layers, hidden_size, num_tags, num_relations, bidirectional=True, dropout=0.3,
soft_embedding_dim=1024):
super().__init__()
self.model = BertModel.from_pretrained(root_path)
self.lstm = torch.nn.LSTM(1024, hidden_size, batch_first=True, bidirectional=bidirectional)
self.num_layers = num_layers
self.hidden_size = hidden_size
if bidirectional:
self.num_directions = 2
else:
self.num_directions = 1
self.num_tags = num_tags
self.num_relations = num_relations
self.embedding = torch.nn.Embedding(num_tags, embedding_dim=soft_embedding_dim)
## project_layer
self.dense = torch.nn.Linear(self.num_directions * self.hidden_size, self.num_tags)
### crf decode layer
self.crf = CRF(self.num_tags, batch_first=True)
self.subject_dense = torch.nn.Linear(soft_embedding_dim, num_relations)
self.object_dense = torch.nn.Linear(soft_embedding_dim, num_relations)
self.relation_dense = torch.nn.Linear(num_relations*num_relations,num_relations)
self.doc_dense = torch.nn.Sequential(torch.nn.Linear(1024, 256), torch.nn.ReLU(), torch.nn.Linear(256, 10),
torch.nn.Softmax(dim=-1))
def training_step(self, batch, batch_idx):
input, label, relation_matrix, classes, length = batch
maxlen = input.shape[-1]
emissions, predicted_relation_matrix, cls_ = self(input)
tags = label.long()
mask = self.sequence_mask(length,max_len=maxlen)
log_loss = -self.crf(emissions, tags, mask=mask)
rel_loss = MulticlassDiceLoss()(predicted_relation_matrix,relation_matrix)
classes = classes.squeeze(dim=1)
cls_loss = torch.nn.functional.binary_cross_entropy_with_logits(cls_,classes.float())
return {'loss': log_loss + rel_loss + cls_loss}
def validation_step(self,batch,batch_idx):
output = self.training_step(batch,batch_idx)
val_loss = output['loss']
return {'val_loss':val_loss}
def validation_epoch_end(self,outputs):
outputs = torch.stack([x['val_loss'] for x in outputs]).mean()
print("validation_loss: {}".format(outputs.data))
return {'val_loss':outputs.data}
def test_step(self, batch, batch_idx):
input, id, length,original_text = batch
maxlen = input.shape[-1]
mask = self.sequence_mask(length,max_len=maxlen)
emissions, predicted_relation_matrix, cls = self(input)
return {'emissions':emissions, 'predicted_relation_matrix':predicted_relation_matrix, 'cls':cls,'length':length,'id':id,'original_text':original_text}
    def write_by_line(self, path, data, t_code="utf-8"):
        # Append one JSON line per test sample; the handle is opened in text mode,
        # so the string is written directly rather than encoded to bytes.
        with open(path, 'a', encoding=t_code) as outfile:
            outfile.write(data + '\n')
def test_step_end(self, outputs):
emissions = outputs['emissions']
predicted_relation_matrix = outputs['predicted_relation_matrix']
cls = outputs['cls']
length = outputs['length']
id = outputs['id']
original_text = outputs['original_text']
        if emissions.dim() == 3:
            # Drop the batch dimension so the decoder sees (seq_len, num_tags).
            emissions = emissions.squeeze(dim=0)
decoded_sequence = viterbi_decode(emissions,self.crf.transitions,length=length,num_tags=NUM_TAGS)
predicted_relation_matrix = torch.round(predicted_relation_matrix)
events = {}
events['text'] = original_text
events['id'] = id
events['event_list'] = []
for i in range(length):
for j in range(length):
for k in range(NUM_RELATIONS):
if predicted_relation_matrix[i][j][k]>0.8:
event = {}
event_type = label2trigger[k]
event['event_type'] = event_type
event['arguments'] = []
                        for _score, seq in decoded_sequence:
                            start_i = i
                            start_j = j
                            # Walk left from position i to the 'B-' tag opening the first argument span.
                            while start_i >= 0:
                                if seq[start_i] == 'O':
                                    break
                                if seq[start_i].startswith('B-'):
                                    role = seq[start_i][2:]
                                    argument = original_text[start_i:i + 1]
                                    event['arguments'].append({'role': role, 'argument': argument})
                                    break
                                start_i -= 1
                            # Walk left from position j to the 'B-' tag opening the second argument span.
                            while start_j >= 0:
                                if seq[start_j] == 'O':
                                    break
                                if seq[start_j].startswith('B-'):
                                    role = seq[start_j][2:]
                                    argument = original_text[start_j:j + 1]
                                    event['arguments'].append({'role': role, 'argument': argument})
                                    break
                                start_j -= 1
                        events['event_list'].append(event)
data = json.dumps(events)
self.write_by_line('test1_pred.json',data)
return {}
def sequence_mask(self,sequence_length, max_len=None):
if max_len is None:
max_len = sequence_length.data.max()
batch_size = sequence_length.size(0)
        seq_range = torch.arange(0, max_len).long()
seq_range_expand = seq_range.unsqueeze(0).expand(batch_size, max_len)
# seq_range_expand = torch.tensor(seq_range_expand)
if sequence_length.is_cuda:
seq_range_expand = seq_range_expand.cuda()
seq_length_expand = (sequence_length.unsqueeze(1)
.expand_as(seq_range_expand))
return seq_range_expand < seq_length_expand
def forward(self, inputs):
batch_size = inputs.shape[0]
max_len = inputs.shape[-1]
inputs = inputs.squeeze(dim=1)
words_embedding, seq_embedding = self.model(inputs)
## words_embedding: batch_size, seq_len,1024
output, (hn, cn) = self.lstm(words_embedding)
## output: batch_size seq_len, num_directions * hidden_size
## hn: batch, num_layers*num_directions, hidden_size
## cn: batch, num_layers*num_directions, hidden_size
## num_tag: 243, num_relations: 66
emissions = self.dense(output)
emissions = torch.nn.functional.softmax(emissions, dim=-1)
soft_label = torch.einsum('bsn,nf->bsnf', emissions, self.embedding.weight)
### batch seqlen embedding_dim
soft_label = soft_label.mean(dim=2)
### batch seqlen relation_num
subject_embedding = self.subject_dense(soft_label)
### batch seqlen relation_num
object_embedding = self.object_dense(soft_label)
predicted_relation_matrix = torch.einsum('bsr,bfh->bsfrh', subject_embedding, object_embedding)
predicted_relation_matrix = predicted_relation_matrix.reshape(batch_size,max_len,max_len,-1)
predicted_relation_matrix = self.relation_dense(predicted_relation_matrix)
cls_ = self.doc_dense(seq_embedding)
return emissions, predicted_relation_matrix, cls_
def configure_optimizers(self):
return torch.optim.Adam(self.parameters(), lr=0.001)
def train_dataloader(self):
return train_loader
def val_dataloader(self):
return dev_loader
    def test_dataloader(self):
        return test_loader
model = JointModel(root_path=root_path,num_layers=2, hidden_size=1024, num_tags=NUM_TAGS, num_relations=NUM_RELATIONS)
trainer = Trainer()
trainer.fit(model)
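# Illustrative follow-up (assumption): after fitting, predictions for the test loader can
# be produced with the same trainer, which routes batches through test_step/test_step_end
# and appends the decoded events to test1_pred.json, e.g.
#
#   trainer.test(model)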
| 38.097826
| 158
| 0.602853
|
8fae9e04abeed7a7b78776dc79f145cf806698e4
| 12,097
|
py
|
Python
|
art/estimators/generation/tensorflow.py
|
synergit/adversarial-robustness-toolbox
|
192c4beda1f66776f6ede94a11808eb7b4651f01
|
[
"MIT"
] | null | null | null |
art/estimators/generation/tensorflow.py
|
synergit/adversarial-robustness-toolbox
|
192c4beda1f66776f6ede94a11808eb7b4651f01
|
[
"MIT"
] | null | null | null |
art/estimators/generation/tensorflow.py
|
synergit/adversarial-robustness-toolbox
|
192c4beda1f66776f6ede94a11808eb7b4651f01
|
[
"MIT"
] | null | null | null |
# MIT License
#
# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2020
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
This module implements the classifier `TensorFlowGenerator` for TensorFlow models.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
from typing import Any, Dict, List, Optional, Union, Tuple, TYPE_CHECKING
import numpy as np
from art.estimators.generation.generator import GeneratorMixin
from art.estimators.tensorflow import TensorFlowEstimator, TensorFlowV2Estimator
if TYPE_CHECKING:
# pylint: disable=C0412
import tensorflow.compat.v1 as tf
from art.utils import CLIP_VALUES_TYPE, PREPROCESSING_TYPE
from art.defences.preprocessor import Preprocessor
from art.defences.postprocessor import Postprocessor
logger = logging.getLogger(__name__)
class TensorFlowGenerator(GeneratorMixin, TensorFlowEstimator): # lgtm [py/missing-call-to-init]
"""
This class implements a DGM with the TensorFlow framework.
"""
estimator_params = TensorFlowEstimator.estimator_params + [
"input_ph",
"loss",
"sess",
"feed_dict",
]
def __init__(
self,
input_ph: "tf.Placeholder",
model: "tf.Tensor",
loss: Optional["tf.Tensor"] = None,
sess: Optional["tf.compat.v1.Session"] = None,
channels_first=False,
clip_values: Optional["CLIP_VALUES_TYPE"] = None,
preprocessing_defences: Union["Preprocessor", List["Preprocessor"], None] = None,
postprocessing_defences: Union["Postprocessor", List["Postprocessor"], None] = None,
preprocessing: "PREPROCESSING_TYPE" = (0.0, 1.0),
feed_dict: Optional[Dict[Any, Any]] = None,
):
"""
Initialization specific to TensorFlow generator implementations.
:param input_ph: The input placeholder.
:param model: TensorFlow model, neural network or other.
:param loss: The loss function for which to compute gradients. This parameter is necessary when training the
model and when computing gradients w.r.t. the loss function.
:param sess: Computation session.
:param channels_first: Set channels first or last.
:param clip_values: Tuple of the form `(min, max)` of floats or `np.ndarray` representing the minimum and
maximum values allowed for features. If floats are provided, these will be used as the range
of all features. If arrays are provided, each value will be considered the bound for a
feature, thus the shape of clip values needs to match the total number of features.
:param preprocessing_defences: Preprocessing defence(s) to be applied by the classifier.
:param postprocessing_defences: Postprocessing defence(s) to be applied by the classifier.
:param preprocessing: Tuple of the form `(subtrahend, divisor)` of floats or `np.ndarray` of values to be
used for data preprocessing. The first value will be subtracted from the input. The input
will then be divided by the second one.
:param feed_dict: A feed dictionary for the session run evaluating the classifier. This dictionary includes all
additionally required placeholders except the placeholders defined in this class.
"""
import tensorflow.compat.v1 as tf # lgtm [py/repeated-import]
super().__init__(
model=model,
clip_values=clip_values,
channels_first=channels_first,
preprocessing_defences=preprocessing_defences,
postprocessing_defences=postprocessing_defences,
preprocessing=preprocessing,
)
self._input_ph = input_ph
self._encoding_length = self.input_ph.shape[1]
self._loss = loss
if self.loss is not None:
self._grad = tf.gradients(self.loss, self.input_ph)
if feed_dict is None:
self._feed_dict = {}
else:
self._feed_dict = feed_dict
# Assign session
if sess is None: # pragma: no cover
raise ValueError("A session cannot be None.")
        # TODO: apply the same None check to the other required arguments
self._sess = sess
@property
def input_shape(self) -> Tuple[int, ...]:
"""
Return the shape of one input sample.
:return: Shape of one input sample.
"""
return self._input_shape # type: ignore
@property
def input_ph(self) -> "tf.Placeholder":
"""
Return the input placeholder.
:return: The input placeholder.
"""
return self._input_ph # type: ignore
@property
def loss(self) -> "tf.Tensor":
"""
        Return the loss function.
:return: The loss function.
"""
return self._loss # type: ignore
@property
def feed_dict(self) -> Dict[Any, Any]:
"""
Return the feed dictionary for the session run evaluating the classifier.
:return: The feed dictionary for the session run evaluating the classifier.
"""
return self._feed_dict # type: ignore
def predict(self, x: np.ndarray, batch_size: int = 128, **kwargs) -> np.ndarray:
"""
Perform projections over a batch of encodings.
:param x: Encodings.
:param batch_size: Batch size.
:return: Array of prediction projections of shape `(num_inputs, nb_classes)`.
"""
logging.info("Projecting new sample from z value")
feed_dict = {self.input_ph: x}
if self.feed_dict is not None:
feed_dict.update(self.feed_dict)
y = self._sess.run(self._model, feed_dict=feed_dict)
return y
def loss_gradient(self, x, y, training_mode: bool = False, **kwargs) -> np.ndarray: # pylint: disable=W0221
raise NotImplementedError
def fit(self, x, y, batch_size=128, nb_epochs=10, **kwargs):
"""
Do nothing.
"""
raise NotImplementedError
def get_activations(
self, x: np.ndarray, layer: Union[int, str], batch_size: int, framework: bool = False
) -> np.ndarray:
"""
Do nothing.
"""
raise NotImplementedError
def compute_loss(self, x: np.ndarray, y: np.ndarray, **kwargs) -> np.ndarray:
raise NotImplementedError
@property
def model(self) -> "tf.Tensor":
"""
Returns the generator tensor.
:return: The generator tensor.
"""
return self._model
@property
def encoding_length(self) -> int:
"""
Returns the length of the encoding size output.
:return: The length of the encoding size output.
"""
return self._encoding_length
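# A minimal usage sketch for the v1 estimator above, kept as comments so the module
# is unchanged. The latent size, layer shape and variable names are illustrative
# assumptions, not part of this module; it assumes eager execution is disabled and
# a tf.compat.v1 session is available.
#
#   import numpy as np
#   import tensorflow.compat.v1 as tf
#
#   tf.disable_eager_execution()
#   z_ph = tf.placeholder(tf.float32, shape=[None, 100])                    # latent input
#   fake_images = tf.layers.dense(z_ph, units=784, activation=tf.nn.sigmoid)
#   sess = tf.Session()
#   sess.run(tf.global_variables_initializer())
#
#   generator = TensorFlowGenerator(input_ph=z_ph, model=fake_images, sess=sess)
#   samples = generator.predict(np.random.normal(size=(16, 100)).astype(np.float32))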
class TensorFlowV2Generator(GeneratorMixin, TensorFlowV2Estimator): # lgtm [py/missing-call-to-init]
"""
This class implements a DGM with the TensorFlow framework.
"""
estimator_params = TensorFlowV2Estimator.estimator_params + [
"encoding_length",
]
def __init__(
self,
encoding_length: int,
model: "tf.Tensor",
channels_first: bool = False,
clip_values: Optional["CLIP_VALUES_TYPE"] = None,
preprocessing_defences: Union["Preprocessor", List["Preprocessor"], None] = None,
postprocessing_defences: Union["Postprocessor", List["Postprocessor"], None] = None,
preprocessing: "PREPROCESSING_TYPE" = (0.0, 1.0),
):
"""
Initialization specific to TensorFlow generator implementations.
        :param encoding_length: Length of the input seed.
        :param model: TensorFlow model, neural network or other.
:param channels_first: Set channels first or last.
:param clip_values: Tuple of the form `(min, max)` of floats or `np.ndarray` representing the minimum and
maximum values allowed for features. If floats are provided, these will be used as the range
of all features. If arrays are provided, each value will be considered the bound for a
feature, thus the shape of clip values needs to match the total number of features.
:param preprocessing_defences: Preprocessing defence(s) to be applied by the classifier.
:param postprocessing_defences: Postprocessing defence(s) to be applied by the classifier.
:param preprocessing: Tuple of the form `(subtrahend, divisor)` of floats or `np.ndarray` of values to be
used for data preprocessing. The first value will be subtracted from the input. The input
will then be divided by the second one.
"""
super().__init__(
model=model,
clip_values=clip_values,
channels_first=channels_first,
preprocessing_defences=preprocessing_defences,
postprocessing_defences=postprocessing_defences,
preprocessing=preprocessing,
)
self._encoding_length = encoding_length
@property
def model(self) -> "tf.Tensor":
"""
:return: The generator tensor.
"""
return self._model
@property
def encoding_length(self) -> int:
"""
:return: The length of the encoding size output.
"""
return self._encoding_length
@property
def input_shape(self) -> Tuple[int, ...]:
raise NotImplementedError
def predict( # pylint: disable=W0221
self, x: np.ndarray, batch_size: int = 128, training_mode: bool = False, **kwargs
) -> np.ndarray:
"""
Perform projections over a batch of encodings.
:param x: Encodings.
:param batch_size: Batch size.
        :param training_mode: `True` for model set to training mode and `False` for model set to evaluation mode.
:return: Array of prediction projections of shape `(num_inputs, nb_classes)`.
"""
# Run prediction with batch processing
results_list = []
num_batch = int(np.ceil(len(x) / float(batch_size)))
for m in range(num_batch):
# Batch indexes
begin, end = (
m * batch_size,
min((m + 1) * batch_size, x.shape[0]),
)
# Run prediction
results_list.append(self._model(x[begin:end], training=training_mode).numpy())
results = np.vstack(results_list)
return results
def loss_gradient(self, x, y, **kwargs) -> np.ndarray:
raise NotImplementedError
def fit(self, x, y, batch_size=128, nb_epochs=10, **kwargs):
"""
Do nothing.
"""
raise NotImplementedError
def get_activations(
self, x: np.ndarray, layer: Union[int, str], batch_size: int, framework: bool = False
) -> np.ndarray:
"""
Do nothing.
"""
raise NotImplementedError
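# A minimal usage sketch for the v2 estimator above, kept as comments. The Keras
# decoder architecture and latent size are illustrative assumptions, not part of
# this module.
#
#   import numpy as np
#   import tensorflow as tf
#
#   decoder = tf.keras.Sequential([
#       tf.keras.layers.Dense(128, activation="relu", input_shape=(100,)),
#       tf.keras.layers.Dense(784, activation="sigmoid"),
#   ])
#   generator = TensorFlowV2Generator(encoding_length=100, model=decoder)
#   samples = generator.predict(np.random.normal(size=(16, 100)).astype(np.float32))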
| 39.53268
| 120
| 0.642473
|
dc95ba9b445ea680f0e358c5a7d6a16c3eb525e8
| 10,248
|
py
|
Python
|
contrib/spendfrom/spendfrom.py
|
hss5747/HashShare-Core
|
d448f7cad7f6a3947154017bdfe432cdc772d2b9
|
[
"MIT"
] | 2
|
2019-11-13T09:21:40.000Z
|
2019-12-09T00:49:25.000Z
|
contrib/spendfrom/spendfrom.py
|
hss5747/HashShare-Core
|
d448f7cad7f6a3947154017bdfe432cdc772d2b9
|
[
"MIT"
] | null | null | null |
contrib/spendfrom/spendfrom.py
|
hss5747/HashShare-Core
|
d448f7cad7f6a3947154017bdfe432cdc772d2b9
|
[
"MIT"
] | 1
|
2019-10-31T00:51:52.000Z
|
2019-10-31T00:51:52.000Z
|
#!/usr/bin/env python
#
# Use the raw transactions API to spend HSSs received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a hashsharecoind or hashsharecoin-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the hashsharecoin data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/HashShareCoin/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "HashShareCoin")
return os.path.expanduser("~/.hashsharecoin")
def read_bitcoin_config(dbdir):
"""Read the hashsharecoin.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "hashsharecoin.conf"))))
return dict(config_parser.items("all"))
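# Example hashsharecoin.conf consumed by read_bitcoin_config/connect_JSON below
# (values are illustrative; rpcport falls back to 15113, or 51475 on testnet):
#
#   rpcuser=rpcusername
#   rpcpassword=change_this_to_a_long_random_password
#   # rpcport=15113
#   # testnet=1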
def connect_JSON(config):
"""Connect to a hashsharecoin JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
if not 'rpcport' in config:
config['rpcport'] = 51475 if testnet else 15113
connect = "http://%s:%s@127.0.0.1:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the hashsharecoind we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(hashsharecoind):
info = hashsharecoind.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
hashsharecoind.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = hashsharecoind.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(hashsharecoind):
address_summary = dict()
address_to_account = dict()
for info in hashsharecoind.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = hashsharecoind.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = hashsharecoind.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-hashsharecoin-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
def select_coins(needed, inputs):
    # Feel free to improve this; it is good enough for my simple needs:
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
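# Worked example of the greedy selection above (illustrative amounts): with
# needed = 5.0 and unspent inputs of 3.0, 2.5 and 4.0, the first two outputs are
# taken (3.0 + 2.5 = 5.5 >= 5.0) and the function returns those outputs plus
# 0.5 of change.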
def create_tx(hashsharecoind, fromaddresses, toaddress, amount, fee):
all_coins = list_available(hashsharecoind)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to hashsharecoind.
#
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = hashsharecoind.createrawtransaction(inputs, outputs)
signed_rawtx = hashsharecoind.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(hashsharecoind, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = hashsharecoind.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(hashsharecoind, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = hashsharecoind.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(hashsharecoind, txinfo)
total_out = compute_amount_out(txinfo)
if total_in-total_out > max_fee:
raise FeeError("Rejecting transaction, unreasonable fee of "+str(total_in-total_out))
        # the actual fee is whatever remains after outputs are subtracted from inputs
        fee = total_in - total_out
        tx_size = len(txdata_hex)/2
        kb = tx_size/1000  # integer division rounds down
        if kb > 1 and fee < BASE_FEE:
raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
if total_in < 0.01 and fee < BASE_FEE:
raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get HSSs from")
parser.add_option("--to", dest="to", default=None,
help="address to get send HSSs to")
parser.add_option("--amount", dest="amount", default=None,
help="amount to send")
parser.add_option("--fee", dest="fee", default="0.0",
help="fee to include")
parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
help="location of hashsharecoin.conf file with RPC username/password (default: %default)")
parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
help="Use the test network")
parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
help="Don't broadcast the transaction, just create and print the transaction data")
(options, args) = parser.parse_args()
check_json_precision()
config = read_bitcoin_config(options.datadir)
if options.testnet: config['testnet'] = True
hashsharecoind = connect_JSON(config)
if options.amount is None:
address_summary = list_available(hashsharecoind)
for address,info in address_summary.iteritems():
n_transactions = len(info['outputs'])
if n_transactions > 1:
print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
else:
print("%s %.8f %s"%(address, info['total'], info['account']))
else:
fee = Decimal(options.fee)
amount = Decimal(options.amount)
while unlock_wallet(hashsharecoind) == False:
pass # Keep asking for passphrase until they get it right
txdata = create_tx(hashsharecoind, options.fromaddresses.split(","), options.to, amount, fee)
sanity_test_fee(hashsharecoind, txdata, amount*Decimal("0.01"))
if options.dry_run:
print(txdata)
else:
txid = hashsharecoind.sendrawtransaction(txdata)
print(txid)
if __name__ == '__main__':
main()
| 38.238806
| 112
| 0.640222
|
12ccffc815de32595d9539fec0cced5913f799e1
| 2,588
|
py
|
Python
|
tutorials/01-basics/logistic_regression/main.py
|
NegatioN/pytorch-tutorial
|
1ec90fad7260e7871a4912fce552cad90f6c2f4a
|
[
"MIT"
] | 12
|
2018-03-07T00:44:56.000Z
|
2019-01-25T11:07:43.000Z
|
tutorials/01-basics/logistic_regression/main.py
|
NegatioN/pytorch-tutorial
|
1ec90fad7260e7871a4912fce552cad90f6c2f4a
|
[
"MIT"
] | 3
|
2018-03-02T03:38:41.000Z
|
2018-03-20T00:45:06.000Z
|
tutorials/01-basics/logistic_regression/main.py
|
NegatioN/pytorch-tutorial
|
1ec90fad7260e7871a4912fce552cad90f6c2f4a
|
[
"MIT"
] | 16
|
2017-10-04T19:38:09.000Z
|
2021-10-04T14:26:14.000Z
|
import torch
import torch.nn as nn
import torchvision.datasets as dsets
import torchvision.transforms as transforms
from torch.autograd import Variable
# Hyper Parameters
input_size = 784
num_classes = 10
num_epochs = 5
batch_size = 100
learning_rate = 0.001
# MNIST Dataset (Images and Labels)
train_dataset = dsets.MNIST(root='./data',
train=True,
transform=transforms.ToTensor(),
download=True)
test_dataset = dsets.MNIST(root='./data',
train=False,
transform=transforms.ToTensor())
# Dataset Loader (Input Pipeline)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
batch_size=batch_size,
shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
batch_size=batch_size,
shuffle=False)
# Model
class LogisticRegression(nn.Module):
def __init__(self, input_size, num_classes):
super(LogisticRegression, self).__init__()
self.linear = nn.Linear(input_size, num_classes)
def forward(self, x):
out = self.linear(x)
return out
model = LogisticRegression(input_size, num_classes)
# Loss and Optimizer
# Softmax is internally computed.
# Set parameters to be updated.
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
# Training the Model
for epoch in range(num_epochs):
for i, (images, labels) in enumerate(train_loader):
images = Variable(images.view(-1, 28*28))
labels = Variable(labels)
# Forward + Backward + Optimize
optimizer.zero_grad()
outputs = model(images)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
if (i+1) % 100 == 0:
print ('Epoch: [%d/%d], Step: [%d/%d], Loss: %.4f'
% (epoch+1, num_epochs, i+1, len(train_dataset)//batch_size, loss.data[0]))
# Test the Model
correct = 0
total = 0
for images, labels in test_loader:
images = Variable(images.view(-1, 28*28))
outputs = model(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum()
print('Accuracy of the model on the 10000 test images: %d %%' % (100 * correct / total))
# Save the Model
torch.save(model.state_dict(), 'model.pkl')
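# To reuse the saved weights later (sketch; the path must match the file saved above):
# model = LogisticRegression(input_size, num_classes)
# model.load_state_dict(torch.load('model.pkl'))
# model.eval()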
| 31.560976
| 94
| 0.601236
|
ad506c384debff1a78fb376464e22afdaa4f199d
| 166
|
py
|
Python
|
actors/actions/damage_action.py
|
Catsuko/Westward
|
3c04df668f7e04ca45e622017ffa9dfe6d3c242c
|
[
"MIT"
] | 3
|
2019-12-22T22:44:43.000Z
|
2020-02-11T11:14:10.000Z
|
actors/actions/damage_action.py
|
Catsuko/Westward
|
3c04df668f7e04ca45e622017ffa9dfe6d3c242c
|
[
"MIT"
] | null | null | null |
actors/actions/damage_action.py
|
Catsuko/Westward
|
3c04df668f7e04ca45e622017ffa9dfe6d3c242c
|
[
"MIT"
] | null | null | null |
from actors.actions.action import Action
class DamageAction(Action):
def on(self, actor, tile, root):
return actor.attempt("damage", root, tile), self
| 20.75
| 56
| 0.698795
|
40f93829d398d4a48129be0b51cfe9554cb5f143
| 2,671
|
py
|
Python
|
model/loss.py
|
happygirlzt/soft_alignment_model_bug_deduplication
|
9c529542749a52e377baeb99d1782920bc72df49
|
[
"Unlicense"
] | 2
|
2020-11-11T00:26:25.000Z
|
2020-12-21T16:17:28.000Z
|
model/loss.py
|
happygirlzt/soft_alignment_model_bug_deduplication
|
9c529542749a52e377baeb99d1782920bc72df49
|
[
"Unlicense"
] | 5
|
2020-12-22T10:59:38.000Z
|
2021-07-13T15:00:46.000Z
|
model/loss.py
|
irving-muller/soft_alignment_model_bug_deduplication
|
abf786a17f526d965f1b6c303b06f26662d22f32
|
[
"Unlicense"
] | 6
|
2020-09-25T01:01:37.000Z
|
2022-02-20T19:29:31.000Z
|
import logging
from torch.nn.modules.loss import _Loss
import torch
import torch.nn as nn
import torch.nn.functional as F
class NeculoiuLoss(_Loss):
"""
Learning Text Similarity with Siamese Recurrent Networks - Paul Neculoiu, Maarten Versteegh and Mihai Rotaru
Contrastive loss for cosine similarity
"""
def __init__(self, margin=0.0, size_average=None, reduce=None, reduction='mean'):
super(NeculoiuLoss, self).__init__(size_average, reduce, reduction)
self.margin = margin
def forward(self, similarity, y):
lossPos = y * 0.25 * (1 - similarity) ** 2
lossNegative = (1 - y) * torch.where(similarity < self.margin, torch.zeros(similarity.shape), similarity) ** 2
loss = lossPos + lossNegative
if self.reduction == 'none':
return loss
elif self.reduction == 'mean':
return loss.mean()
elif self.reduction == 'sum':
return loss.sum()
class CosineLoss(_Loss):
def __init__(self, margin=0.0, size_average=None, reduce=None, reduction='mean'):
super(CosineLoss, self).__init__(size_average, reduce, reduction)
self.margin = margin
def forward(self, similarity, y):
loss = y * (1 - similarity) + (1 - y) * F.relu(similarity - self.margin)
if self.reduction == 'none':
return loss
elif self.reduction == 'mean':
return loss.mean()
elif self.reduction == 'sum':
return loss.sum()
class TripletLoss(_Loss):
def __init__(self, margin=1, size_average=None, reduce=None, reduction='mean'):
super(TripletLoss, self).__init__(size_average, reduce, reduction)
self.margin = margin
self.logger = logging.getLogger(__name__)
def forward(self, output, target):
simAnchorPos, simAnchorNeg = output
rs = self.margin - simAnchorPos + simAnchorNeg
loss = F.relu(rs)
if self.reduction == 'none':
return loss
elif self.reduction == 'mean':
return loss.mean()
elif self.reduction == 'sum':
return loss.sum()
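# Minimal usage sketch for TripletLoss, kept as comments. The similarity values
# are illustrative; the target argument is unused by forward() and is passed only
# for interface compatibility.
#
#   criterion = TripletLoss(margin=1.0)
#   sim_anchor_pos = torch.tensor([0.9, 0.2])
#   sim_anchor_neg = torch.tensor([0.1, 0.8])
#   loss = criterion((sim_anchor_pos, sim_anchor_neg), None)  # mean of relu(margin - pos + neg)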
| 31.05814
| 118
| 0.618121
|
7cb5ed77a1b707b0a1e1c138808c0b5df9d68291
| 458
|
py
|
Python
|
numba/tests/test_typenames.py
|
tolysz/numba
|
d7953a18dbf5ea231dc16e967ce8e9b754578ea6
|
[
"Apache-2.0",
"BSD-2-Clause"
] | null | null | null |
numba/tests/test_typenames.py
|
tolysz/numba
|
d7953a18dbf5ea231dc16e967ce8e9b754578ea6
|
[
"Apache-2.0",
"BSD-2-Clause"
] | 1
|
2019-02-11T13:46:30.000Z
|
2019-02-11T13:46:30.000Z
|
numba/tests/test_typenames.py
|
asodeur/numba
|
d7953a18dbf5ea231dc16e967ce8e9b754578ea6
|
[
"Apache-2.0",
"BSD-2-Clause"
] | null | null | null |
import numpy as np
from numba import types
from numba import unittest_support as unittest
class TestTypeNames(unittest.TestCase):
def test_numpy_integers(self):
expect = getattr(types, "int%d" % (np.dtype("int").itemsize * 8))
self.assertEqual(types.int_, expect)
expect = getattr(types, "uint%d" % (np.dtype("uint").itemsize * 8))
self.assertEqual(types.uint, expect)
if __name__ == '__main__':
unittest.main()
| 25.444444
| 75
| 0.676856
|
e0fc1b9fecabf3876f2450f7fd47bcdbe8e92635
| 6,503
|
py
|
Python
|
lib/googlecloudsdk/command_lib/iot/resource_args.py
|
bshaffer/google-cloud-sdk
|
f587382fd112f238c0d6d5ca3dab8f52d2b5c5f9
|
[
"Apache-2.0"
] | null | null | null |
lib/googlecloudsdk/command_lib/iot/resource_args.py
|
bshaffer/google-cloud-sdk
|
f587382fd112f238c0d6d5ca3dab8f52d2b5c5f9
|
[
"Apache-2.0"
] | null | null | null |
lib/googlecloudsdk/command_lib/iot/resource_args.py
|
bshaffer/google-cloud-sdk
|
f587382fd112f238c0d6d5ca3dab8f52d2b5c5f9
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*- #
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Shared resource flags for Cloud IoT commands."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope.concepts import concepts
from googlecloudsdk.command_lib.util.concepts import concept_parsers
from googlecloudsdk.command_lib.util.concepts import presentation_specs
def DeviceAttributeConfig(name='device'):
return concepts.ResourceParameterAttributeConfig(
name=name,
help_text='The device of the {resource}.',
completion_request_params={'fieldMask': 'name'},
completion_id_field='id')
def RegistryAttributeConfig():
return concepts.ResourceParameterAttributeConfig(
name='registry',
help_text='The device registry for the {resource}.')
def RegionAttributeConfig():
return concepts.ResourceParameterAttributeConfig(
name='region',
help_text='The Cloud region for the {resource}.')
def GetDeviceResourceSpec(resource_name='device'):
return concepts.ResourceSpec(
'cloudiot.projects.locations.registries.devices',
resource_name=resource_name,
devicesId=DeviceAttributeConfig(name=resource_name),
registriesId=RegistryAttributeConfig(),
locationsId=RegionAttributeConfig(),
projectsId=concepts.DEFAULT_PROJECT_ATTRIBUTE_CONFIG,
disable_auto_completers=False)
def GetRegistryResourceSpec():
return concepts.ResourceSpec(
'cloudiot.projects.locations.registries',
resource_name='registry',
registriesId=RegistryAttributeConfig(),
locationsId=RegionAttributeConfig(),
projectsId=concepts.DEFAULT_PROJECT_ATTRIBUTE_CONFIG,
disable_auto_completers=False)
def GetRegionResourceSpec():
return concepts.ResourceSpec(
'cloudiot.projects.locations',
resource_name='region',
locationsId=RegionAttributeConfig(),
projectsId=concepts.DEFAULT_PROJECT_ATTRIBUTE_CONFIG)
def AddDeviceResourceArg(parser, verb, positional=True):
"""Add a resource argument for a cloud IOT device.
NOTE: Must be used only if it's the only resource arg in the command.
Args:
parser: the parser for the command.
verb: str, the verb to describe the resource, such as 'to update'.
positional: bool, if True, means that the device ID is a positional rather
than a flag.
"""
if positional:
name = 'device'
else:
name = '--device'
concept_parsers.ConceptParser.ForResource(
name,
GetDeviceResourceSpec(),
'The device {}.'.format(verb),
required=True).AddToParser(parser)
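# Typical call site sketch (hypothetical command class; surface commands wire
# resource args from their Args hook):
#
#   from googlecloudsdk.calliope import base
#   from googlecloudsdk.command_lib.iot import resource_args
#
#   class Describe(base.DescribeCommand):
#     """Describe a device."""
#
#     @staticmethod
#     def Args(parser):
#       resource_args.AddDeviceResourceArg(parser, 'to describe')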
def AddRegistryResourceArg(parser, verb, positional=True):
"""Add a resource argument for a cloud IOT device registry.
NOTE: Must be used only if it's the only resource arg in the command.
Args:
parser: the parser for the command.
verb: str, the verb to describe the resource, such as 'to update'.
positional: bool, if True, means that the device ID is a positional rather
than a flag.
"""
if positional:
name = 'registry'
else:
name = '--registry'
concept_parsers.ConceptParser.ForResource(
name,
GetRegistryResourceSpec(),
'The device registry {}.'.format(verb),
required=True).AddToParser(parser)
def AddRegionResourceArg(parser, verb):
"""Add a resource argument for a cloud IOT region.
NOTE: Must be used only if it's the only resource arg in the command.
Args:
parser: the parser for the command.
verb: str, the verb to describe the resource, such as 'to update'.
"""
concept_parsers.ConceptParser.ForResource(
'--region',
GetRegionResourceSpec(),
'The Cloud region {}.'.format(verb),
required=True).AddToParser(parser)
def CreateDevicePresentationSpec(verb, help_text='The device {}.',
name='device', required=False,
prefixes=True, positional=False):
"""Build ResourcePresentationSpec for generic device Resource.
NOTE: Should be used when there are multiple resources args in the command.
Args:
verb: string, the verb to describe the resource, such as 'to bind'.
help_text: string, the help text for the entire resource arg group. Should
have a format specifier (`{}`) to insert verb.
name: string, name of resource anchor argument.
required: bool, whether or not this resource arg is required.
prefixes: bool, if True the resource name will be used as a prefix for
the flags in the resource group.
positional: bool, if True, means that the device ID is a positional rather
than a flag.
Returns:
ResourcePresentationSpec, presentation spec for device.
"""
arg_name = name if positional else '--' + name
arg_help = help_text.format(verb)
return presentation_specs.ResourcePresentationSpec(
arg_name,
GetDeviceResourceSpec(name),
arg_help,
required=required,
prefixes=prefixes
)
def _GetBindResourceConcepts(verb='to bind to'):
"""Build ConceptParser for (un)bind commands resource args."""
arg_specs = [
CreateDevicePresentationSpec( # gateway spec
verb,
help_text='The gateway device {}.',
name='gateway',
required=True),
CreateDevicePresentationSpec( # device spec
verb,
help_text='The device {} the gateway.',
required=True),
]
fallthroughs = {
'--device.registry': ['--gateway.registry'],
'--gateway.registry': ['--device.registry']
}
return concept_parsers.ConceptParser(arg_specs, fallthroughs)
def AddBindResourceArgsToParser(parser):
"""Add resource args for gateways (un)bind commands to parser."""
_GetBindResourceConcepts().AddToParser(parser)
def BindAdditionalArgsHook():
return [_GetBindResourceConcepts()]
def UnBindAdditionalArgsHook():
return [_GetBindResourceConcepts('to unbind from')]
| 32.193069
| 78
| 0.714901
|
ea2c9b4aeb5c7d3e5cafc9ff3b448f6112901a24
| 21,074
|
py
|
Python
|
django_q/tasks.py
|
ashkov/django-q
|
49e57a6d548ab2bc69b3372bf9eed1617cb59b62
|
[
"MIT"
] | null | null | null |
django_q/tasks.py
|
ashkov/django-q
|
49e57a6d548ab2bc69b3372bf9eed1617cb59b62
|
[
"MIT"
] | null | null | null |
django_q/tasks.py
|
ashkov/django-q
|
49e57a6d548ab2bc69b3372bf9eed1617cb59b62
|
[
"MIT"
] | 2
|
2020-11-10T01:14:24.000Z
|
2021-06-11T12:50:19.000Z
|
"""Provides task functionality."""
# Standard
from time import sleep, time
# django
from django.db import IntegrityError
from django.utils import timezone
from multiprocessing import Value
from django_q.brokers import get_broker
# local
from django_q.conf import Conf, logger
from django_q.humanhash import uuid
from django_q.models import Schedule, Task
from django_q.queues import Queue
from django_q.signals import pre_enqueue
from django_q.signing import SignedPackage
def async_task(func, *args, **kwargs):
"""Queue a task for the cluster."""
keywords = kwargs.copy()
opt_keys = (
'hook', 'group', 'save', 'sync', 'cached', 'ack_failure', 'iter_count', 'iter_cached', 'chain', 'broker', 'timeout')
q_options = keywords.pop('q_options', {})
# get an id
tag = uuid()
# build the task package
task = {'id': tag[1],
'name': keywords.pop('task_name', None) or q_options.pop('task_name', None) or tag[0],
'func': func,
'args': args}
# push optionals
for key in opt_keys:
if q_options and key in q_options:
task[key] = q_options[key]
elif key in keywords:
task[key] = keywords.pop(key)
# don't serialize the broker
broker = task.pop('broker', get_broker())
# overrides
if 'cached' not in task and Conf.CACHED:
task['cached'] = Conf.CACHED
if 'sync' not in task and Conf.SYNC:
task['sync'] = Conf.SYNC
if 'ack_failure' not in task and Conf.ACK_FAILURES:
task['ack_failure'] = Conf.ACK_FAILURES
# finalize
task['kwargs'] = keywords
task['started'] = timezone.now()
# signal it
pre_enqueue.send(sender="django_q", task=task)
# sign it
pack = SignedPackage.dumps(task)
if task.get('sync', False):
return _sync(pack)
# push it
enqueue_id = broker.enqueue(pack)
logger.info('Enqueued {}'.format(enqueue_id))
logger.debug('Pushed {}'.format(tag))
return task['id']
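# Minimal usage sketch (the dotted function path, group name and hook are
# illustrative; q_options can carry the same per-call overrides handled above):
#
#   task_id = async_task('math.copysign', 2, -2, group='signs', hook='hooks.print_result')
#   task_id = async_task('math.copysign', 2, -2, q_options={'timeout': 30})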
def schedule(func, *args, **kwargs):
"""
Create a schedule.
:param func: function to schedule.
:param args: function arguments.
:param name: optional name for the schedule.
:param hook: optional result hook function.
:type schedule_type: Schedule.TYPE
:param repeats: how many times to repeat. 0=never, -1=always.
:param next_run: Next scheduled run.
:type next_run: datetime.datetime
:param kwargs: function keyword arguments.
:return: the schedule object.
:rtype: Schedule
"""
name = kwargs.pop('name', None)
hook = kwargs.pop('hook', None)
schedule_type = kwargs.pop('schedule_type', Schedule.ONCE)
minutes = kwargs.pop('minutes', None)
repeats = kwargs.pop('repeats', -1)
next_run = kwargs.pop('next_run', timezone.now())
    # check for name duplicates instead of a unique constraint
if name and Schedule.objects.filter(name=name).exists():
raise IntegrityError("A schedule with the same name already exists.")
# create and return the schedule
return Schedule.objects.create(name=name,
func=func,
hook=hook,
args=args,
kwargs=kwargs,
schedule_type=schedule_type,
minutes=minutes,
repeats=repeats,
next_run=next_run
)
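# Minimal usage sketch (function path, name and timing are illustrative):
#
#   schedule('math.copysign', 2, -2,
#            name='nightly-copysign',
#            schedule_type=Schedule.DAILY,
#            repeats=-1,
#            next_run=timezone.now())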
def result(task_id, wait=0, cached=Conf.CACHED):
"""
Return the result of the named task.
:type task_id: str or uuid
:param task_id: the task name or uuid
:type wait: int
:param wait: number of milliseconds to wait for a result
:param bool cached: run this against the cache backend
:return: the result object of this task
:rtype: object
"""
if cached:
return result_cached(task_id, wait)
start = time()
while True:
r = Task.get_result(task_id)
if r:
return r
if (time() - start) * 1000 >= wait >= 0:
break
sleep(0.01)
def result_cached(task_id, wait=0, broker=None):
"""
Return the result from the cache backend
"""
if not broker:
broker = get_broker()
start = time()
while True:
r = broker.cache.get('{}:{}'.format(broker.list_key, task_id))
if r:
return SignedPackage.loads(r)['result']
if (time() - start) * 1000 >= wait >= 0:
break
sleep(0.01)
def result_group(group_id, failures=False, wait=0, count=None, cached=Conf.CACHED):
"""
Return a list of results for a task group.
:param str group_id: the group id
:param bool failures: set to True to include failures
:param int count: Block until there are this many results in the group
:param bool cached: run this against the cache backend
:return: list or results
"""
if cached:
return result_group_cached(group_id, failures, wait, count)
start = time()
if count:
while True:
if count_group(group_id) == count or wait and (time() - start) * 1000 >= wait >= 0:
break
sleep(0.01)
while True:
r = Task.get_result_group(group_id, failures)
if r:
return r
if (time() - start) * 1000 >= wait >= 0:
break
sleep(0.01)
def result_group_cached(group_id, failures=False, wait=0, count=None, broker=None):
"""
Return a list of results for a task group from the cache backend
"""
if not broker:
broker = get_broker()
start = time()
if count:
while True:
if count_group_cached(group_id) == count or wait and (time() - start) * 1000 >= wait > 0:
break
sleep(0.01)
while True:
group_list = broker.cache.get('{}:{}:keys'.format(broker.list_key, group_id))
if group_list:
result_list = []
for task_key in group_list:
task = SignedPackage.loads(broker.cache.get(task_key))
if task['success'] or failures:
result_list.append(task['result'])
return result_list
if (time() - start) * 1000 >= wait >= 0:
break
sleep(0.01)
def fetch(task_id, wait=0, cached=Conf.CACHED):
"""
Return the processed task.
:param task_id: the task name or uuid
:type task_id: str or uuid
:param wait: the number of milliseconds to wait for a result
:type wait: int
:param bool cached: run this against the cache backend
:return: the full task object
:rtype: Task
"""
if cached:
return fetch_cached(task_id, wait)
start = time()
while True:
t = Task.get_task(task_id)
if t:
return t
if (time() - start) * 1000 >= wait >= 0:
break
sleep(0.01)
def fetch_cached(task_id, wait=0, broker=None):
"""
Return the processed task from the cache backend
"""
if not broker:
broker = get_broker()
start = time()
while True:
r = broker.cache.get('{}:{}'.format(broker.list_key, task_id))
if r:
task = SignedPackage.loads(r)
t = Task(id=task['id'],
name=task['name'],
func=task['func'],
hook=task.get('hook'),
args=task['args'],
kwargs=task['kwargs'],
started=task['started'],
stopped=task['stopped'],
result=task['result'],
success=task['success'])
return t
if (time() - start) * 1000 >= wait >= 0:
break
sleep(0.01)
def fetch_group(group_id, failures=True, wait=0, count=None, cached=Conf.CACHED):
"""
Return a list of Tasks for a task group.
:param str group_id: the group id
:param bool failures: set to False to exclude failures
:param bool cached: run this against the cache backend
:return: list of Tasks
"""
if cached:
return fetch_group_cached(group_id, failures, wait, count)
start = time()
if count:
while True:
if count_group(group_id) == count or wait and (time() - start) * 1000 >= wait >= 0:
break
sleep(0.01)
while True:
r = Task.get_task_group(group_id, failures)
if r:
return r
if (time() - start) * 1000 >= wait >= 0:
break
sleep(0.01)
def fetch_group_cached(group_id, failures=True, wait=0, count=None, broker=None):
"""
Return a list of Tasks for a task group in the cache backend
"""
if not broker:
broker = get_broker()
start = time()
if count:
while True:
if count_group_cached(group_id) == count or wait and (time() - start) * 1000 >= wait >= 0:
break
sleep(0.01)
while True:
group_list = broker.cache.get('{}:{}:keys'.format(broker.list_key, group_id))
if group_list:
task_list = []
for task_key in group_list:
task = SignedPackage.loads(broker.cache.get(task_key))
if task['success'] or failures:
t = Task(id=task['id'],
name=task['name'],
func=task['func'],
hook=task.get('hook'),
args=task['args'],
kwargs=task['kwargs'],
started=task['started'],
stopped=task['stopped'],
result=task['result'],
group=task.get('group'),
success=task['success'])
task_list.append(t)
return task_list
if (time() - start) * 1000 >= wait >= 0:
break
sleep(0.01)
def count_group(group_id, failures=False, cached=Conf.CACHED):
"""
Count the results in a group.
:param str group_id: the group id
:param bool failures: Returns failure count if True
:param bool cached: run this against the cache backend
:return: the number of tasks/results in a group
:rtype: int
"""
if cached:
return count_group_cached(group_id, failures)
return Task.get_group_count(group_id, failures)
def count_group_cached(group_id, failures=False, broker=None):
"""
Count the results in a group in the cache backend
"""
if not broker:
broker = get_broker()
group_list = broker.cache.get('{}:{}:keys'.format(broker.list_key, group_id))
if group_list:
if not failures:
return len(group_list)
failure_count = 0
for task_key in group_list:
task = SignedPackage.loads(broker.cache.get(task_key))
if not task['success']:
failure_count += 1
return failure_count
def delete_group(group_id, tasks=False, cached=Conf.CACHED):
"""
Delete a group.
:param str group_id: the group id
:param bool tasks: If set to True this will also delete the group tasks.
Otherwise just the group label is removed.
:param bool cached: run this against the cache backend
:return:
"""
if cached:
return delete_group_cached(group_id)
return Task.delete_group(group_id, tasks)
def delete_group_cached(group_id, broker=None):
"""
Delete a group from the cache backend
"""
if not broker:
broker = get_broker()
group_key = '{}:{}:keys'.format(broker.list_key, group_id)
group_list = broker.cache.get(group_key)
broker.cache.delete_many(group_list)
broker.cache.delete(group_key)
def delete_cached(task_id, broker=None):
"""
Delete a task from the cache backend
"""
if not broker:
broker = get_broker()
return broker.cache.delete('{}:{}'.format(broker.list_key, task_id))
def queue_size(broker=None):
"""
Returns the current queue size.
Note that this doesn't count any tasks currently being processed by workers.
:param broker: optional broker
:return: current queue size
:rtype: int
"""
if not broker:
broker = get_broker()
return broker.queue_size()
def async_iter(func, args_iter, **kwargs):
"""
enqueues a function with iterable arguments
"""
iter_count = len(args_iter)
iter_group = uuid()[1]
# clean up the kwargs
options = kwargs.get('q_options', kwargs)
options.pop('hook', None)
options['broker'] = options.get('broker', get_broker())
options['group'] = iter_group
options['iter_count'] = iter_count
if options.get('cached', None):
options['iter_cached'] = options['cached']
options['cached'] = True
# save the original arguments
broker = options['broker']
broker.cache.set('{}:{}:args'.format(broker.list_key, iter_group), SignedPackage.dumps(args_iter))
for args in args_iter:
if not isinstance(args, tuple):
args = (args,)
async_task(func, *args, **options)
return iter_group
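# Minimal usage sketch: one task is queued per element and the assembled result is
# fetched under the returned id (function path and wait value are illustrative):
#
#   task_id = async_iter('math.floor', [1.57, 2.24, 3.95], cached=True)
#   values = result(task_id, wait=500, cached=True)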
def async_chain(chain, group=None, cached=Conf.CACHED, sync=Conf.SYNC, broker=None):
"""
enqueues a chain of tasks
the chain must be in the format [(func,(args),{kwargs}),(func,(args),{kwargs})]
"""
if not group:
group = uuid()[1]
args = ()
kwargs = {}
task = chain.pop(0)
if type(task) is not tuple:
task = (task,)
if len(task) > 1:
args = task[1]
if len(task) > 2:
kwargs = task[2]
kwargs['chain'] = chain
kwargs['group'] = group
kwargs['cached'] = cached
kwargs['sync'] = sync
kwargs['broker'] = broker or get_broker()
async_task(task[0], *args, **kwargs)
return group
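# Minimal usage sketch of the chain format described above (function paths and the
# wait value are illustrative):
#
#   group_id = async_chain([('math.copysign', (1, -1), {}),
#                           ('math.floor', (1.5,), {})])
#   results = result_group(group_id, count=2, wait=500)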
class Iter(object):
"""
An async task with iterable arguments
"""
def __init__(self, func=None, args=None, kwargs=None, cached=Conf.CACHED, sync=Conf.SYNC, broker=None):
self.func = func
self.args = args or []
self.kwargs = kwargs or {}
self.id = ''
self.broker = broker or get_broker()
self.cached = cached
self.sync = sync
self.started = False
def append(self, *args):
"""
add arguments to the set
"""
self.args.append(args)
if self.started:
self.started = False
return self.length()
def run(self):
"""
Start queueing the tasks to the worker cluster
:return: the task id
"""
self.kwargs['cached'] = self.cached
self.kwargs['sync'] = self.sync
self.kwargs['broker'] = self.broker
self.id = async_iter(self.func, self.args, **self.kwargs)
self.started = True
return self.id
def result(self, wait=0):
"""
return the full list of results.
:param int wait: how many milliseconds to wait for a result
:return: an unsorted list of results
"""
if self.started:
return result(self.id, wait=wait, cached=self.cached)
def fetch(self, wait=0):
"""
get the task result objects.
:param int wait: how many milliseconds to wait for a result
:return: an unsorted list of task objects
"""
if self.started:
return fetch(self.id, wait=wait, cached=self.cached)
def length(self):
"""
get the length of the arguments list
:return int: length of the argument list
"""
return len(self.args)
class Chain(object):
"""
A sequential chain of tasks
"""
def __init__(self, chain=None, group=None, cached=Conf.CACHED, sync=Conf.SYNC):
self.chain = chain or []
self.group = group or ''
self.broker = get_broker()
self.cached = cached
self.sync = sync
self.started = False
def append(self, func, *args, **kwargs):
"""
add a task to the chain
takes the same parameters as async_task()
"""
self.chain.append((func, args, kwargs))
# remove existing results
if self.started:
delete_group(self.group)
self.started = False
return self.length()
def run(self):
"""
Start queueing the chain to the worker cluster
:return: the chain's group id
"""
self.group = async_chain(chain=self.chain[:], group=self.group, cached=self.cached, sync=self.sync,
broker=self.broker)
self.started = True
return self.group
def result(self, wait=0):
"""
return the full list of results from the chain when it finishes. blocks until timeout.
:param int wait: how many milliseconds to wait for a result
:return: an unsorted list of results
"""
if self.started:
return result_group(self.group, wait=wait, count=self.length(), cached=self.cached)
def fetch(self, failures=True, wait=0):
"""
get the task result objects from the chain when it finishes. blocks until timeout.
:param failures: include failed tasks
:param int wait: how many milliseconds to wait for a result
:return: an unsorted list of task objects
"""
if self.started:
return fetch_group(self.group, failures=failures, wait=wait, count=self.length(), cached=self.cached)
def current(self):
"""
get the index of the currently executing chain element
:return int: current chain index
"""
if not self.started:
return None
return count_group(self.group, cached=self.cached)
def length(self):
"""
get the length of the chain
:return int: length of the chain
"""
return len(self.chain)
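# Minimal usage sketch for the Chain helper (function paths and the wait value are
# illustrative):
#
#   chain = Chain(cached=True)
#   chain.append('math.copysign', 1, -1)
#   chain.append('math.floor', 1.5)
#   chain.run()
#   results = chain.result(wait=500)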
class AsyncTask(object):
"""
an async task
"""
def __init__(self, func, *args, **kwargs):
self.id = ''
self.started = False
self.func = func
self.args = args
self.kwargs = kwargs
@property
def broker(self):
return self._get_option('broker', None)
@broker.setter
def broker(self, value):
self._set_option('broker', value)
@property
def sync(self):
return self._get_option('sync', None)
@sync.setter
def sync(self, value):
self._set_option('sync', value)
@property
def save(self):
return self._get_option('save', None)
@save.setter
def save(self, value):
self._set_option('save', value)
@property
def hook(self):
return self._get_option('hook', None)
@hook.setter
def hook(self, value):
self._set_option('hook', value)
@property
def group(self):
return self._get_option('group', None)
@group.setter
def group(self, value):
self._set_option('group', value)
@property
def cached(self):
return self._get_option('cached', Conf.CACHED)
@cached.setter
def cached(self, value):
self._set_option('cached', value)
def _set_option(self, key, value):
if 'q_options' in self.kwargs:
self.kwargs['q_options'][key] = value
else:
self.kwargs[key] = value
self.started = False
def _get_option(self, key, default=None):
if 'q_options' in self.kwargs:
return self.kwargs['q_options'].get(key, default)
else:
return self.kwargs.get(key, default)
def run(self):
self.id = async_task(self.func, *self.args, **self.kwargs)
self.started = True
return self.id
def result(self, wait=0):
if self.started:
return result(self.id, wait=wait, cached=self.cached)
def fetch(self, wait=0):
if self.started:
return fetch(self.id, wait=wait, cached=self.cached)
def result_group(self, failures=False, wait=0, count=None):
if self.started and self.group:
return result_group(self.group, failures=failures, wait=wait, count=count, cached=self.cached)
def fetch_group(self, failures=True, wait=0, count=None):
if self.started and self.group:
return fetch_group(self.group, failures=failures, wait=wait, count=count, cached=self.cached)
def _sync(pack):
"""Simulate a package travelling through the cluster."""
from django_q.cluster import worker, monitor
task_queue = Queue()
result_queue = Queue()
task = SignedPackage.loads(pack)
task_queue.put(task)
task_queue.put('STOP')
worker(task_queue, result_queue, Value('f', -1))
result_queue.put('STOP')
monitor(result_queue)
task_queue.close()
task_queue.join_thread()
result_queue.close()
result_queue.join_thread()
return task['id']
| 30.453757
| 120
| 0.583847
|
b8d0e8d35667e990fb2df7966b0da79a825c94a0
| 1,115
|
py
|
Python
|
thetools/users/tests/test_forms.py
|
sarthaksahni/pseudo-code-flow
|
516b23af817252729679e3ddcd2c2ebed5b74c6c
|
[
"MIT"
] | null | null | null |
thetools/users/tests/test_forms.py
|
sarthaksahni/pseudo-code-flow
|
516b23af817252729679e3ddcd2c2ebed5b74c6c
|
[
"MIT"
] | null | null | null |
thetools/users/tests/test_forms.py
|
sarthaksahni/pseudo-code-flow
|
516b23af817252729679e3ddcd2c2ebed5b74c6c
|
[
"MIT"
] | null | null | null |
import pytest
from thetools.users.forms import UserCreationForm
from thetools.users.tests.factories import UserFactory
pytestmark = pytest.mark.django_db
class TestUserCreationForm:
def test_clean_username(self):
# A user with proto_user params does not exist yet.
proto_user = UserFactory.build()
form = UserCreationForm(
{
"username": proto_user.username,
"password1": proto_user._password,
"password2": proto_user._password,
}
)
assert form.is_valid()
assert form.clean_username() == proto_user.username
# Creating a user.
form.save()
# The user with proto_user params already exists,
# hence cannot be created.
form = UserCreationForm(
{
"username": proto_user.username,
"password1": proto_user._password,
"password2": proto_user._password,
}
)
assert not form.is_valid()
assert len(form.errors) == 1
assert "username" in form.errors
| 27.195122
| 59
| 0.593722
|
867f6a85b8994bfcf2de66269d8ab6bbc4d8f56e
| 15,268
|
py
|
Python
|
qa/L0_shared_memory/shared_memory_test.py
|
jiweibo/triton_server
|
a0f7868eb0cad9d6a119edf845387ceae808d350
|
[
"BSD-3-Clause"
] | 2,159
|
2020-08-26T06:21:38.000Z
|
2022-03-31T16:13:46.000Z
|
qa/L0_shared_memory/shared_memory_test.py
|
jiweibo/triton_server
|
a0f7868eb0cad9d6a119edf845387ceae808d350
|
[
"BSD-3-Clause"
] | 1,482
|
2020-08-26T08:26:36.000Z
|
2022-03-31T23:11:19.000Z
|
qa/L0_shared_memory/shared_memory_test.py
|
jiweibo/triton_server
|
a0f7868eb0cad9d6a119edf845387ceae808d350
|
[
"BSD-3-Clause"
] | 592
|
2020-08-26T06:09:25.000Z
|
2022-03-31T00:37:41.000Z
|
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
sys.path.append("../common")
import numpy as np
import unittest
import os
import test_util as tu
import tritonclient.grpc as grpcclient
import tritonclient.http as httpclient
import tritonclient.utils.shared_memory as shm
from tritonclient import utils
class SharedMemoryTest(tu.TestResultCollector):
def test_invalid_create_shm(self):
# Raises error since tried to create invalid system shared memory region
try:
shm_op0_handle = shm.create_shared_memory_region(
"dummy_data", "/dummy_data", -1)
shm.destroy_shared_memory_region(shm_op0_handle)
except Exception as ex:
self.assertTrue(str(ex) == "unable to initialize the size")
def test_valid_create_set_register(self):
# Create a valid system shared memory region, fill data in it and register
if _protocol == "http":
triton_client = httpclient.InferenceServerClient(_url, verbose=True)
else:
triton_client = grpcclient.InferenceServerClient(_url, verbose=True)
shm_op0_handle = shm.create_shared_memory_region(
"dummy_data", "/dummy_data", 8)
shm.set_shared_memory_region(shm_op0_handle,
[np.array([1, 2], dtype=np.float32)])
triton_client.register_system_shared_memory("dummy_data", "/dummy_data",
8)
shm_status = triton_client.get_system_shared_memory_status()
if _protocol == "http":
self.assertTrue(len(shm_status) == 1)
else:
self.assertTrue(len(shm_status.regions) == 1)
shm.destroy_shared_memory_region(shm_op0_handle)
def test_unregister_before_register(self):
# Create a valid system shared memory region and unregister before register
if _protocol == "http":
triton_client = httpclient.InferenceServerClient(_url, verbose=True)
else:
triton_client = grpcclient.InferenceServerClient(_url, verbose=True)
shm_op0_handle = shm.create_shared_memory_region(
"dummy_data", "/dummy_data", 8)
triton_client.unregister_system_shared_memory("dummy_data")
shm_status = triton_client.get_system_shared_memory_status()
if _protocol == "http":
self.assertTrue(len(shm_status) == 0)
else:
self.assertTrue(len(shm_status.regions) == 0)
shm.destroy_shared_memory_region(shm_op0_handle)
def test_unregister_after_register(self):
# Create a valid system shared memory region and unregister after register
if _protocol == "http":
triton_client = httpclient.InferenceServerClient(_url, verbose=True)
else:
triton_client = grpcclient.InferenceServerClient(_url, verbose=True)
shm_op0_handle = shm.create_shared_memory_region(
"dummy_data", "/dummy_data", 8)
triton_client.register_system_shared_memory("dummy_data", "/dummy_data",
8)
triton_client.unregister_system_shared_memory("dummy_data")
shm_status = triton_client.get_system_shared_memory_status()
if _protocol == "http":
self.assertTrue(len(shm_status) == 0)
else:
self.assertTrue(len(shm_status.regions) == 0)
shm.destroy_shared_memory_region(shm_op0_handle)
def test_reregister_after_register(self):
# Create a valid system shared memory region and unregister after register
if _protocol == "http":
triton_client = httpclient.InferenceServerClient(_url, verbose=True)
else:
triton_client = grpcclient.InferenceServerClient(_url, verbose=True)
shm_op0_handle = shm.create_shared_memory_region(
"dummy_data", "/dummy_data", 8)
triton_client.register_system_shared_memory("dummy_data", "/dummy_data",
8)
try:
triton_client.register_system_shared_memory("dummy_data",
"/dummy_data", 8)
except Exception as ex:
self.assertTrue(
"shared memory region 'dummy_data' already in manager" in str(
ex))
shm_status = triton_client.get_system_shared_memory_status()
if _protocol == "http":
self.assertTrue(len(shm_status) == 1)
else:
self.assertTrue(len(shm_status.regions) == 1)
shm.destroy_shared_memory_region(shm_op0_handle)
def _configure_sever(self):
shm_ip0_handle = shm.create_shared_memory_region(
"input0_data", "/input0_data", 64)
shm_ip1_handle = shm.create_shared_memory_region(
"input1_data", "/input1_data", 64)
shm_op0_handle = shm.create_shared_memory_region(
"output0_data", "/output0_data", 64)
shm_op1_handle = shm.create_shared_memory_region(
"output1_data", "/output1_data", 64)
input0_data = np.arange(start=0, stop=16, dtype=np.int32)
input1_data = np.ones(shape=16, dtype=np.int32)
shm.set_shared_memory_region(shm_ip0_handle, [input0_data])
shm.set_shared_memory_region(shm_ip1_handle, [input1_data])
if _protocol == "http":
triton_client = httpclient.InferenceServerClient(_url, verbose=True)
else:
triton_client = grpcclient.InferenceServerClient(_url, verbose=True)
triton_client.register_system_shared_memory("input0_data",
"/input0_data", 64)
triton_client.register_system_shared_memory("input1_data",
"/input1_data", 64)
triton_client.register_system_shared_memory("output0_data",
"/output0_data", 64)
triton_client.register_system_shared_memory("output1_data",
"/output1_data", 64)
return [shm_ip0_handle, shm_ip1_handle, shm_op0_handle, shm_op1_handle]
def _cleanup_server(self, shm_handles):
for shm_handle in shm_handles:
shm.destroy_shared_memory_region(shm_handle)
def _basic_inference(self,
shm_ip0_handle,
shm_ip1_handle,
shm_op0_handle,
shm_op1_handle,
error_msg,
big_shm_name="",
big_shm_size=64):
input0_data = np.arange(start=0, stop=16, dtype=np.int32)
input1_data = np.ones(shape=16, dtype=np.int32)
inputs = []
outputs = []
if _protocol == "http":
triton_client = httpclient.InferenceServerClient(_url, verbose=True)
inputs.append(httpclient.InferInput("INPUT0", [1, 16], "INT32"))
inputs.append(httpclient.InferInput("INPUT1", [1, 16], "INT32"))
outputs.append(
httpclient.InferRequestedOutput('OUTPUT0', binary_data=True))
outputs.append(
httpclient.InferRequestedOutput('OUTPUT1', binary_data=False))
else:
triton_client = grpcclient.InferenceServerClient(_url, verbose=True)
inputs.append(grpcclient.InferInput("INPUT0", [1, 16], "INT32"))
inputs.append(grpcclient.InferInput("INPUT1", [1, 16], "INT32"))
outputs.append(grpcclient.InferRequestedOutput('OUTPUT0'))
outputs.append(grpcclient.InferRequestedOutput('OUTPUT1'))
inputs[0].set_shared_memory("input0_data", 64)
        if isinstance(shm_ip1_handle, list):
            # INPUT1 supplied as raw data (a list of numpy arrays) instead of shared memory
            inputs[1].set_data_from_numpy(shm_ip1_handle[0])
elif big_shm_name != "":
inputs[1].set_shared_memory(big_shm_name, big_shm_size)
else:
inputs[1].set_shared_memory("input1_data", 64)
outputs[0].set_shared_memory("output0_data", 64)
outputs[1].set_shared_memory("output1_data", 64)
try:
results = triton_client.infer("simple",
inputs,
model_version="",
outputs=outputs)
output = results.get_output('OUTPUT0')
if _protocol == "http":
output_datatype = output['datatype']
output_shape = output['shape']
else:
output_datatype = output.datatype
output_shape = output.shape
output_dtype = utils.triton_to_np_dtype(output_datatype)
output_data = shm.get_contents_as_numpy(shm_op0_handle,
output_dtype, output_shape)
self.assertTrue(
(output_data[0] == (input0_data + input1_data)).all(),
"Model output does not match expected output")
except Exception as ex:
error_msg.append(str(ex))
def test_unregister_after_inference(self):
# Unregister after inference
error_msg = []
shm_handles = self._configure_sever()
self._basic_inference(shm_handles[0], shm_handles[1], shm_handles[2],
shm_handles[3], error_msg)
if len(error_msg) > 0:
raise Exception(str(error_msg))
if _protocol == "http":
triton_client = httpclient.InferenceServerClient(_url, verbose=True)
else:
triton_client = grpcclient.InferenceServerClient(_url, verbose=True)
triton_client.unregister_system_shared_memory("output0_data")
shm_status = triton_client.get_system_shared_memory_status()
if _protocol == "http":
self.assertTrue(len(shm_status) == 3)
else:
self.assertTrue(len(shm_status.regions) == 3)
self._cleanup_server(shm_handles)
def test_register_after_inference(self):
# Register after inference
error_msg = []
shm_handles = self._configure_sever()
if _protocol == "http":
triton_client = httpclient.InferenceServerClient(_url, verbose=True)
else:
triton_client = grpcclient.InferenceServerClient(_url, verbose=True)
self._basic_inference(shm_handles[0], shm_handles[1], shm_handles[2],
shm_handles[3], error_msg)
if len(error_msg) > 0:
raise Exception(str(error_msg))
shm_ip2_handle = shm.create_shared_memory_region(
"input2_data", "/input2_data", 64)
triton_client.register_system_shared_memory("input2_data",
"/input2_data", 64)
shm_status = triton_client.get_system_shared_memory_status()
if _protocol == "http":
self.assertTrue(len(shm_status) == 5)
else:
self.assertTrue(len(shm_status.regions) == 5)
shm_handles.append(shm_ip2_handle)
self._cleanup_server(shm_handles)
def test_too_big_shm(self):
# Shared memory input region larger than needed - Throws error
error_msg = []
shm_handles = self._configure_sever()
shm_ip2_handle = shm.create_shared_memory_region(
"input2_data", "/input2_data", 128)
if _protocol == "http":
triton_client = httpclient.InferenceServerClient(_url, verbose=True)
else:
triton_client = grpcclient.InferenceServerClient(_url, verbose=True)
triton_client.register_system_shared_memory("input2_data",
"/input2_data", 128)
self._basic_inference(shm_handles[0], shm_ip2_handle, shm_handles[2],
shm_handles[3], error_msg, "input2_data", 128)
if len(error_msg) > 0:
self.assertTrue(
"unexpected total byte size 128 for input 'INPUT1', expecting 64"
in error_msg[-1])
shm_handles.append(shm_ip2_handle)
self._cleanup_server(shm_handles)
def test_mixed_raw_shm(self):
# Mix of shared memory and RAW inputs
error_msg = []
shm_handles = self._configure_sever()
input1_data = np.ones(shape=16, dtype=np.int32)
self._basic_inference(shm_handles[0], [input1_data], shm_handles[2],
shm_handles[3], error_msg)
if len(error_msg) > 0:
raise Exception(error_msg[-1])
self._cleanup_server(shm_handles)
def test_unregisterall(self):
# Unregister all shared memory blocks
shm_handles = self._configure_sever()
if _protocol == "http":
triton_client = httpclient.InferenceServerClient(_url, verbose=True)
else:
triton_client = grpcclient.InferenceServerClient(_url, verbose=True)
status_before = triton_client.get_system_shared_memory_status()
if _protocol == "http":
self.assertTrue(len(status_before) == 4)
else:
self.assertTrue(len(status_before.regions) == 4)
triton_client.unregister_system_shared_memory()
status_after = triton_client.get_system_shared_memory_status()
if _protocol == "http":
self.assertTrue(len(status_after) == 0)
else:
self.assertTrue(len(status_after.regions) == 0)
self._cleanup_server(shm_handles)
if __name__ == '__main__':
_protocol = os.environ.get('CLIENT_TYPE', "http")
if _protocol == "http":
_url = "localhost:8000"
else:
_url = "localhost:8001"
unittest.main()
| 47.26935
| 83
| 0.629945
|
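The shared-memory tests above all walk the same cycle: create a system shared-memory region on the client side, register it with the server, inspect the status, and tear everything down again. A minimal sketch of that cycle outside the unittest harness, assuming the same tritonclient imports the test module uses and a server at localhost:8000 (the region name and size are illustrative):

import numpy as np
import tritonclient.http as httpclient
import tritonclient.utils.shared_memory as shm

# create a 64-byte system shared memory region and copy some data into it
handle = shm.create_shared_memory_region("demo_data", "/demo_data", 64)
shm.set_shared_memory_region(handle, [np.arange(16, dtype=np.int32)])

# register it with the server, look at the status, then clean everything up
client = httpclient.InferenceServerClient("localhost:8000")
client.register_system_shared_memory("demo_data", "/demo_data", 64)
print(client.get_system_shared_memory_status())
client.unregister_system_shared_memory("demo_data")
shm.destroy_shared_memory_region(handle)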
b840af50fd282f34ead02c3cb1ea91de3c4f1595
| 4,128
|
py
|
Python
|
openshift/test/test_v1_config_map.py
|
flaper87/openshift-restclient-python
|
13d5d86ca89035b9f596032e7a34f3cc33bf8f18
|
[
"Apache-2.0"
] | null | null | null |
openshift/test/test_v1_config_map.py
|
flaper87/openshift-restclient-python
|
13d5d86ca89035b9f596032e7a34f3cc33bf8f18
|
[
"Apache-2.0"
] | null | null | null |
openshift/test/test_v1_config_map.py
|
flaper87/openshift-restclient-python
|
13d5d86ca89035b9f596032e7a34f3cc33bf8f18
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
OpenShift API (with Kubernetes)
OpenShift provides builds, application lifecycle, image content management, and administrative policy on top of Kubernetes. The API allows consistent management of those objects. All API operations are authenticated via an Authorization bearer token that is provided for service accounts as a generated secret (in JWT form) or via the native OAuth endpoint located at /oauth/authorize. Core infrastructure components may use openshift.client certificates that require no authentication. All API operations return a 'resourceVersion' string that represents the version of the object in the underlying storage. The standard LIST operation performs a snapshot read of the underlying objects, returning a resourceVersion representing a consistent version of the listed objects. The WATCH operation allows all updates to a set of objects after the provided resourceVersion to be observed by a openshift.client. By listing and beginning a watch from the returned resourceVersion, openshift.clients may observe a consistent view of the state of one or more objects. Note that WATCH always returns the update after the provided resourceVersion. Watch may be extended a limited time in the past - using etcd 2 the watch window is 1000 events (which on a large cluster may only be a few tens of seconds) so openshift.clients must explicitly handle the \"watch to old error\" by re-listing. Objects are divided into two rough categories - those that have a lifecycle and must reflect the state of the cluster, and those that have no state. Objects with lifecycle typically have three main sections: * 'metadata' common to all objects * a 'spec' that represents the desired state * a 'status' that represents how much of the desired state is reflected on the cluster at the current time Objects that have no state have 'metadata' but may lack a 'spec' or 'status' section. Objects are divided into those that are namespace scoped (only exist inside of a namespace) and those that are cluster scoped (exist outside of a namespace). A namespace scoped resource will be deleted when the namespace is deleted and cannot be created if the namespace has not yet been created or is in the process of deletion. Cluster scoped resources are typically only accessible to admins - resources like nodes, persistent volumes, and cluster policy. All objects have a schema that is a combination of the 'kind' and 'apiVersion' fields. This schema is additive only for any given version - no backwards incompatible changes are allowed without incrementing the apiVersion. The server will return and accept a number of standard responses that share a common schema - for instance, the common error type is 'unversioned.Status' (described below) and will be returned on any error from the API server. The API is available in multiple serialization formats - the default is JSON (Accept: application/json and Content-Type: application/json) but openshift.clients may also use YAML (application/yaml) or the native Protobuf schema (application/vnd.kubernetes.protobuf). Note that the format of the WATCH API call is slightly different - for JSON it returns newline delimited objects while for Protobuf it returns length-delimited frames (4 bytes in network-order) that contain a 'versioned.Watch' Protobuf object. See the OpenShift documentation at https://docs.openshift.org for more information.
OpenAPI spec version: v3.6.0-alpha.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import openshift.client
from kubernetes.client.rest import ApiException
from openshift.client.models.v1_config_map import V1ConfigMap
class TestV1ConfigMap(unittest.TestCase):
""" V1ConfigMap unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1ConfigMap(self):
"""
Test V1ConfigMap
"""
model = openshift.client.models.v1_config_map.V1ConfigMap()
if __name__ == '__main__':
unittest.main()
| 96
| 3,380
| 0.787064
|
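The generated stub above only checks that the model class can be instantiated. As a rough sketch of a populated instance (the api_version, kind and data keyword arguments are assumptions based on the usual swagger-generated ConfigMap model; they do not appear in the stub itself):

from openshift.client.models.v1_config_map import V1ConfigMap

# hypothetical field values, for illustration only
config_map = V1ConfigMap(
    api_version="v1",
    kind="ConfigMap",
    data={"example.key": "example value"},
)
print(config_map.data)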
b3e204b69561604583a039eadc9b607c6042296d
| 12,586
|
py
|
Python
|
pyscandl/modules/Pyscandl.py
|
Annwan/pyscandl
|
afec7b410699f1bb6289a8653fc7cfa5b0d03406
|
[
"BSD-3-Clause"
] | null | null | null |
pyscandl/modules/Pyscandl.py
|
Annwan/pyscandl
|
afec7b410699f1bb6289a8653fc7cfa5b0d03406
|
[
"BSD-3-Clause"
] | null | null | null |
pyscandl/modules/Pyscandl.py
|
Annwan/pyscandl
|
afec7b410699f1bb6289a8653fc7cfa5b0d03406
|
[
"BSD-3-Clause"
] | null | null | null |
import contextlib
from .excepts import DryNoSauceHere, TooManySauce, EmptyChapter, DelayedRelease
from .fetchers.fetcher import StandaloneFetcher
import requests
import os
from re import sub as re_sub
from sys import stderr
from wand.image import Image
class Pyscandl:
"""
	The main object of the program. It is responsible for the downloads and controls the fetchers for you.
"""
def __init__(self, fetcher, chapstart=1, output:str=".", pdf:bool=True, keep:bool=False, image:bool=False, all:bool=False, link:str=None, manga:str=None, download_number:int=1, chapend=0, quiet:bool=False, skip:int=0, tiny:bool=False):
"""
		Initialize this instance of the pyscandl downloader; it needs either manga or link to work.
:param fetcher: fetcher object related to the download
:param chapstart: first chapter to be downloaded
:type chapstart: int/float/str
:param output: output folder
:type output: str
:param pdf: tell if the result should be kept as a pdf
:type pdf: bool
:param keep: tell if the result should be kept as a pdf and as a collection of images
:type keep: bool
:param image: tell if the result should be kept as a collection of images
:type image: bool
:param all: download all the chapters that are available after chapstart
:type all: bool
:param link: link of the manga to download
:type link: str
:param manga: identification tag of the manga *(see every fetcher for their different variations)*
:type manga: str
:param download_number: number of chapters to download
:type download_number: int
		:param chapend: chapter to end the download on; if it does not exist, the download will stop once the next chapter number to download is greater than it
:type chapend: int/float/str
:param quiet: should the program not output any information about what it is doing in the console
:type quiet: bool
:param skip: number of images to skip on the first chapter being downloaded *(useful if running in image mode)*
:type skip: int
:param tiny: should the name of every downloaded scan be minified and only include the chapter number and the chapter title
:type tiny: bool
:raises DryNoSauceHere: neither link or manga was specified
:raises TooManySauce: both link and manga were specified
"""
if link is not None and manga is None or link is None and manga is not None:
if issubclass(fetcher, StandaloneFetcher):
self.fetcher = fetcher(link=link, manga=manga)
else:
self.fetcher = fetcher(link=link, manga=manga, chapstart=chapstart)
elif link is None and manga is None:
raise DryNoSauceHere
else:
raise TooManySauce
# in case windows is the os, remove the banned characters
if os.name == "nt":
manga_name = re_sub(r'[\\/*?:"<>|]', u"█", self.fetcher.manga_name)
else:
manga_name = self.fetcher.manga_name
# creating output folder
self._output = (output[-1] == "/" and output or output + "/") + manga_name + "/"
if not os.path.exists(self._output):
os.makedirs(self._output)
self._header = {"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36",
"Set-Cookie": f"domain={self.fetcher.domain}"}
self._header.update(self.fetcher.headers)
self._nskip = skip
self._quiet = quiet
# select download mode
self._pdf = pdf
self._keep = keep
self._image = image
self._all = all
self._download_number = download_number
self._chapend = float(chapend) if "." in str(chapend) else int(chapend)
self._path = f"{self._output}ch.{self.fetcher.chapter_number} {self.fetcher.chapter_name}/" # save path for images
self._img_bin_list = []
self._tiny = tiny
# in case windows is the os, remove the banned characters
if os.name == "nt":
chapter_name = re_sub(r'[\\/*?:"<>|]', u"█", self.fetcher.chapter_name)
else:
chapter_name = self.fetcher.chapter_name
if self._tiny:
if isinstance(self.fetcher, StandaloneFetcher):
self._pdf_path = f"{self._output}{chapter_name}.pdf"
self._name_metadata_pdf = f"{self.fetcher.chapter_name}"
else:
self._pdf_path = f"{self._output}ch.{self.fetcher.chapter_number} {chapter_name}.pdf"
self._name_metadata_pdf = f"ch.{self.fetcher.chapter_number} {self.fetcher.chapter_name}"
else:
if isinstance(self.fetcher, StandaloneFetcher):
self._pdf_path = f"{self._output}{manga_name} - {chapter_name}.pdf"
self._name_metadata_pdf = f"{self.fetcher.manga_name} - {self.fetcher.chapter_name}"
else:
self._pdf_path = f"{self._output}{manga_name} - ch.{self.fetcher.chapter_number} {chapter_name}.pdf"
self._name_metadata_pdf = f"{self.fetcher.manga_name} - ch.{self.fetcher.chapter_number} {self.fetcher.chapter_name}"
self._banlist = []
ban_path = f"{os.path.dirname(os.path.abspath(__file__))}/../banlist"
for img in os.listdir(ban_path):
with open(f"{ban_path}/{img}", "rb") as img_bin:
self._banlist.append(img_bin.read())
def _dl_image(self):
"""
Downloads the currently selected image.
"""
if not os.path.exists(self._path):
os.makedirs(self._path)
with open(f"{self._path}{self.fetcher.npage}.{self.fetcher.ext}", "wb") as img:
img.write(requests.get(self.fetcher.image, headers=self._header).content)
if not self._quiet:
print(".", end="", flush=True)
def full_chapter(self):
"""
Fetching all the images of the chapter and storing them in RAM.
"""
if not self._quiet:
if isinstance(self.fetcher, StandaloneFetcher):
print(f"fetching: {self.fetcher.chapter_name}")
else:
print(f"fetching: ch.{self.fetcher.chapter_number} {self.fetcher.chapter_name}")
while not self.fetcher.is_last_image():
self._img_bin_list.append(requests.get(self.fetcher.image, headers=self._header).content)
if not self._quiet:
print(".", end="", flush=True)
self.fetcher.next_image()
self._img_bin_list.append(requests.get(self.fetcher.image, headers=self._header).content)
if not self._quiet:
print(".", end="", flush=True)
def keep_full_chapter(self):
"""
Downloading all the images of the chapters and storing them where the output was specified.
"""
if not self._quiet:
if isinstance(self.fetcher, StandaloneFetcher):
print(f"downloading: {self.fetcher.chapter_name}")
else:
print(f"downloading: ch.{self.fetcher.chapter_number} {self.fetcher.chapter_name}")
while not self.fetcher.is_last_image():
if self._keep:
self._img_bin_list.append(requests.get(self.fetcher.image, headers=self._header).content)
self._dl_image()
self.fetcher.next_image()
if self._keep:
self._img_bin_list.append(requests.get(self.fetcher.image, headers=self._header).content)
self._dl_image()
if not self._quiet and self._image:
print("")
def _skip(self):
"""
Skips the images as asked with the skip parameter.
"""
for loop in range(self._nskip):
self.fetcher.next_image()
def create_pdf(self):
"""
Creates the pdf at the output location with the fetched or the downloaded images of the current chapter.
:raises EmptyChapter: the images of the current chapter were all blacklisted images and the pdf was empty
"""
print("Warning: the pdf creation engine will be changed for Wand in the next major release (3.0.0). Please do not forget to install imagemagick at that time", file=stderr)
if not self._quiet:
print("\nconverting...", end=" ")
# loading the downloaded images if keep mode
# removing the images found in the banlist
self._img_bin_list = [img for img in self._img_bin_list if img not in self._banlist]
if len(self._img_bin_list) > 0:
# creating the pdf
with Image() as pdf:
for img_bin in self._img_bin_list:
with contextlib.redirect_stderr(None): # to mute alpha channel and ICC warnings as wand processes the image well anyway
with Image(blob=img_bin) as img:
pdf.sequence.append(img)
pdf.save(filename=self._pdf_path)
with open(self._pdf_path, "rb") as file:
pdf = file.read()
with open(self._pdf_path, "wb") as file:
file.write(pdf.replace(b"/Producer (https://imagemagick.org)", b"/Producer (https://pypi.org/project/pyscandl/)")
.replace(
b"/CreationDate", b"/Author <feff"+self.fetcher.author.encode("utf-16_be").hex().encode()+
b">\n/Keywords <feff"+self.fetcher.manga_name.encode("utf-16_be").hex().encode()+b">\n/CreationDate")
				) # manually adding the metadata missing from the pdf creation
if not self._quiet:
print("converted")
else:
raise EmptyChapter(self.fetcher.manga_name, self.fetcher.chapter_number)
def go_to_chapter(self, chap_num):
"""
		Make Pyscandl go to the requested chapter.
		:param chap_num: the requested chapter number
:type chap_num: int/str/float
"""
# in case windows is the os, remove the banned characters
if os.name == "nt":
chapter_name = re_sub(r'[\\/*?:"<>|]', u"█", self.fetcher.chapter_name)
else:
chapter_name = self.fetcher.chapter_name
self.fetcher.go_to_chapter(chap_num)
self._path = f"{self._output}ch.{self.fetcher.chapter_number} {chapter_name}/"
self._img_bin_list = []
# prepares the next pdf path and name
if self._tiny:
if isinstance(self.fetcher, StandaloneFetcher):
self._pdf_path = f"{self._output}{chapter_name}.pdf"
self._name_metadata_pdf = f"{self.fetcher.chapter_name}"
else:
self._pdf_path = f"{self._output}ch.{self.fetcher.chapter_number} {chapter_name}.pdf"
self._name_metadata_pdf = f"ch.{self.fetcher.chapter_number} {self.fetcher.chapter_name}"
else:
if isinstance(self.fetcher, StandaloneFetcher):
self._pdf_path = f"{self._output}{self.fetcher.manga_name} - {chapter_name}.pdf"
self._name_metadata_pdf = f"{self.fetcher.manga_name} - {self.fetcher.chapter_name}"
else:
self._pdf_path = f"{self._output}{self.fetcher.manga_name} - ch.{self.fetcher.chapter_number} {chapter_name}.pdf"
self._name_metadata_pdf = f"{self.fetcher.manga_name} - ch.{self.fetcher.chapter_number} {self.fetcher.chapter_name}"
def next_chapter(self):
"""
		Goes to the next chapter.
"""
self.fetcher.next_chapter()
# in case windows is the os, remove the banned characters
if os.name == "nt":
chapter_name = re_sub(r'[\\/*?:"<>|]', u"█", self.fetcher.chapter_name)
else:
chapter_name = self.fetcher.chapter_name
self._path = f"{self._output}ch.{self.fetcher.chapter_number} {chapter_name}/"
self._img_bin_list = []
# prepares the next pdf path and name
if self._tiny:
if isinstance(self.fetcher, StandaloneFetcher):
self._pdf_path = f"{self._output}{chapter_name}.pdf"
self._name_metadata_pdf = f"{self.fetcher.chapter_name}"
else:
self._pdf_path = f"{self._output}ch.{self.fetcher.chapter_number} {chapter_name}.pdf"
self._name_metadata_pdf = f"ch.{self.fetcher.chapter_number} {self.fetcher.chapter_name}"
else:
if isinstance(self.fetcher, StandaloneFetcher):
self._pdf_path = f"{self._output}{self.fetcher.manga_name} - {chapter_name}.pdf"
self._name_metadata_pdf = f"{self.fetcher.manga_name} - {self.fetcher.chapter_name}"
else:
self._pdf_path = f"{self._output}{self.fetcher.manga_name} - ch.{self.fetcher.chapter_number} {chapter_name}.pdf"
self._name_metadata_pdf = f"{self.fetcher.manga_name} - ch.{self.fetcher.chapter_number} {self.fetcher.chapter_name}"
def full_download(self):
"""
Does the full download process with what is specified when initializing the Pyscandl object
"""
try:
# emulating a do while
self._skip()
counter = 1
try:
if self._keep or self._image:
self.keep_full_chapter()
else:
self.full_chapter()
if not self._image:
try:
self.create_pdf()
except EmptyChapter:
if not self._quiet:
print("empty")
except DelayedRelease as e:
if not self._quiet:
print(e)
while not isinstance(self.fetcher, StandaloneFetcher) and not self.fetcher.is_last_chapter() and (self._all or counter < self._download_number or float(self.fetcher.chapter_number) < self._chapend):
self.next_chapter()
try:
if self._keep or self._image:
self.keep_full_chapter()
else:
self.full_chapter()
if not self._image:
try:
self.create_pdf()
except EmptyChapter:
if not self._quiet:
print("empty")
except DelayedRelease as e:
if not self._quiet:
print(e)
counter += 1
except KeyboardInterrupt:
if not self._quiet:
print("\nmanual interruption")
finally:
self.fetcher.quit()
if not self._quiet:
print("end of the download")
| 37.570149
| 236
| 0.713094
|
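The constructor documented above is essentially the whole public surface needed for a download: hand it a fetcher class plus either manga or link, then call full_download(). A hedged sketch, where SomeFetcher stands in for one of the concrete fetcher classes from pyscandl.modules.fetchers (none of their names are shown in the file above, and the manga id is made up):

from pyscandl.modules.Pyscandl import Pyscandl

# SomeFetcher is a placeholder for a real fetcher class shipped with the package
downloader = Pyscandl(
    SomeFetcher,             # fetcher class, not an instance
    manga="some-manga-id",   # or link="https://..." instead, never both
    chapstart=1,
    download_number=2,
    output=".",
    pdf=True,
)
downloader.full_download()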
5b5e84d4963f0fb3455c3bcb63fb12f2b6eb800d
| 4,135
|
py
|
Python
|
alipay/aop/api/request/AlipayEbppMerchantExternalbillUploadRequest.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | 213
|
2018-08-27T16:49:32.000Z
|
2021-12-29T04:34:12.000Z
|
alipay/aop/api/request/AlipayEbppMerchantExternalbillUploadRequest.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | 29
|
2018-09-29T06:43:00.000Z
|
2021-09-02T03:27:32.000Z
|
alipay/aop/api/request/AlipayEbppMerchantExternalbillUploadRequest.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | 59
|
2018-08-27T16:59:26.000Z
|
2022-03-25T10:08:15.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
class AlipayEbppMerchantExternalbillUploadRequest(object):
def __init__(self, biz_model=None):
self._biz_model = biz_model
self._community_short_name = None
self._content = None
self._version = "1.0"
self._terminal_type = None
self._terminal_info = None
self._prod_code = None
self._notify_url = None
self._return_url = None
self._udf_params = None
self._need_encrypt = False
@property
def biz_model(self):
return self._biz_model
@biz_model.setter
def biz_model(self, value):
self._biz_model = value
@property
def community_short_name(self):
return self._community_short_name
@community_short_name.setter
def community_short_name(self, value):
self._community_short_name = value
@property
def content(self):
return self._content
@content.setter
def content(self, value):
if not isinstance(value, FileItem):
return
self._content = value
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def terminal_type(self):
return self._terminal_type
@terminal_type.setter
def terminal_type(self, value):
self._terminal_type = value
@property
def terminal_info(self):
return self._terminal_info
@terminal_info.setter
def terminal_info(self, value):
self._terminal_info = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def notify_url(self):
return self._notify_url
@notify_url.setter
def notify_url(self, value):
self._notify_url = value
@property
def return_url(self):
return self._return_url
@return_url.setter
def return_url(self, value):
self._return_url = value
@property
def udf_params(self):
return self._udf_params
@udf_params.setter
def udf_params(self, value):
if not isinstance(value, dict):
return
self._udf_params = value
@property
def need_encrypt(self):
return self._need_encrypt
@need_encrypt.setter
def need_encrypt(self, value):
self._need_encrypt = value
def add_other_text_param(self, key, value):
if not self.udf_params:
self.udf_params = dict()
self.udf_params[key] = value
def get_params(self):
params = dict()
params[P_METHOD] = 'alipay.ebpp.merchant.externalbill.upload'
params[P_VERSION] = self.version
if self.biz_model:
params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
if self.community_short_name:
if hasattr(self.community_short_name, 'to_alipay_dict'):
params['community_short_name'] = json.dumps(obj=self.community_short_name.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['community_short_name'] = self.community_short_name
if self.terminal_type:
params['terminal_type'] = self.terminal_type
if self.terminal_info:
params['terminal_info'] = self.terminal_info
if self.prod_code:
params['prod_code'] = self.prod_code
if self.notify_url:
params['notify_url'] = self.notify_url
if self.return_url:
params['return_url'] = self.return_url
if self.udf_params:
params.update(self.udf_params)
return params
def get_multipart_params(self):
multipart_params = dict()
if self.content:
multipart_params['content'] = self.content
return multipart_params
| 27.026144
| 166
| 0.640387
|
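Everything needed to drive the request class is visible above: assign the plain attributes, optionally attach a FileItem as content, add free-form text params, and let get_params()/get_multipart_params() assemble the payload. A small sketch (the file name, the values, and the FileItem constructor arguments are assumptions):

from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.request.AlipayEbppMerchantExternalbillUploadRequest import \
    AlipayEbppMerchantExternalbillUploadRequest

request = AlipayEbppMerchantExternalbillUploadRequest()
request.community_short_name = "demo-community"                        # illustrative value
request.content = FileItem("bill.csv", open("bill.csv", "rb").read())  # assumed FileItem signature
request.add_other_text_param("remark", "monthly upload")

print(request.get_params())            # text params, including the fixed method name
print(request.get_multipart_params())  # {'content': <FileItem>}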
d65cb5c26438ae7017fb24a43d98d8c39ada4174
| 657
|
py
|
Python
|
pp/samples/13_component_yaml.py
|
flaport/gdsfactory
|
1f2e844c1fe27b9c6340e2d51500fd3358fa16e5
|
[
"MIT"
] | 8
|
2020-08-25T11:25:18.000Z
|
2022-03-27T11:32:11.000Z
|
pp/samples/13_component_yaml.py
|
flaport/gdsfactory
|
1f2e844c1fe27b9c6340e2d51500fd3358fa16e5
|
[
"MIT"
] | null | null | null |
pp/samples/13_component_yaml.py
|
flaport/gdsfactory
|
1f2e844c1fe27b9c6340e2d51500fd3358fa16e5
|
[
"MIT"
] | 1
|
2022-03-04T07:03:29.000Z
|
2022-03-04T07:03:29.000Z
|
import pp
netlist = """
instances:
CP1:
component: mmi1x2
settings:
width_mmi: 4.5
length_mmi: 10
CP2:
component: mmi1x2
settings:
width_mmi: 4.5
length_mmi: 5
arm_top:
component: mzi_arm
arm_bot:
component: mzi_arm
placements:
arm_bot:
mirror: True
ports:
W0: CP1,W0
E0: CP2,W0
connections:
arm_bot,W0: CP1,E0
arm_top,W0: CP1,E1
CP2,E0: arm_bot,E0
CP2,E1: arm_top,E0
"""
def test_mzi():
return pp.component_from_yaml(netlist)
if __name__ == "__main__":
c = test_mzi()
pp.show(c)
pp.plotgds(c)
| 15.27907
| 42
| 0.561644
|
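The YAML netlist above is the entire definition: instances select component factories and settings, placements/connections/ports wire them together, and pp.component_from_yaml turns the text into a component. A reduced sketch with a single instance (treating the other sections as optional is an assumption, and the settings values are illustrative):

import pp

single_instance_netlist = """
instances:
  CP1:
    component: mmi1x2
    settings:
      width_mmi: 4.5
      length_mmi: 10
ports:
  W0: CP1,W0
"""

component = pp.component_from_yaml(single_instance_netlist)
print(component.ports)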
1ddc0a132b27f5d50057b083d7485063e98b031d
| 3,632
|
py
|
Python
|
configs/ttfnet/ttfnet_r18_dyndila_param10_scale1e4_1e4_1x.py
|
mrsempress/mmdetection
|
cb650560c97a2fe56a9b369a1abc8ec17e06583a
|
[
"Apache-2.0"
] | null | null | null |
configs/ttfnet/ttfnet_r18_dyndila_param10_scale1e4_1e4_1x.py
|
mrsempress/mmdetection
|
cb650560c97a2fe56a9b369a1abc8ec17e06583a
|
[
"Apache-2.0"
] | null | null | null |
configs/ttfnet/ttfnet_r18_dyndila_param10_scale1e4_1e4_1x.py
|
mrsempress/mmdetection
|
cb650560c97a2fe56a9b369a1abc8ec17e06583a
|
[
"Apache-2.0"
] | null | null | null |
# model settings
model = dict(
type='TTFNet',
pretrained='modelzoo://resnet18',
backbone=dict(
type='ResNet',
depth=18,
num_stages=4,
local_conv_cfg=dict(type='DynDilaConv', param_multiply=10., alpha_scale=1e-4, split=8),
stage_with_local_conv_cfg=(False, False, True, True),
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_eval=False,
style='pytorch'),
neck=None,
bbox_head=dict(
type='TTFHead',
inplanes=(64, 128, 256, 512),
head_conv=128,
wh_conv=64,
hm_head_conv_num=2,
wh_head_conv_num=1,
num_classes=81,
wh_offset_base=16,
wh_agnostic=True,
wh_gaussian=True,
shortcut_cfg=(1, 2, 3),
norm_cfg=dict(type='BN'),
alpha=0.54,
hm_weight=1.,
wh_weight=5.))
cudnn_benchmark = True
# training and testing settings
train_cfg = dict(
vis_every_n_iters=100,
debug=False)
test_cfg = dict(
score_thr=0.01,
max_per_img=100)
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(512, 512), keep_ratio=False),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(512, 512),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=False),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
imgs_per_gpu=16,
workers_per_gpu=4,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.002, momentum=0.9, weight_decay=0.0004,
paramwise_options=dict(bias_lr_mult=2., bias_decay_mult=0.,
arch_lr_mult=1e4, arch_decay_mult=0.))
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 5,
step=[9, 11])
checkpoint_config = dict(save_every_n_steps=200, max_to_keep=1, keep_every_n_epochs=9)
bbox_head_hist_config = dict(
model_type=['ConvModule', 'DeformConvPack'],
sub_modules=['bbox_head'],
save_every_n_steps=200)
# yapf:disable
log_config = dict(interval=20)
# yapf:enable
# runtime settings
total_epochs = 12
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/ttfnet18_1x'
load_from = None
resume_from = None
workflow = [('train', 1)]
| 31.310345
| 95
| 0.631883
|
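Configs like the one above are plain Python modules that the training tooling loads into a nested attribute dictionary. A sketch of inspecting it, assuming mmcv's Config loader is the front end this mmdetection fork uses (the path is the repo-relative one from the metadata):

from mmcv import Config

cfg = Config.fromfile('configs/ttfnet/ttfnet_r18_dyndila_param10_scale1e4_1e4_1x.py')
print(cfg.model.backbone.depth)   # 18
print(cfg.optimizer.lr)           # 0.002
print(cfg.data.imgs_per_gpu)      # 16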
fc0429962d81fe7351780331d2200adeaf0f515d
| 5,610
|
py
|
Python
|
examples/kalman/helpers/feature_handler.py
|
mfkiwl/laika-gnss
|
dc38f251dbc7ebb535a3c220de8424634d297248
|
[
"MIT"
] | 365
|
2018-12-17T07:43:34.000Z
|
2022-03-29T22:23:39.000Z
|
examples/kalman/helpers/feature_handler.py
|
mfkiwl/laika-gnss
|
dc38f251dbc7ebb535a3c220de8424634d297248
|
[
"MIT"
] | 36
|
2019-07-24T10:20:45.000Z
|
2022-02-14T22:11:24.000Z
|
examples/kalman/helpers/feature_handler.py
|
mfkiwl/laika-gnss
|
dc38f251dbc7ebb535a3c220de8424634d297248
|
[
"MIT"
] | 156
|
2018-12-17T05:06:23.000Z
|
2022-03-31T12:06:07.000Z
|
#!/usr/bin/env python3
import os
import numpy as np
import kalman.helpers.orientation as orient
from selfdrive.locationd.kalman.helpers import (TEMPLATE_DIR, load_code,
write_code)
from selfdrive.locationd.kalman.helpers.sympy_helpers import quat_matrix_l
def sane(track):
img_pos = track[1:, 2:4]
diffs_x = abs(img_pos[1:, 0] - img_pos[:-1, 0])
diffs_y = abs(img_pos[1:, 1] - img_pos[:-1, 1])
for i in range(1, len(diffs_x)):
if ((diffs_x[i] > 0.05 or diffs_x[i - 1] > 0.05) and
(diffs_x[i] > 2 * diffs_x[i - 1] or
diffs_x[i] < .5 * diffs_x[i - 1])) or \
((diffs_y[i] > 0.05 or diffs_y[i - 1] > 0.05) and
(diffs_y[i] > 2 * diffs_y[i - 1] or
diffs_y[i] < .5 * diffs_y[i - 1])):
return False
return True
class FeatureHandler():
name = 'feature_handler'
@staticmethod
def generate_code(K=5):
# Wrap c code for slow matching
c_header = "\nvoid merge_features(double *tracks, double *features, long long *empty_idxs);"
c_code = "#include <math.h>\n"
c_code += "#include <string.h>\n"
c_code += "#define K %d\n" % K
c_code += "\n" + open(os.path.join(TEMPLATE_DIR, "feature_handler.c")).read()
filename = f"{FeatureHandler.name}_{K}"
write_code(filename, c_code, c_header)
def __init__(self, K=5):
self.MAX_TRACKS = 6000
self.K = K
    # Array of tracks, each track has K 5D features preceded
    # by 5 params that indicate [f_idx, last_idx, updated, complete, valid]
    #   f_idx: idx of the current last feature in the track
    #   last_idx: idx of the last feature in the frame
    #   updated: bool for whether this track has been updated
    #   complete: bool for whether this track is complete
    #   valid: bool for whether this track is valid
self.tracks = np.zeros((self.MAX_TRACKS, K + 1, 5))
self.tracks[:] = np.nan
name = f"{FeatureHandler.name}_{K}"
ffi, lib = load_code(name)
def merge_features_c(tracks, features, empty_idxs):
lib.merge_features(ffi.cast("double *", tracks.ctypes.data),
ffi.cast("double *", features.ctypes.data),
ffi.cast("long long *", empty_idxs.ctypes.data))
# self.merge_features = self.merge_features_python
self.merge_features = merge_features_c
def reset(self):
self.tracks[:] = np.nan
def merge_features_python(self, tracks, features, empty_idxs):
empty_idx = 0
for f in features:
match_idx = int(f[4])
if tracks[match_idx, 0, 1] == match_idx and tracks[match_idx, 0, 2] == 0:
tracks[match_idx, 0, 0] += 1
tracks[match_idx, 0, 1] = f[1]
tracks[match_idx, 0, 2] = 1
tracks[match_idx, int(tracks[match_idx, 0, 0])] = f
if tracks[match_idx, 0, 0] == self.K:
tracks[match_idx, 0, 3] = 1
if sane(tracks[match_idx]):
tracks[match_idx, 0, 4] = 1
else:
if empty_idx == len(empty_idxs):
print('need more empty space')
continue
tracks[empty_idxs[empty_idx], 0, 0] = 1
tracks[empty_idxs[empty_idx], 0, 1] = f[1]
tracks[empty_idxs[empty_idx], 0, 2] = 1
tracks[empty_idxs[empty_idx], 1] = f
empty_idx += 1
def update_tracks(self, features):
last_idxs = np.copy(self.tracks[:, 0, 1])
real = np.isfinite(last_idxs)
self.tracks[last_idxs[real].astype(int)] = self.tracks[real]
    mask = np.ones(self.MAX_TRACKS, dtype=bool)
mask[last_idxs[real].astype(int)] = 0
empty_idxs = np.arange(self.MAX_TRACKS)[mask]
self.tracks[empty_idxs] = np.nan
self.tracks[:, 0, 2] = 0
self.merge_features(self.tracks, features, empty_idxs)
def handle_features(self, features):
self.update_tracks(features)
valid_idxs = self.tracks[:, 0, 4] == 1
complete_idxs = self.tracks[:, 0, 3] == 1
stale_idxs = self.tracks[:, 0, 2] == 0
valid_tracks = self.tracks[valid_idxs]
self.tracks[complete_idxs] = np.nan
self.tracks[stale_idxs] = np.nan
return valid_tracks[:, 1:, :4].reshape((len(valid_tracks), self.K * 4))
def generate_orient_error_jac(K):
import sympy as sp
from selfdrive.locationd.kalman.helpers.sympy_helpers import quat_rotate
x_sym = sp.MatrixSymbol('abr', 3, 1)
dtheta = sp.MatrixSymbol('dtheta', 3, 1)
delta_quat = sp.Matrix(np.ones(4))
delta_quat[1:, :] = sp.Matrix(0.5 * dtheta[0:3, :])
poses_sym = sp.MatrixSymbol('poses', 7 * K, 1)
img_pos_sym = sp.MatrixSymbol('img_positions', 2 * K, 1)
alpha, beta, rho = x_sym
to_c = sp.Matrix(orient.rot_matrix(-np.pi / 2, -np.pi / 2, 0))
pos_0 = sp.Matrix(np.array(poses_sym[K * 7 - 7:K * 7 - 4])[:, 0])
q = quat_matrix_l(poses_sym[K * 7 - 4:K * 7]) * delta_quat
quat_rot = quat_rotate(*q)
rot_g_to_0 = to_c * quat_rot.T
rows = []
for i in range(K):
pos_i = sp.Matrix(np.array(poses_sym[i * 7:i * 7 + 3])[:, 0])
q = quat_matrix_l(poses_sym[7 * i + 3:7 * i + 7]) * delta_quat
quat_rot = quat_rotate(*q)
rot_g_to_i = to_c * quat_rot.T
rot_0_to_i = rot_g_to_i * (rot_g_to_0.T)
trans_0_to_i = rot_g_to_i * (pos_0 - pos_i)
funct_vec = rot_0_to_i * sp.Matrix([alpha, beta, 1]) + rho * trans_0_to_i
h1, h2, h3 = funct_vec
rows.append(h1 / h3 - img_pos_sym[i * 2 + 0])
rows.append(h2 / h3 - img_pos_sym[i * 2 + 1])
img_pos_residual_sym = sp.Matrix(rows)
# sympy into c
sympy_functions = []
sympy_functions.append(('orient_error_jac', img_pos_residual_sym.jacobian(dtheta), [x_sym, poses_sym, img_pos_sym, dtheta]))
return sympy_functions
if __name__ == "__main__":
# TODO: get K from argparse
FeatureHandler.generate_code()
| 35.283019
| 126
| 0.630838
|
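The sane() check above only looks at columns 2 and 3 of each feature row (the image-plane position) and rejects tracks whose successive steps jump erratically. A self-contained sketch of the track layout it operates on, mirroring the [f_idx, last_idx, updated, complete, valid] header row described in __init__ (the numbers are made up):

import numpy as np

K = 5
track = np.zeros((K + 1, 5))                  # row 0 is the parameter row
track[1:, 2] = np.linspace(0.10, 0.14, K)     # smoothly drifting x image positions
track[1:, 3] = np.linspace(0.20, 0.24, K)     # smoothly drifting y image positions

img_pos = track[1:, 2:4]
steps = np.abs(img_pos[1:] - img_pos[:-1])
print(steps.max())   # small, consistent steps: sane() would accept this track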
8ad2b9e351092d6ffc399778dec20e3648d74fed
| 4,285
|
py
|
Python
|
examples/mlx90640_pygamer.py
|
dglaude/Adafruit_CircuitPython_MLX90640
|
874651ca04b0aea92d54c984203a360784e3acd6
|
[
"MIT"
] | null | null | null |
examples/mlx90640_pygamer.py
|
dglaude/Adafruit_CircuitPython_MLX90640
|
874651ca04b0aea92d54c984203a360784e3acd6
|
[
"MIT"
] | null | null | null |
examples/mlx90640_pygamer.py
|
dglaude/Adafruit_CircuitPython_MLX90640
|
874651ca04b0aea92d54c984203a360784e3acd6
|
[
"MIT"
] | null | null | null |
import time
import board
import busio
import adafruit_mlx90640
import displayio
import terminalio
from adafruit_display_text.label import Label
number_of_colors = 64
palette = displayio.Palette(number_of_colors) # Palette with all our colors
## Heatmap code inspired from: http://www.andrewnoske.com/wiki/Code_-_heatmaps_and_color_gradients
color_A = [[0, 0, 0], [0, 0, 255], [0, 255, 255], [0, 255, 0], [255, 255, 0], \
[255, 0, 0], [255, 255, 255]]
color_B = [[0, 0, 255], [0, 255, 255] , [0, 255, 0], [255, 255, 0], [255, 0, 0]]
color_C = [[0, 0, 0], [255, 255, 255]]
color_D = [[0, 0, 255], [255, 0, 0]]
color = color_B
NUM_COLORS = len(color)
def MakeHeatMapColor():
for c in range(number_of_colors):
value = c * (NUM_COLORS-1) / (number_of_colors - 1)
idx1 = int(value) # Our desired color will be after this index.
if idx1 == value : # This is the corner case
red = color[idx1][0]
green = color[idx1][1]
blue = color[idx1][2]
else:
idx2 = idx1+1 # ... and before this index (inclusive).
fractBetween = value - idx1 # Distance between the two indexes (0-1).
red = int(round((color[idx2][0] - color[idx1][0]) * fractBetween + color[idx1][0]))
green = int(round((color[idx2][1] - color[idx1][1]) * fractBetween + color[idx1][1]))
blue = int(round((color[idx2][2] - color[idx1][2]) * fractBetween + color[idx1][2]))
palette[c]= ( 0x010000 * red ) + ( 0x000100 * green ) + ( 0x000001 * blue )
MakeHeatMapColor()
# Bitmap for colour coded thermal value
image_bitmap = displayio.Bitmap( 32, 24, number_of_colors )
# Create a TileGrid using the Bitmap and Palette
image_tile= displayio.TileGrid(image_bitmap, pixel_shader=palette)
# Create a Group that scale 32*24 to 128*96
image_group = displayio.Group(scale=4)
image_group.append(image_tile)
scale_bitmap = displayio.Bitmap( number_of_colors, 1, number_of_colors )
# Create a Group; its scale must be 128 divided by number_of_colors
scale_group = displayio.Group(scale=2)
scale_tile = displayio.TileGrid(scale_bitmap, pixel_shader=palette, x = 0, y = 60)
scale_group.append(scale_tile)
for i in range(number_of_colors):
    scale_bitmap[i, 0] = i # Fill the scale with the palette gradient
# Create the super Group
group = displayio.Group()
min_label = Label(terminalio.FONT, max_glyphs=10, color=palette[0], x = 0, y = 110)
max_label = Label(terminalio.FONT, max_glyphs=10, color=palette[number_of_colors-1], \
x = 80, y = 110)
# Add all the sub-group to the SuperGroup
group.append(image_group)
group.append(scale_group)
group.append(min_label)
group.append(max_label)
# Add the SuperGroup to the Display
board.DISPLAY.show(group)
mini = 0
maxi = 0
my_a1 = 20
my_a2 = 37
def temp2index(s, a1, a2):
b1 = 0
b2 = number_of_colors - 1
if s < a1:
r = b1
elif s > a2:
r = b2
else:
r = int( round( b1 + ( (s - a1) * (b2 - b1) / (a2 - a1) ) ) )
return r
i2c = busio.I2C(board.SCL, board.SDA, frequency=800000)
mlx = adafruit_mlx90640.MLX90640(i2c)
print("MLX addr detected on I2C")
print([hex(i) for i in mlx.serial_number])
#mlx.refresh_rate = adafruit_mlx90640.RefreshRate.REFRESH_2_HZ
mlx.refresh_rate = adafruit_mlx90640.RefreshRate.REFRESH_4_HZ
frame = [0] * 768
while True:
stamp = time.monotonic()
try:
mlx.getFrame(frame)
except ValueError:
# these happen, no biggie - retry
continue
print("Read 2 frames in %0.2f s" % (time.monotonic()-stamp))
mini = frame[0] # Define a default min and max value
maxi = frame[0] # Will be updated by temp2index function
for h in range(24):
for w in range(32):
t = frame[h*32 + w]
if t > maxi:
maxi = t
if t < mini:
mini = t
image_bitmap[w, (23-h)] = temp2index(t, my_a1, my_a2)
min_label.text="%0.2f" % (my_a1)
max_string="%0.2f" % (my_a2)
    max_label.x=120-(5*len(max_string)) # Shift x so the value stays right-aligned near the display edge
max_label.text=max_string
my_a1 = mini # Automatically change the color scale
my_a2 = maxi
| 32.709924
| 98
| 0.631039
|
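temp2index above is a plain linear rescale of a temperature into one of the 64 palette slots, clamped at both ends of the window. The same arithmetic isolated from the display hardware, so the mapping can be checked by hand (the 20-37 window mirrors my_a1/my_a2 above):

number_of_colors = 64

def temp2index(s, a1, a2):
    # clamp outside the [a1, a2] window, otherwise rescale linearly into 0..63
    b1, b2 = 0, number_of_colors - 1
    if s < a1:
        return b1
    if s > a2:
        return b2
    return int(round(b1 + (s - a1) * (b2 - b1) / (a2 - a1)))

for temp in (15.0, 20.0, 28.0, 37.0, 40.0):
    print(temp, "->", temp2index(temp, 20, 37))   # 0, 0, 30, 63, 63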
44f4fa4b8d5ba73aa49967d05978fb2db978bbb3
| 4,724
|
py
|
Python
|
vega/metrics/mindspore/detection_metric.py
|
zjzh/vega
|
aa6e7b8c69024262fc483ee06113b4d1bd5156d8
|
[
"Apache-2.0"
] | null | null | null |
vega/metrics/mindspore/detection_metric.py
|
zjzh/vega
|
aa6e7b8c69024262fc483ee06113b4d1bd5156d8
|
[
"Apache-2.0"
] | null | null | null |
vega/metrics/mindspore/detection_metric.py
|
zjzh/vega
|
aa6e7b8c69024262fc483ee06113b4d1bd5156d8
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding:utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Metric of detection task by using coco tools."""
import os
import json
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from vega.common import ClassFactory, ClassType
from vega.metrics.mindspore.metrics import MetricBase
from vega.common.task_ops import TaskOps
@ClassFactory.register(ClassType.METRIC, alias='coco')
class CocoMetric(MetricBase):
"""Save and summary metric from mdc dataset using coco tools."""
__metric_name__ = "coco"
def __init__(self, anno_path=None, category=None):
self.anno_path = anno_path or os.path.join(TaskOps().local_output_path, 'instances.json')
self.category = category or []
self.result_record = []
@property
def objective(self):
"""Define reward mode, default is max."""
return {'mAP': 'MAX', 'AP50': 'MAX', 'AP_small': 'MAX', 'AP_medium': 'MAX', 'AP_large': 'MAX'}
def __call__(self, output, targets, *args, **kwargs):
"""Append input into result record cache.
:param output: output data
        :param targets: target data
:return:
"""
if isinstance(output, dict):
return None
coco_results = []
for id, prediction in enumerate(output):
boxes = xyxy2xywh(prediction['boxes'])
scores = prediction["scores"].asnumpy().tolist()
labels = prediction["labels"].asnumpy().tolist()
img_id = targets[id]['image_id'].asnumpy().tolist()[0]
for idx, box in enumerate(boxes):
data = {}
data['image_id'] = img_id
data['bbox'] = box
data['score'] = scores[idx]
data['category_id'] = labels[idx]
coco_results.append(data)
self.result_record.extend(coco_results)
return None
def reset(self):
"""Reset states for new evaluation after each epoch."""
self.result_record = []
def summary(self):
"""Summary all record from result cache, and get performance."""
if not self.result_record:
return {"mAP": -1, "AP_small": -1, "AP_medium": -1, "AP_large": -1}
det_json_file = os.path.join(TaskOps().local_output_path, 'det_json_file.json')
with open(det_json_file, 'w') as f:
json.dump(self.result_record, f)
eval_result = self.print_scores(det_json_file, self.anno_path)
ap_result = eval_result.pop('AP(bbox)')
ap_result = list(ap_result)
ap_result = {
"mAP": ap_result[0] * 100,
"AP50": ap_result[1] * 100,
"AP_small": ap_result[3] * 100,
"AP_medium": ap_result[4] * 100,
"AP_large": ap_result[5] * 100
}
if eval_result:
ap_result.update(eval_result)
return ap_result
def print_scores(self, det_json_file, json_file):
"""Print scores.
:param det_json_file: dest json file
:param json_file: gt json file
:return:
"""
ret = {}
coco = COCO(json_file)
cocoDt = coco.loadRes(det_json_file)
cocoEval = COCOeval(coco, cocoDt, 'bbox')
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
ret['AP(bbox)'] = cocoEval.stats
for id, item in enumerate(self.category):
cocoEval = COCOeval(coco, cocoDt, 'bbox')
cocoEval.params.catIds = [id + 1]
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
if len(cocoEval.stats) > 0:
ret[item] = cocoEval.stats[1] * 100
return ret
def xyxy2xywh(boxes):
"""Transform the bbox coordinate to [x,y ,w,h].
:param bbox: the predict bounding box coordinate
:type bbox: list
:return: [x,y ,w,h]
:rtype: list
"""
from mindspore import ops
ms_unbind = ops.Unstack(axis=1)
ms_stack = ops.Stack(axis=1)
xmin, ymin, xmax, ymax = ms_unbind(boxes)
return ms_stack((xmin, ymin, xmax - xmin, ymax - ymin)).asnumpy().tolist()
| 35.787879
| 102
| 0.614733
|
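xyxy2xywh above performs the standard corner-to-corner to corner-plus-size conversion that COCO's bbox format expects, just written with MindSpore ops. The same transform in plain numpy as a quick sanity check (the example box is arbitrary):

import numpy as np

def xyxy2xywh_np(boxes):
    # [xmin, ymin, xmax, ymax] -> [xmin, ymin, width, height]
    boxes = np.asarray(boxes, dtype=float)
    xmin, ymin, xmax, ymax = boxes.T
    return np.stack([xmin, ymin, xmax - xmin, ymax - ymin], axis=1).tolist()

print(xyxy2xywh_np([[10.0, 20.0, 50.0, 80.0]]))   # [[10.0, 20.0, 40.0, 60.0]]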
ac8f5ce5dbbbcdd0e54bb4eef17b551f445fa66c
| 8,956
|
py
|
Python
|
tests/test_headers.py
|
RedisLabs/jsonrpclib-1
|
7b3659d2ae373b4f63af81f371494fd3c0e4b74b
|
[
"Apache-2.0"
] | 35
|
2015-01-16T16:54:03.000Z
|
2022-03-29T09:07:13.000Z
|
tests/test_headers.py
|
RedisLabs/jsonrpclib-1
|
7b3659d2ae373b4f63af81f371494fd3c0e4b74b
|
[
"Apache-2.0"
] | 43
|
2015-01-15T23:27:26.000Z
|
2022-02-19T16:40:56.000Z
|
tests/test_headers.py
|
RedisLabs/jsonrpclib-1
|
7b3659d2ae373b4f63af81f371494fd3c0e4b74b
|
[
"Apache-2.0"
] | 24
|
2015-02-12T14:11:24.000Z
|
2021-11-01T03:35:05.000Z
|
#!/usr/bin/python
# -- Content-Encoding: UTF-8 --
"""
Tests to verify the additional headers feature
:license: Apache License 2.0
"""
# Standard library
import contextlib
import re
import sys
import unittest
import jsonrpclib
try:
# Python 2
from StringIO import StringIO
except ImportError:
# Python 3
from io import StringIO
# JSON-RPC library
from jsonrpclib.utils import from_bytes
# Tests utilities
from tests.utilities import UtilityServer
# ------------------------------------------------------------------------------
class HeadersTests(unittest.TestCase):
"""
These tests verify functionality of additional headers.
"""
REQUEST_LINE = "^send: POST"
def setUp(self):
"""
Sets up the test
"""
# Set up the server
self.server = UtilityServer().start("", 0)
self.port = self.server.get_port()
def tearDown(self):
"""
Post-test clean up
"""
# Stop the server
self.server.stop()
@contextlib.contextmanager
def captured_headers(self, check_duplicates=True):
"""
Captures the request headers. Yields the {header : value} dictionary,
where keys are in lower case.
:param check_duplicates: If True, raises an error if a header appears
twice
"""
# Redirect the standard output, to catch jsonrpclib verbose messages
stdout = sys.stdout
sys.stdout = f = StringIO()
headers = {}
yield headers
sys.stdout = stdout
# Extract the sent request content
request_lines = f.getvalue().splitlines()
request_lines = list(
filter(lambda l: l.startswith("send:"), request_lines)
)
request_line = request_lines[0].split("send: ")[-1]
# Convert it to a string
try:
# Use eval to convert the representation into a string
request_line = from_bytes(eval(request_line))
        except Exception:
# Keep the received version
pass
# Extract headers
raw_headers = request_line.splitlines()[1:-1]
raw_headers = map(lambda h: re.split(r":\s?", h, 1), raw_headers)
for header, value in raw_headers:
header = header.lower()
if check_duplicates and header in headers:
raise KeyError("Header defined twice: {0}".format(header))
headers[header] = value
def test_should_extract_headers(self):
""" Check client headers capture """
# given
client = jsonrpclib.ServerProxy(
"http://localhost:{0}".format(self.port), verbose=1
)
# when
with self.captured_headers() as headers:
response = client.ping()
self.assertTrue(response)
# then
self.assertTrue(len(headers) > 0)
self.assertTrue("content-type" in headers)
self.assertEqual(headers["content-type"], "application/json-rpc")
def test_should_add_additional_headers(self):
""" Check sending of custom headers """
# given
client = jsonrpclib.ServerProxy(
"http://localhost:{0}".format(self.port),
verbose=1,
headers={"X-My-Header": "Test"},
)
# when
with self.captured_headers() as headers:
response = client.ping()
self.assertTrue(response)
# then
self.assertTrue("x-my-header" in headers)
self.assertEqual(headers["x-my-header"], "Test")
def test_should_add_additional_headers_to_notifications(self):
""" Check custom headers on notifications """
# given
client = jsonrpclib.ServerProxy(
"http://localhost:{0}".format(self.port),
verbose=1,
headers={"X-My-Header": "Test"},
)
# when
with self.captured_headers() as headers:
client._notify.ping()
# then
self.assertTrue("x-my-header" in headers)
self.assertEqual(headers["x-my-header"], "Test")
def test_should_override_headers(self):
""" Custom headers must override default ones """
# given
client = jsonrpclib.ServerProxy(
"http://localhost:{0}".format(self.port),
verbose=1,
headers={"User-Agent": "jsonrpclib test", "Host": "example.com"},
)
# when
with self.captured_headers(False) as headers:
response = client.ping()
self.assertTrue(response)
# then
self.assertEqual(headers["user-agent"], "jsonrpclib test")
self.assertEqual(headers["host"], "example.com")
def test_should_not_override_content_length(self):
""" Custom headers can't override Content-Length """
# given
client = jsonrpclib.ServerProxy(
"http://localhost:{0}".format(self.port),
verbose=1,
headers={"Content-Length": "invalid value"},
)
# when
with self.captured_headers() as headers:
response = client.ping()
self.assertTrue(response)
# then
self.assertTrue("content-length" in headers)
self.assertNotEqual(headers["content-length"], "invalid value")
def test_should_convert_header_values_to_basestring(self):
""" Custom headers values should be converted to str """
# given
client = jsonrpclib.ServerProxy(
"http://localhost:{0}".format(self.port),
verbose=1,
headers={"X-Test": 123},
)
# when
with self.captured_headers() as headers:
response = client.ping()
self.assertTrue(response)
# then
self.assertTrue("x-test" in headers)
self.assertEqual(headers["x-test"], "123")
def test_should_add_custom_headers_to_methods(self):
""" Check method-based custom headers """
# given
client = jsonrpclib.ServerProxy(
"http://localhost:{0}".format(self.port), verbose=1
)
# when
with self.captured_headers() as headers:
with client._additional_headers({"X-Method": "Method"}) as cl:
response = cl.ping()
self.assertTrue(response)
# then
self.assertTrue("x-method" in headers)
self.assertEqual(headers["x-method"], "Method")
def test_should_override_global_headers(self):
""" Method-based custom headers override context ones """
# given
client = jsonrpclib.ServerProxy(
"http://localhost:{0}".format(self.port),
verbose=1,
headers={"X-Test": "Global"},
)
# when
with self.captured_headers() as headers:
with client._additional_headers({"X-Test": "Method"}) as cl:
response = cl.ping()
self.assertTrue(response)
# then
self.assertTrue("x-test" in headers)
self.assertEqual(headers["x-test"], "Method")
def test_should_restore_global_headers(self):
""" Check custom headers context clean up """
# given
client = jsonrpclib.ServerProxy(
"http://localhost:{0}".format(self.port),
verbose=1,
headers={"X-Test": "Global"},
)
# when
with self.captured_headers() as headers:
with client._additional_headers({"X-Test": "Method"}) as cl:
response = cl.ping()
self.assertTrue(response)
self.assertTrue("x-test" in headers)
self.assertEqual(headers["x-test"], "Method")
with self.captured_headers() as headers:
response = cl.ping()
self.assertTrue(response)
# then
self.assertTrue("x-test" in headers)
self.assertEqual(headers["x-test"], "Global")
def test_should_allow_to_nest_additional_header_blocks(self):
""" Check nested additional headers """
# given
client = jsonrpclib.ServerProxy(
"http://localhost:{0}".format(self.port), verbose=1
)
# when
with client._additional_headers({"X-Level-1": "1"}) as cl_level1:
with self.captured_headers() as headers1:
response = cl_level1.ping()
self.assertTrue(response)
with cl_level1._additional_headers({"X-Level-2": "2"}) as cl:
with self.captured_headers() as headers2:
response = cl.ping()
self.assertTrue(response)
# then
self.assertTrue("x-level-1" in headers1)
self.assertEqual(headers1["x-level-1"], "1")
self.assertTrue("x-level-1" in headers2)
self.assertEqual(headers1["x-level-1"], "1")
self.assertTrue("x-level-2" in headers2)
self.assertEqual(headers2["x-level-2"], "2")
| 30.882759
| 80
| 0.574252
|
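The tests above exercise the two ways of attaching headers: a headers= dict given at proxy construction, and the _additional_headers context manager for temporary, per-call overrides. Stripped of the capture machinery, client-side usage looks like this (the URL and header values are placeholders, and ping() assumes the server exposes such a method):

import jsonrpclib

# headers sent with every request made through this proxy
client = jsonrpclib.ServerProxy(
    "http://localhost:8080",
    headers={"X-Api-Key": "example"},
)

# extra headers that apply only inside the with-block
with client._additional_headers({"X-Request-Source": "batch-job"}) as scoped:
    scoped.ping()

client.ping()   # back to the construction-time headers only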
f99a7cd3235455578928e65caf4dff1f1ea7b1ee
| 9,397
|
py
|
Python
|
examples/trials/mnist-annotation/mnist.py
|
kant/nni
|
ff390b4d5457cd5dec9e5c1c3a98892e5f2aed2a
|
[
"MIT"
] | 1
|
2019-06-27T16:31:20.000Z
|
2019-06-27T16:31:20.000Z
|
examples/trials/mnist-annotation/mnist.py
|
0Deer0/nni
|
b4e9f0878d3f74742ae3acd783ddff303b4732cb
|
[
"MIT"
] | null | null | null |
examples/trials/mnist-annotation/mnist.py
|
0Deer0/nni
|
b4e9f0878d3f74742ae3acd783ddff303b4732cb
|
[
"MIT"
] | 1
|
2019-01-04T00:32:14.000Z
|
2019-01-04T00:32:14.000Z
|
"""A deep MNIST classifier using convolutional layers."""
import logging
import math
import tempfile
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
FLAGS = None
logger = logging.getLogger('mnist_AutoML')
class MnistNetwork(object):
'''
    MnistNetwork is for initializing and building a basic network for MNIST.
'''
def __init__(self,
channel_1_num,
channel_2_num,
conv_size,
hidden_size,
pool_size,
learning_rate,
x_dim=784,
y_dim=10):
self.channel_1_num = channel_1_num
self.channel_2_num = channel_2_num
"""@nni.variable(nni.choice(2, 3, 5, 7),name=self.conv_size)"""
self.conv_size = conv_size
"""@nni.variable(nni.choice(124, 512, 1024), name=self.hidden_size)"""
self.hidden_size = hidden_size
self.pool_size = pool_size
"""@nni.variable(nni.uniform(0.0001, 0.1), name=self.learning_rate)"""
self.learning_rate = learning_rate
self.x_dim = x_dim
self.y_dim = y_dim
self.images = tf.placeholder(tf.float32, [None, self.x_dim], name='input_x')
self.labels = tf.placeholder(tf.float32, [None, self.y_dim], name='input_y')
self.keep_prob = tf.placeholder(tf.float32, name='keep_prob')
self.train_step = None
self.accuracy = None
def build_network(self):
'''
Building network for mnist
'''
# Reshape to use within a convolutional neural net.
# Last dimension is for "features" - there is only one here, since images are
# grayscale -- it would be 3 for an RGB image, 4 for RGBA, etc.
with tf.name_scope('reshape'):
try:
input_dim = int(math.sqrt(self.x_dim))
except:
print(
                    'input dim is not a perfect square, cannot reshape. input dim: ' + str(self.x_dim))
logger.debug(
                    'input dim is not a perfect square, cannot reshape. input dim: %s', str(self.x_dim))
raise
x_image = tf.reshape(self.images, [-1, input_dim, input_dim, 1])
# First convolutional layer - maps one grayscale image to 32 feature maps.
with tf.name_scope('conv1'):
w_conv1 = weight_variable(
[self.conv_size, self.conv_size, 1, self.channel_1_num])
b_conv1 = bias_variable([self.channel_1_num])
"""@nni.function_choice(tf.nn.relu(conv2d(x_image, w_conv1) + b_conv1), tf.nn.sigmoid(conv2d(x_image, w_conv1) + b_conv1), tf.nn.tanh(conv2d(x_image, w_conv1) + b_conv1), name=tf.nn.relu)"""
h_conv1 = tf.nn.relu(conv2d(x_image, w_conv1) + b_conv1)
# Pooling layer - downsamples by 2X.
with tf.name_scope('pool1'):
"""@nni.function_choice(max_pool(h_conv1, self.pool_size), avg_pool(h_conv1, self.pool_size), name=max_pool)"""
h_pool1 = max_pool(h_conv1, self.pool_size)
# Second convolutional layer -- maps 32 feature maps to 64.
with tf.name_scope('conv2'):
w_conv2 = weight_variable([self.conv_size, self.conv_size,
self.channel_1_num, self.channel_2_num])
b_conv2 = bias_variable([self.channel_2_num])
h_conv2 = tf.nn.relu(conv2d(h_pool1, w_conv2) + b_conv2)
# Second pooling layer.
with tf.name_scope('pool2'):
h_pool2 = max_pool(h_conv2, self.pool_size)
# Fully connected layer 1 -- after 2 round of downsampling, our 28x28 image
# is down to 7x7x64 feature maps -- maps this to 1024 features.
last_dim = int(input_dim / (self.pool_size * self.pool_size))
with tf.name_scope('fc1'):
w_fc1 = weight_variable(
[last_dim * last_dim * self.channel_2_num, self.hidden_size])
b_fc1 = bias_variable([self.hidden_size])
h_pool2_flat = tf.reshape(
h_pool2, [-1, last_dim * last_dim * self.channel_2_num])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, w_fc1) + b_fc1)
# Dropout - controls the complexity of the model, prevents co-adaptation of features.
with tf.name_scope('dropout'):
h_fc1_drop = tf.nn.dropout(h_fc1, self.keep_prob)
# Map the 1024 features to 10 classes, one for each digit
with tf.name_scope('fc2'):
w_fc2 = weight_variable([self.hidden_size, self.y_dim])
b_fc2 = bias_variable([self.y_dim])
y_conv = tf.matmul(h_fc1_drop, w_fc2) + b_fc2
with tf.name_scope('loss'):
cross_entropy = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(labels=self.labels, logits=y_conv))
with tf.name_scope('adam_optimizer'):
self.train_step = tf.train.AdamOptimizer(
self.learning_rate).minimize(cross_entropy)
with tf.name_scope('accuracy'):
correct_prediction = tf.equal(
tf.argmax(y_conv, 1), tf.argmax(self.labels, 1))
self.accuracy = tf.reduce_mean(
tf.cast(correct_prediction, tf.float32))
def conv2d(x_input, w_matrix):
"""conv2d returns a 2d convolution layer with full stride."""
return tf.nn.conv2d(x_input, w_matrix, strides=[1, 1, 1, 1], padding='SAME')
def max_pool(x_input, pool_size):
"""max_pool downsamples a feature map by 2X."""
return tf.nn.max_pool(x_input, ksize=[1, pool_size, pool_size, 1],
strides=[1, pool_size, pool_size, 1], padding='SAME')
def avg_pool(x_input, pool_size):
return tf.nn.avg_pool(x_input, ksize=[1, pool_size, pool_size, 1],
strides=[1, pool_size, pool_size, 1], padding='SAME')
def weight_variable(shape):
"""weight_variable generates a weight variable of a given shape."""
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
"""bias_variable generates a bias variable of a given shape."""
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
def main(params):
'''
    Main function: build the MNIST network, run it, and send the result to NNI.
'''
# Import data
mnist = input_data.read_data_sets(params['data_dir'], one_hot=True)
    print('Mnist download data done.')
    logger.debug('Mnist download data done.')
# Create the model
# Build the graph for the deep net
mnist_network = MnistNetwork(channel_1_num=params['channel_1_num'],
channel_2_num=params['channel_2_num'],
conv_size=params['conv_size'],
hidden_size=params['hidden_size'],
pool_size=params['pool_size'],
learning_rate=params['learning_rate'])
mnist_network.build_network()
logger.debug('Mnist build network done.')
# Write log
graph_location = tempfile.mkdtemp()
logger.debug('Saving graph to: %s', graph_location)
train_writer = tf.summary.FileWriter(graph_location)
train_writer.add_graph(tf.get_default_graph())
test_acc = 0.0
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
"""@nni.variable(nni.choice(50, 250, 500), name=batch_num)"""
batch_num = params['batch_num']
        for i in range(batch_num):
            # Note: batch_num is reused here as both the number of steps and the batch size.
            batch = mnist.train.next_batch(batch_num)
"""@nni.variable(nni.choice(1, 5), name=dropout_rate)"""
dropout_rate = params['dropout_rate']
mnist_network.train_step.run(feed_dict={mnist_network.images: batch[0],
mnist_network.labels: batch[1],
mnist_network.keep_prob: dropout_rate}
)
if i % 100 == 0:
test_acc = mnist_network.accuracy.eval(
feed_dict={mnist_network.images: mnist.test.images,
mnist_network.labels: mnist.test.labels,
mnist_network.keep_prob: 1.0})
"""@nni.report_intermediate_result(test_acc)"""
logger.debug('test accuracy %g', test_acc)
logger.debug('Pipe send intermediate result done.')
test_acc = mnist_network.accuracy.eval(
feed_dict={mnist_network.images: mnist.test.images,
mnist_network.labels: mnist.test.labels,
mnist_network.keep_prob: 1.0})
"""@nni.report_final_result(test_acc)"""
logger.debug('Final result is %g', test_acc)
logger.debug('Send final result done.')
def generate_default_params():
'''
Generate default parameters for mnist network.
'''
params = {
'data_dir': '/tmp/tensorflow/mnist/input_data',
'dropout_rate': 0.5,
'channel_1_num': 32,
'channel_2_num': 64,
'conv_size': 5,
'pool_size': 2,
'hidden_size': 1024,
'learning_rate': 1e-4,
'batch_num': 200}
return params
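# Illustrative note (added sketch, not part of the original example): the
# """@nni.variable(...)""" and """@nni.function_choice(...)""" strings above are
# NNI annotations that the NNI SDK rewrites at run time, so this script is
# normally launched through an NNI experiment rather than run directly, e.g.:
#   nnictl create --config config.yml
# where config.yml is a placeholder for the experiment configuration file.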
if __name__ == '__main__':
try:
        main(generate_default_params())
except Exception as exception:
logger.exception(exception)
raise
| 39.649789
| 202
| 0.599766
|
122ae92db8c0fba7f33754fcbab35f9e25571cce
| 1,032
|
py
|
Python
|
rsp2/src/python/rsp2/io/structure_dir.py
|
colin-daniels/agnr-ml
|
fc936cb8b6a68c37dfaf64c74796e0cf795c1bb8
|
[
"MIT"
] | null | null | null |
rsp2/src/python/rsp2/io/structure_dir.py
|
colin-daniels/agnr-ml
|
fc936cb8b6a68c37dfaf64c74796e0cf795c1bb8
|
[
"MIT"
] | null | null | null |
rsp2/src/python/rsp2/io/structure_dir.py
|
colin-daniels/agnr-ml
|
fc936cb8b6a68c37dfaf64c74796e0cf795c1bb8
|
[
"MIT"
] | null | null | null |
import json
import os
from pymatgen.io.vasp import Poscar
def from_path(path):
# TODO: should maybe support .tar.gz or .tar.xz
return StructureDir.from_dir(path)
class StructureDir:
def __init__(self, *, layers, masses, layer_sc_matrices, structure):
self.layers = layers
self.masses = masses
self.layer_sc_matrices = layer_sc_matrices
self.structure = structure
@classmethod
def from_dir(cls, path):
structure = Poscar.from_file(os.path.join(path, 'POSCAR')).structure
with open(os.path.join(path, 'meta.json')) as f:
meta = json.load(f)
layer_sc_matrices = meta.pop('layer_sc_matrices', None) or meta.pop('layer-sc-matrices', None)
if layer_sc_matrices:
layer_sc_matrices = [x['matrix'] for x in layer_sc_matrices]
return cls(
layers=meta.pop('layers', None),
masses=meta.pop('masses', None),
layer_sc_matrices=layer_sc_matrices,
structure=structure,
)
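# Illustrative usage sketch (added, not part of the original module); the
# directory name below is a placeholder for any rsp2 structure directory that
# contains the expected POSCAR and meta.json files.
def _example_read_structure(path='some-structure-dir'):
    sdir = from_path(path)
    # layer_sc_matrices, if present, has already been unwrapped to plain matrices
    return sdir.layers, sdir.masses, sdir.layer_sc_matrices, sdir.structure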
| 32.25
| 102
| 0.643411
|
0276a00a250a94c7b7e52ba7fc6141e96292263b
| 803
|
py
|
Python
|
app_shop/urls.py
|
saoguang/hnist2
|
cac02dd1f9e860a24c99cdf10499307df33dad3b
|
[
"MIT"
] | null | null | null |
app_shop/urls.py
|
saoguang/hnist2
|
cac02dd1f9e860a24c99cdf10499307df33dad3b
|
[
"MIT"
] | 1
|
2020-11-30T01:23:53.000Z
|
2020-11-30T01:23:53.000Z
|
app_shop/urls.py
|
saoguang/hnist2
|
cac02dd1f9e860a24c99cdf10499307df33dad3b
|
[
"MIT"
] | null | null | null |
"""hnist2 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from app_shop import views
app_name = 'shop'
urlpatterns = [
path('index/', views.index, name='index'),
]
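# Illustrative sketch (assumption, not part of this app): the project-level
# urls.py would typically mount these routes under a prefix, for example:
#
#   from django.urls import include, path
#   urlpatterns = [
#       path('shop/', include('app_shop.urls', namespace='shop')),
#   ]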
| 33.458333
| 77
| 0.711083
|
869faab6c6664ae441633d4ca2dd843121da41a1
| 667
|
py
|
Python
|
dbt_gen/py3env/bin/rst2s5.py
|
norton120/dbt_gen
|
712fc8698a77c3372f5a403a5ae50711d0cb3c7d
|
[
"MIT"
] | null | null | null |
dbt_gen/py3env/bin/rst2s5.py
|
norton120/dbt_gen
|
712fc8698a77c3372f5a403a5ae50711d0cb3c7d
|
[
"MIT"
] | null | null | null |
dbt_gen/py3env/bin/rst2s5.py
|
norton120/dbt_gen
|
712fc8698a77c3372f5a403a5ae50711d0cb3c7d
|
[
"MIT"
] | null | null | null |
#!/home/ethan/dbt_gen/dbt_gen/py3env/bin/python3
# $Id: rst2s5.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: Chris Liechti <cliechti@gmx.net>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing HTML slides using
the S5 template system.
"""
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline, default_description
description = ('Generates S5 (X)HTML slideshow documents from standalone '
'reStructuredText sources. ' + default_description)
publish_cmdline(writer_name='s5', description=description)
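# Example invocation (added note; file names are placeholders):
#   rst2s5.py slides.rst slides.html
# Run with --help to list the S5/HTML-specific options provided by Docutils.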
| 26.68
| 74
| 0.743628
|
55b0ac1b94393a479b7e3b28497638147b763376
| 10,187
|
py
|
Python
|
kardioml/segmentation/teijeiro/model/automata.py
|
Seb-Good/physionet-challenge-2020
|
c6f1648a148335babc0a26d8a589120616327548
|
[
"BSD-2-Clause"
] | 13
|
2020-12-18T08:09:34.000Z
|
2022-03-15T04:51:46.000Z
|
kardioml/segmentation/teijeiro/model/automata.py
|
Seb-Good/physionet-challenge-2020
|
c6f1648a148335babc0a26d8a589120616327548
|
[
"BSD-2-Clause"
] | null | null | null |
kardioml/segmentation/teijeiro/model/automata.py
|
Seb-Good/physionet-challenge-2020
|
c6f1648a148335babc0a26d8a589120616327548
|
[
"BSD-2-Clause"
] | 14
|
2020-09-16T21:07:38.000Z
|
2022-03-23T14:01:24.000Z
|
# -*- coding: utf-8 -*-
# pylint: disable-msg=
"""
Created on Thu Dec 19 18:02:37 2013
This module contains the model definition for the automata-based abstraction
patterns. Since abstraction patterns are defined by regular grammars, the
internal representation is based on finite automatas.
@author: T. Teijeiro
"""
from .FreezableObject import FreezableObject
from .observable import Observable
from .constraint_network import verify
from collections import defaultdict
import itertools as it
ABSTRACTED = True
ENVIRONMENT = False
def NULL_CONST(pattern, obs):
"""Default constraint definition"""
return None
def NULL_PROC(pattern):
"""Default observation procedure"""
return None
def BASIC_TCONST(pattern, obs):
"""
    This function defines the basic temporal constraints that any observation
    within an abstraction pattern should satisfy. These constraints state that
    the beginning of an observation has to occur before its ending.
"""
if obs.start is not obs.time:
pattern.last_tnet.set_before(obs.start, obs.time)
if obs.time is not obs.end:
pattern.last_tnet.set_before(obs.time, obs.end)
class Transition(FreezableObject):
"""
Model for the transition of an automata, allowing the attribute
specification as a function on a partial-recognized pattern. It also
includes a specific attribute to indicate the property of the observable
that allows the transition as an Abstracted observation or an Environment
observation.
"""
__slots__ = ('istate', 'fstate', 'observable', 'abstracted', 'tconst', 'gconst')
def __init__(
self,
istate=None,
fstate=None,
observable=None,
abstracted=ABSTRACTED,
tconst=BASIC_TCONST,
gconst=NULL_CONST,
):
"""
Creates a new transition that can be added to a DFA definition. All the
attributes of the transition must be set on the creation, and no
posterior modifications are allowed.
Parameters
----------
istate:
Initial state of the transition.
fstate:
Final state of the transition.
observable:
Observable type that allows the transition. It can be None, to
determine such transitions that simply add constraints, but no
new observations.
abstracted:
Flag that indicates if the observation that allows the transition
should be considered as an Abstracted observation or an Environment
observation.
tconst:
Function that receives as a parameter the AbstractionPattern object,
and adds the temporal constraints introduced by this transition
with the rest of the variables of the pattern, including the
hypothesis. These constraints are added before the matching of the
predicted finding with an actual observation.
gconst:
Function that receives as a parameter the AbstractionPattern object,
and checks any additional constraint in the value of the
observations. These constraints are checked after the matching of
the finding with an actual observation.
"""
super(Transition, self).__init__()
if istate is None or fstate is None:
raise ValueError('Initial and final states must be != None')
self.istate = istate
self.fstate = fstate
self.observable = observable
self.abstracted = abstracted
self.tconst = tconst
self.gconst = gconst
# We don't allow any posterior modification of a transition.
self.freeze()
def __str__(self):
"""Obtains the representation of a transition as a string"""
return '{0} --{1}{2}--> {3}'.format(
self.istate, self.observable, '@' if self.abstracted == ABSTRACTED else '#', self.fstate
)
def __repr__(self):
return str(self)
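# Illustrative sketch (added, not part of the original module): a standalone
# transition; states and the observable are plain values here, mirroring the
# string-based test at the bottom of this file.
def _example_transition():
    return Transition(istate='S0', fstate='S1', observable='a',
                      abstracted=ENVIRONMENT)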
class PatternAutomata(FreezableObject):
"""
This class represents an automata created by adding *Transition* instances.
It also includes the definition of the final states of the pattern, and
the Observable class that represents the hypothesis of the pattern.
"""
__slots__ = ('name', 'Hypothesis', 'transitions', 'abstractions', 'final_states', 'obs_proc')
def __init__(self):
"""
Creates a new empty pattern automata.
Instance properties
-------------------
name:
String representing the name of the pattern.
Hypothesis:
Observable type that constitutes the hypothesis of the pattern.
transitions:
List of transitions of the pattern automata.
abstractions:
Dictionary that maps each abstracted observable with the
transitions that can be used to perform that abstraction in an
abductive way.
final_states:
Set of states of the automata that are final or accepting states.
obs_proc:
Observation procedure that is executed once the necessary evidence
for the pattern has been observed. It receives a single parameter,
the abstraction pattern to apply the procedure.
"""
super(PatternAutomata, self).__init__()
self.name = ''
self.Hypothesis = Observable
self.transitions = []
self.abstractions = defaultdict(tuple)
self.final_states = set()
self.obs_proc = NULL_PROC
def __str__(self):
return self.name
def __repr__(self):
return self.name if len(self.name) > 0 else object.__repr__(self)
def freeze(self):
"""We override the freeze method to check additional constraints"""
verify(self.final_states, 'The automata does not have any final state')
# We use a tuple to avoid posterior changes on the transitions
self.transitions = tuple(self.transitions)
super(PatternAutomata, self).freeze()
def add_transition(
self,
istate=None,
fstate=None,
observable=None,
abstracted=ABSTRACTED,
tconst=NULL_CONST,
gconst=NULL_CONST,
):
"""
Adds a new *Transition* to this automata.
Parameters
----------
istate:
Initial state of the transition.
fstate:
Final state of the transition.
observable:
Observable type that allows the transition.
abstracted:
Flag that indicates if the observation that allows the transition
should be considered as an Abstracted observation or an Environment
observation.
tconst:
Function that receives as a parameter the AbstractionPattern object,
and adds the temporal constraints introduced by this transition
with the rest of the variables of the pattern, including the
hypothesis. These constraints are added before the matching of the
predicted finding with an actual observation.
gconst:
Function that receives as a parameter the AbstractionPattern object,
and checks any additional constraint in the value of the
observations. These constraints are checked after the matching of
the finding with an actual observation.
"""
assert abstracted in (ABSTRACTED, ENVIRONMENT), 'Invalid abstraction'
transition = Transition(istate, fstate, observable, abstracted, tconst, gconst)
self.transitions.append(transition)
# All states (except the initial one) must be reached by at least one
# transition.
for state in self.states:
if state != self.start_state:
verify(self.tr_to(state))
@property
def states(self):
"""Obtains the set of states of this automata"""
return set(it.chain.from_iterable((t.istate, t.fstate) for t in self.transitions))
@property
def start_state(self):
"""Obtains the initial state of this automata"""
if not self.transitions:
raise IndexError('This automata does not contain any state')
return self.transitions[0].istate
@property
def abstracted(self):
"""Obtains the set of observables abstracted by this pattern"""
return {
t.observable for t in self.transitions if t.abstracted is ABSTRACTED and t.observable is not None
}
@property
def environment(self):
"""
Obtains the set of observables that are the environment of this
pattern.
"""
return {
t.observable
for t in self.transitions
if t.abstracted is ENVIRONMENT and t.observable is not None
}
@property
def manifestations(self):
"""
Obtains the set of observables over which this pattern is created. It
corresponds to the alphabet of the pattern in the classical DFA
nomenclature.
"""
return set.union(self.abstracted, self.environment)
def tr_from(self, state):
"""
Obtains the list of transitions starting in a given state.
"""
return [t for t in self.transitions if t.istate == state]
def tr_to(self, state):
"""
Obtains the list of transitions finishing in a given state.
"""
return [t for t in self.transitions if t.fstate == state]
if __name__ == "__main__":
# Small test for automata creation.
DFA = PatternAutomata()
DFA.add_transition('S', 'A', 'a', ENVIRONMENT)
DFA.add_transition('A', 'A', 'a')
DFA.add_transition('A', 'B', 'b')
DFA.add_transition('B', 'B', 'b')
DFA.add_transition('B', 'C', 'c')
DFA.add_transition('C', 'D', 'c')
DFA.add_transition('D', 'E', 'd')
DFA.add_transition('E', 'E', 'd')
DFA.add_transition('E', 'F', 'e')
DFA.add_transition('F', 'G', 'f')
DFA.add_transition('G', 'F', 'e')
DFA.add_transition('F', 'H', 'f', ENVIRONMENT)
DFA.final_states.add('H')
DFA.freeze()
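    # Added illustration: inspect the derived automata properties after freezing.
    print('states:', sorted(DFA.states))
    print('start state:', DFA.start_state)
    print('abstracted observables:', sorted(DFA.abstracted))
    print('environment observables:', sorted(DFA.environment))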
| 35.494774
| 109
| 0.640326
|
91a3b0b288114f05d07d3b80c229fe0ffeebd59d
| 17,014
|
py
|
Python
|
lib/tool_shed/util/metadata_util.py
|
rikeshi/galaxy
|
c536a877e4a9b3d12aa0d00fd4d5e705109a0d0a
|
[
"CC-BY-3.0"
] | 4
|
2015-05-12T20:36:41.000Z
|
2017-06-26T15:34:02.000Z
|
lib/tool_shed/util/metadata_util.py
|
rikeshi/galaxy
|
c536a877e4a9b3d12aa0d00fd4d5e705109a0d0a
|
[
"CC-BY-3.0"
] | 52
|
2015-03-16T14:02:14.000Z
|
2021-12-24T09:50:23.000Z
|
lib/tool_shed/util/metadata_util.py
|
rikeshi/galaxy
|
c536a877e4a9b3d12aa0d00fd4d5e705109a0d0a
|
[
"CC-BY-3.0"
] | 1
|
2016-03-21T12:54:06.000Z
|
2016-03-21T12:54:06.000Z
|
import logging
from operator import itemgetter
from sqlalchemy import and_
from galaxy.tool_shed.util.hg_util import (
INITIAL_CHANGELOG_HASH,
reversed_lower_upper_bounded_changelog,
)
from galaxy.tool_shed.util.repository_util import get_repository_by_name_and_owner
from galaxy.util.tool_shed.common_util import parse_repository_dependency_tuple
from tool_shed.util.hg_util import changeset2rev
log = logging.getLogger(__name__)
def get_all_dependencies(app, metadata_entry, processed_dependency_links=None):
    if processed_dependency_links is None:  # avoid a shared mutable default list
        processed_dependency_links = []
    encoder = app.security.encode_id
value_mapper = {'repository_id': encoder, 'id': encoder, 'user_id': encoder}
metadata = metadata_entry.to_dict(value_mapper=value_mapper, view='element')
db = app.model.context.current
returned_dependencies = []
required_metadata = get_dependencies_for_metadata_revision(app, metadata)
if required_metadata is None:
return metadata
for dependency_metadata in required_metadata:
dependency_dict = dependency_metadata.to_dict(value_mapper=value_mapper, view='element')
dependency_link = (metadata['id'], dependency_dict['id'])
if dependency_link in processed_dependency_links:
continue
processed_dependency_links.append(dependency_link)
repository = db.query(app.model.Repository).get(app.security.decode_id(dependency_dict['repository_id']))
dependency_dict['repository'] = repository.to_dict(value_mapper=value_mapper)
if dependency_metadata.includes_tools:
dependency_dict['tools'] = dependency_metadata.metadata['tools']
dependency_dict['repository_dependencies'] = []
if dependency_dict['includes_tool_dependencies']:
dependency_dict['tool_dependencies'] = repository.get_tool_dependencies(app, dependency_dict['changeset_revision'])
if dependency_dict['has_repository_dependencies']:
dependency_dict['repository_dependencies'] = get_all_dependencies(app, dependency_metadata, processed_dependency_links)
else:
dependency_dict['repository_dependencies'] = []
returned_dependencies.append(dependency_dict)
return returned_dependencies
def get_current_repository_metadata_for_changeset_revision(app, repository, changeset_revision):
encoded_repository_id = app.security.encode_id(repository.id)
repository_metadata = get_repository_metadata_by_changeset_revision(app,
encoded_repository_id,
changeset_revision)
if repository_metadata:
return repository_metadata
# The installable changeset_revision may have been changed because it was "moved ahead"
# in the repository changelog.
updated_changeset_revision = get_next_downloadable_changeset_revision(app, repository, after_changeset_revision=changeset_revision)
if updated_changeset_revision and updated_changeset_revision != changeset_revision:
repository_metadata = get_repository_metadata_by_changeset_revision(app,
encoded_repository_id,
updated_changeset_revision)
if repository_metadata:
return repository_metadata
return None
def get_dependencies_for_metadata_revision(app, metadata):
dependencies = []
for shed, name, owner, changeset, prior, _ in metadata['repository_dependencies']:
required_repository = get_repository_by_name_and_owner(app, name, owner)
updated_changeset = get_next_downloadable_changeset_revision(app, required_repository, changeset)
if updated_changeset is None:
continue
metadata_entry = get_repository_metadata_by_changeset_revision(app, app.security.encode_id(required_repository.id), updated_changeset)
dependencies.append(metadata_entry)
return dependencies
def get_latest_changeset_revision(app, repository):
repository_tip = repository.tip()
repository_metadata = get_repository_metadata_by_changeset_revision(app,
app.security.encode_id(repository.id),
repository_tip)
if repository_metadata and repository_metadata.downloadable:
return repository_tip
changeset_revisions = [revision[1] for revision in get_metadata_revisions(app, repository)]
if changeset_revisions:
return changeset_revisions[-1]
return INITIAL_CHANGELOG_HASH
def get_latest_downloadable_changeset_revision(app, repository):
repository_tip = repository.tip()
repository_metadata = get_repository_metadata_by_changeset_revision(app, app.security.encode_id(repository.id), repository_tip)
if repository_metadata and repository_metadata.downloadable:
return repository_tip
changeset_revisions = [revision[1] for revision in get_metadata_revisions(app, repository)]
if changeset_revisions:
return changeset_revisions[-1]
return INITIAL_CHANGELOG_HASH
def get_latest_repository_metadata(app, decoded_repository_id, downloadable=False):
"""Get last metadata defined for a specified repository from the database."""
sa_session = app.model.context.current
repository = sa_session.query(app.model.Repository).get(decoded_repository_id)
if downloadable:
changeset_revision = get_latest_downloadable_changeset_revision(app, repository)
else:
changeset_revision = get_latest_changeset_revision(app, repository)
return get_repository_metadata_by_changeset_revision(app,
app.security.encode_id(repository.id),
changeset_revision)
def get_metadata_revisions(app, repository, sort_revisions=True, reverse=False, downloadable=True):
"""
Return a list of changesets for the provided repository.
"""
sa_session = app.model.context.current
if downloadable:
metadata_revisions = repository.downloadable_revisions
else:
metadata_revisions = repository.metadata_revisions
repo_path = repository.repo_path(app)
changeset_tups = []
for repository_metadata in metadata_revisions:
if repository_metadata.numeric_revision == -1 or repository_metadata.numeric_revision is None:
try:
rev = changeset2rev(repo_path, repository_metadata.changeset_revision)
repository_metadata.numeric_revision = rev
sa_session.add(repository_metadata)
sa_session.flush()
except Exception:
rev = -1
else:
rev = repository_metadata.numeric_revision
changeset_tups.append((rev, repository_metadata.changeset_revision))
if sort_revisions:
changeset_tups.sort(key=itemgetter(0), reverse=reverse)
return changeset_tups
def get_next_downloadable_changeset_revision(app, repository, after_changeset_revision):
"""
Return the installable changeset_revision in the repository changelog after the changeset to which
after_changeset_revision refers. If there isn't one, return None. If there is only one installable
changeset, and that matches the requested revision, return it.
"""
changeset_revisions = [revision[1] for revision in get_metadata_revisions(app, repository)]
if len(changeset_revisions) == 1:
changeset_revision = changeset_revisions[0]
if changeset_revision == after_changeset_revision:
return after_changeset_revision
found_after_changeset_revision = False
repo = repository.hg_repo
for changeset in repo.changelog:
changeset_revision = str(repo[changeset])
if found_after_changeset_revision:
if changeset_revision in changeset_revisions:
return changeset_revision
elif changeset_revision == after_changeset_revision:
# We've found the changeset in the changelog for which we need to get the next downloadable changeset.
found_after_changeset_revision = True
return None
def get_previous_metadata_changeset_revision(app, repository, before_changeset_revision, downloadable=True):
"""
Return the changeset_revision in the repository changelog that has associated metadata prior to
the changeset to which before_changeset_revision refers. If there isn't one, return the hash value
of an empty repository changelog, INITIAL_CHANGELOG_HASH.
"""
changeset_revisions = [revision[1] for revision in get_metadata_revisions(app, repository)]
if len(changeset_revisions) == 1:
changeset_revision = changeset_revisions[0]
if changeset_revision == before_changeset_revision:
return INITIAL_CHANGELOG_HASH
return changeset_revision
previous_changeset_revision = None
for changeset_revision in changeset_revisions:
if changeset_revision == before_changeset_revision:
if previous_changeset_revision:
return previous_changeset_revision
else:
# Return the hash value of an empty repository changelog - note that this will not be a valid changeset revision.
return INITIAL_CHANGELOG_HASH
else:
previous_changeset_revision = changeset_revision
def get_repository_dependency_tups_from_repository_metadata(app, repository_metadata, deprecated_only=False):
"""
    Return a list of tuples defining repository objects required by the received repository. The returned
list defines the entire repository dependency tree. This method is called only from the Tool Shed.
"""
dependency_tups = []
if repository_metadata is not None:
metadata = repository_metadata.metadata
if metadata:
repository_dependencies_dict = metadata.get('repository_dependencies', None)
if repository_dependencies_dict is not None:
repository_dependency_tups = repository_dependencies_dict.get('repository_dependencies', None)
if repository_dependency_tups is not None:
# The value of repository_dependency_tups is a list of repository dependency tuples like this:
# ['http://localhost:9009', 'package_samtools_0_1_18', 'devteam', 'ef37fc635cb9', 'False', 'False']
for repository_dependency_tup in repository_dependency_tups:
toolshed, name, owner, changeset_revision, pir, oicct = \
parse_repository_dependency_tuple(repository_dependency_tup)
repository = get_repository_by_name_and_owner(app, name, owner)
if repository:
if deprecated_only:
if repository.deprecated:
dependency_tups.append(repository_dependency_tup)
else:
dependency_tups.append(repository_dependency_tup)
else:
log.debug("Cannot locate repository %s owned by %s for inclusion in repository dependency tups." %
(name, owner))
return dependency_tups
def get_repository_metadata_by_changeset_revision(app, id, changeset_revision):
"""Get metadata for a specified repository change set from the database."""
# Make sure there are no duplicate records, and return the single unique record for the changeset_revision.
# Duplicate records were somehow created in the past. The cause of this issue has been resolved, but we'll
# leave this method as is for a while longer to ensure all duplicate records are removed.
sa_session = app.model.context.current
all_metadata_records = sa_session.query(app.model.RepositoryMetadata) \
.filter(and_(app.model.RepositoryMetadata.table.c.repository_id == app.security.decode_id(id),
app.model.RepositoryMetadata.table.c.changeset_revision == changeset_revision)) \
.all()
if len(all_metadata_records) > 1:
# Delete all records older than the last one updated.
for repository_metadata in all_metadata_records[1:]:
sa_session.delete(repository_metadata)
sa_session.flush()
return all_metadata_records[0]
elif all_metadata_records:
return all_metadata_records[0]
return None
def get_repository_metadata_by_id(app, id):
"""Get repository metadata from the database"""
sa_session = app.model.context.current
return sa_session.query(app.model.RepositoryMetadata).get(app.security.decode_id(id))
def get_repository_metadata_by_repository_id_changeset_revision(app, id, changeset_revision, metadata_only=False):
"""Get a specified metadata record for a specified repository in the tool shed."""
if metadata_only:
repository_metadata = get_repository_metadata_by_changeset_revision(app, id, changeset_revision)
if repository_metadata and repository_metadata.metadata:
return repository_metadata.metadata
return None
return get_repository_metadata_by_changeset_revision(app, id, changeset_revision)
def get_repository_metadata_revisions_for_review(repository, reviewed=True):
repository_metadata_revisions = []
metadata_changeset_revision_hashes = []
if reviewed:
for metadata_revision in repository.metadata_revisions:
metadata_changeset_revision_hashes.append(metadata_revision.changeset_revision)
for review in repository.reviews:
if review.changeset_revision in metadata_changeset_revision_hashes:
rmcr_hashes = [rmr.changeset_revision for rmr in repository_metadata_revisions]
if review.changeset_revision not in rmcr_hashes:
repository_metadata_revisions.append(review.repository_metadata)
else:
for review in repository.reviews:
if review.changeset_revision not in metadata_changeset_revision_hashes:
metadata_changeset_revision_hashes.append(review.changeset_revision)
for metadata_revision in repository.metadata_revisions:
if metadata_revision.changeset_revision not in metadata_changeset_revision_hashes:
repository_metadata_revisions.append(metadata_revision)
return repository_metadata_revisions
def get_updated_changeset_revisions(app, name, owner, changeset_revision):
"""
Return a string of comma-separated changeset revision hashes for all available updates to the received changeset
revision for the repository defined by the received name and owner.
"""
repository = get_repository_by_name_and_owner(app, name, owner)
# Get the upper bound changeset revision.
upper_bound_changeset_revision = get_next_downloadable_changeset_revision(app, repository, changeset_revision)
# Build the list of changeset revision hashes defining each available update up to, but excluding
# upper_bound_changeset_revision.
repo = repository.hg_repo
changeset_hashes = []
for changeset in reversed_lower_upper_bounded_changelog(repo, changeset_revision, upper_bound_changeset_revision):
# Make sure to exclude upper_bound_changeset_revision.
if changeset != upper_bound_changeset_revision:
changeset_hashes.append(str(repo[changeset]))
if changeset_hashes:
changeset_hashes_str = ','.join(changeset_hashes)
return changeset_hashes_str
return ''
def is_downloadable(metadata_dict):
# NOTE: although repository README files are considered Galaxy utilities, they have no
# effect on determining if a revision is installable. See the comments in the
# compare_readme_files() method.
if 'datatypes' in metadata_dict:
# We have proprietary datatypes.
return True
if 'repository_dependencies' in metadata_dict:
# We have repository_dependencies.
return True
if 'tools' in metadata_dict:
# We have tools.
return True
if 'tool_dependencies' in metadata_dict:
# We have tool dependencies, and perhaps only tool dependencies!
return True
if 'workflows' in metadata_dict:
# We have exported workflows.
return True
return False
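# Illustrative sketch (added, not part of the original module): the check above
# only looks at which top-level keys are present in the metadata dictionary;
# the key contents below are hypothetical placeholders.
def _example_is_downloadable():
    assert is_downloadable({'tools': [{'id': 'example_tool'}]})
    assert not is_downloadable({'readme_files': ['README.rst']})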
def is_malicious(app, id, changeset_revision, **kwd):
"""Check the malicious flag in repository metadata for a specified change set revision."""
repository_metadata = get_repository_metadata_by_changeset_revision(app, id, changeset_revision)
if repository_metadata:
return repository_metadata.malicious
return False
| 51.557576
| 142
| 0.710709
|
819eea4fcc3e07dc862b3203251c4ed0a37f1297
| 5,332
|
py
|
Python
|
client_ser_simulation/mvsc_server/server/app.py
|
klautenschlaeger/mvsc
|
9cc04ad2353b48e0a6715c859812e1afb67f8bbb
|
[
"MIT"
] | null | null | null |
client_ser_simulation/mvsc_server/server/app.py
|
klautenschlaeger/mvsc
|
9cc04ad2353b48e0a6715c859812e1afb67f8bbb
|
[
"MIT"
] | null | null | null |
client_ser_simulation/mvsc_server/server/app.py
|
klautenschlaeger/mvsc
|
9cc04ad2353b48e0a6715c859812e1afb67f8bbb
|
[
"MIT"
] | 1
|
2021-11-09T01:42:40.000Z
|
2021-11-09T01:42:40.000Z
|
from flask import Flask, jsonify, request
from flask_cors import CORS
import distanceCalculator
POLYS = [[],
[],
[]]
GROUP = [[], [], [1, 2]]
MACHINES = [
{
'driverid': 1,
'drivername': 'test',
'forename': 'test',
'machineid': 'John Deere 5430i_12',
'one': False,
'two': False,
'three': True
}
]
# configuration
DEBUG = True
# instantiate the app
app = Flask(__name__)
app.config.from_object(__name__)
# enable CORS
CORS(app, resources={r'/*': {'origins': '*'}})
disCalci = distanceCalculator.DistanceCalculator()
# Module-level counter used to assign ids to newly registered machines.
driver_id = 2
@app.route('/mv', methods=['GET', 'POST'])
def all_machines():
response_object = {'status': 'success', 'machines': MACHINES}
return jsonify(response_object)
@app.route('/machine', methods=['POST'])
def new_machines():
response_object = {'status': 'success'}
global driver_id
post_data = request.get_json()
MACHINES.append({
'driverid': driver_id,
'drivername': post_data.get('drivername'),
'forename': post_data.get('forename'),
'machineid': post_data.get('machineid'),
'one': post_data.get('one'),
'two': post_data.get('two'),
'three': post_data.get('three')
})
response_object['driverid'] = driver_id
MACHINES2 = [{
'driverid': driver_id,
'drivername': post_data.get('drivername'),
'forename': post_data.get('forename'),
'machineid': post_data.get('machineid'),
'one': post_data.get('one'),
'two': post_data.get('two'),
'three': post_data.get('three')
}]
ids = [driver_id]
if post_data.get('one'):
GROUP[0].append(driver_id)
if post_data.get('two'):
GROUP[1].append(driver_id)
if post_data.get('three'):
GROUP[2].append(driver_id)
for machine in MACHINES:
if post_data.get('one'):
if machine.get('one'):
if machine.get("driverid") not in ids:
MACHINES2.append(machine)
ids.append(machine.get('driverid'))
if post_data.get('two'):
if machine.get('two'):
if machine.get("driverid") not in ids:
MACHINES2.append(machine)
ids.append(machine.get('driverid'))
if post_data.get('three'):
if machine.get('three'):
if machine.get("driverid") not in ids:
MACHINES2.append(machine)
ids.append(machine.get("driverid"))
response_object['message'] = 'Machine added!'
response_object['machines'] = MACHINES2
driver_id = driver_id + 1
return jsonify(response_object)
@app.route('/mv/poly', methods=['POST'])
def all_polys():
response_object = {'status': 'success'}
post_data = request.get_json()
counters = post_data.get('counters')
for i in range(0, 3, 1):
coordinates = []
        if len(POLYS[i]) > 0:
            if len(POLYS[i]) > counters[i]:
                workareas = POLYS[i][counters[i]:]
                counters[i] = len(POLYS[i]) - 1
coordinates = []
for e in workareas:
coordinates.append(e.get('w'))
response_object['polys' + str(i + 1)] = coordinates
response_object['counters'] = counters
return jsonify(response_object)
@app.route('/mv/update', methods=['POST'])
def update_polys():
response_object = {'status': 'success'}
post_data = request.get_json()
driver = int(post_data.get('driverid'))
poly_id = post_data.get('workareaid')
polygon = post_data.get('area')
centre = disCalci.calcCenter(poly=polygon)
structure = {
"w_id": poly_id,
"d_id": driver,
"w": polygon,
"centre": centre
}
needed = []
disCalci.setCentre(polygon)
for i in range(0, 3, 1):
if driver in GROUP[i]:
            if len(POLYS[i]) > 0:
for poly in POLYS[i]:
if poly.get("d_id") != driver:
distance = disCalci.calcDistance(centre_end=poly.get("centre"))
if distance < 150:
print(distance)
code = poly.get("w_id") * 100 + poly.get("d_id")
if code not in needed:
needed.append(code)
POLYS[i].append(structure)
response_object['needed'] = needed
return jsonify(response_object)
@app.route('/missing', methods=['POST'])
def send_polys():
response_object = {'status': 'success'}
post_data = request.get_json()
needed = post_data.get('needed')
needed_polys = []
for code in needed:
driver = code % 100
p_id = int((code - driver)/100)
found = False
i = 0
while not found and i < 3:
for structure in POLYS[i]:
if structure.get("d_id") == driver and structure.get("w_id") == p_id:
needed_polys.append(structure)
found = True
break
i = i + 1
response_object["needed"] = needed_polys
return jsonify(response_object)
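# Illustrative client sketch (added; assumes the `requests` package is available
# and the server runs on localhost:5001 as configured below). The payload
# mirrors the fields read by new_machines() above; the values are placeholders.
def _example_register_machine():
    import requests
    payload = {
        'drivername': 'Doe', 'forename': 'Jane',
        'machineid': 'John Deere 5430i_13',
        'one': True, 'two': False, 'three': False,
    }
    return requests.post('http://localhost:5001/machine', json=payload).json()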
if __name__ == '__main__':
app.run(host="localhost", port=5001, debug=True)
| 31
| 87
| 0.554764
|
8bb7a2deefabfd854edc9a2906e13a0d6c0a27a3
| 7,444
|
py
|
Python
|
enemy.py
|
cxong/Slappa
|
bb601734db07ee1f1e1d3763d2c5f6146248fd76
|
[
"MIT"
] | 7
|
2015-02-24T22:24:45.000Z
|
2021-05-15T16:39:27.000Z
|
enemy.py
|
cxong/Slappa
|
bb601734db07ee1f1e1d3763d2c5f6146248fd76
|
[
"MIT"
] | null | null | null |
enemy.py
|
cxong/Slappa
|
bb601734db07ee1f1e1d3763d2c5f6146248fd76
|
[
"MIT"
] | 1
|
2016-06-22T11:50:22.000Z
|
2016-06-22T11:50:22.000Z
|
from simple_character import *
class Enemy(SimpleCharacter):
def __init__(self, game, x, y, key, players, thing_keys, things):
self.moves = True
if key == 'zombie':
self.init_zombie(game, x, y)
elif key == 'monster':
self.init_monster(game, x, y)
elif key == 'flying':
self.init_flying(game, x, y)
self.body.width = self.width * 0.5
self.body.height = self.height * 0.5
# Random behaviour
self.delay = 60
self.action = 'idle'
# Counter for throwing at a delay
self.throw_counter = 0
self.players = players
self.thing_key = random.choice(thing_keys)
self.thing_group = things
self.hit_offset = Point(0, -25)
self.sounds['swings'] = game.audio['growls']
self.sounds['hurts'] = game.audio['deaths']
self.sounds['deaths'] = game.audio['deaths']
def init_zombie(self, game, x, y):
super(Enemy, self).__init__(game, x, y, 'zombie', (64, 64))
self.body.y = -35
self.anchor.y = 0.95
width = 4
self.animations.animations['idle'] = Animation(game, [0, 1, 2, 3], 20, True)
row = 1 * width
self.animations.animations['walk'] = Animation(
game, [row + x for x in [0, 1, 2, 3]], 20, True)
row = 2 * width
self.animations.animations['hit'] = Animation(
game, [row + x for x in [1, 2, 3]], 10)
row = 3 * width
self.animations.animations['hurt'] = Animation(
game, [row + x for x in [0, 1]], 20)
self.animations.animations['die'] = Animation(
game, [row + x for x in [0, 1, 2, 3]], 5)
self.health = 2
self.speed = 0.1
self.max_speed = 0.05
def init_monster(self, game, x, y):
super(Enemy, self).__init__(game, x, y, 'monster', (64, 64))
self.body.y = -25
self.anchor.y = 0.84
width = 4
self.animations.animations['idle'] = Animation(game, [0, 1, 2, 3], 20, True)
row = width * 1
self.animations.animations['walk'] = Animation(
game, [row + x for x in [0, 1, 2, 3]], 20, True)
row = width * 4
self.animations.animations['jump'] = Animation(
game, [row + x for x in [0, 1, 2, 3, 2, 3, 2, 3]], 14)
row = width * 2
self.animations.animations['hit'] = Animation(
game, [row + x for x in [0, 1, 2, 3, 0]], 7)
row = width * 3
self.animations.animations['hurt'] = Animation(
game, [row + x for x in [0, 1]], 20)
self.animations.animations['die'] = Animation(
game, [row + x for x in [0, 1, 2, 3]], 5)
self.health = 2
self.speed = 0.1
self.max_speed = 0.05
self.gravity = 0.0003
self.jump_force = 0.3
self.moves = False
def init_flying(self, game, x, y):
super(Enemy, self).__init__(game, x, y, 'flying', (64, 64))
width = 4
self.animations.animations['idle'] = Animation(game, [0, 1, 2, 3], 12, True)
self.animations.animations['walk'] = Animation(game, [0, 1, 2, 3], 10, True)
row = width * 1
self.animations.animations['hit'] = Animation(
game, [row + x for x in [0, 1, 2, 3]], 7)
row = width * 2
self.animations.animations['hurt'] = Animation(
game, [row + x for x in [0, 1]], 20)
self.animations.animations['die'] = Animation(
game, [row + x for x in [0, 1, 2, 3]], 5)
self.health = 1
self.speed = 0.2
self.max_speed = 0.1
def update(self, time):
super(Enemy, self).update(time)
if self.throw_counter > 0 and self.health > 0:
self.throw_counter -= time
if self.throw_counter <= 0:
self.throw()
if self.y > self.game.config.FLOOR_Y:
self.y = self.game.config.FLOOR_Y
if self.is_hitting:
return
self.delay -= self.game.config.ANIM_FRAME_RATE / self.game.config.FRAME_RATE
# Perform the action
if self.action == 'idle':
# Idle; don't do anything
pass
elif self.action == 'hit':
self.hit(random.choice(['left', 'right'])) # TODO: hit player
elif self.action == 'move_left':
self.move(-1)
elif self.action == 'move_right':
self.move(1)
elif self.action == 'jump_left':
if not self.is_jumping:
self.jump()
self.move(-1)
elif self.action == 'jump_right':
if not self.is_jumping:
self.jump()
self.move(1)
if self.delay <= 0 or self.action == 'hit':
self.delay = random.randint(40, 60)
# Switch to new action
while True:
new_action = random.choice([
'idle', 'hit', 'move_left', 'move_right',
'jump_left', 'jump_right'])
# Reject impossible actions (need to place hits between idles)
if self.action != 'idle' and new_action == 'hit':
continue
if self.action == 'hit' and new_action != 'idle':
continue
# Don't do same thing twice
if self.action == new_action:
continue
# Try to move towards center of screen
if (new_action in ('move_left', 'jump_left') and
self.x < self.game.width / 4):
continue
if (new_action in ('move_right', 'jump_right') and
self.x > self.game.width * 3 / 4):
continue
# See if we can jump
if self.gravity == 0.0 and (
new_action in ('jump_left', 'jump_right')):
continue
# See if we can move
if not self.moves and (
new_action in ('move_left', 'move_right')):
continue
self.action = new_action
break
def do_hit(self, direction):
players_alive = [p for p in self.players.children if p.health > 0]
if len(players_alive) == 0:
return
super(Enemy, self).do_hit(direction)
self.throw_counter = 500
def throw(self):
players_alive = [p for p in self.players.children if p.health > 0]
if len(players_alive) == 0:
return
# Throw a thing at a player
player = random.choice(players_alive)
player_center = player.get_body_center()
# Randomly offset target
player_center.add(Point(
random.uniform(-self.game.config.ENEMY_TARGET_OFFSET,
self.game.config.ENEMY_TARGET_OFFSET),
random.uniform(-self.game.config.ENEMY_TARGET_OFFSET,
self.game.config.ENEMY_TARGET_OFFSET)))
player_center.y = min([self.game.config.FLOOR_Y + player.body.y,
player_center.y])
self.thing_group.add(Thing(self.game,
self.x + self.hit_offset.x,
self.y + self.hit_offset.y,
self.thing_key,
player_center))
| 38.371134
| 84
| 0.510075
|
aebfe2728148f79689e01069f184a23b76e6fd06
| 6,484
|
py
|
Python
|
Heat_Transfer/4.3_Pipe_with_&_out_cover.py
|
Daz-Riza-Seriog/Transport_Phenomena
|
822b89556fa56ef57494a318cbb03524e3a4d237
|
[
"MIT"
] | 4
|
2021-03-19T00:15:20.000Z
|
2021-11-17T11:32:28.000Z
|
Heat_Transfer/4.3_Pipe_with_&_out_cover.py
|
Daz-Riza-Seriog/Transport_Phenomena
|
822b89556fa56ef57494a318cbb03524e3a4d237
|
[
"MIT"
] | null | null | null |
Heat_Transfer/4.3_Pipe_with_&_out_cover.py
|
Daz-Riza-Seriog/Transport_Phenomena
|
822b89556fa56ef57494a318cbb03524e3a4d237
|
[
"MIT"
] | 1
|
2021-03-22T23:26:50.000Z
|
2021-03-22T23:26:50.000Z
|
# Code made for Sergio Andrés Díaz Ariza
# 12 April 2021
# License MIT
# Transport Phenomena: Python Program-Assessment 4.3
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from scipy.optimize import minimize
sns.set()
# Solve for Temperature of Steam at given Pressure
class enviroment_convective:
def temp_steam_sat_reg(self, Sat_pressure_1, Sat_pressure_2, Temp_from_pressure_1, Temp_from_pressure_2,
Sat_pressure_system):
p1 = Sat_pressure_1 # [kPa]
p2 = Sat_pressure_2 # [kPa]
T1 = Temp_from_pressure_1 + 273.15 # [K]
T2 = Temp_from_pressure_2 + 273.15 # [K]
P_x = Sat_pressure_system # [kPa]
m = (T2 - T1) / (p2 - p1)
T = m * P_x - (m * p1) + T1
return T
# Optimize for the maximum allowed temperature difference
class Optimice:
def objective_T(self, x, *args):
T_supp, r = args[0], args[1]
thk = 0.015
x1 = x[0] # C1
x2 = x[1] # C2
return T_supp - ((x1 * np.log(r + thk)) - x2)
def constraint_BC1_BC2(self, x):
r, T_in = (0.025, 484.8362745098039)
K, thk, h_in, T_out, h_out, e = (15.6, 0.015, 30, 25 + 273.15, 5, 0.3)
x1 = x[0] # C1
x2 = x[1] # C2
R_conv_1 = (1 / (2 * np.pi * (r)) * h_in)
h_comb = (2 * np.pi * (r + thk)) * (h_out + e * 5.670e-8 * (x1 * np.log(r + thk) + x2 - T_out)
* ((x1 * np.log(r + thk) + x2) ** 2 + T_out ** 2))
R_cond = np.log(thk) / (2 * np.pi * K)
return ((T_in - T_out) / (R_conv_1 + R_cond + (1 / h_comb))) + ((K * x1) / r)
def objective_T_II(self, x, *args):
T_supp, r = args[0], args[1]
x1 = x[0] # C1
x2 = x[1] # C2
return T_supp - ((x1 * np.log(r)) - x2)
def constraint_BC1_BC2_II(self, x):
r, T_in = (0.025, 484.8362745098039)
K, thk_1, h_in, T_out, h_out = (15.6, 0.015, 30, 25 + 273.15, 5)
K_2, thk_2, e = (0.25, 0.012, 0.8)
x1 = x[0] # C1
x2 = x[1] # C2
R_conv_1 = (1 / (2 * np.pi * r) * h_in)
R_cond = np.log(thk_1) / (2 * np.pi * K)
R_cond_2 = np.log(thk_2) / (2 * np.pi * K_2)
h_comb = (2 * np.pi * (r + thk_1 + thk_2)) * (
h_out + e * 5.670e-8 * (x1 * np.log(r + thk_1 + thk_2) + x2 - T_out)
* ((x1 * np.log(r + thk_1 + thk_2) + x2) ** 2 + T_out ** 2))
return ((T_in - T_out) / (R_conv_1 + R_cond + R_cond_2 + (1 / h_comb))) + ((K * x1) / r)
# Determine the heat flux Q with and without the cover
class Q_determine:
def Q_uncover(self, r, T_in, K, thk, h_in, T_out, h_out, e, Delta_T):
        T_surf = (T_in - Delta_T) + 273.15  # use the passed-in steam temperature
R_conv_1 = (1 / (2 * np.pi * r) * h_in)
h_comb = (2 * np.pi * (r + thk)) * (h_out + e * 5.670e-8 * (T_surf - T_out)
* (T_surf ** 2 + T_out ** 2))
R_cond = np.log(thk) / (2 * np.pi * K)
Q = ((T_in - T_out) / (R_conv_1 + R_cond + (1 / h_comb)))
return Q
def Q_cover(self, r, T_in, K, K_2, thk_1, thk_2, h_in, T_out, h_out, e, Delta_T):
        T_surf = (T_in - Delta_T) + 273.15  # use the passed-in steam temperature
R_conv_1 = (1 / (2 * np.pi * r) * h_in)
R_cond = np.log(thk_1) / (2 * np.pi * K)
R_cond_2 = np.log(thk_2) / (2 * np.pi * K_2)
h_comb = (2 * np.pi * (r + thk_1 + thk_2)) * (
h_out + e * 5.670e-8 * (T_surf - T_out)
* (T_surf ** 2 + T_out ** 2))
Q = ((T_in - T_out) / (R_conv_1 + R_cond + R_cond_2 + (1 / h_comb)))
return Q
# Temperature at the inner surface of the iron cylinder
class T_profile_iron:
def T_in_II(self, Q_tot, r, K, thk, T_surf_out):
R_cond = np.log(r - thk) / (2 * np.pi * K)
T_surf_in = (-Q_tot * R_cond) + T_surf_out
return T_surf_in
env_conv = enviroment_convective()
Opt = Optimice()
Q_s = Q_determine()
T_iron = T_profile_iron()
T_int = env_conv.temp_steam_sat_reg(1553, 2318, 200, 220, 2000)
constraint_equal1 = {'type': 'eq', 'fun': Opt.constraint_BC1_BC2}
constraint = [constraint_equal1]
# T_suppose, Radius_max
arguments = (T_int, 0.025)
x0 = [0, 0]  # These initial values are extracted from a first solution given by the method
sol = minimize(Opt.objective_T, x0, method='SLSQP', args=arguments, constraints=constraint, options={'maxiter': 5})
# BIG NOTE: adjust the iteration limit to reach realistic values --> more restrictions are needed
# The result gives the maximum difference the system reaches between the assumed and the actual surface temperature
Q_1 = Q_s.Q_uncover(0.025, T_int, 15.6, 0.015, 30, 25 + 273.15, 5, 0.3, sol.fun)
T_in_iron = T_iron.T_in_II(Q_1, 0.025, 30, 0.015, (T_int - sol.fun) + 273.15)
########################################### Case 2 #####################################################################
constraint_equal1_II = {'type': 'eq', 'fun': Opt.constraint_BC1_BC2_II}
constraint_II = [constraint_equal1_II]
# T_suppose, Radius_max
arguments_II = (T_int, 0.025 + 0.015 + 0.012)
x0 = [0, 0]  # These initial values are extracted from a first solution given by the method
sol_II = minimize(Opt.objective_T, x0, method='SLSQP', args=arguments_II, constraints=constraint_II,
options={'maxiter': 5})
# BIG NOTE: adjust the iteration limit to reach realistic values --> more restrictions are needed
# The result gives the maximum difference the system reaches between the assumed and the actual surface temperature
Q_2 = Q_s.Q_cover(0.025, T_int, 15.6, 0.25, 0.015, 0.012, 30, 25 + 273.15, 5, 0.3, sol_II.fun)
print("========================= WITH UNCOVER ==============================================\n")
print("Temperature in the convective enviro. 1: {} [K]".format(T_int))
print("Temperature at the start of the cylinder: {} [K]".format(T_in_iron))
print("Temperature at the end of the cylinder: {} [K]".format((T_int - sol.fun) + 273.15))
print("Q for meter of cylinder: {} [W/m]\n".format(Q_1))
print("================================================================================")
print("========================= WITH COVER ==============================================\n")
print("Temperature in the convective enviro. 1: {} [K]".format(T_int))
print("Temperature at the end of the cylinder: {} [K]".format((T_int - sol_II.fun) + 273.15))
print("Q for meter of cylinder: {} [W/m]\n".format(Q_2))
print("================================================================================\n")
| 43.516779
| 120
| 0.543492
|
5fe2d14815e4a00c8ea1ad59d3c645f16e53b4ef
| 30,532
|
py
|
Python
|
spyder/widgets/fileswitcher.py
|
sharmalabs/spyder
|
7d2201699b6df6d4e72c73379dca0510f643bef9
|
[
"MIT"
] | null | null | null |
spyder/widgets/fileswitcher.py
|
sharmalabs/spyder
|
7d2201699b6df6d4e72c73379dca0510f643bef9
|
[
"MIT"
] | null | null | null |
spyder/widgets/fileswitcher.py
|
sharmalabs/spyder
|
7d2201699b6df6d4e72c73379dca0510f643bef9
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
# Standard library imports
from __future__ import print_function
import os
import os.path as osp
# Third party imports
from qtpy.QtCore import Signal, QEvent, QObject, QRegExp, QSize, Qt
from qtpy.QtGui import (QIcon, QRegExpValidator, QTextCursor)
from qtpy.QtWidgets import (QDialog, QHBoxLayout, QLabel, QLineEdit,
QListWidget, QListWidgetItem, QVBoxLayout,
QMainWindow)
# Local imports
from spyder.config.base import _
from spyder.py3compat import iteritems, to_text_string
from spyder.utils import icon_manager as ima
from spyder.utils.stringmatching import get_search_scores
from spyder.widgets.helperwidgets import HelperToolButton, HTMLDelegate
# --- Python Outline explorer helpers
def process_python_symbol_data(oedata):
"""Returns a list with line number, definition name, fold and token."""
symbol_list = []
for key in oedata:
val = oedata[key]
if val and key != 'found_cell_separators':
if val.is_class_or_function():
symbol_list.append((key, val.def_name, val.fold_level,
val.get_token()))
return sorted(symbol_list)
def get_python_symbol_icons(oedata):
"""Return a list of icons for oedata of a python file."""
class_icon = ima.icon('class')
method_icon = ima.icon('method')
function_icon = ima.icon('function')
private_icon = ima.icon('private1')
super_private_icon = ima.icon('private2')
symbols = process_python_symbol_data(oedata)
# line - 1, name, fold level
fold_levels = sorted(list(set([s[2] for s in symbols])))
parents = [None]*len(symbols)
icons = [None]*len(symbols)
indexes = []
parent = None
for level in fold_levels:
for index, item in enumerate(symbols):
line, name, fold_level, token = item
if index in indexes:
continue
if fold_level == level:
indexes.append(index)
parent = item
else:
parents[index] = parent
for index, item in enumerate(symbols):
parent = parents[index]
if item[-1] == 'def':
icons[index] = function_icon
elif item[-1] == 'class':
icons[index] = class_icon
else:
icons[index] = QIcon()
if parent is not None:
if parent[-1] == 'class':
if item[-1] == 'def' and item[1].startswith('__'):
icons[index] = super_private_icon
elif item[-1] == 'def' and item[1].startswith('_'):
icons[index] = private_icon
else:
icons[index] = method_icon
return icons
def shorten_paths(path_list, is_unsaved):
"""
Takes a list of paths and tries to "intelligently" shorten them all. The
aim is to make it clear to the user where the paths differ, as that is
likely what they care about. Note that this operates on a list of paths
not on individual paths.
If the path ends in an actual file name, it will be trimmed off.
"""
# TODO: at the end, if the path is too long, should do a more dumb kind of
# shortening, but not completely dumb.
# Convert the path strings to a list of tokens and start building the
# new_path using the drive
path_list = path_list[:] # Make a local copy
new_path_list = []
for ii, (path, is_unsav) in enumerate(zip(path_list, is_unsaved)):
if is_unsav:
new_path_list.append(_('unsaved file'))
path_list[ii] = None
else:
drive, path = osp.splitdrive(osp.dirname(path))
new_path_list.append(drive + osp.sep)
path_list[ii] = [part for part in path.split(osp.sep) if part]
def recurse_level(level_idx):
sep = os.sep
# If toks are all empty we need not have recursed here
if not any(level_idx.values()):
return
# Firstly, find the longest common prefix for all in the level
# s = len of longest common prefix
sample_toks = list(level_idx.values())[0]
if not sample_toks:
s = 0
else:
for s, sample_val in enumerate(sample_toks):
if not all(len(toks) > s and toks[s] == sample_val
for toks in level_idx.values()):
break
# Shorten longest common prefix
if s == 0:
short_form = ''
else:
if s == 1:
short_form = sample_toks[0]
elif s == 2:
short_form = sample_toks[0] + sep + sample_toks[1]
else:
short_form = "..." + sep + sample_toks[s-1]
for idx in level_idx:
new_path_list[idx] += short_form + sep
level_idx[idx] = level_idx[idx][s:]
# Group the remaining bit after the common prefix, shorten, and recurse
while level_idx:
k, group = 0, level_idx # k is length of the group's common prefix
while True:
# Abort if we've gone beyond end of one or more in the group
prospective_group = {idx: toks for idx, toks
in group.items() if len(toks) == k}
if prospective_group:
if k == 0: # we spit out the group with no suffix
group = prospective_group
break
# Only keep going if all n still match on the kth token
_, sample_toks = next(iteritems(group))
prospective_group = {idx: toks for idx, toks
in group.items()
if toks[k] == sample_toks[k]}
if len(prospective_group) == len(group) or k == 0:
group = prospective_group
k += 1
else:
break
_, sample_toks = next(iteritems(group))
if k == 0:
short_form = ''
elif k == 1:
short_form = sample_toks[0]
elif k == 2:
short_form = sample_toks[0] + sep + sample_toks[1]
else: # k > 2
short_form = sample_toks[0] + "..." + sep + sample_toks[k-1]
for idx in group.keys():
new_path_list[idx] += short_form + (sep if k > 0 else '')
del level_idx[idx]
recurse_level({idx: toks[k:] for idx, toks in group.items()})
recurse_level({i: pl for i, pl in enumerate(path_list) if pl})
return [path.rstrip(os.sep) for path in new_path_list]
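# Illustrative sketch (added, not part of the original module): shorten_paths()
# keeps just enough of each path to tell entries apart, and unsaved entries get
# a label; the paths below are placeholders.
def _example_shorten_paths():
    paths = ['/home/user/projects/spyder/widgets/a.py',
             '/home/user/projects/other/widgets/a.py',
             'untitled0.py']
    return shorten_paths(paths, is_unsaved=[False, False, True])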
class KeyPressFilter(QObject):
"""Use with `installEventFilter` to get up/down arrow key press signal."""
UP, DOWN = [-1, 1] # Step constants
sig_up_key_pressed = Signal()
sig_down_key_pressed = Signal()
def eventFilter(self, src, e):
if e.type() == QEvent.KeyPress:
if e.key() == Qt.Key_Up:
self.sig_up_key_pressed.emit()
elif e.key() == Qt.Key_Down:
self.sig_down_key_pressed.emit()
return super(KeyPressFilter, self).eventFilter(src, e)
class FilesFilterLine(QLineEdit):
"""QLineEdit used to filter files by name."""
# User has not clicked outside this widget
clicked_outside = False
def focusOutEvent(self, event):
"""
Detect when the focus goes out of this widget.
This is used to make the file switcher leave focus on the
last selected file by the user.
"""
self.clicked_outside = True
return super(QLineEdit, self).focusOutEvent(event)
class FileSwitcher(QDialog):
"""A Sublime-like file switcher."""
sig_goto_file = Signal(int, object)
# Constants that define the mode in which the list widget is working
# FILE_MODE is for a list of files, SYMBOL_MODE if for a list of symbols
# in a given file when using the '@' symbol.
FILE_MODE, SYMBOL_MODE = [1, 2]
MAX_WIDTH = 600
def __init__(self, parent, plugin, tabs, data, icon):
QDialog.__init__(self, parent)
# Variables
self.plugins_tabs = []
self.plugins_data = []
self.plugins_instances = []
self.add_plugin(plugin, tabs, data, icon)
self.plugin = None # Last plugin with focus
self.mode = self.FILE_MODE # By default start in this mode
self.initial_cursors = None # {fullpath: QCursor}
self.initial_path = None # Fullpath of initial active editor
self.initial_widget = None # Initial active editor
self.line_number = None # Selected line number in filer
self.is_visible = False # Is the switcher visible?
help_text = _("Press <b>Enter</b> to switch files or <b>Esc</b> to "
"cancel.<br><br>Type to filter filenames.<br><br>"
"Use <b>:number</b> to go to a line, e.g. "
"<b><code>main:42</code></b><br>"
"Use <b>@symbol_text</b> to go to a symbol, e.g. "
"<b><code>@init</code></b>"
"<br><br> Press <b>Ctrl+W</b> to close current tab.<br>")
# Either allow searching for a line number or a symbol but not both
regex = QRegExp("([A-Za-z0-9_]{0,100}@[A-Za-z0-9_]{0,100})|" +
"([A-Za-z0-9_]{0,100}:{0,1}[0-9]{0,100})")
# Widgets
self.edit = FilesFilterLine(self)
self.help = HelperToolButton()
self.list = QListWidget(self)
self.filter = KeyPressFilter()
regex_validator = QRegExpValidator(regex, self.edit)
# Widgets setup
self.setWindowFlags(Qt.Popup | Qt.FramelessWindowHint)
self.setWindowOpacity(0.95)
self.edit.installEventFilter(self.filter)
self.edit.setValidator(regex_validator)
self.help.setToolTip(help_text)
self.list.setItemDelegate(HTMLDelegate(self))
# Layout
edit_layout = QHBoxLayout()
edit_layout.addWidget(self.edit)
edit_layout.addWidget(self.help)
layout = QVBoxLayout()
layout.addLayout(edit_layout)
layout.addWidget(self.list)
self.setLayout(layout)
# Signals
self.rejected.connect(self.restore_initial_state)
self.filter.sig_up_key_pressed.connect(self.previous_row)
self.filter.sig_down_key_pressed.connect(self.next_row)
self.edit.returnPressed.connect(self.accept)
self.edit.textChanged.connect(self.setup)
self.list.itemSelectionChanged.connect(self.item_selection_changed)
self.list.clicked.connect(self.edit.setFocus)
# --- Properties
@property
def widgets(self):
widgets = []
for plugin in self.plugins_instances:
tabs = self.get_plugin_tabwidget(plugin)
widgets += [(tabs.widget(index), plugin) for
index in range(tabs.count())]
return widgets
@property
def line_count(self):
line_count = []
for widget in self.widgets:
try:
current_line_count = widget[0].get_line_count()
except AttributeError:
current_line_count = 0
line_count.append(current_line_count)
return line_count
@property
def save_status(self):
save_status = []
for da, icon in self.plugins_data:
save_status += [getattr(td, 'newly_created', False) for td in da]
return save_status
@property
def paths(self):
paths = []
for plugin in self.plugins_instances:
da = self.get_plugin_data(plugin)
paths += [getattr(td, 'filename', None) for td in da]
return paths
@property
def filenames(self):
filenames = []
for plugin in self.plugins_instances:
da = self.get_plugin_data(plugin)
filenames += [os.path.basename(getattr(td,
'filename',
None)) for td in da]
return filenames
@property
def icons(self):
icons = []
for da, icon in self.plugins_data:
icons += [icon for td in da]
return icons
@property
def current_path(self):
return self.paths_by_widget.get(self.get_widget(), None)
@property
def paths_by_widget(self):
widgets = [w[0] for w in self.widgets]
return dict(zip(widgets, self.paths))
@property
def widgets_by_path(self):
widgets = [w[0] for w in self.widgets]
return dict(zip(self.paths, widgets))
@property
def filter_text(self):
"""Get the normalized (lowecase) content of the filter text."""
return to_text_string(self.edit.text()).lower()
def set_search_text(self, _str):
self.edit.setText(_str)
def save_initial_state(self):
"""Save initial cursors and initial active widget."""
paths = self.paths
self.initial_widget = self.get_widget()
self.initial_cursors = {}
for i, editor in enumerate(self.widgets):
if editor is self.initial_widget:
self.initial_path = paths[i]
            # This try is needed to make the fileswitcher work with
            # plugins that do not have a textCursor.
try:
self.initial_cursors[paths[i]] = editor.textCursor()
except AttributeError:
pass
def accept(self):
self.is_visible = False
QDialog.accept(self)
self.list.clear()
def restore_initial_state(self):
"""Restores initial cursors and initial active editor."""
self.list.clear()
self.is_visible = False
widgets = self.widgets_by_path
if not self.edit.clicked_outside:
for path in self.initial_cursors:
cursor = self.initial_cursors[path]
if path in widgets:
self.set_editor_cursor(widgets[path], cursor)
if self.initial_widget in self.paths_by_widget:
index = self.paths.index(self.initial_path)
self.sig_goto_file.emit(index)
def set_dialog_position(self):
"""Positions the file switcher dialog."""
parent = self.parent()
geo = parent.geometry()
width = self.list.width() # This has been set in setup
left = parent.geometry().width()/2 - width/2
# Note: the +1 pixel on the top makes it look better
if isinstance(parent, QMainWindow):
top = (parent.toolbars_menu.geometry().height() +
parent.menuBar().geometry().height() + 1)
else:
top = self.plugins_tabs[0][0].tabBar().geometry().height() + 1
while parent:
geo = parent.geometry()
top += geo.top()
left += geo.left()
parent = parent.parent()
self.move(left, top)
def get_item_size(self, content):
"""
Get the max size (width and height) for the elements of a list of
strings as a QLabel.
"""
strings = []
if content:
for rich_text in content:
label = QLabel(rich_text)
label.setTextFormat(Qt.PlainText)
strings.append(label.text())
fm = label.fontMetrics()
return (max([fm.width(s) * 1.3 for s in strings]), fm.height())
def fix_size(self, content):
"""
Adjusts the width and height of the file switcher
based on the relative size of the parent and content.
"""
# Update size of dialog based on relative size of the parent
if content:
width, height = self.get_item_size(content)
# Width
parent = self.parent()
relative_width = parent.geometry().width() * 0.65
if relative_width > self.MAX_WIDTH:
relative_width = self.MAX_WIDTH
self.list.setMinimumWidth(relative_width)
# Height
if len(content) < 8:
max_entries = len(content)
else:
max_entries = 8
max_height = height * max_entries * 2.5
self.list.setMinimumHeight(max_height)
# Resize
self.list.resize(relative_width, self.list.height())
# --- Helper methods: List widget
def count(self):
"""Gets the item count in the list widget."""
return self.list.count()
def current_row(self):
"""Returns the current selected row in the list widget."""
return self.list.currentRow()
def set_current_row(self, row):
"""Sets the current selected row in the list widget."""
return self.list.setCurrentRow(row)
def select_row(self, steps):
"""Select row in list widget based on a number of steps with direction.
Steps can be positive (next rows) or negative (previous rows).
"""
row = self.current_row() + steps
if 0 <= row < self.count():
self.set_current_row(row)
def previous_row(self):
"""Select previous row in list widget."""
if self.mode == self.SYMBOL_MODE:
self.select_row(-1)
return
prev_row = self.current_row() - 1
if prev_row >= 0:
title = self.list.item(prev_row).text()
else:
title = ''
if prev_row == 0 and '</b></big><br>' in title:
self.list.scrollToTop()
elif '</b></big><br>' in title:
            # Skip one more row back because the previous row is a plugin title
self.select_row(-2)
else:
self.select_row(-1)
def next_row(self):
"""Select next row in list widget."""
if self.mode == self.SYMBOL_MODE:
self.select_row(+1)
return
next_row = self.current_row() + 1
if next_row < self.count():
if '</b></big><br>' in self.list.item(next_row).text():
                # Skip one more row ahead because the next row is a plugin title
self.select_row(+2)
else:
self.select_row(+1)
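    # Illustrative list layout (assumed example): when several plugins
    # contribute files, setup_file_list() interleaves plugin title items that
    # end in '</b></big><br>' with regular file items; the checks in
    # previous_row() and next_row() above skip over those title rows.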
def get_stack_index(self, stack_index, plugin_index):
"""Get the real index of the selected item."""
other_plugins_count = sum([other_tabs[0].count() \
for other_tabs in \
self.plugins_tabs[:plugin_index]])
real_index = stack_index - other_plugins_count
return real_index
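        # Illustrative example (assumed numbers): with two plugins whose tab
        # widgets hold 3 and 5 tabs respectively, a global stack_index of 4
        # that belongs to plugin_index 1 maps to real_index = 4 - 3 = 1 in
        # that plugin's own tab widget.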
# --- Helper methods: Widget
def get_plugin_data(self, plugin):
"""Get the data object of the plugin's current tab manager."""
# The data object is named "data" in the editor plugin while it is
# named "clients" in the notebook plugin.
try:
data = plugin.get_current_tab_manager().data
except AttributeError:
data = plugin.get_current_tab_manager().clients
return data
def get_plugin_tabwidget(self, plugin):
"""Get the tabwidget of the plugin's current tab manager."""
# The tab widget is named "tabs" in the editor plugin while it is
# named "tabwidget" in the notebook plugin.
try:
tabwidget = plugin.get_current_tab_manager().tabs
except AttributeError:
tabwidget = plugin.get_current_tab_manager().tabwidget
return tabwidget
def get_widget(self, index=None, path=None, tabs=None):
"""Get widget by index.
        If no tabs or index are specified, the current active widget is returned.
"""
if (index and tabs) or (path and tabs):
return tabs.widget(index)
elif self.plugin:
return self.get_plugin_tabwidget(self.plugin).currentWidget()
else:
return self.plugins_tabs[0][0].currentWidget()
def set_editor_cursor(self, editor, cursor):
"""Set the cursor of an editor."""
pos = cursor.position()
anchor = cursor.anchor()
        new_cursor = QTextCursor(editor.document())
        if pos == anchor:
            new_cursor.setPosition(pos)
        else:
            new_cursor.setPosition(anchor)
            new_cursor.setPosition(pos, QTextCursor.KeepAnchor)
        editor.setTextCursor(new_cursor)
def goto_line(self, line_number):
"""Go to specified line number in current active editor."""
if line_number:
line_number = int(line_number)
try:
self.plugin.go_to_line(line_number)
except AttributeError:
pass
# --- Helper methods: Outline explorer
def get_symbol_list(self):
"""Get the list of symbols present in the file."""
try:
oedata = self.get_widget().get_outlineexplorer_data()
except AttributeError:
oedata = {}
return oedata
# --- Handlers
def item_selection_changed(self):
"""List widget item selection change handler."""
row = self.current_row()
if self.count() and row >= 0:
if '</b></big><br>' in self.list.currentItem().text() and row == 0:
self.next_row()
if self.mode == self.FILE_MODE:
try:
stack_index = self.paths.index(self.filtered_path[row])
self.plugin = self.widgets[stack_index][1]
plugin_index = self.plugins_instances.index(self.plugin)
# Count the real index in the tabWidget of the
# current plugin
real_index = self.get_stack_index(stack_index,
plugin_index)
self.sig_goto_file.emit(real_index,
self.plugin.get_current_tab_manager())
self.goto_line(self.line_number)
try:
self.plugin.switch_to_plugin()
self.raise_()
except AttributeError:
# The widget using the fileswitcher is not a plugin
pass
self.edit.setFocus()
except ValueError:
pass
else:
line_number = self.filtered_symbol_lines[row]
self.goto_line(line_number)
def setup_file_list(self, filter_text, current_path):
"""Setup list widget content for file list display."""
short_paths = shorten_paths(self.paths, self.save_status)
paths = self.paths
icons = self.icons
results = []
trying_for_line_number = ':' in filter_text
# Get optional line number
if trying_for_line_number:
filter_text, line_number = filter_text.split(':')
if line_number == '':
line_number = None
# Get all the available filenames
scores = get_search_scores('', self.filenames,
template="<b>{0}</b>")
else:
line_number = None
# Get all available filenames and get the scores for
# "fuzzy" matching
scores = get_search_scores(filter_text, self.filenames,
template="<b>{0}</b>")
# Get max width to determine if shortpaths should be used
max_width = self.get_item_size(paths)[0]
self.fix_size(paths)
# Build the text that will appear on the list widget
for index, score in enumerate(scores):
text, rich_text, score_value = score
if score_value != -1:
text_item = "<big style='color:{0:}'>{1:}</big>".format(
ima.MAIN_FG_COLOR, rich_text.replace('&', ''))
if trying_for_line_number:
text_item += " [{0:} {1:}]".format(self.line_count[index],
_("lines"))
if max_width > self.list.width():
text_item += u"<br><i style='color:{0:}'>{1:}</i>".format(
ima.MAIN_FG_COLOR, short_paths[index])
else:
text_item += u"<br><i style='color:{0:}'>{1:}</i>".format(
ima.MAIN_FG_COLOR, paths[index])
if (trying_for_line_number and self.line_count[index] != 0 or
not trying_for_line_number):
results.append((score_value, index, text_item))
# Sort the obtained scores and populate the list widget
self.filtered_path = []
plugin = None
for result in sorted(results):
index = result[1]
path = paths[index]
icon = icons[index]
text = ''
try:
title = self.widgets[index][1].get_plugin_title().split(' - ')
if plugin != title[0]:
plugin = title[0]
text += ("<br><big style='color:{0:}'>"
"<b>{1:}</b></big><br>").format(ima.MAIN_FG_COLOR,
plugin)
item = QListWidgetItem(text)
item.setToolTip(path)
item.setSizeHint(QSize(0, 25))
item.setFlags(Qt.ItemIsEditable)
self.list.addItem(item)
self.filtered_path.append(path)
except:
# The widget using the fileswitcher is not a plugin
pass
text = ''
text += result[-1]
item = QListWidgetItem(icon, text)
item.setToolTip(path)
item.setSizeHint(QSize(0, 25))
self.list.addItem(item)
self.filtered_path.append(path)
# To adjust the delegate layout for KDE themes
self.list.files_list = True
# Move selected item in list accordingly and update list size
if current_path in self.filtered_path:
self.set_current_row(self.filtered_path.index(current_path))
elif self.filtered_path:
self.set_current_row(0)
# If a line number is searched look for it
self.line_number = line_number
self.goto_line(line_number)
def setup_symbol_list(self, filter_text, current_path):
"""Setup list widget content for symbol list display."""
# Get optional symbol name
filter_text, symbol_text = filter_text.split('@')
# Fetch the Outline explorer data, get the icons and values
oedata = self.get_symbol_list()
icons = get_python_symbol_icons(oedata)
# The list of paths here is needed in order to have the same
# point of measurement for the list widget size as in the file list
# See issue 4648
paths = self.paths
# Update list size
self.fix_size(paths)
symbol_list = process_python_symbol_data(oedata)
line_fold_token = [(item[0], item[2], item[3]) for item in symbol_list]
choices = [item[1] for item in symbol_list]
scores = get_search_scores(symbol_text, choices, template="<b>{0}</b>")
# Build the text that will appear on the list widget
results = []
lines = []
self.filtered_symbol_lines = []
for index, score in enumerate(scores):
text, rich_text, score_value = score
line, fold_level, token = line_fold_token[index]
lines.append(text)
if score_value != -1:
results.append((score_value, line, text, rich_text,
fold_level, icons[index], token))
template = '{{0}}<span style="color:{0}">{{1}}</span>'.format(
ima.MAIN_FG_COLOR)
for (score, line, text, rich_text, fold_level, icon,
token) in sorted(results):
fold_space = ' '*(fold_level)
line_number = line + 1
self.filtered_symbol_lines.append(line_number)
textline = template.format(fold_space, rich_text)
item = QListWidgetItem(icon, textline)
item.setSizeHint(QSize(0, 16))
self.list.addItem(item)
# To adjust the delegate layout for KDE themes
self.list.files_list = False
# Select edit line when using symbol search initially.
# See issue 5661
self.edit.setFocus()
# Move selected item in list accordingly
# NOTE: Doing this is causing two problems:
# 1. It makes the cursor to auto-jump to the last selected
# symbol after opening or closing a different file
# 2. It moves the cursor to the first symbol by default,
# which is very distracting.
# That's why this line is commented!
# self.set_current_row(0)
def setup(self):
"""Setup list widget content."""
if len(self.plugins_tabs) == 0:
self.close()
return
self.list.clear()
current_path = self.current_path
filter_text = self.filter_text
# Get optional line or symbol to define mode and method handler
trying_for_symbol = ('@' in self.filter_text)
if trying_for_symbol:
self.mode = self.SYMBOL_MODE
self.setup_symbol_list(filter_text, current_path)
else:
self.mode = self.FILE_MODE
self.setup_file_list(filter_text, current_path)
# Set position according to size
self.set_dialog_position()
def show(self):
"""
Override Qt method to force an update of the fileswitcher before
showing it. See Issue #5317 and PR #5389.
"""
self.setup()
super(FileSwitcher, self).show()
def add_plugin(self, plugin, tabs, data, icon):
"""Add a plugin to display its files."""
self.plugins_tabs.append((tabs, plugin))
self.plugins_data.append((data, icon))
self.plugins_instances.append(plugin)
| 37.325183
| 82
| 0.566127
|
bbb0af8e20a48242e7cfbe7031a3b7969faa2459
| 5,339
|
py
|
Python
|
twisted/cred/portal.py
|
ioggstream/twisted
|
34f9b1e3f097685839000c656332c66ee85be5d8
|
[
"Unlicense",
"MIT"
] | 7
|
2015-04-28T13:26:11.000Z
|
2020-02-09T17:01:04.000Z
|
twisted/cred/portal.py
|
ioggstream/twisted
|
34f9b1e3f097685839000c656332c66ee85be5d8
|
[
"Unlicense",
"MIT"
] | 4
|
2017-02-19T23:58:13.000Z
|
2019-11-01T15:31:22.000Z
|
twisted/cred/portal.py
|
ioggstream/twisted
|
34f9b1e3f097685839000c656332c66ee85be5d8
|
[
"Unlicense",
"MIT"
] | 6
|
2017-02-13T09:11:02.000Z
|
2021-06-29T11:22:18.000Z
|
# -*- test-case-name: twisted.test.test_newcred -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
The point of integration of application and authentication.
"""
from twisted.internet import defer
from twisted.internet.defer import maybeDeferred
from twisted.python import failure, reflect
from twisted.cred import error
from zope.interface import providedBy, Interface
class IRealm(Interface):
"""
The realm connects application-specific objects to the
authentication system.
"""
def requestAvatar(avatarId, mind, *interfaces):
"""
Return avatar which provides one of the given interfaces.
@param avatarId: a string that identifies an avatar, as returned by
L{ICredentialsChecker.requestAvatarId<twisted.cred.checkers.ICredentialsChecker.requestAvatarId>}
(via a Deferred). Alternatively, it may be
C{twisted.cred.checkers.ANONYMOUS}.
@param mind: usually None. See the description of mind in
L{Portal.login}.
@param interfaces: the interface(s) the returned avatar should
implement, e.g. C{IMailAccount}. See the description of
L{Portal.login}.
@returns: a deferred which will fire a tuple of (interface,
avatarAspect, logout), or the tuple itself. The interface will be
one of the interfaces passed in the 'interfaces' argument. The
'avatarAspect' will implement that interface. The 'logout' object
is a callable which will detach the mind from the avatar.
"""
class Portal:
"""
A mediator between clients and a realm.
A portal is associated with one Realm and zero or more credentials checkers.
When a login is attempted, the portal finds the appropriate credentials
checker for the credentials given, invokes it, and if the credentials are
valid, retrieves the appropriate avatar from the Realm.
This class is not intended to be subclassed. Customization should be done
in the realm object and in the credentials checker objects.
"""
def __init__(self, realm, checkers=()):
"""
Create a Portal to a L{IRealm}.
"""
self.realm = realm
self.checkers = {}
for checker in checkers:
self.registerChecker(checker)
def listCredentialsInterfaces(self):
"""
Return list of credentials interfaces that can be used to login.
"""
return self.checkers.keys()
def registerChecker(self, checker, *credentialInterfaces):
if not credentialInterfaces:
credentialInterfaces = checker.credentialInterfaces
for credentialInterface in credentialInterfaces:
self.checkers[credentialInterface] = checker
def login(self, credentials, mind, *interfaces):
"""
@param credentials: an implementor of
L{twisted.cred.credentials.ICredentials}
@param mind: an object which implements a client-side interface for
your particular realm. In many cases, this may be None, so if the
word 'mind' confuses you, just ignore it.
@param interfaces: list of interfaces for the perspective that the mind
wishes to attach to. Usually, this will be only one interface, for
example IMailAccount. For highly dynamic protocols, however, this
may be a list like (IMailAccount, IUserChooser, IServiceInfo). To
expand: if we are speaking to the system over IMAP, any information
that will be relayed to the user MUST be returned as an
IMailAccount implementor; IMAP clients would not be able to
understand anything else. Any information about unusual status
would have to be relayed as a single mail message in an
otherwise-empty mailbox. However, in a web-based mail system, or a
PB-based client, the ``mind'' object inside the web server
(implemented with a dynamic page-viewing mechanism such as a
Twisted Web Resource) or on the user's client program may be
intelligent enough to respond to several ``server''-side
interfaces.
@return: A deferred which will fire a tuple of (interface,
avatarAspect, logout). The interface will be one of the interfaces
passed in the 'interfaces' argument. The 'avatarAspect' will
implement that interface. The 'logout' object is a callable which
will detach the mind from the avatar. It must be called when the
user has conceptually disconnected from the service. Although in
some cases this will not be in connectionLost (such as in a
web-based session), it will always be at the end of a user's
interactive session.
"""
for i in self.checkers:
if i.providedBy(credentials):
return maybeDeferred(self.checkers[i].requestAvatarId, credentials
).addCallback(self.realm.requestAvatar, mind, *interfaces
)
ifac = providedBy(credentials)
return defer.fail(failure.Failure(error.UnhandledCredentials(
"No checker for %s" % ', '.join(map(reflect.qual, ifac)))))
| 43.762295
| 109
| 0.664731
|
8547d70127c1b02e8ff2087b2c3735f6be111b0c
| 123
|
py
|
Python
|
examples/docker/app/metrics/testmetric.py
|
defreng/python-pagr
|
352623d8c1a5b8d59f9c223c4f6cecc7a585bf9a
|
[
"MIT"
] | null | null | null |
examples/docker/app/metrics/testmetric.py
|
defreng/python-pagr
|
352623d8c1a5b8d59f9c223c4f6cecc7a585bf9a
|
[
"MIT"
] | null | null | null |
examples/docker/app/metrics/testmetric.py
|
defreng/python-pagr
|
352623d8c1a5b8d59f9c223c4f6cecc7a585bf9a
|
[
"MIT"
] | null | null | null |
class TestMetric:
def __init__(self, services):
pass
def run(self):
print('Executing Testmetric')
| 17.571429
| 37
| 0.617886
|
9b33a3d47c8155ed85a6b9762a0303551ba5a627
| 2,854
|
py
|
Python
|
scripts/RunCosmicShear.py
|
lshuns/CosmicShearRB
|
84d682fc09dc8be0e12b82894cfb2c2c272b616b
|
[
"MIT"
] | null | null | null |
scripts/RunCosmicShear.py
|
lshuns/CosmicShearRB
|
84d682fc09dc8be0e12b82894cfb2c2c272b616b
|
[
"MIT"
] | null | null | null |
scripts/RunCosmicShear.py
|
lshuns/CosmicShearRB
|
84d682fc09dc8be0e12b82894cfb2c2c272b616b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 10 15:49:41 2020
@author: ssli
Running the module of cosmic shear signal prediction
Please modify necessary configurations in Cosmo/cosmic_shear_signal/input
"""
import time
import os
import sys
# Self-defined package
sys.path.insert(0,os.path.realpath('../Cosmo/cosmic_shear_signal'))
import CosmicShear, initialise
# +++++++++++++++++++ General setting
# parameter files
name_param_files = ['kv450_cf_best.param', 'kv450_cf_Planck.param']
# Set the output folder
out_folder = 'theory_vector'
out_suffix_s = ['KV450_best', 'Planck']
# path for all necessary input
paths = {}
# cosmological code path (CLASS)
paths['cosmo'] = '/net/eemmeer/data1/ssli/class_public'
# father path for all the input and output
paths['data'] = '/disks/shear15/ssli/CosmicShear'
# parameter/configure file path
paths['param'] = '/net/raam/data1/surfdrive_ssli/Projects/6CosmicShear_RB/CosmicShearRB/Cosmo/cosmic_shear_signal/input'
# +++++++++++++++++++ Running scripts
Start = time.time()
# Initialisation
# class: data and cosmo created
cosmo, data = initialise.initialise(paths)
for i in range(len(name_param_files)):
# parameter file (with cosmological and nuisance parameters)
name_param_file = name_param_files[i]
data.read_file(name_param_file, 'data', field='', separate=False)
# output folder
data.conf['out_folder'] = out_folder
data.conf['out_suffix'] = out_suffix_s[i]
# # ++++++++++++++++++++++++++++++++++++++++++ whole
# # name of configure files
# name_conf_file = 'kv450_cf.conf'
# # data filled with input files
# # configure file (with configure and hardly changed setting parameters)
# data.read_file(name_conf_file, 'data', field='', separate=False)
# # cosmic shear signal calculation
# CosmicShear.CSsignalFunc(data, cosmo, save_theory_vector=True)
# ++++++++++++++++++++++++++++++++++++++++++ red
# name of parameter/configure files
name_conf_file = 'kv450_cf_red.conf'
# data filled with input files
# configure file (with configure and hardly changed setting parameters)
data.read_file(name_conf_file, 'data', field='', separate=False)
# cosmic shear signal calculation
CosmicShear.CSsignalFunc(data, cosmo, save_theory_vector=True)
# ++++++++++++++++++++++++++++++++++++++++++ blue
# name of parameter/configure files
name_conf_file = 'kv450_cf_blue.conf'
# data filled with input files
# configure file (with configure and hardly changed setting parameters)
data.read_file(name_conf_file, 'data', field='', separate=False)
# cosmic shear signal calculation
CosmicShear.CSsignalFunc(data, cosmo, save_theory_vector=True)
print("All finished in", time.time()-Start)
# eemmeer (2020-04-23)
# ('All finished in', 23.075681924819946)
| 30.688172
| 121
| 0.689208
|
d1976f4e8d45b6060179a89540c740f71dc82f0e
| 2,039
|
py
|
Python
|
easyadmin/pages/admin_table.py
|
codemation/easyadmin
|
0f305a92174a151fe08b2c8db4cf893a34055407
|
[
"MIT"
] | 6
|
2021-03-30T01:28:15.000Z
|
2021-12-18T00:51:14.000Z
|
easyadmin/pages/admin_table.py
|
codemation/easyadmin
|
0f305a92174a151fe08b2c8db4cf893a34055407
|
[
"MIT"
] | 1
|
2021-05-24T01:10:35.000Z
|
2021-05-24T10:17:13.000Z
|
easyadmin/pages/admin_table.py
|
codemation/easyadmin
|
0f305a92174a151fe08b2c8db4cf893a34055407
|
[
"MIT"
] | null | null | null |
from .admin import get_admin_page
def get_table_body(
name: str,
table_headers_html: str,
table_rows_html: str,
above: str = '',
below: str = '',
):
return f"""
<!-- Page Heading -->
{above}
<!-- DataTales Example -->
<div class="card shadow mb-4">
<div class="card-header py-3">
<h6 class="m-0 font-weight-bold text-primary">{name}</h6>
</div>
<div class="card-body">
<div class="table-responsive">
<table class="table table-bordered" id="dataTable" width="100%" cellspacing="0">
<thead>
<tr>
{table_headers_html}
</tr>
</thead>
<tfoot>
<tr>
{table_headers_html}
</tr>
</tfoot>
<tbody>
{table_rows_html}
</tbody>
</table>
</div>
</div>
</div>
{below}
"""
def get_table_page(
name: str,
data: list,
sidebar: str,
current_user: str,
modals: str = "",
above: str = "",
below: str = "",
root_path: str = '/',
google: str = '',
):
navbar = sidebar
table_headers = [th for th in data[0]]
table_headers_html = ''.join(
[f'<th>{th}</th>\n' for th in table_headers]
)
table_rows = [list(tr.values()) for tr in data]
table_rows_html = []
for tr in table_rows:
row = [f"<td>{td}</td>\n" for td in tr]
row_html = ''.join(row)
table_rows_html.append(f"""
<tr>\n
{row_html}
</tr>
""")
table_rows_html = ''.join(table_rows_html)
admin_page = get_admin_page(
name=name,
sidebar=sidebar,
body=get_table_body(
name,
table_headers_html,
table_rows_html,
above=above,
below=below
),
current_user=current_user,
modals=modals,
root_path=root_path,
google=google
)
return admin_page
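# Illustrative call (assumed data; the final HTML also depends on the
# get_admin_page template imported above):
#   rows = [{'id': 1, 'user': 'alice'}, {'id': 2, 'user': 'bob'}]
#   html = get_table_page('Users', rows, sidebar='', current_user='admin')
# Column headers come from the keys of the first dict and each dict's values
# become one row of the table body.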
| 23.709302
| 92
| 0.491908
|
aad9b3c42f839efa859efeda30f87ff4a9b7ad7a
| 124
|
py
|
Python
|
crash_course/ch02/exec/name_cases.py
|
dantin/python-by-example
|
5769c7a332ebd60fd54e477b6813f2f2a0f3f37f
|
[
"BSD-3-Clause"
] | null | null | null |
crash_course/ch02/exec/name_cases.py
|
dantin/python-by-example
|
5769c7a332ebd60fd54e477b6813f2f2a0f3f37f
|
[
"BSD-3-Clause"
] | null | null | null |
crash_course/ch02/exec/name_cases.py
|
dantin/python-by-example
|
5769c7a332ebd60fd54e477b6813f2f2a0f3f37f
|
[
"BSD-3-Clause"
] | null | null | null |
name = 'david ding'
print('lowercase:', name.lower())
print('uppercase:', name.upper())
print('titlecase:', name.title())
| 17.714286
| 33
| 0.66129
|
4fa24060276f935262300ba79923e9dd008767ca
| 5,333
|
py
|
Python
|
card_detection_module/yolov5/TabDetectDewarp/yolov5/utils/experimental.py
|
nhatnxn/ID_Passport-OCR
|
78322ec2b9648d0b027326dced7c4aec967bcab3
|
[
"MIT"
] | 1
|
2021-06-30T11:01:25.000Z
|
2021-06-30T11:01:25.000Z
|
card_detection_module/yolov5/TabDetectDewarp/yolov5/utils/experimental.py
|
nhatnxn/ID_Passport-OCR
|
78322ec2b9648d0b027326dced7c4aec967bcab3
|
[
"MIT"
] | null | null | null |
card_detection_module/yolov5/TabDetectDewarp/yolov5/utils/experimental.py
|
nhatnxn/ID_Passport-OCR
|
78322ec2b9648d0b027326dced7c4aec967bcab3
|
[
"MIT"
] | null | null | null |
# YOLOv5 experimental modules
import numpy as np
import torch
import torch.nn as nn
import sys
sys.path.append('..')
from models.common import Conv, DWConv
from .google_utils import attempt_download
class CrossConv(nn.Module):
# Cross Convolution Downsample
def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False):
# ch_in, ch_out, kernel, stride, groups, expansion, shortcut
super(CrossConv, self).__init__()
c_ = int(c2 * e) # hidden channels
self.cv1 = Conv(c1, c_, (1, k), (1, s))
self.cv2 = Conv(c_, c2, (k, 1), (s, 1), g=g)
self.add = shortcut and c1 == c2
def forward(self, x):
return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))
class Sum(nn.Module):
# Weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070
def __init__(self, n, weight=False): # n: number of inputs
super(Sum, self).__init__()
self.weight = weight # apply weights boolean
self.iter = range(n - 1) # iter object
if weight:
self.w = nn.Parameter(-torch.arange(1., n) / 2, requires_grad=True) # layer weights
def forward(self, x):
y = x[0] # no weight
if self.weight:
w = torch.sigmoid(self.w) * 2
for i in self.iter:
y = y + x[i + 1] * w[i]
else:
for i in self.iter:
y = y + x[i + 1]
return y
class GhostConv(nn.Module):
# Ghost Convolution https://github.com/huawei-noah/ghostnet
def __init__(self, c1, c2, k=1, s=1, g=1, act=True): # ch_in, ch_out, kernel, stride, groups
super(GhostConv, self).__init__()
c_ = c2 // 2 # hidden channels
self.cv1 = Conv(c1, c_, k, s, None, g, act)
self.cv2 = Conv(c_, c_, 5, 1, None, c_, act)
def forward(self, x):
y = self.cv1(x)
return torch.cat([y, self.cv2(y)], 1)
class GhostBottleneck(nn.Module):
# Ghost Bottleneck https://github.com/huawei-noah/ghostnet
def __init__(self, c1, c2, k=3, s=1): # ch_in, ch_out, kernel, stride
super(GhostBottleneck, self).__init__()
c_ = c2 // 2
self.conv = nn.Sequential(GhostConv(c1, c_, 1, 1), # pw
DWConv(c_, c_, k, s, act=False) if s == 2 else nn.Identity(), # dw
GhostConv(c_, c2, 1, 1, act=False)) # pw-linear
self.shortcut = nn.Sequential(DWConv(c1, c1, k, s, act=False),
Conv(c1, c2, 1, 1, act=False)) if s == 2 else nn.Identity()
def forward(self, x):
return self.conv(x) + self.shortcut(x)
class MixConv2d(nn.Module):
# Mixed Depthwise Conv https://arxiv.org/abs/1907.09595
def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True):
super(MixConv2d, self).__init__()
groups = len(k)
if equal_ch: # equal c_ per group
i = torch.linspace(0, groups - 1E-6, c2).floor() # c2 indices
c_ = [(i == g).sum() for g in range(groups)] # intermediate channels
else: # equal weight.numel() per group
b = [c2] + [0] * groups
a = np.eye(groups + 1, groups, k=-1)
a -= np.roll(a, 1, axis=1)
a *= np.array(k) ** 2
a[0] = 1
c_ = np.linalg.lstsq(a, b, rcond=None)[0].round() # solve for equal weight indices, ax = b
self.m = nn.ModuleList([nn.Conv2d(c1, int(c_[g]), k[g], s, k[g] // 2, bias=False) for g in range(groups)])
self.bn = nn.BatchNorm2d(c2)
self.act = nn.LeakyReLU(0.1, inplace=True)
def forward(self, x):
return x + self.act(self.bn(torch.cat([m(x) for m in self.m], 1)))
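# Illustrative use of MixConv2d (assumed shapes): mix 1x1, 3x3 and 5x5 kernels
# over 16 channels; the residual add in forward() requires c1 == c2.
#   m = MixConv2d(16, 16, k=(1, 3, 5), s=1)
#   y = m(torch.randn(2, 16, 32, 32))  # -> shape (2, 16, 32, 32)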
class Ensemble(nn.ModuleList):
# Ensemble of models
def __init__(self):
super(Ensemble, self).__init__()
def forward(self, x, augment=False):
y = []
for module in self:
y.append(module(x, augment)[0])
# y = torch.stack(y).max(0)[0] # max ensemble
# y = torch.stack(y).mean(0) # mean ensemble
y = torch.cat(y, 1) # nms ensemble
return y, None # inference, train output
def attempt_load(weights, map_location=None, inplace=True):
from models.yolo import Detect, Model
# Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a
model = Ensemble()
for w in weights if isinstance(weights, list) else [weights]:
ckpt = torch.load(attempt_download(w), map_location=map_location) # load
model.append(ckpt['ema' if ckpt.get('ema') else 'model'].float().fuse().eval()) # FP32 model
# Compatibility updates
for m in model.modules():
if type(m) in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU, Detect, Model]:
m.inplace = inplace # pytorch 1.7.0 compatibility
elif type(m) is Conv:
m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatibility
if len(model) == 1:
return model[-1] # return model
else:
print(f'Ensemble created with {weights}\n')
for k in ['names']:
setattr(model, k, getattr(model[-1], k))
model.stride = model[torch.argmax(torch.tensor([m.stride.max() for m in model])).int()].stride # max stride
return model # return ensemble
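# Illustrative use of attempt_load (assumed checkpoint names):
#   model = attempt_load('yolov5s.pt', map_location='cpu')        # single model
#   ensemble = attempt_load(['yolov5s.pt', 'yolov5m.pt'], 'cpu')  # NMS ensemble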
| 38.092857
| 116
| 0.574161
|
3f3e5441cd5e6990fec7b1d369ae3c881bdfd53d
| 4,424
|
py
|
Python
|
wagtail_checklist/serializers.py
|
StocksDigital/wagtail-checklist
|
7883a397c09b3c86834de2428631c9374341682a
|
[
"MIT"
] | null | null | null |
wagtail_checklist/serializers.py
|
StocksDigital/wagtail-checklist
|
7883a397c09b3c86834de2428631c9374341682a
|
[
"MIT"
] | null | null | null |
wagtail_checklist/serializers.py
|
StocksDigital/wagtail-checklist
|
7883a397c09b3c86834de2428631c9374341682a
|
[
"MIT"
] | null | null | null |
import re
from django.contrib.contenttypes.models import ContentType
from rest_framework import serializers
from wagtail.core.models import Page
from .rules import check_form_rules, check_rules
class PageActions:
EDIT = 'EDIT'
CREATE = 'CREATE'
class ChecklistSerializer(serializers.Serializer):
EDIT_REGEX = r'/pages/(?P<page_id>\d+)/edit/$'
CREATE_REGEX = r'/pages/add/(?P<app_name>\w+)/(?P<model_name>\w+)/(?P<parent_id>\d+)/$'
url = serializers.URLField()
action = serializers.ChoiceField([PageActions.EDIT, PageActions.CREATE])
page = serializers.JSONField()
def validate(self, data):
"""
Ensure that the provided URL is from the Wagtail edit or create views, so that
we can extract some required information using EDIT_REGEX / CREATE_REGEX
"""
validated = super().validate(data)
action = validated['action']
is_valid_url = (
(action == PageActions.EDIT and re.search(self.EDIT_REGEX, validated['url'])) or
(action == PageActions.CREATE and re.search(self.CREATE_REGEX, validated['url']))
)
if not is_valid_url:
raise serializers.ValidationError('Invalid URL for action {}'.format(action))
return validated
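    # Illustrative URLs (assumed examples): EDIT_REGEX matches admin URLs such
    # as "/admin/pages/42/edit/" (page_id=42), while CREATE_REGEX matches
    # "/admin/pages/add/blog/blogpage/3/" (app_name='blog',
    # model_name='blogpage', parent_id=3).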
def create(self, validated_data):
"""
Construct a Page instance and validate the instance against the built-in Wagtail
form, as well as any rules that are registered.
"""
# Use information encoded in the URL to build a page instance.
if validated_data['action'] == PageActions.EDIT:
page_class, page, parent_page = self.get_edit_page(validated_data)
else:
page_class, page, parent_page = self.get_create_page(validated_data)
# Construct and validate a model-specific form so that we can add Wagtail's built-in
# validation to our response.
edit_handler = page_class.get_edit_handler()
form_class = edit_handler.get_form_class()
form = form_class(validated_data['page'], instance=page, parent_page=parent_page)
# Build a list of Wagtail built-in form errors
form_rules = check_form_rules(page_class, form)
# Build a list of custom rules
error_rules, warning_rules = check_rules(page_class, page, parent_page)
rule_lists = [
('ERROR', form_rules),
('ERROR', error_rules),
('WARNING', warning_rules)
]
# Build the checklist from the rule lists
checklist = {}
for error_type, rule_list in rule_lists:
for rule in rule_list:
serialized_rule = {
'isValid': rule.is_valid,
'hasError': rule.has_error,
'type': error_type,
'message': rule.message,
}
name = rule.name.lower().replace('_', ' ')
try:
checklist[name].append(serialized_rule)
except (KeyError, AttributeError):
checklist[name] = [serialized_rule]
return {'checklist': checklist}
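    # Illustrative return value from create() (assumed example):
    #   {'checklist': {'title': [{'isValid': False, 'hasError': True,
    #                             'type': 'ERROR',
    #                             'message': 'This field is required.'}]}}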
def get_edit_page(self, validated_data):
"""
        Construct a Page instance using data from the Wagtail editor's 'edit' page.
Use the Page pk to fetch the instance from the database.
"""
url_data = re.search(self.EDIT_REGEX, validated_data['url']).groupdict()
page = Page.objects.get(pk=url_data['page_id']).specific
content_type = ContentType.objects.get_for_model(page)
page_class = content_type.model_class()
parent_page = page.get_parent()
if not parent_page:
raise serializers.ValidationError('Page must have a parent')
return page_class, page, parent_page
def get_create_page(self, validated_data):
"""
        Construct a Page instance using data from the Wagtail editor's 'add' page.
Use the app name and model name to construct a new Page model.
"""
url_data = re.search(self.CREATE_REGEX, validated_data['url']).groupdict()
content_type = ContentType.objects.get_by_natural_key(url_data['app_name'], url_data['model_name'])
page_class = content_type.model_class()
parent_page = Page.objects.get(pk=url_data['parent_id']).specific
page = page_class()
return page_class, page, parent_page
| 39.855856
| 107
| 0.635624
|
cf051fa5c0ef57f45cfbf34d85e8d24b4bbed3f5
| 899
|
py
|
Python
|
brain_games/games/progression.py
|
ilitili/python-project-lvl1
|
a2f5e836f5ce6992cdf543beb06ff20555025cc8
|
[
"MIT"
] | 2
|
2020-08-19T09:34:18.000Z
|
2020-08-30T20:43:15.000Z
|
brain_games/games/progression.py
|
ilitili/python-project-lvl1
|
a2f5e836f5ce6992cdf543beb06ff20555025cc8
|
[
"MIT"
] | 11
|
2020-08-25T20:46:35.000Z
|
2021-07-30T03:40:55.000Z
|
brain_games/games/progression.py
|
ilitili/python-project-lvl1
|
a2f5e836f5ce6992cdf543beb06ff20555025cc8
|
[
"MIT"
] | 11
|
2020-09-09T06:06:46.000Z
|
2022-01-08T10:08:46.000Z
|
"""Progression game engine."""
import random
GAME_DESCRIPTION = 'What number is missing in the progression?'
START_MIN = 1
START_MAX = 20
STEP_MIN = 1
STEP_MAX = 10
PROGERSSION_LENGTH = 10
def get_challenge():
"""Progression game Q&A generation.
    It forms an arithmetic progression,
    replacing one of the numbers with two dots ('..').
Returns:
question{str} : arithmetic progression;
answer{str} : hidden number
"""
start = random.randint(START_MIN, START_MAX)
step = random.randint(STEP_MIN, STEP_MAX)
progression = [(start + (ind * step))
for ind in range(PROGERSSION_LENGTH)]
hidden_element_index = random.randint(0, PROGERSSION_LENGTH - 1)
answer = str(progression[hidden_element_index])
progression[hidden_element_index] = '..'
question = ' '.join(str(ind) for ind in progression)
return (question, answer)
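# Illustrative output of get_challenge() (assumed random draws: start=3,
# step=2, hidden index 3):
#   question -> '3 5 7 .. 11 13 15 17 19 21'
#   answer   -> '9'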
| 27.242424
| 68
| 0.68743
|
72690bba5957e632c271e2c6696bd907cccc3c32
| 5,835
|
py
|
Python
|
deca/gui/dataviewwidget.py
|
kk49/deca
|
8a03ea5d1b7ae0d787638f1797b6e2cb46de4bae
|
[
"MIT"
] | 50
|
2019-06-05T04:01:04.000Z
|
2022-03-05T14:56:43.000Z
|
deca/gui/dataviewwidget.py
|
kk49/deca
|
8a03ea5d1b7ae0d787638f1797b6e2cb46de4bae
|
[
"MIT"
] | 115
|
2019-03-27T13:34:00.000Z
|
2022-03-11T23:43:12.000Z
|
deca/gui/dataviewwidget.py
|
kk49/deca
|
8a03ea5d1b7ae0d787638f1797b6e2cb46de4bae
|
[
"MIT"
] | 13
|
2020-01-25T01:15:49.000Z
|
2022-02-08T02:20:05.000Z
|
from typing import Optional, List
from deca.db_processor import VfsNode
from deca.db_view import VfsView
from deca.db_processor import VfsNode, VfsProcessor
from deca.ff_types import *
from deca.gui.viewer_adf import DataViewerAdf
from deca.gui.viewer_rtpc import DataViewerRtpc
from deca.gui.viewer_image import DataViewerImage
from deca.gui.viewer_raw import DataViewerRaw
from deca.gui.viewer_info import DataViewerInfo
from deca.gui.viewer_text import DataViewerText
from deca.gui.viewer_sarc import DataViewerSarc
from deca.gui.viewer_obc import DataViewerObc
from deca.gui.deca_interfaces import IVfsViewSrc
from PySide2.QtCore import Signal
from PySide2.QtWidgets import QSizePolicy, QWidget, QVBoxLayout, QTabWidget
class DataViewWidget(QWidget):
def __init__(self, *args, **kwargs):
QWidget.__init__(self, *args, **kwargs)
self.data_source: Optional[IVfsViewSrc] = None
self.tab_info = DataViewerInfo()
self.tab_raw = DataViewerRaw()
self.tab_text = DataViewerText()
self.tab_sarc = DataViewerSarc()
self.tab_image = DataViewerImage()
self.tab_adf = DataViewerAdf()
self.tab_adf_gdc = DataViewerAdf()
self.tab_rtpc = DataViewerRtpc()
self.tab_obc = DataViewerObc()
self.tab_widget = QTabWidget()
self.tab_info_index = self.tab_widget.addTab(self.tab_info, 'Info')
self.tab_raw_index = self.tab_widget.addTab(self.tab_raw, 'Raw/Hex')
self.tab_text_index = self.tab_widget.addTab(self.tab_text, 'Text')
self.tab_sarc_index = self.tab_widget.addTab(self.tab_sarc, 'SARC')
self.tab_image_index = self.tab_widget.addTab(self.tab_image, 'Image')
self.tab_adf_index = self.tab_widget.addTab(self.tab_adf, 'ADF')
self.tab_adf_gdc_index = self.tab_widget.addTab(self.tab_adf_gdc, 'ADF/GDC')
self.tab_rtpc_index = self.tab_widget.addTab(self.tab_rtpc, 'RTPC')
self.tab_obc_index = self.tab_widget.addTab(self.tab_obc, 'OBC')
size = QSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)
size.setVerticalStretch(1)
self.tab_widget.setSizePolicy(size)
self.main_layout = QVBoxLayout()
self.main_layout.addWidget(self.tab_widget)
self.setLayout(self.main_layout)
def data_source_set(self, data_source: IVfsViewSrc):
self.data_source = data_source
self.data_source.signal_selection_changed.connect(self.vnode_selection_changed)
self.tab_sarc.signal_archive_open.connect(self.data_source.archive_open)
def vnode_selection_changed(self):
print('DataViewWidget:vnode_selection_changed')
def vnode_2click_selected(self, uids: List[int]):
vfs: VfsProcessor = self.data_source.vfs_get()
vnodes = [vfs.node_where_uid(uid) for uid in uids]
for uid, vnode in zip(uids, vnodes):
print(f'DataViewWidget:vnode_2click_selected: {uid}: {vnode}')
vnode = vnodes[0]
self.tab_widget.setTabEnabled(self.tab_info_index, True)
self.tab_info.vnode_process(vfs, vnode)
self.tab_widget.setTabEnabled(self.tab_raw_index, True)
self.tab_raw.vnode_process(vfs, vnode)
self.tab_widget.setTabEnabled(self.tab_text_index, False)
self.tab_widget.setTabEnabled(self.tab_sarc_index, False)
self.tab_widget.setTabEnabled(self.tab_image_index, False)
self.tab_widget.setTabEnabled(self.tab_adf_index, False)
self.tab_widget.setTabEnabled(self.tab_adf_gdc_index, False)
self.tab_widget.setTabEnabled(self.tab_rtpc_index, False)
self.tab_widget.setTabEnabled(self.tab_obc_index, False)
if vnode.file_type in {FTYPE_TXT}:
self.tab_widget.setTabEnabled(self.tab_text_index, True)
self.tab_text.vnode_process(vfs, vnode)
self.tab_widget.setCurrentIndex(self.tab_text_index)
elif vnode.file_type in {FTYPE_SARC}:
self.tab_widget.setTabEnabled(self.tab_sarc_index, True)
self.tab_sarc.vnode_process(vfs, vnode)
self.tab_widget.setCurrentIndex(self.tab_sarc_index)
elif vnode.file_type in {FTYPE_AVTX, FTYPE_ATX, FTYPE_HMDDSC, FTYPE_DDS, FTYPE_BMP}:
self.tab_widget.setTabEnabled(self.tab_image_index, True)
self.tab_image.vnode_process(vfs, vnode)
self.tab_widget.setCurrentIndex(self.tab_image_index)
elif vnode.file_type in {FTYPE_ADF, FTYPE_ADF_BARE, FTYPE_ADF0}:
            # handle the case for GenZero where ADF files can also be in the bare ADF/GDC form
vnodes_adf = []
vnodes_adfb = []
for vnode in vnodes:
if vnode.file_type == FTYPE_ADF_BARE:
vnodes_adfb.append(vnode)
else:
vnodes_adf.append(vnode)
if len(vnodes_adf) > 0:
self.tab_widget.setTabEnabled(self.tab_adf_index, True)
self.tab_adf.vnode_process(vfs, vnodes_adf[0])
self.tab_widget.setCurrentIndex(self.tab_adf_index)
if len(vnodes_adfb) > 0:
self.tab_widget.setTabEnabled(self.tab_adf_gdc_index, True)
self.tab_adf_gdc.vnode_process(vfs, vnodes_adfb[0])
self.tab_widget.setCurrentIndex(self.tab_adf_index)
elif vnode.file_type in {FTYPE_RTPC}:
self.tab_widget.setTabEnabled(self.tab_rtpc_index, True)
self.tab_rtpc.vnode_process(vfs, vnode)
self.tab_widget.setCurrentIndex(self.tab_rtpc_index)
elif vnode.file_type in {FTYPE_OBC}:
self.tab_widget.setTabEnabled(self.tab_obc_index, True)
self.tab_obc.vnode_process(vfs, vnode)
self.tab_widget.setCurrentIndex(self.tab_obc_index)
else:
self.tab_widget.setCurrentIndex(self.tab_raw_index)
| 45.585938
| 92
| 0.700086
|
f4d5acd8ba2bb300b0c4a05c9d4505d31f3c3803
| 2,660
|
py
|
Python
|
data/p3BR/R2/benchmark/startCirq165.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/p3BR/R2/benchmark/startCirq165.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/p3BR/R2/benchmark/startCirq165.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=3
# total number=31
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
from cirq.contrib.svg import SVGCircuit
# Symbols for the rotation angles in the QAOA circuit.
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=1
c.append(cirq.H.on(input_qubit[2])) # number=28
c.append(cirq.CZ.on(input_qubit[0],input_qubit[2])) # number=29
c.append(cirq.H.on(input_qubit[2])) # number=30
c.append(cirq.X.on(input_qubit[2])) # number=12
c.append(cirq.H.on(input_qubit[2])) # number=25
c.append(cirq.CZ.on(input_qubit[0],input_qubit[2])) # number=26
c.append(cirq.H.on(input_qubit[2])) # number=27
c.append(cirq.H.on(input_qubit[1])) # number=7
c.append(cirq.CZ.on(input_qubit[2],input_qubit[1])) # number=8
c.append(cirq.H.on(input_qubit[1])) # number=9
c.append(cirq.H.on(input_qubit[1])) # number=18
c.append(cirq.CZ.on(input_qubit[2],input_qubit[1])) # number=19
c.append(cirq.H.on(input_qubit[1])) # number=20
c.append(cirq.Y.on(input_qubit[1])) # number=14
c.append(cirq.H.on(input_qubit[1])) # number=22
c.append(cirq.CZ.on(input_qubit[2],input_qubit[1])) # number=23
c.append(cirq.H.on(input_qubit[1])) # number=24
c.append(cirq.Z.on(input_qubit[2])) # number=3
c.append(cirq.X.on(input_qubit[1])) # number=17
c.append(cirq.Y.on(input_qubit[2])) # number=5
c.append(cirq.X.on(input_qubit[2])) # number=21
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=15
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=16
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2000
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq165.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close()
| 33.670886
| 77
| 0.684962
|