Dataset schema (one row per source file; ⌀ marks nullable columns):

| column | type | range / cardinality |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 classes |
| lang | string | 1 value |
| max_stars_repo_path | string | length 4 to 209 |
| max_stars_repo_name | string | length 5 to 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 ⌀ | 1 to 191k |
| max_stars_repo_stars_event_min_datetime | string ⌀ | length 24 |
| max_stars_repo_stars_event_max_datetime | string ⌀ | length 24 |
| max_issues_repo_path | string | length 4 to 209 |
| max_issues_repo_name | string | length 5 to 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 ⌀ | 1 to 67k |
| max_issues_repo_issues_event_min_datetime | string ⌀ | length 24 |
| max_issues_repo_issues_event_max_datetime | string ⌀ | length 24 |
| max_forks_repo_path | string | length 4 to 209 |
| max_forks_repo_name | string | length 5 to 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 ⌀ | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | string ⌀ | length 24 |
| max_forks_repo_forks_event_max_datetime | string ⌀ | length 24 |
| content | string | length 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |
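Each row pairs this metadata with the full file text in `content`. As a rough, illustrative sketch only (the parquet file name is an assumption, not part of this dump), rows with this schema could be filtered with pandas along these lines:

```python
import pandas as pd

# Hypothetical export of the rows shown below; the file name is assumed.
df = pd.read_parquet("python_files.parquet")

# Keep Python files with a reasonable amount of real text and no
# pathologically long single line, using the schema's numeric columns.
mask = (
    (df["ext"] == "py")
    & (df["alphanum_fraction"] > 0.3)
    & (df["max_line_length"] < 1000)
)

for _, row in df[mask].iterrows():
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
```

The rows follow, one per source file.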
hexsha: 18020a245499524813e55143618bee2b7b72b00e | size: 7,863 | ext: py | lang: Python
repo_path: tests/test_nms2D.py | repo_name: kevinyamauchi/stardist | head_hexsha: 951e1ba413d32e883ad1e0f620a1fe149ca00d74 | licenses: ["BSD-3-Clause"] (same for the stars/issues/forks column groups)
max_stars_count: 221 (2018-06-27T07:15:02.000Z to 2021-02-28T11:58:37.000Z) | max_issues_count: 118 (2018-06-28T08:13:47.000Z to 2021-02-28T22:54:32.000Z) | max_forks_count: 88 (2018-07-11T08:49:00.000Z to 2021-02-25T06:56:51.000Z)
content:
import numpy as np
import pytest
from stardist import star_dist, edt_prob, non_maximum_suppression, dist_to_coord, polygons_to_label
from stardist.matching import matching
from csbdeep.utils import normalize
from utils import random_image, real_image2d, check_similar
def create_random_data(shape=(356, 299), radius=10, noise=.1, n_rays=32):
dist = radius*np.ones(shape+(n_rays,))
noise = np.clip(noise, 0, 1)
if noise > 0:
dist *= (1+noise*np.random.uniform(-1, 1, dist.shape))
prob = np.random.uniform(0, 1, shape)
return prob, dist
def create_random_suppressed(shape=(356, 299), grid = (1,1), radius=10, noise=.1, n_rays=32, nms_thresh=.1):
prob, dist = create_random_data(shape, radius, noise, n_rays)
prob = prob[::grid[0],::grid[1]]
dist = dist[::grid[0],::grid[1]]
points, probi, disti = non_maximum_suppression(dist, prob, prob_thresh=0.9,
nms_thresh=nms_thresh,
verbose=True)
img = polygons_to_label(disti, points, prob=probi, shape=shape)
return img
def test_large():
nms = create_random_suppressed(
shape=(2000, 2007), radius=10, noise=0, nms_thresh=0)
return nms
@pytest.mark.parametrize('img', (real_image2d()[1], random_image((128, 123))))
def test_bbox_search_old(img):
from stardist.geometry.geom2d import _polygons_to_label_old, _dist_to_coord_old
from stardist.nms import _non_maximum_suppression_old
prob = edt_prob(img)
dist = star_dist(img, n_rays=32, mode="cpp")
coord = _dist_to_coord_old(dist)
points_a = _non_maximum_suppression_old(coord, prob, prob_thresh=0.4, verbose=False, max_bbox_search=False)
points_b = _non_maximum_suppression_old(coord, prob, prob_thresh=0.4, verbose=False, max_bbox_search=True)
img2_a = _polygons_to_label_old(coord, prob, points_a, shape=img.shape)
img2_b = _polygons_to_label_old(coord, prob, points_b, shape=img.shape)
check_similar(points_a, points_b)
check_similar(img2_a, img2_b)
@pytest.mark.parametrize('grid', ((1,1),(2,2)))
@pytest.mark.parametrize('img', (real_image2d()[1], ))
def test_acc_old(img, grid):
from stardist.geometry.geom2d import _polygons_to_label_old, _dist_to_coord_old
from stardist.nms import _non_maximum_suppression_old
prob = edt_prob(img)[::grid[0],::grid[1]]
dist = star_dist(img, n_rays=32, mode="cpp")[::grid[0],::grid[1]]
coord = _dist_to_coord_old(dist, grid = grid)
points = _non_maximum_suppression_old(coord, prob, prob_thresh=0.4, grid=grid)
img2 = _polygons_to_label_old(coord, prob, points, shape=img.shape)
m = matching(img, img2)
acc = m.accuracy
print("accuracy {acc:.2f}".format(acc=acc))
assert acc > 0.9
@pytest.mark.parametrize('grid', ((1,1),(2,2)))
@pytest.mark.parametrize('img', (real_image2d()[1], ))
def test_acc(img, grid):
prob = edt_prob(img)[::grid[0],::grid[1]]
dist = star_dist(img, n_rays=32, mode="cpp")[::grid[0],::grid[1]]
points, probi, disti = non_maximum_suppression(dist, prob, grid = grid, prob_thresh=0.4)
img2 = polygons_to_label(disti, points, shape=img.shape)
m = matching(img, img2)
acc = m.accuracy
print("accuracy {acc:.2f}".format(acc=acc))
assert acc > 0.9
@pytest.mark.parametrize('grid', ((1,1),(16,16)))
@pytest.mark.parametrize('n_rays', (11,32))
@pytest.mark.parametrize('shape', ((356, 299),(114, 217)))
def test_old_new(shape, n_rays, grid, radius=10, noise=.1, nms_thresh=.3):
np.random.seed(42)
from stardist.geometry.geom2d import _polygons_to_label_old, _dist_to_coord_old
from stardist.nms import _non_maximum_suppression_old
prob, dist = create_random_data(shape, n_rays = n_rays, radius=10, noise=.1)
prob = prob[::grid[0],::grid[1]]
dist = dist[::grid[0],::grid[1]]
coord = _dist_to_coord_old(dist, grid = grid)
inds1 = _non_maximum_suppression_old(coord, prob, prob_thresh=0.9,
grid = grid,
nms_thresh=nms_thresh,
verbose=True)
points1 = inds1*np.array(grid)
sort_ind = np.argsort(prob[tuple(inds1.T)])[::-1]
points1 = points1[sort_ind]
points2, probi2, disti2 = non_maximum_suppression(dist, prob,
grid = grid, prob_thresh=0.9,
nms_thresh=nms_thresh,
verbose=True)
img1 = _polygons_to_label_old(coord, prob, inds1, shape=shape)
img2 = polygons_to_label(disti2, points2, shape=shape)
assert len(points1) == len(points2)
assert np.allclose(points1, points2)
assert np.allclose(img1>0, img2>0)
return points1, img1, points2, img2
def test_speed(nms_thresh = 0.3, grid = (1,1)):
np.random.seed(42)
from stardist.geometry.geom2d import _polygons_to_label_old, _dist_to_coord_old
from stardist.nms import _non_maximum_suppression_old
from time import time
shape = (128,128)
prob, dist = create_random_data(shape, n_rays = 32, radius=10, noise=.1)
prob = np.tile(prob, (8,8))
dist = np.tile(dist, (8,8,1))
prob = prob[::grid[0],::grid[1]]
dist = dist[::grid[0],::grid[1]]
t1 = time()
coord = _dist_to_coord_old(dist, grid = grid)
points1 = _non_maximum_suppression_old(coord, prob, prob_thresh=0.9,
grid = grid,
nms_thresh=nms_thresh,
verbose=True)
t1 = time()-t1
points1 = points1*np.array(grid)
sort_ind = np.argsort(prob[tuple(points1.T)])[::-1]
points1 = points1[sort_ind]
t2 = time()
points2, probi2, disti2 = non_maximum_suppression(dist, prob,
grid = grid, prob_thresh=0.9,
nms_thresh=nms_thresh,
use_kdtree = True,
verbose=True)
t2 = time()-t2
print("\n\n")
print(f"old : {t1:.2f}s")
print(f"new (kdtree): {t2:.2f}s")
return points1, points2
def bench():
np.random.seed(42)
from stardist.geometry.geom2d import _polygons_to_label_old, _dist_to_coord_old
from stardist.nms import _non_maximum_suppression_old
from time import time
shape = (128,128)
prob, dist = create_random_data(shape, n_rays = 32, radius=10, noise=.1)
prob_thresh = 0.9
def _f1(n):
_prob = np.tile(prob, (n,n))
_dist = np.tile(dist, (n,n,1))
t = time()
coord = _dist_to_coord_old(_dist)
points1 = _non_maximum_suppression_old(coord, _prob, prob_thresh=prob_thresh,
nms_thresh=.2,
verbose=True)
t = time()-t
return np.count_nonzero(_prob>prob_thresh), t
def _f2(n):
_prob = np.tile(prob, (n,n))
_dist = np.tile(dist, (n,n,1))
t = time()
points2, probi2, disti2 = non_maximum_suppression(_dist, _prob,
prob_thresh=prob_thresh,
nms_thresh=.2,
use_kdtree = True,
verbose=True)
t = time()-t
return np.count_nonzero(_prob>prob_thresh), t
a1 = np.array(tuple(_f1(n) for n in range(1,20,2)))
a2 = np.array(tuple(_f2(n) for n in range(1,20,2)))
return a1, a2
if __name__ == '__main__':
points1, img1, points2, img2 = test_old_new((356, 299),32,(1,1), nms_thresh = .3)
avg_line_length: 40.323077 | max_line_length: 111 | alphanum_fraction: 0.596846

hexsha: be38c066aa5066c3f1ac86c733195b8453ad0be8 | size: 2,390 | ext: py | lang: Python
repo_path: intermediate-Day15-to-Day32/classQuiz/data.py | repo_name: jawad5311/100-days-Python | head_hexsha: 9e2c8990365e680d4dcf579c4d7f5d8f0d804566 | licenses: ["Apache-2.0"] (same for the stars/issues/forks column groups)
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
question_data = [
{"category": "Science: Computers", "type": "boolean", "difficulty": "easy",
"question": "Linus Torvalds created Linux and Git.", "correct_answer": "True", "incorrect_answers": ["False"]},
{"category": "Science: Computers", "type": "boolean", "difficulty": "easy",
"question": "The programming language "Python" is based off a modified version of "
""JavaScript".",
"correct_answer": "False", "incorrect_answers": ["True"]},
{"category": "Science: Computers", "type": "boolean", "difficulty": "easy",
"question": "The logo for Snapchat is a Bell.", "correct_answer": "False", "incorrect_answers": ["True"]},
{"category": "Science: Computers", "type": "boolean", "difficulty": "easy",
"question": "Pointers were not used in the original C programming language; they were added later on in C++.",
"correct_answer": "False", "incorrect_answers": ["True"]},
{"category": "Science: Computers", "type": "boolean", "difficulty": "easy",
"question": "RAM stands for Random Access Memory.", "correct_answer": "True", "incorrect_answers": ["False"]},
{"category": "Science: Computers", "type": "boolean", "difficulty": "easy",
"question": "Ada Lovelace is often considered the first computer programmer.", "correct_answer": "True",
"incorrect_answers": ["False"]},
{"category": "Science: Computers", "type": "boolean", "difficulty": "easy",
"question": ""HTML" stands for Hypertext Markup Language.",
"correct_answer": "True", "incorrect_answers": ["False"]},
{"category": "Science: Computers", "type": "boolean", "difficulty": "easy",
"question": "The Windows 7 operating system has six main editions.", "correct_answer": "True",
"incorrect_answers": ["False"]},
{"category": "Science: Computers", "type": "boolean", "difficulty": "easy",
"question": "Linux was first created as an alternative to Windows XP.",
"correct_answer": "False", "incorrect_answers": ["True"]},
{"category": "Science: Computers", "type": "boolean", "difficulty": "easy",
"question": "The Python programming language gets its name from the British "
"comedy group "Monty Python."",
"correct_answer": "True", "incorrect_answers": ["False"]}]
avg_line_length: 77.096774 | max_line_length: 116 | alphanum_fraction: 0.62636

hexsha: d8464b5057770f7109379745b9bdbdb7114e2bd9 | size: 645 | ext: py | lang: Python
repo_path: password.py | repo_name: mhamaneamogh50/All_Python_Pro | head_hexsha: e943d1924f160a98e5df612b920a67ef80be3b61 | licenses: ["0BSD"] (same for the stars/issues/forks column groups)
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
import random
print("\t*********WELLCOME TO AUTOMATIC PASSWORD GENRATEOR*************")
print("\t # #\n\t # #\n\t # # ")
print("\tWE DO NOT STORE ANY OF YOUR INFORMATION \n\tALL RIGHTS RESERVED BY****@Amogh****")
print("\n")
name= str(input("Enter Your Name :- "))
print("\n")
lower="abcdefghijkmnopqrstuvwxyz"
numbers = "012345678"
symbol = "@&*_"
all = numbers + symbol + lower
length = 8
password = "".join(random.sample(all,length))
print("\n")
print(name,"Your Auto Generated Password Is:-",password)
print("\n\n ~~DO NOT SHARE YOUR PASSWORS WITH ANYONE~~ ")
avg_line_length: 33.947368 | max_line_length: 93 | alphanum_fraction: 0.570543

hexsha: 51356d50943ad38199b3417a1691ceb356cc6012 | size: 7,337 | ext: py | lang: Python
repo_path: estimator_hooks/hooks_test.py | repo_name: keithyin/tensorflow_utils | head_hexsha: 65c00531b14d2328b5c46d36aa6f3eaa711bc5c9 | licenses: ["Apache-2.0"] (same for the stars/issues/forks column groups)
max_stars_count: 3 (2021-06-11T09:55:41.000Z to 2021-09-27T10:08:59.000Z) | max_issues_count: null | max_forks_count: null
content:
import tensorflow as tf
import numpy as np
from .hooks import LabelDistHook, GroupAucHook, RegressionHook
def cnn_model_fn(features, labels, mode):
"""Model function for CNN."""
# Input Layer
tf.add_to_collection("label", labels)
input_layer = tf.reshape(features["x"], [-1, 28, 28, 1])
# Convolutional Layer #1
conv1 = tf.layers.conv2d(
inputs=input_layer,
filters=32,
kernel_size=[5, 5],
padding="same",
activation=tf.nn.relu)
# Pooling Layer #1
pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)
# Convolutional Layer #2 and Pooling Layer #2
conv2 = tf.layers.conv2d(
inputs=pool1,
filters=64,
kernel_size=[5, 5],
padding="same",
activation=tf.nn.relu)
pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)
# Dense Layer
pool2_flat = tf.reshape(pool2, [-1, 7 * 7 * 64])
dense = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu)
dropout = tf.layers.dropout(
inputs=dense, rate=0.4, training=mode == tf.estimator.ModeKeys.TRAIN)
# Logits Layer
logits = tf.layers.dense(inputs=dropout, units=10)
cls_summ = tf.summary.histogram("class_dist", tf.argmax(input=logits, axis=1))
print(tf.argmax(input=logits, axis=1))
predictions = {
# Generate predictions (for PREDICT and EVAL mode)
"classes": tf.argmax(input=logits, axis=1),
# Add `softmax_tensor` to the graph. It is used for PREDICT and by the
# `logging_hook`.
"probabilities": tf.nn.softmax(logits, name="softmax_tensor")
}
export_outputs = {
tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
tf.estimator.export.PredictOutput(outputs=predictions)
}
if mode == tf.estimator.ModeKeys.PREDICT:
print("build Predict graph")
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions,
export_outputs=export_outputs)
# Calculate Loss (for both TRAIN and EVAL modes)
loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
# Configure the Training Op (for TRAIN mode)
global_step = tf.train.get_or_create_global_step()
is_replace = tf.equal(global_step % 3, 0)
if mode == tf.estimator.ModeKeys.TRAIN:
print("build train graph")
label_dist_hook = LabelDistHook(name="train_label", label_tensor=labels,
mask_tensor=tf.ones_like(labels))
group_auc_hook = GroupAucHook(name="train_classification", group_tensor=tf.ones_like(labels),
label_tensor=tf.one_hot(labels, depth=10)[:, 0],
pred_tensor=tf.sigmoid(logits[:, 0]))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
train_op = optimizer.minimize(
loss=loss,
global_step=global_step)
return tf.estimator.EstimatorSpec(mode=mode,
loss=loss,
train_op=train_op,
training_hooks=[label_dist_hook, group_auc_hook])
# Add evaluation metrics (for EVAL mode)
eval_metric_ops = {
"accuracy": tf.metrics.accuracy(
labels=labels, predictions=predictions["classes"])
}
summary_hook = tf.train.SummarySaverHook(save_steps=1, summary_op=[cls_summ], output_dir="model_dir/eval")
print("build eval graph")
label_dist_hook = LabelDistHook(name="eval_label", label_tensor=labels,
mask_tensor=tf.ones_like(labels), reset_step=50)
group_auc_hook = GroupAucHook(name="eval_classification", group_tensor=tf.ones_like(labels),
label_tensor=tf.one_hot(labels, depth=10)[:, 0],
pred_tensor=tf.sigmoid(logits[:, 0]), reset_step=50)
regression_info_hook = RegressionHook(name="wtf",
group_tensor=tf.one_hot(labels, depth=10)[:, 0],
label_tensor=tf.one_hot(labels, depth=10)[:, 0],
pred_tensor=tf.sigmoid(logits[:, 0]),
mask_tensor=tf.ones_like(logits[:, 0]), reset_step=50)
return tf.estimator.EstimatorSpec(
mode=mode,
loss=tf.cond(tf.random_uniform(shape=[], maxval=1) > 0.7,
lambda: tf.constant(100, dtype=tf.float32),
lambda: tf.constant(200, dtype=tf.float32)),
eval_metric_ops=eval_metric_ops, export_outputs=None,
evaluation_hooks=[summary_hook, label_dist_hook, group_auc_hook, regression_info_hook])
def input_fn():
((train_data, train_labels), (eval_data, eval_labels)) = tf.keras.datasets.mnist.load_data()
train_data = train_data / np.float32(255)
train_labels = train_labels.astype(np.int32) # not required
eval_data = eval_data / np.float32(255)
eval_labels = eval_labels.astype(np.int32) # not required
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={"x": train_data},
y=train_labels,
batch_size=64,
num_epochs=500,
shuffle=True)
eval_input_fn = tf.estimator.inputs.numpy_input_fn(
x={"x": eval_data},
y=eval_labels,
num_epochs=1,
shuffle=False)
return train_input_fn, eval_input_fn
def serving_input_fn():
example_proto = tf.placeholder(dtype=tf.string, shape=[None])
receiver_tensor = {"data": example_proto}
feature = tf.parse_example(example_proto, features={"x": tf.FixedLenFeature([], dtype=tf.string)})
img = tf.io.decode_raw(feature['x'], out_type=tf.float32)
feature['x'] = img
return tf.estimator.export.ServingInputReceiver(features=feature, receiver_tensors=receiver_tensor)
if __name__ == '__main__':
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
estimator_config = tf.estimator.RunConfig(model_dir="model_dir",
save_checkpoints_steps=100)
mnist_classifier = tf.estimator.Estimator(
model_fn=cnn_model_fn, config=estimator_config)
train_input_fn, eval_input_fn = input_fn()
exporter = tf.estimator.BestExporter(serving_input_receiver_fn=serving_input_fn)
train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn)
eval_spec = tf.estimator.EvalSpec(input_fn=eval_input_fn, steps=None,
start_delay_secs=20,
exporters=exporter, throttle_secs=5)
for i in range(2):
# mnist_classifier.train(train_input_fn, steps=300)
# logging_global_step = tf.train.LoggingTensorHook(
# tensors={"show_me_global_step": "global_step"}, every_n_iter=1)
# print(mnist_classifier.evaluate(eval_input_fn))
for i, v in enumerate(mnist_classifier.predict(eval_input_fn, yield_single_examples=True)):
print(v)
break
#
# mnist_classifier.export_saved_model()
# tf.estimator.train_and_evaluate(estimator=mnist_classifier, train_spec=train_spec, eval_spec=eval_spec)
avg_line_length: 42.166667 | max_line_length: 110 | alphanum_fraction: 0.632684

hexsha: 095b5069ba49201a854f3fd8b6ff40c4af1f39e9 | size: 74 | ext: py | lang: Python
repo_path: datetime_utils/__init__.py | repo_name: borbamartin/datetime-utils | head_hexsha: 00ab8f4d15fe7436391d9adbf511e2501107e1d1 | licenses: ["BSD-2-Clause"] (same for the stars/issues/forks column groups)
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
from .datetime_utils import rfc822, parse_timestr, DateTimeUtil, TimeUnit
avg_line_length: 37 | max_line_length: 73 | alphanum_fraction: 0.851351

hexsha: 88680545fea0e2d96cb5731582f9b805247b63a8 | size: 897 | ext: py | lang: Python
repo_path: setup.py | repo_name: oseau/django_render | head_hexsha: 7abef31a08d990e679edf53f7dbfd3a44febdbc1 | licenses: ["Apache-2.0"] (same for the stars/issues/forks column groups)
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# author: xiLin.ding
from setuptools import setup, find_packages
setup(
name='django-render-url',
version='0.9.8.7',
packages=find_packages(),
author='WANG Tai',
author_email='i@wangtai.me',
url='https://github.com/wangtai/django_render',
description='a very light django plugin',
#long_description=open('README.md').read(),
license='Apache2',
requires=[
'Django',
'enum34'
],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'Natural Language :: English',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Topic :: System :: Installation/Setup'
],
include_package_data=True,
zip_safe=False
)
avg_line_length: 24.916667 | max_line_length: 51 | alphanum_fraction: 0.603122

hexsha: b374862ad0478064396a8f9e4d2841dc2e7f4f5c | size: 2,088 | ext: py | lang: Python
repo_path: kipy/fileobjs/brd/header.py | repo_name: Arie001/klonor-kicad | head_hexsha: c916ae4cdac5072803e12f530ef32230732b30d7 | licenses: ["MIT"] (same for the stars/issues/forks column groups)
max_stars_count: 2 (2016-05-17T13:10:52.000Z to 2018-09-03T15:18:30.000Z) | max_issues_count: 1 (2016-08-03T09:31:39.000Z to 2016-08-03T11:19:19.000Z) | max_forks_count: 1 (2018-09-04T16:32:13.000Z to 2018-09-04T16:32:13.000Z)
content:
from .brditem import BrdItem
'''
Classes based on BrdItem for parsing and rendering header and
trailer of .brd file.
'''
class PCBNEW_BOARD(BrdItem):
keyword = 'PCBNEW-BOARD'
@classmethod
def subparse(cls, brdpage, tokens, lineiterator):
cls.checkstate(brdpage, -1, 3)
brdpage.page_header = tokens
@staticmethod
def render(schpage, linelist):
linelist.append(schpage.page_header.linetext)
class GENERAL(BrdItem):
keyword = '$GENERAL'
@classmethod
def subparse(cls, brdpage, tokens, lineiterator):
cls.checkstate(brdpage, 3, 2)
brdpage.general = [tokens]
for tokens in lineiterator:
brdpage.general.append(tokens)
if tokens[0] == '$EndGENERAL':
break
@staticmethod
def render(brdpage, linelist):
linelist.extend(x.linetext for x in brdpage.general)
class SHEETDESCR(BrdItem):
keyword = '$SHEETDESCR'
@classmethod
def subparse(cls, brdpage, tokens, lineiterator):
cls.checkstate(brdpage, 2, 1)
brdpage.sheetdescr = [tokens]
for tokens in lineiterator:
brdpage.sheetdescr.append(tokens)
if tokens[0] == '$EndSHEETDESCR':
break
brdpage.items = []
@staticmethod
def render(brdpage, linelist):
linelist.extend(x.linetext for x in brdpage.sheetdescr)
class SETUP(BrdItem):
keyword = '$SETUP'
@classmethod
def subparse(cls, brdpage, tokens, lineiterator):
cls.checkstate(brdpage, 1, 1)
brdpage.setup = [tokens]
for tokens in lineiterator:
brdpage.setup.append(tokens)
if tokens[0] == '$EndSETUP':
break
@staticmethod
def render(brdpage, linelist):
linelist.extend(x.linetext for x in brdpage.setup)
class EndBOARD(BrdItem):
keyword = '$EndBOARD'
@classmethod
def subparse(cls, brdpage, tokens, lineiterator):
cls.checkstate(brdpage, 1, 0)
@classmethod
def render(cls, brdpage, linelist):
linelist.append(cls.keyword)
avg_line_length: 26.769231 | max_line_length: 63 | alphanum_fraction: 0.6341

hexsha: 9ec5e2dbd03ff98e9da80cdc96cafcefa74b708a | size: 920 | ext: py | lang: Python
repo_path: Own_Codes/Sieve_Of_Eratosthenes.py | repo_name: Dutta-SD/Python_Programs | head_hexsha: f002dbd49c979a6d8b156f88003a79f364ff01da | licenses: ["MIT"] (same for the stars/issues/forks column groups)
max_stars_count: 1 (2021-01-04T07:16:05.000Z to 2021-01-04T07:16:05.000Z) | max_issues_count: 2 (2021-01-27T04:24:50.000Z to 2021-07-25T17:44:00.000Z) | max_forks_count: null
content:
# -*- coding: utf-8 -*-
# =============================================================================
# Sieve of Eratosthenes : To find all primes using sieve of Eratosthenes
# =============================================================================
def Sieve_of_Eratosthenes(n):
arr = list(range(0, n+1))
p = 2
while(p <= n):
t = p * p
while t <= n:
arr[t] = -1
t += p
p = p+1
        # skip ahead past values already marked composite (-1)
        while p <= n and arr[p] == -1:
            p += 1
return arr
# =============================================================================
# Driver Function to test the function
# =============================================================================
val = Sieve_of_Eratosthenes(200)
print([i for i in val[2:] if i != -1])
avg_line_length: 27.878788 | max_line_length: 80 | alphanum_fraction: 0.26413

hexsha: b2bcd7f3ac14deea2b56ab2607d4e3e149df4223 | size: 2,019 | ext: py | lang: Python
repo_path: dizoo/mujoco/config/halfcheetah_sac_config.py | repo_name: jayyoung0802/DI-engine | head_hexsha: efbb35ddaf184d1009291e6842fbbae09f193492 | licenses: ["Apache-2.0"] (same for the stars/issues/forks column groups)
max_stars_count: 1 (2022-03-21T16:15:39.000Z to 2022-03-21T16:15:39.000Z) | max_issues_count: null | max_forks_count: null
content:
from easydict import EasyDict
halfcheetah_sac_config = dict(
exp_name='halfcheetah_sac_seed0',
env=dict(
env_id='HalfCheetah-v3',
norm_obs=dict(use_norm=False, ),
norm_reward=dict(use_norm=False, ),
collector_env_num=1,
evaluator_env_num=8,
use_act_scale=True,
n_evaluator_episode=8,
stop_value=12000,
),
policy=dict(
cuda=True,
random_collect_size=10000,
model=dict(
obs_shape=17,
action_shape=6,
twin_critic=True,
action_space='reparameterization',
actor_head_hidden_size=256,
critic_head_hidden_size=256,
),
learn=dict(
update_per_collect=1,
batch_size=256,
learning_rate_q=1e-3,
learning_rate_policy=1e-3,
learning_rate_alpha=3e-4,
ignore_done=True,
target_theta=0.005,
discount_factor=0.99,
alpha=0.2,
reparameterization=True,
auto_alpha=False,
),
collect=dict(
n_sample=1,
unroll_len=1,
),
command=dict(),
eval=dict(),
other=dict(replay_buffer=dict(replay_buffer_size=1000000, ), ),
),
)
halfcheetah_sac_config = EasyDict(halfcheetah_sac_config)
main_config = halfcheetah_sac_config
halfcheetah_sac_create_config = dict(
env=dict(
type='mujoco',
import_names=['dizoo.mujoco.envs.mujoco_env'],
),
env_manager=dict(type='subprocess'),
policy=dict(
type='sac',
import_names=['ding.policy.sac'],
),
replay_buffer=dict(type='naive', ),
)
halfcheetah_sac_create_config = EasyDict(halfcheetah_sac_create_config)
create_config = halfcheetah_sac_create_config
if __name__ == "__main__":
# or you can enter `ding -m serial -c halfcheetah_sac_config.py -s 0`
from ding.entry import serial_pipeline
serial_pipeline((main_config, create_config), seed=0)
avg_line_length: 28.43662 | max_line_length: 73 | alphanum_fraction: 0.614661

hexsha: ce3ae28f21ef1b152a9845f0e75312d774443851 | size: 3,209 | ext: py | lang: Python
repo_path: fedml_api/distributed/fedgan/FedGanClientManager.py | repo_name: xuwanwei/FedML | head_hexsha: c049a30d9839c4554e7e14b0c18275e96fea8130 | licenses: ["Apache-2.0"] (same for the stars/issues/forks column groups)
max_stars_count: 1,120 (2020-07-22T02:30:52.000Z to 2022-03-31T08:10:44.000Z) | max_issues_count: 113 (2020-07-27T03:48:09.000Z to 2022-03-30T03:25:56.000Z) | max_forks_count: 381 (2020-07-22T06:12:57.000Z to 2022-03-30T18:38:35.000Z)
content:
import logging
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.getcwd(), "../../")))
sys.path.insert(0, os.path.abspath(os.path.join(os.getcwd(), "../../../../FedML")))
try:
from fedml_core.distributed.client.client_manager import ClientManager
from fedml_core.distributed.communication.message import Message
except ImportError:
from FedML.fedml_core.distributed.client.client_manager import ClientManager
from FedML.fedml_core.distributed.communication.message import Message
from .message_define import MyMessage
from .utils import transform_list_to_tensor, post_complete_message_to_sweep_process
class FedGANClientManager(ClientManager):
def __init__(self, args, trainer, comm=None, rank=0, size=0, backend="MPI"):
super().__init__(args, comm, rank, size, backend)
self.trainer = trainer
self.num_rounds = args.comm_round
self.round_idx = 0
def run(self):
super().run()
def register_message_receive_handlers(self):
self.register_message_receive_handler(MyMessage.MSG_TYPE_S2C_INIT_CONFIG,
self.handle_message_init)
self.register_message_receive_handler(MyMessage.MSG_TYPE_S2C_SYNC_MODEL_TO_CLIENT,
self.handle_message_receive_model_from_server)
def handle_message_init(self, msg_params):
global_model_params = msg_params.get(MyMessage.MSG_ARG_KEY_MODEL_PARAMS)
client_index = msg_params.get(MyMessage.MSG_ARG_KEY_CLIENT_INDEX)
if self.args.is_mobile == 1:
global_model_params = transform_list_to_tensor(global_model_params)
self.trainer.update_model(global_model_params)
self.trainer.update_dataset(int(client_index))
self.round_idx = 0
self.__train()
def start_training(self):
self.round_idx = 0
self.__train()
def handle_message_receive_model_from_server(self, msg_params):
logging.info("handle_message_receive_model_from_server.")
model_params = msg_params.get(MyMessage.MSG_ARG_KEY_MODEL_PARAMS)
client_index = msg_params.get(MyMessage.MSG_ARG_KEY_CLIENT_INDEX)
if self.args.is_mobile == 1:
model_params = transform_list_to_tensor(model_params)
self.trainer.update_model(model_params)
self.trainer.update_dataset(int(client_index))
self.round_idx += 1
self.__train()
if self.round_idx == self.num_rounds - 1:
# post_complete_message_to_sweep_process(self.args)
self.finish()
def send_model_to_server(self, receive_id, weights, local_sample_num):
message = Message(MyMessage.MSG_TYPE_C2S_SEND_MODEL_TO_SERVER, self.get_sender_id(), receive_id)
message.add_params(MyMessage.MSG_ARG_KEY_MODEL_PARAMS, weights)
message.add_params(MyMessage.MSG_ARG_KEY_NUM_SAMPLES, local_sample_num)
self.send_message(message)
def __train(self):
logging.info("#######training########### round_id = %d" % self.round_idx)
weights, local_sample_num = self.trainer.train(self.round_idx)
self.send_model_to_server(0, weights, local_sample_num)
avg_line_length: 42.223684 | max_line_length: 104 | alphanum_fraction: 0.711437

hexsha: bb2ea93db2bbfbb2c1aa4626890f4bc3ef11bc53 | size: 994 | ext: py | lang: Python
repo_path: test_project/urls.py | repo_name: ninemoreminutes/celery-task-plus | head_hexsha: 0f3d9e8f70646e03d3693d31fe193930b56cddb9 | licenses: ["BSD-3-Clause"] (same for the stars/issues/forks column groups)
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
# Python
import re
# Django
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import include, path, re_path
from django.views.static import serve
admin.autodiscover()
urlpatterns = [
path('admin/', admin.site.urls),
]
if 'django.contrib.staticfiles' in settings.INSTALLED_APPS and settings.DEBUG:
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
urlpatterns += staticfiles_urlpatterns()
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
else:
pattern = r'^{}(?P<path>.*)$'.format(re.escape(settings.MEDIA_URL.lstrip('/')))
kwargs = dict(document_root=settings.MEDIA_ROOT)
urlpatterns += [
re_path(pattern, serve, kwargs=kwargs),
]
if 'debug_toolbar' in settings.INSTALLED_APPS and settings.DEBUG:
import debug_toolbar
urlpatterns += [
path(r'__debug__/', include(debug_toolbar.urls)),
]
avg_line_length: 28.4 | max_line_length: 83 | alphanum_fraction: 0.737425

hexsha: 022d36a643980c2574a86ecb841a0e77d19dc3ab | size: 82,713 | ext: py | lang: Python
repo_path: zerver/lib/bugdown/__init__.py | repo_name: natalietotonchy/zulip | head_hexsha: 2aaad502b45da9b3f6039e45efa731df72dd345a | licenses: ["Apache-2.0"] (same for the stars/issues/forks column groups)
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
# Zulip's main markdown implementation. See docs/subsystems/markdown.md for
# detailed documentation on our markdown syntax.
from typing import (Any, Callable, Dict, Iterable, List, NamedTuple,
Optional, Set, Text, Tuple, TypeVar, Union, cast)
from mypy_extensions import TypedDict
from typing.re import Match
import markdown
import logging
import traceback
import urllib
import re
import os
import html
import twitter
import platform
import time
import functools
import ujson
import xml.etree.cElementTree as etree
from xml.etree.cElementTree import Element, SubElement
from collections import deque, defaultdict
import requests
from django.core import mail
from django.conf import settings
from django.db.models import Q
from markdown.extensions import codehilite
from zerver.lib.bugdown import fenced_code
from zerver.lib.bugdown.fenced_code import FENCE_RE
from zerver.lib.camo import get_camo_url
from zerver.lib.emoji import translate_emoticons, emoticon_regex
from zerver.lib.mention import possible_mentions, \
possible_user_group_mentions, extract_user_group
from zerver.lib.notifications import encode_stream
from zerver.lib.timeout import timeout, TimeoutExpired
from zerver.lib.cache import cache_with_key, NotFoundInCache
from zerver.lib.url_preview import preview as link_preview
from zerver.models import (
all_realm_filters,
get_active_streams,
MAX_MESSAGE_LENGTH,
Message,
Realm,
RealmFilter,
realm_filters_for_realm,
UserProfile,
UserGroup,
UserGroupMembership,
)
import zerver.lib.mention as mention
from zerver.lib.tex import render_tex
FullNameInfo = TypedDict('FullNameInfo', {
'id': int,
'email': Text,
'full_name': Text,
})
# Format version of the bugdown rendering; stored along with rendered
# messages so that we can efficiently determine what needs to be re-rendered
version = 1
_T = TypeVar('_T')
ElementStringNone = Union[Element, Optional[Text]]
AVATAR_REGEX = r'!avatar\((?P<email>[^)]*)\)'
GRAVATAR_REGEX = r'!gravatar\((?P<email>[^)]*)\)'
EMOJI_REGEX = r'(?P<syntax>:[\w\-\+]+:)'
STREAM_LINK_REGEX = r"""
(?<![^\s'"\(,:<]) # Start after whitespace or specified chars
\#\*\* # and after hash sign followed by double asterisks
(?P<stream_name>[^\*]+) # stream name can contain anything
\*\* # ends by double asterisks
"""
class BugdownRenderingException(Exception):
pass
def rewrite_local_links_to_relative(link: str) -> str:
""" If the link points to a local destination we can just switch to that
instead of opening a new tab. """
if db_data:
realm_uri_prefix = db_data['realm_uri'] + "/"
if link.startswith(realm_uri_prefix):
# +1 to skip the `/` before the hash link.
return link[len(realm_uri_prefix):]
return link
def url_embed_preview_enabled_for_realm(message: Optional[Message]) -> bool:
if message is not None:
realm = message.get_realm() # type: Optional[Realm]
else:
realm = None
if not settings.INLINE_URL_EMBED_PREVIEW:
return False
if realm is None:
return True
return realm.inline_url_embed_preview
def image_preview_enabled_for_realm() -> bool:
global current_message
if current_message is not None:
realm = current_message.get_realm() # type: Optional[Realm]
else:
realm = None
if not settings.INLINE_IMAGE_PREVIEW:
return False
if realm is None:
return True
return realm.inline_image_preview
def list_of_tlds() -> List[Text]:
# HACK we manually blacklist a few domains
blacklist = ['PY\n', "MD\n"]
# tlds-alpha-by-domain.txt comes from http://data.iana.org/TLD/tlds-alpha-by-domain.txt
tlds_file = os.path.join(os.path.dirname(__file__), 'tlds-alpha-by-domain.txt')
tlds = [tld.lower().strip() for tld in open(tlds_file, 'r')
if tld not in blacklist and not tld[0].startswith('#')]
tlds.sort(key=len, reverse=True)
return tlds
def walk_tree(root: Element,
processor: Callable[[Element], Optional[_T]],
stop_after_first: bool=False) -> List[_T]:
results = []
queue = deque([root])
while queue:
currElement = queue.popleft()
for child in currElement.getchildren():
if child.getchildren():
queue.append(child)
result = processor(child)
if result is not None:
results.append(result)
if stop_after_first:
return results
return results
ElementFamily = NamedTuple('ElementFamily', [
('grandparent', Optional[Element]),
('parent', Element),
('child', Element)
])
ResultWithFamily = NamedTuple('ResultWithFamily', [
('family', ElementFamily),
('result', Any)
])
ElementPair = NamedTuple('ElementPair', [
('parent', Optional[Element]),
('value', Element)
])
def walk_tree_with_family(root: Element,
processor: Callable[[Element], Optional[_T]]
) -> List[ResultWithFamily]:
results = []
queue = deque([ElementPair(parent=None, value=root)])
while queue:
currElementPair = queue.popleft()
for child in currElementPair.value.getchildren():
if child.getchildren():
queue.append(ElementPair(parent=currElementPair, value=child)) # type: ignore # Lack of Deque support in typing module for Python 3.4.3
result = processor(child)
if result is not None:
if currElementPair.parent is not None:
grandparent_element = cast(ElementPair, currElementPair.parent)
grandparent = grandparent_element.value
else:
grandparent = None
family = ElementFamily(
grandparent=grandparent,
parent=currElementPair.value,
child=child
)
results.append(ResultWithFamily(
family=family,
result=result
))
return results
# height is not actually used
def add_a(
root: Element,
url: Text,
link: Text,
title: Optional[Text]=None,
desc: Optional[Text]=None,
class_attr: Text="message_inline_image",
data_id: Optional[Text]=None,
insertion_index: Optional[int]=None
) -> None:
title = title if title is not None else url_filename(link)
title = title if title else ""
desc = desc if desc is not None else ""
if insertion_index is not None:
div = markdown.util.etree.Element("div")
root.insert(insertion_index, div)
else:
div = markdown.util.etree.SubElement(root, "div")
div.set("class", class_attr)
a = markdown.util.etree.SubElement(div, "a")
a.set("href", link)
a.set("target", "_blank")
a.set("title", title)
if data_id is not None:
a.set("data-id", data_id)
img = markdown.util.etree.SubElement(a, "img")
img.set("src", url)
if class_attr == "message_inline_ref":
summary_div = markdown.util.etree.SubElement(div, "div")
title_div = markdown.util.etree.SubElement(summary_div, "div")
title_div.set("class", "message_inline_image_title")
title_div.text = title
desc_div = markdown.util.etree.SubElement(summary_div, "desc")
desc_div.set("class", "message_inline_image_desc")
def add_embed(root: Element, link: Text, extracted_data: Dict[Text, Any]) -> None:
container = markdown.util.etree.SubElement(root, "div")
container.set("class", "message_embed")
img_link = extracted_data.get('image')
if img_link:
parsed_img_link = urllib.parse.urlparse(img_link)
# Append domain where relative img_link url is given
if not parsed_img_link.netloc:
parsed_url = urllib.parse.urlparse(link)
domain = '{url.scheme}://{url.netloc}/'.format(url=parsed_url)
img_link = urllib.parse.urljoin(domain, img_link)
img = markdown.util.etree.SubElement(container, "a")
img.set("style", "background-image: url(" + img_link + ")")
img.set("href", link)
img.set("target", "_blank")
img.set("class", "message_embed_image")
data_container = markdown.util.etree.SubElement(container, "div")
data_container.set("class", "data-container")
title = extracted_data.get('title')
if title:
title_elm = markdown.util.etree.SubElement(data_container, "div")
title_elm.set("class", "message_embed_title")
a = markdown.util.etree.SubElement(title_elm, "a")
a.set("href", link)
a.set("target", "_blank")
a.set("title", title)
a.text = title
description = extracted_data.get('description')
if description:
description_elm = markdown.util.etree.SubElement(data_container, "div")
description_elm.set("class", "message_embed_description")
description_elm.text = description
def add_vimeo_preview(root: Element, link: Text, extracted_data: Dict[Text, Any], vm_id: Text) -> None:
container = markdown.util.etree.SubElement(root, "div")
container.set("class", "vimeo-video message_inline_image")
img_link = extracted_data.get('image')
if img_link:
parsed_img_link = urllib.parse.urlparse(img_link)
# Append domain where relative img_link url is given
if not parsed_img_link.netloc:
parsed_url = urllib.parse.urlparse(link)
domain = '{url.scheme}://{url.netloc}/'.format(url=parsed_url)
img_link = urllib.parse.urljoin(domain, img_link)
anchor = markdown.util.etree.SubElement(container, "a")
anchor.set("href", link)
anchor.set("target", "_blank")
anchor.set("data-id", vm_id)
anchor.set("title", link)
img = markdown.util.etree.SubElement(anchor, "img")
img.set("src", img_link)
@cache_with_key(lambda tweet_id: tweet_id, cache_name="database", with_statsd_key="tweet_data")
def fetch_tweet_data(tweet_id: Text) -> Optional[Dict[Text, Any]]:
if settings.TEST_SUITE:
from . import testing_mocks
res = testing_mocks.twitter(tweet_id)
else:
creds = {
'consumer_key': settings.TWITTER_CONSUMER_KEY,
'consumer_secret': settings.TWITTER_CONSUMER_SECRET,
'access_token_key': settings.TWITTER_ACCESS_TOKEN_KEY,
'access_token_secret': settings.TWITTER_ACCESS_TOKEN_SECRET,
}
if not all(creds.values()):
return None
try:
api = twitter.Api(tweet_mode='extended', **creds)
# Sometimes Twitter hangs on responses. Timing out here
# will cause the Tweet to go through as-is with no inline
# preview, rather than having the message be rejected
# entirely. This timeout needs to be less than our overall
# formatting timeout.
tweet = timeout(3, api.GetStatus, tweet_id)
res = tweet.AsDict()
except AttributeError:
logging.error('Unable to load twitter api, you may have the wrong '
'library installed, see https://github.com/zulip/zulip/issues/86')
return None
except TimeoutExpired as e:
# We'd like to try again later and not cache the bad result,
# so we need to re-raise the exception (just as though
# we were being rate-limited)
raise
except twitter.TwitterError as e:
t = e.args[0]
if len(t) == 1 and ('code' in t[0]) and (t[0]['code'] == 34):
# Code 34 means that the message doesn't exist; return
# None so that we will cache the error
return None
elif len(t) == 1 and ('code' in t[0]) and (t[0]['code'] == 88 or
t[0]['code'] == 130):
# Code 88 means that we were rate-limited and 130
# means Twitter is having capacity issues; either way
# just raise the error so we don't cache None and will
# try again later.
raise
else:
# It's not clear what to do in cases of other errors,
# but for now it seems reasonable to log at error
# level (so that we get notified), but then cache the
# failure to proceed with our usual work
logging.error(traceback.format_exc())
return None
return res
HEAD_START_RE = re.compile('^head[ >]')
HEAD_END_RE = re.compile('^/head[ >]')
META_START_RE = re.compile('^meta[ >]')
META_END_RE = re.compile('^/meta[ >]')
def fetch_open_graph_image(url: Text) -> Optional[Dict[str, Any]]:
in_head = False
# HTML will auto close meta tags, when we start the next tag add
# a closing tag if it has not been closed yet.
last_closed = True
head = []
# TODO: What if response content is huge? Should we get headers first?
try:
content = requests.get(url, timeout=1).text
except Exception:
return None
# Extract the head and meta tags
# All meta tags are self closing, have no children or are closed
# automatically.
for part in content.split('<'):
if not in_head and HEAD_START_RE.match(part):
# Started the head node output it to have a document root
in_head = True
head.append('<head>')
elif in_head and HEAD_END_RE.match(part):
# Found the end of the head close any remaining tag then stop
# processing
in_head = False
if not last_closed:
last_closed = True
head.append('</meta>')
head.append('</head>')
break
elif in_head and META_START_RE.match(part):
# Found a meta node copy it
if not last_closed:
head.append('</meta>')
last_closed = True
head.append('<')
head.append(part)
if '/>' not in part:
last_closed = False
elif in_head and META_END_RE.match(part):
# End of a meta node just copy it to close the tag
head.append('<')
head.append(part)
last_closed = True
try:
doc = etree.fromstring(''.join(head))
except etree.ParseError:
return None
og_image = doc.find('meta[@property="og:image"]')
og_title = doc.find('meta[@property="og:title"]')
og_desc = doc.find('meta[@property="og:description"]')
title = None
desc = None
if og_image is not None:
image = og_image.get('content')
else:
return None
if og_title is not None:
title = og_title.get('content')
if og_desc is not None:
desc = og_desc.get('content')
return {'image': image, 'title': title, 'desc': desc}
def get_tweet_id(url: Text) -> Optional[Text]:
parsed_url = urllib.parse.urlparse(url)
if not (parsed_url.netloc == 'twitter.com' or parsed_url.netloc.endswith('.twitter.com')):
return None
to_match = parsed_url.path
# In old-style twitter.com/#!/wdaher/status/1231241234-style URLs,
# we need to look at the fragment instead
if parsed_url.path == '/' and len(parsed_url.fragment) > 5:
to_match = parsed_url.fragment
tweet_id_match = re.match(r'^!?/.*?/status(es)?/(?P<tweetid>\d{10,18})(/photo/[0-9])?/?$', to_match)
if not tweet_id_match:
return None
return tweet_id_match.group("tweetid")
class InlineHttpsProcessor(markdown.treeprocessors.Treeprocessor):
def run(self, root: Element) -> None:
# Get all URLs from the blob
found_imgs = walk_tree(root, lambda e: e if e.tag == "img" else None)
for img in found_imgs:
url = img.get("src")
if not url.startswith("http://"):
# Don't rewrite images on our own site (e.g. emoji).
continue
img.set("src", get_camo_url(url))
class BacktickPattern(markdown.inlinepatterns.Pattern):
""" Return a `<code>` element containing the matching text. """
def __init__(self, pattern: Text) -> None:
markdown.inlinepatterns.Pattern.__init__(self, pattern)
self.ESCAPED_BSLASH = '%s%s%s' % (markdown.util.STX, ord('\\'), markdown.util.ETX)
self.tag = 'code'
def handleMatch(self, m: Match[Text]) -> Union[Text, Element]:
if m.group(4):
el = markdown.util.etree.Element(self.tag)
# Modified to not strip whitespace
el.text = markdown.util.AtomicString(m.group(4))
return el
else:
return m.group(2).replace('\\\\', self.ESCAPED_BSLASH)
class InlineInterestingLinkProcessor(markdown.treeprocessors.Treeprocessor):
TWITTER_MAX_IMAGE_HEIGHT = 400
TWITTER_MAX_TO_PREVIEW = 3
INLINE_PREVIEW_LIMIT_PER_MESSAGE = 5
def __init__(self, md: markdown.Markdown, bugdown: 'Bugdown') -> None:
# Passing in bugdown for access to config to check if realm is zulip.com
self.bugdown = bugdown
markdown.treeprocessors.Treeprocessor.__init__(self, md)
def get_actual_image_url(self, url: Text) -> Text:
# Add specific per-site cases to convert image-preview urls to image urls.
# See https://github.com/zulip/zulip/issues/4658 for more information
parsed_url = urllib.parse.urlparse(url)
if (parsed_url.netloc == 'github.com' or parsed_url.netloc.endswith('.github.com')):
# https://github.com/zulip/zulip/blob/master/static/images/logo/zulip-icon-128x128.png ->
# https://raw.githubusercontent.com/zulip/zulip/master/static/images/logo/zulip-icon-128x128.png
split_path = parsed_url.path.split('/')
if len(split_path) > 3 and split_path[3] == "blob":
return urllib.parse.urljoin('https://raw.githubusercontent.com',
'/'.join(split_path[0:3] + split_path[4:]))
return url
def is_image(self, url: Text) -> bool:
if not image_preview_enabled_for_realm():
return False
parsed_url = urllib.parse.urlparse(url)
# List from http://support.google.com/chromeos/bin/answer.py?hl=en&answer=183093
for ext in [".bmp", ".gif", ".jpg", "jpeg", ".png", ".webp"]:
if parsed_url.path.lower().endswith(ext):
return True
return False
def dropbox_image(self, url: Text) -> Optional[Dict[str, Any]]:
# TODO: The returned Dict could possibly be a TypedDict in future.
parsed_url = urllib.parse.urlparse(url)
if (parsed_url.netloc == 'dropbox.com' or parsed_url.netloc.endswith('.dropbox.com')):
is_album = parsed_url.path.startswith('/sc/') or parsed_url.path.startswith('/photos/')
# Only allow preview Dropbox shared links
if not (parsed_url.path.startswith('/s/') or
parsed_url.path.startswith('/sh/') or
is_album):
return None
# Try to retrieve open graph protocol info for a preview
# This might be redundant right now for shared links for images.
# However, we might want to make use of title and description
# in the future. If the actual image is too big, we might also
# want to use the open graph image.
image_info = fetch_open_graph_image(url)
is_image = is_album or self.is_image(url)
# If it is from an album or not an actual image file,
# just use open graph image.
if is_album or not is_image:
# Failed to follow link to find an image preview so
# use placeholder image and guess filename
if image_info is None:
return None
image_info["is_image"] = is_image
return image_info
# Otherwise, try to retrieve the actual image.
# This is because open graph image from Dropbox may have padding
# and gifs do not work.
# TODO: What if image is huge? Should we get headers first?
if image_info is None:
image_info = dict()
image_info['is_image'] = True
parsed_url_list = list(parsed_url)
parsed_url_list[4] = "dl=1" # Replaces query
image_info["image"] = urllib.parse.urlunparse(parsed_url_list)
return image_info
return None
def youtube_id(self, url: Text) -> Optional[Text]:
if not image_preview_enabled_for_realm():
return None
# Youtube video id extraction regular expression from http://pastebin.com/KyKAFv1s
# If it matches, match.group(2) is the video id.
youtube_re = r'^((?:https?://)?(?:youtu\.be/|(?:\w+\.)?youtube(?:-nocookie)?\.com/)' + \
r'(?:(?:(?:v|embed)/)|(?:(?:watch(?:_popup)?(?:\.php)?)?(?:\?|#!?)(?:.+&)?v=)))' + \
r'?([0-9A-Za-z_-]+)(?(1).+)?$'
match = re.match(youtube_re, url)
if match is None:
return None
return match.group(2)
def youtube_image(self, url: Text) -> Optional[Text]:
yt_id = self.youtube_id(url)
if yt_id is not None:
return "https://i.ytimg.com/vi/%s/default.jpg" % (yt_id,)
return None
def vimeo_id(self, url: Text) -> Optional[Text]:
if not image_preview_enabled_for_realm():
return None
#(http|https)?:\/\/(www\.)?vimeo.com\/(?:channels\/(?:\w+\/)?|groups\/([^\/]*)\/videos\/|)(\d+)(?:|\/\?)
# If it matches, match.group('id') is the video id.
vimeo_re = r'^((http|https)?:\/\/(www\.)?vimeo.com\/' + \
r'(?:channels\/(?:\w+\/)?|groups\/' + \
r'([^\/]*)\/videos\/|)(\d+)(?:|\/\?))$'
match = re.match(vimeo_re, url)
if match is None:
return None
return match.group(5)
def vimeo_image(self, url: Text) -> Optional[Text]:
vm_id = self.vimeo_id(url)
if vm_id is not None:
return "http://i.vimeocdn.com/video/%s.jpg" % (vm_id,)
return None
def twitter_text(self, text: Text,
urls: List[Dict[Text, Text]],
user_mentions: List[Dict[Text, Any]],
media: List[Dict[Text, Any]]) -> Element:
"""
Use data from the twitter API to turn links, mentions and media into A
tags. Also convert unicode emojis to images.
This works by using the urls, user_mentions and media data from
the twitter API and searching for unicode emojis in the text using
`unicode_emoji_regex`.
The first step is finding the locations of the URLs, mentions, media and
emoji in the text. For each match we build a dictionary with type, the start
location, end location, the URL to link to, and the text(codepoint and title
in case of emojis) to be used in the link(image in case of emojis).
Next we sort the matches by start location. And for each we add the
text from the end of the last link to the start of the current link to
the output. The text needs to added to the text attribute of the first
node (the P tag) or the tail the last link created.
Finally we add any remaining text to the last node.
"""
to_process = [] # type: List[Dict[Text, Any]]
# Build dicts for URLs
for url_data in urls:
short_url = url_data["url"]
full_url = url_data["expanded_url"]
for match in re.finditer(re.escape(short_url), text, re.IGNORECASE):
to_process.append({
'type': 'url',
'start': match.start(),
'end': match.end(),
'url': short_url,
'text': full_url,
})
# Build dicts for mentions
for user_mention in user_mentions:
screen_name = user_mention['screen_name']
mention_string = '@' + screen_name
for match in re.finditer(re.escape(mention_string), text, re.IGNORECASE):
to_process.append({
'type': 'mention',
'start': match.start(),
'end': match.end(),
'url': 'https://twitter.com/' + urllib.parse.quote(screen_name),
'text': mention_string,
})
# Build dicts for media
for media_item in media:
short_url = media_item['url']
expanded_url = media_item['expanded_url']
for match in re.finditer(re.escape(short_url), text, re.IGNORECASE):
to_process.append({
'type': 'media',
'start': match.start(),
'end': match.end(),
'url': short_url,
'text': expanded_url,
})
# Build dicts for emojis
for match in re.finditer(unicode_emoji_regex, text, re.IGNORECASE):
orig_syntax = match.group('syntax')
codepoint = unicode_emoji_to_codepoint(orig_syntax)
if codepoint in codepoint_to_name:
display_string = ':' + codepoint_to_name[codepoint] + ':'
to_process.append({
'type': 'emoji',
'start': match.start(),
'end': match.end(),
'codepoint': codepoint,
'title': display_string,
})
to_process.sort(key=lambda x: x['start'])
p = current_node = markdown.util.etree.Element('p')
def set_text(text: Text) -> None:
"""
Helper to set the text or the tail of the current_node
"""
if current_node == p:
current_node.text = text
else:
current_node.tail = text
current_index = 0
for item in to_process:
# The text we want to link starts in already linked text skip it
if item['start'] < current_index:
continue
# Add text from the end of last link to the start of the current
# link
set_text(text[current_index:item['start']])
current_index = item['end']
if item['type'] != 'emoji':
current_node = elem = url_to_a(item['url'], item['text'])
else:
current_node = elem = make_emoji(item['codepoint'], item['title'])
p.append(elem)
# Add any unused text
set_text(text[current_index:])
return p
def twitter_link(self, url: Text) -> Optional[Element]:
tweet_id = get_tweet_id(url)
if tweet_id is None:
return None
try:
res = fetch_tweet_data(tweet_id)
if res is None:
return None
user = res['user'] # type: Dict[Text, Any]
tweet = markdown.util.etree.Element("div")
tweet.set("class", "twitter-tweet")
img_a = markdown.util.etree.SubElement(tweet, 'a')
img_a.set("href", url)
img_a.set("target", "_blank")
profile_img = markdown.util.etree.SubElement(img_a, 'img')
profile_img.set('class', 'twitter-avatar')
# For some reason, for, e.g. tweet 285072525413724161,
# python-twitter does not give us a
# profile_image_url_https, but instead puts that URL in
# profile_image_url. So use _https if available, but fall
# back gracefully.
image_url = user.get('profile_image_url_https', user['profile_image_url'])
profile_img.set('src', image_url)
text = html.unescape(res['full_text'])
urls = res.get('urls', [])
user_mentions = res.get('user_mentions', [])
media = res.get('media', []) # type: List[Dict[Text, Any]]
p = self.twitter_text(text, urls, user_mentions, media)
tweet.append(p)
span = markdown.util.etree.SubElement(tweet, 'span')
span.text = "- %s (@%s)" % (user['name'], user['screen_name'])
# Add image previews
for media_item in media:
# Only photos have a preview image
if media_item['type'] != 'photo':
continue
# Find the image size that is smaller than
# TWITTER_MAX_IMAGE_HEIGHT px tall or the smallest
size_name_tuples = list(media_item['sizes'].items())
size_name_tuples.sort(reverse=True,
key=lambda x: x[1]['h'])
for size_name, size in size_name_tuples:
if size['h'] < self.TWITTER_MAX_IMAGE_HEIGHT:
break
media_url = '%s:%s' % (media_item['media_url_https'], size_name)
img_div = markdown.util.etree.SubElement(tweet, 'div')
img_div.set('class', 'twitter-image')
img_a = markdown.util.etree.SubElement(img_div, 'a')
img_a.set('href', media_item['url'])
img_a.set('target', '_blank')
img_a.set('title', media_item['url'])
img = markdown.util.etree.SubElement(img_a, 'img')
img.set('src', media_url)
return tweet
except Exception:
# We put this in its own try-except because it requires external
# connectivity. If Twitter flakes out, we don't want to not-render
# the entire message; we just want to not show the Twitter preview.
logging.warning(traceback.format_exc())
return None
def get_url_data(self, e: Element) -> Optional[Tuple[Text, Text]]:
if e.tag == "a":
if e.text is not None:
return (e.get("href"), e.text)
return (e.get("href"), e.get("href"))
return None
def handle_image_inlining(self, root: Element, found_url: ResultWithFamily) -> None:
grandparent = found_url.family.grandparent
parent = found_url.family.parent
ahref_element = found_url.family.child
(url, text) = found_url.result
actual_url = self.get_actual_image_url(url)
# url != text usually implies a named link, which we opt not to remove
url_eq_text = (url == text)
if parent.tag == 'li':
add_a(parent, self.get_actual_image_url(url), url, title=text)
if not parent.text and not ahref_element.tail and url_eq_text:
parent.remove(ahref_element)
elif parent.tag == 'p':
parent_index = None
for index, uncle in enumerate(grandparent.getchildren()):
if uncle is parent:
parent_index = index
break
if parent_index is not None:
ins_index = self.find_proper_insertion_index(grandparent, parent, parent_index)
add_a(grandparent, actual_url, url, title=text, insertion_index=ins_index)
else:
# We're not inserting after parent, since parent not found.
# Append to end of list of grandparent's children as normal
add_a(grandparent, actual_url, url, title=text)
# If link is alone in a paragraph, delete paragraph containing it
if (len(parent.getchildren()) == 1 and
(not parent.text or parent.text == "\n") and
not ahref_element.tail and
url_eq_text):
grandparent.remove(parent)
else:
# If none of the above criteria match, fall back to old behavior
add_a(root, actual_url, url, title=text)
def find_proper_insertion_index(self, grandparent: Element, parent: Element,
parent_index_in_grandparent: int) -> int:
# If there are several inline images from same paragraph, ensure that
# they are in correct (and not opposite) order by inserting after last
# inline image from paragraph 'parent'
uncles = grandparent.getchildren()
parent_links = [ele.attrib['href'] for ele in parent.iter(tag="a")]
insertion_index = parent_index_in_grandparent
while True:
insertion_index += 1
if insertion_index >= len(uncles):
return insertion_index
uncle = uncles[insertion_index]
inline_image_classes = ['message_inline_image', 'message_inline_ref']
if (
uncle.tag != 'div' or
'class' not in uncle.keys() or
uncle.attrib['class'] not in inline_image_classes
):
return insertion_index
uncle_link = list(uncle.iter(tag="a"))[0].attrib['href']
if uncle_link not in parent_links:
return insertion_index
def is_absolute_url(self, url: Text) -> bool:
return bool(urllib.parse.urlparse(url).netloc)
def run(self, root: Element) -> None:
# Get all URLs from the blob
found_urls = walk_tree_with_family(root, self.get_url_data)
if len(found_urls) == 0 or len(found_urls) > self.INLINE_PREVIEW_LIMIT_PER_MESSAGE:
return
rendered_tweet_count = 0
for found_url in found_urls:
(url, text) = found_url.result
if not self.is_absolute_url(url):
if self.is_image(url):
self.handle_image_inlining(root, found_url)
# We don't have a strong use case for doing url preview for relative links.
continue
dropbox_image = self.dropbox_image(url)
if dropbox_image is not None:
class_attr = "message_inline_ref"
is_image = dropbox_image["is_image"]
if is_image:
class_attr = "message_inline_image"
# Not making use of title and description of images
add_a(root, dropbox_image['image'], url,
title=dropbox_image.get('title', ""),
desc=dropbox_image.get('desc', ""),
class_attr=class_attr)
continue
if self.is_image(url):
self.handle_image_inlining(root, found_url)
continue
if get_tweet_id(url) is not None:
if rendered_tweet_count >= self.TWITTER_MAX_TO_PREVIEW:
# Only render at most TWITTER_MAX_TO_PREVIEW tweets per message
continue
twitter_data = self.twitter_link(url)
if twitter_data is None:
# This link is not actually a tweet known to twitter
continue
rendered_tweet_count += 1
div = markdown.util.etree.SubElement(root, "div")
div.set("class", "inline-preview-twitter")
div.insert(0, twitter_data)
continue
youtube = self.youtube_image(url)
if youtube is not None:
yt_id = self.youtube_id(url)
add_a(root, youtube, url, None, None, "youtube-video message_inline_image", yt_id)
continue
global db_data
if db_data and db_data['sent_by_bot']:
continue
if current_message is None or not url_embed_preview_enabled_for_realm(current_message):
continue
try:
extracted_data = link_preview.link_embed_data_from_cache(url)
except NotFoundInCache:
current_message.links_for_preview.add(url)
continue
vimeo = self.vimeo_image(url)
if extracted_data:
if vimeo is not None:
vm_id = self.vimeo_id(url)
add_vimeo_preview(root, url, extracted_data, vm_id)
continue
else:
add_embed(root, url, extracted_data)
class Avatar(markdown.inlinepatterns.Pattern):
def handleMatch(self, match: Match[Text]) -> Optional[Element]:
img = markdown.util.etree.Element('img')
email_address = match.group('email')
email = email_address.strip().lower()
profile_id = None
if db_data is not None:
user_dict = db_data['email_info'].get(email)
if user_dict is not None:
profile_id = user_dict['id']
img.set('class', 'message_body_gravatar')
img.set('src', '/avatar/{0}?s=30'.format(profile_id or email))
img.set('title', email)
img.set('alt', email)
return img
def possible_avatar_emails(content: Text) -> Set[Text]:
emails = set()
for regex in [AVATAR_REGEX, GRAVATAR_REGEX]:
matches = re.findall(regex, content)
for email in matches:
if email:
emails.add(email)
return emails
path_to_name_to_codepoint = os.path.join(settings.STATIC_ROOT,
"generated", "emoji", "name_to_codepoint.json")
with open(path_to_name_to_codepoint) as name_to_codepoint_file:
name_to_codepoint = ujson.load(name_to_codepoint_file)
path_to_codepoint_to_name = os.path.join(settings.STATIC_ROOT,
"generated", "emoji", "codepoint_to_name.json")
with open(path_to_codepoint_to_name) as codepoint_to_name_file:
codepoint_to_name = ujson.load(codepoint_to_name_file)
# All of our emojis (non-ZWJ sequences) belong to one of these unicode blocks:
# \U0001f100-\U0001f1ff - Enclosed Alphanumeric Supplement
# \U0001f200-\U0001f2ff - Enclosed Ideographic Supplement
# \U0001f300-\U0001f5ff - Miscellaneous Symbols and Pictographs
# \U0001f600-\U0001f64f - Emoticons (Emoji)
# \U0001f680-\U0001f6ff - Transport and Map Symbols
# \U0001f900-\U0001f9ff - Supplemental Symbols and Pictographs
# \u2000-\u206f - General Punctuation
# \u2300-\u23ff - Miscellaneous Technical
# \u2400-\u243f - Control Pictures
# \u2440-\u245f - Optical Character Recognition
# \u2460-\u24ff - Enclosed Alphanumerics
# \u2500-\u257f - Box Drawing
# \u2580-\u259f - Block Elements
# \u25a0-\u25ff - Geometric Shapes
# \u2600-\u26ff - Miscellaneous Symbols
# \u2700-\u27bf - Dingbats
# \u2900-\u297f - Supplemental Arrows-B
# \u2b00-\u2bff - Miscellaneous Symbols and Arrows
# \u3000-\u303f - CJK Symbols and Punctuation
# \u3200-\u32ff - Enclosed CJK Letters and Months
unicode_emoji_regex = '(?P<syntax>['\
'\U0001F100-\U0001F64F' \
'\U0001F680-\U0001F6FF' \
'\U0001F900-\U0001F9FF' \
'\u2000-\u206F' \
'\u2300-\u27BF' \
'\u2900-\u297F' \
'\u2B00-\u2BFF' \
'\u3000-\u303F' \
'\u3200-\u32FF' \
'])'
# The equivalent JS regex is \ud83c[\udd00-\udfff]|\ud83d[\udc00-\ude4f]|\ud83d[\ude80-\udeff]|
# \ud83e[\udd00-\uddff]|[\u2000-\u206f]|[\u2300-\u27bf]|[\u2b00-\u2bff]|[\u3000-\u303f]|
# [\u3200-\u32ff]. See below comments for explanation. The JS regex is used by marked.js for
# frontend unicode emoji processing.
# The JS regex \ud83c[\udd00-\udfff]|\ud83d[\udc00-\ude4f] represents \U0001f100-\U0001f64f
# The JS regex \ud83d[\ude80-\udeff] represents \U0001f680-\U0001f6ff
# The JS regex \ud83e[\udd00-\uddff] represents \U0001f900-\U0001f9ff
# The JS regex [\u2000-\u206f] represents \u2000-\u206f
# The JS regex [\u2300-\u27bf] represents \u2300-\u27bf
# Similarly other JS regexes can be mapped to the respective unicode blocks.
# For more information, please refer to the following article:
# http://crocodillon.com/blog/parsing-emoji-unicode-in-javascript
def make_emoji(codepoint: Text, display_string: Text) -> Element:
# Replace underscore in emoji's title with space
title = display_string[1:-1].replace("_", " ")
span = markdown.util.etree.Element('span')
span.set('class', 'emoji emoji-%s' % (codepoint,))
span.set('title', title)
span.text = display_string
return span
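# Illustrative example (comments only, not executed): make_emoji('1f604', ':smile:')
# produces a <span class="emoji emoji-1f604"> whose title is 'smile' (colons stripped,
# underscores turned into spaces) and whose text stays ':smile:'.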
def make_realm_emoji(src: Text, display_string: Text) -> Element:
elt = markdown.util.etree.Element('img')
elt.set('src', src)
elt.set('class', 'emoji')
elt.set("alt", display_string)
elt.set("title", display_string[1:-1].replace("_", " "))
return elt
def unicode_emoji_to_codepoint(unicode_emoji: Text) -> Text:
codepoint = hex(ord(unicode_emoji))[2:]
# Unicode codepoints are a minimum of length 4, padded
# with zeroes if the length is less than four.
while len(codepoint) < 4:
codepoint = '0' + codepoint
return codepoint
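# Illustrative examples (comments only, not executed):
#   unicode_emoji_to_codepoint('\u2764')     -> '2764'   (already 4 hex digits)
#   unicode_emoji_to_codepoint('\U0001F604') -> '1f604'  (5 digits, no padding needed)
#   a short codepoint such as U+00A9 comes back zero-padded as '00a9'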
class EmoticonTranslation(markdown.inlinepatterns.Pattern):
""" Translates emoticons like `:)` into emoji like `:smile:`. """
def handleMatch(self, match: Match[Text]) -> Optional[Element]:
# Only translate emoticons if db_data is available and the user has enabled emoticon translation.
if db_data is None or not db_data['translate_emoticons']:
return None
emoticon = match.group('emoticon')
translated = translate_emoticons(emoticon)
name = translated[1:-1]
return make_emoji(name_to_codepoint[name], translated)
class UnicodeEmoji(markdown.inlinepatterns.Pattern):
def handleMatch(self, match: Match[Text]) -> Optional[Element]:
orig_syntax = match.group('syntax')
codepoint = unicode_emoji_to_codepoint(orig_syntax)
if codepoint in codepoint_to_name:
display_string = ':' + codepoint_to_name[codepoint] + ':'
return make_emoji(codepoint, display_string)
else:
return None
class Emoji(markdown.inlinepatterns.Pattern):
def handleMatch(self, match: Match[Text]) -> Optional[Element]:
orig_syntax = match.group("syntax")
name = orig_syntax[1:-1]
active_realm_emoji = {} # type: Dict[Text, Dict[str, Text]]
if db_data is not None:
active_realm_emoji = db_data['active_realm_emoji']
if current_message and name in active_realm_emoji:
return make_realm_emoji(active_realm_emoji[name]['source_url'], orig_syntax)
elif name == 'zulip':
return make_realm_emoji('/static/generated/emoji/images/emoji/unicode/zulip.png', orig_syntax)
elif name in name_to_codepoint:
return make_emoji(name_to_codepoint[name], orig_syntax)
else:
return None
def content_has_emoji_syntax(content: Text) -> bool:
return re.search(EMOJI_REGEX, content) is not None
class ModalLink(markdown.inlinepatterns.Pattern):
"""
A pattern that allows including in-app modal links in messages.
"""
def handleMatch(self, match: Match[Text]) -> Element:
relative_url = match.group('relative_url')
text = match.group('text')
a_tag = markdown.util.etree.Element("a")
a_tag.set("href", relative_url)
a_tag.set("title", relative_url)
a_tag.text = text
return a_tag
class Tex(markdown.inlinepatterns.Pattern):
def handleMatch(self, match: Match[Text]) -> Element:
rendered = render_tex(match.group('body'), is_inline=True)
if rendered is not None:
return etree.fromstring(rendered.encode('utf-8'))
else: # Something went wrong while rendering
span = markdown.util.etree.Element('span')
span.set('class', 'tex-error')
span.text = '$$' + match.group('body') + '$$'
return span
upload_title_re = re.compile("^(https?://[^/]*)?(/user_uploads/\\d+)(/[^/]*)?/[^/]*/(?P<filename>[^/]*)$")
def url_filename(url: Text) -> Text:
"""Extract the filename if a URL is an uploaded file, or return the original URL"""
match = upload_title_re.match(url)
if match:
return match.group('filename')
else:
return url
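# Illustrative examples (comments only, not executed; hostnames are hypothetical):
#   url_filename('https://example.com/user_uploads/2/ab/cd/screenshot.png') -> 'screenshot.png'
#   url_filename('https://example.com/some/other/page') -> returned unchanged (no /user_uploads/ prefix)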
def fixup_link(link: markdown.util.etree.Element, target_blank: bool=True) -> None:
"""Set certain attributes we want on every link."""
if target_blank:
link.set('target', '_blank')
link.set('title', url_filename(link.get('href')))
def sanitize_url(url: Text) -> Optional[Text]:
"""
Sanitize a url against xss attacks.
See the docstring on markdown.inlinepatterns.LinkPattern.sanitize_url.
"""
try:
parts = urllib.parse.urlparse(url.replace(' ', '%20'))
scheme, netloc, path, params, query, fragment = parts
except ValueError:
# Bad url - so bad it couldn't be parsed.
return ''
# If there is no scheme or netloc and there is a '@' in the path,
# treat it as a mailto: and set the appropriate scheme
if scheme == '' and netloc == '' and '@' in path:
scheme = 'mailto'
elif scheme == '' and netloc == '' and len(path) > 0 and path[0] == '/':
# Allow domain-relative links
return urllib.parse.urlunparse(('', '', path, params, query, fragment))
elif (scheme, netloc, path, params, query) == ('', '', '', '', '') and len(fragment) > 0:
# Allow fragment links
return urllib.parse.urlunparse(('', '', '', '', '', fragment))
# Zulip modification: If scheme is not specified, assume http://
# We re-enter sanitize_url because netloc etc. need to be re-parsed.
if not scheme:
return sanitize_url('http://' + url)
locless_schemes = ['mailto', 'news', 'file', 'bitcoin']
if netloc == '' and scheme not in locless_schemes:
# This fails regardless of anything else.
# Return immediately to save additional processing
return None
# Upstream code will accept a URL like javascript://foo because it
# appears to have a netloc. Additionally there are plenty of other
# schemes that do weird things like launch external programs. To be
# on the safe side, we whitelist the scheme.
if scheme not in ('http', 'https', 'ftp', 'mailto', 'file', 'bitcoin'):
return None
# Upstream code scans path, parameters, and query for colon characters
# because
#
# some aliases [for javascript:] will appear to urllib.parse to have
# no scheme. On top of that relative links (i.e.: "foo/bar.html")
# have no scheme.
#
# We already converted an empty scheme to http:// above, so we skip
# the colon check, which would also forbid a lot of legitimate URLs.
# Url passes all tests. Return url as-is.
return urllib.parse.urlunparse((scheme, netloc, path, params, query, fragment))
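# Illustrative behavior (comments only, not executed; URLs are hypothetical):
#   sanitize_url('javascript://example.com/evil') -> None (scheme not whitelisted)
#   sanitize_url('example.com/docs')              -> 'http://example.com/docs' (scheme defaulted, then re-sanitized)
#   sanitize_url('/user_uploads/img.png')         -> '/user_uploads/img.png' (domain-relative link allowed)
#   sanitize_url('someone@example.com')           -> 'mailto:someone@example.com'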
def url_to_a(url: Text, text: Optional[Text]=None) -> Union[Element, Text]:
a = markdown.util.etree.Element('a')
href = sanitize_url(url)
target_blank = True
if href is None:
# Rejected by sanitize_url; render it as plain text.
return url
if text is None:
text = markdown.util.AtomicString(url)
href = rewrite_local_links_to_relative(href)
target_blank = not href.startswith("#narrow") and not href.startswith('mailto:')
a.set('href', href)
a.text = text
fixup_link(a, target_blank)
return a
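# Illustrative behavior (comments only, not executed; assumes rewrite_local_links_to_relative
# leaves external URLs untouched):
#   url_to_a('https://example.com') -> an <a> element with href='https://example.com',
#       target='_blank' and title set via fixup_link()
#   url_to_a('javascript:alert(1)') -> the raw string, since sanitize_url() rejects the scheme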
class VerbosePattern(markdown.inlinepatterns.Pattern):
def __init__(self, pattern: Text) -> None:
markdown.inlinepatterns.Pattern.__init__(self, ' ')
# HACK: we just had python-markdown compile an empty regex.
# Now replace with the real regex compiled with the flags we want.
self.pattern = pattern
self.compiled_re = re.compile("^(.*?)%s(.*?)$" % pattern,
re.DOTALL | re.UNICODE | re.VERBOSE)
class AutoLink(VerbosePattern):
def handleMatch(self, match: Match[Text]) -> ElementStringNone:
url = match.group('url')
return url_to_a(url)
class UListProcessor(markdown.blockprocessors.UListProcessor):
""" Process unordered list blocks.
Based on markdown.blockprocessors.UListProcessor, but does not accept
'+' or '-' as a bullet character."""
TAG = 'ul'
RE = re.compile('^[ ]{0,3}[*][ ]+(.*)')
def __init__(self, parser: Any) -> None:
# HACK: Set the tab length to 2 just for the initialization of
# this class, so that bulleted lists (and only bulleted lists)
# work off 2-space indentation.
parser.markdown.tab_length = 2
super().__init__(parser)
parser.markdown.tab_length = 4
class ListIndentProcessor(markdown.blockprocessors.ListIndentProcessor):
""" Process unordered list blocks.
Based on markdown.blockprocessors.ListIndentProcessor, but with 2-space indent
"""
def __init__(self, parser: Any) -> None:
# HACK: Set the tab length to 2 just for the initialization of
# this class, so that bulleted lists (and only bulleted lists)
# work off 2-space indentation.
parser.markdown.tab_length = 2
super().__init__(parser)
parser.markdown.tab_length = 4
class BugdownUListPreprocessor(markdown.preprocessors.Preprocessor):
""" Allows unordered list blocks that come directly after a
paragraph to be rendered as an unordered list
Detects paragraphs that have a matching list item that comes
directly after a line of text, and inserts a newline between
them to satisfy Markdown"""
LI_RE = re.compile('^[ ]{0,3}[*][ ]+(.*)', re.MULTILINE)
HANGING_ULIST_RE = re.compile('^.+\\n([ ]{0,3}[*][ ]+.*)', re.MULTILINE)
def run(self, lines: List[Text]) -> List[Text]:
""" Insert a newline between a paragraph and ulist if missing """
inserts = 0
fence = None
copy = lines[:]
for i in range(len(lines) - 1):
# Ignore anything that is inside a fenced code block
m = FENCE_RE.match(lines[i])
if not fence and m:
fence = m.group('fence')
elif fence and m and fence == m.group('fence'):
fence = None
# If we're not in a fenced block and we detect an upcoming list
# hanging off a paragraph, add a newline
if (not fence and lines[i] and
self.LI_RE.match(lines[i+1]) and
not self.LI_RE.match(lines[i])):
copy.insert(i+inserts+1, '')
inserts += 1
return copy
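# Illustrative example (comments only, not executed): given the lines
#   ['Some text', '* first item', '* second item']
# run() inserts a blank line after 'Some text' so python-markdown treats the
# bullets as a list:
#   ['Some text', '', '* first item', '* second item']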
class AutoNumberOListPreprocessor(markdown.preprocessors.Preprocessor):
""" Finds a sequence of lines numbered by the same number"""
RE = re.compile(r'^([ ]*)(\d+)\.[ ]+(.*)')
TAB_LENGTH = 2
def run(self, lines: List[Text]) -> List[Text]:
new_lines = [] # type: List[Text]
current_list = [] # type: List[Match[Text]]
current_indent = 0
for line in lines:
m = self.RE.match(line)
# Remember if this line is a continuation of already started list
is_next_item = (m and current_list
and current_indent == len(m.group(1)) // self.TAB_LENGTH)
if not is_next_item:
# There are no more items in the list we were processing
new_lines.extend(self.renumber(current_list))
current_list = []
if not m:
# Ordinary line
new_lines.append(line)
elif is_next_item:
# Another list item
current_list.append(m)
else:
# First list item
current_list = [m]
current_indent = len(m.group(1)) // self.TAB_LENGTH
new_lines.extend(self.renumber(current_list))
return new_lines
def renumber(self, mlist: List[Match[Text]]) -> List[Text]:
if not mlist:
return []
start_number = int(mlist[0].group(2))
# Change numbers only if every one is the same
change_numbers = True
for m in mlist:
if int(m.group(2)) != start_number:
change_numbers = False
break
lines = [] # type: List[Text]
counter = start_number
for m in mlist:
number = str(counter) if change_numbers else m.group(2)
lines.append('%s%s. %s' % (m.group(1), number, m.group(3)))
counter += 1
return lines
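# Illustrative example (comments only, not executed):
#   ['1. foo', '1. bar', '1. baz'] -> ['1. foo', '2. bar', '3. baz']
#   ['1. foo', '3. bar']           -> unchanged, because the numbers are not all the same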
# Based on markdown.inlinepatterns.LinkPattern
class LinkPattern(markdown.inlinepatterns.Pattern):
""" Return a link element from the given match. """
def handleMatch(self, m: Match[Text]) -> Optional[Element]:
href = m.group(9)
if not href:
return None
if href[0] == "<":
href = href[1:-1]
href = sanitize_url(self.unescape(href.strip()))
if href is None:
return None
href = rewrite_local_links_to_relative(href)
el = markdown.util.etree.Element('a')
el.text = m.group(2)
el.set('href', href)
fixup_link(el, target_blank=(href[:1] != '#'))
return el
def prepare_realm_pattern(source: Text) -> Text:
""" Augment a realm filter so it only matches after start-of-string,
whitespace, or opening delimiters, won't match if there are word
characters directly after, and saves what was matched as "name". """
return r"""(?<![^\s'"\(,:<])(?P<name>""" + source + ')(?!\w)'
# Given a regular expression pattern, linkifies groups that match it
# using the provided format string to construct the URL.
class RealmFilterPattern(markdown.inlinepatterns.Pattern):
""" Applied a given realm filter to the input """
def __init__(self, source_pattern: Text,
format_string: Text,
markdown_instance: Optional[markdown.Markdown]=None) -> None:
self.pattern = prepare_realm_pattern(source_pattern)
self.format_string = format_string
markdown.inlinepatterns.Pattern.__init__(self, self.pattern, markdown_instance)
def handleMatch(self, m: Match[Text]) -> Union[Element, Text]:
return url_to_a(self.format_string % m.groupdict(),
m.group("name"))
class UserMentionPattern(markdown.inlinepatterns.Pattern):
def handleMatch(self, m: Match[Text]) -> Optional[Element]:
match = m.group(2)
if current_message and db_data is not None:
if match.startswith("**") and match.endswith("**"):
name = match[2:-2]
else:
return None
wildcard = mention.user_mention_matches_wildcard(name)
user = db_data['mention_data'].get_user(name)
if wildcard:
current_message.mentions_wildcard = True
user_id = "*"
elif user:
current_message.mentions_user_ids.add(user['id'])
name = user['full_name']
user_id = str(user['id'])
else:
# Don't highlight @mentions that don't refer to a valid user
return None
el = markdown.util.etree.Element("span")
el.set('class', 'user-mention')
el.set('data-user-id', user_id)
el.text = "@%s" % (name,)
return el
return None
class UserGroupMentionPattern(markdown.inlinepatterns.Pattern):
def handleMatch(self, m: Match[Text]) -> Optional[Element]:
match = m.group(2)
if current_message and db_data is not None:
name = extract_user_group(match)
user_group = db_data['mention_data'].get_user_group(name)
if user_group:
current_message.mentions_user_group_ids.add(user_group.id)
name = user_group.name
user_group_id = str(user_group.id)
else:
# Don't highlight @-mentions that don't refer to a valid user
# group.
return None
el = markdown.util.etree.Element("span")
el.set('class', 'user-group-mention')
el.set('data-user-group-id', user_group_id)
el.text = "@%s" % (name,)
return el
return None
class StreamPattern(VerbosePattern):
def find_stream_by_name(self, name: Match[Text]) -> Optional[Dict[str, Any]]:
if db_data is None:
return None
stream = db_data['stream_names'].get(name)
return stream
def handleMatch(self, m: Match[Text]) -> Optional[Element]:
name = m.group('stream_name')
if current_message:
stream = self.find_stream_by_name(name)
if stream is None:
return None
el = markdown.util.etree.Element('a')
el.set('class', 'stream')
el.set('data-stream-id', str(stream['id']))
# TODO: We should quite possibly not be specifying the
# href here and instead having the browser auto-add the
# href when it processes a message with one of these, to
# provide more clarity to API clients.
stream_url = encode_stream(stream['id'], name)
el.set('href', '/#narrow/stream/{stream_url}'.format(stream_url=stream_url))
el.text = '#{stream_name}'.format(stream_name=name)
return el
return None
def possible_linked_stream_names(content: Text) -> Set[Text]:
matches = re.findall(STREAM_LINK_REGEX, content, re.VERBOSE)
return set(matches)
class AlertWordsNotificationProcessor(markdown.preprocessors.Preprocessor):
def run(self, lines: Iterable[Text]) -> Iterable[Text]:
if current_message and db_data is not None:
# We check for alert words here, the set of which are
# dependent on which users may see this message.
#
# Our caller passes in the list of possible_words. We
# don't do any special rendering; we just append the alert words
# we find to the set current_message.alert_words.
realm_words = db_data['possible_words']
content = '\n'.join(lines).lower()
allowed_before_punctuation = "|".join([r'\s', '^', r'[\(\".,\';\[\*`>]'])
allowed_after_punctuation = "|".join([r'\s', '$', r'[\)\"\?:.,\';\]!\*`]'])
for word in realm_words:
escaped = re.escape(word.lower())
match_re = re.compile('(?:%s)%s(?:%s)' %
(allowed_before_punctuation,
escaped,
allowed_after_punctuation))
if re.search(match_re, content):
current_message.alert_words.add(word)
return lines
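# Illustrative example (comments only, not executed; the alert word is hypothetical):
# with realm word 'deploy', the content 'please deploy now!' matches (whitespace on both
# sides), while 'redeploying' does not, since the word is not delimited by whitespace or
# the allowed punctuation.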
# This prevents realm_filters from running on the content of a
# Markdown link, breaking up the link. This is a monkey-patch, but it
# might be worth sending a version of this change upstream.
class AtomicLinkPattern(LinkPattern):
def handleMatch(self, m: Match[Text]) -> Optional[Element]:
ret = LinkPattern.handleMatch(self, m)
if ret is None:
return None
if not isinstance(ret, str):
ret.text = markdown.util.AtomicString(ret.text)
return ret
# These are used as keys ("realm_filters_keys") to md_engines and the respective
# realm filter caches
DEFAULT_BUGDOWN_KEY = -1
ZEPHYR_MIRROR_BUGDOWN_KEY = -2
class Bugdown(markdown.Extension):
def __init__(self, *args: Any, **kwargs: Union[bool, int, List[Any]]) -> None:
# define default configs
self.config = {
"realm_filters": [kwargs['realm_filters'],
"Realm-specific filters for realm_filters_key %s" % (kwargs['realm'],)],
"realm": [kwargs['realm'], "Realm id"],
"code_block_processor_disabled": [kwargs['code_block_processor_disabled'],
"Disabled for email gateway"]
}
super().__init__(*args, **kwargs)
def extendMarkdown(self, md: markdown.Markdown, md_globals: Dict[str, Any]) -> None:
del md.preprocessors['reference']
if self.getConfig('code_block_processor_disabled'):
del md.parser.blockprocessors['code']
for k in ('image_link', 'image_reference', 'automail',
'autolink', 'link', 'reference', 'short_reference',
'escape', 'strong_em', 'emphasis', 'emphasis2',
'linebreak', 'strong', 'backtick'):
del md.inlinePatterns[k]
try:
# linebreak2 was removed upstream in version 3.2.1, so
# don't throw an error if it is not there
del md.inlinePatterns['linebreak2']
except Exception:
pass
md.preprocessors.add("custom_text_notifications", AlertWordsNotificationProcessor(md), "_end")
# Inline code block without whitespace stripping
md.inlinePatterns.add(
"backtick",
BacktickPattern(r'(?:(?<!\\)((?:\\{2})+)(?=`+)|(?<!\\)(`+)(.+?)(?<!`)\3(?!`))'),
"_begin")
md.inlinePatterns.add(
'strong_em',
markdown.inlinepatterns.DoubleTagPattern(
r'(\*\*\*)(?!\s+)([^\*^\n]+)(?<!\s)\*\*\*', 'strong,em'),
'>backtick')
# Custom bold syntax: **foo** but not __foo__
md.inlinePatterns.add('strong',
markdown.inlinepatterns.SimpleTagPattern(r'(\*\*)([^\n]+?)\2', 'strong'),
'>not_strong')
# Custom strikethrough syntax: ~~foo~~
md.inlinePatterns.add('del',
markdown.inlinepatterns.SimpleTagPattern(
r'(?<!~)(\~\~)([^~\n]+?)(\~\~)(?!~)', 'del'), '>strong')
# Text inside ** must start and end with a word character
# it need for things like "const char *x = (char *)y"
md.inlinePatterns.add(
'emphasis',
markdown.inlinepatterns.SimpleTagPattern(r'(\*)(?!\s+)([^\*^\n]+)(?<!\s)\*', 'em'),
'>strong')
for k in ('hashheader', 'setextheader', 'olist', 'ulist', 'indent'):
del md.parser.blockprocessors[k]
md.parser.blockprocessors.add('ulist', UListProcessor(md.parser), '>hr')
md.parser.blockprocessors.add('indent', ListIndentProcessor(md.parser), '<ulist')
# Original regex for blockquote is RE = re.compile(r'(^|\n)[ ]{0,3}>[ ]?(.*)')
md.parser.blockprocessors['quote'].RE = re.compile(
r'(^|\n)(?!(?:[ ]{0,3}>\s*(?:$|\n))*(?:$|\n))'
r'[ ]{0,3}>[ ]?(.*)')
# Note that !gravatar syntax should be deprecated long term.
md.inlinePatterns.add('avatar', Avatar(AVATAR_REGEX), '>backtick')
md.inlinePatterns.add('gravatar', Avatar(GRAVATAR_REGEX), '>backtick')
md.inlinePatterns.add(
'modal_link',
ModalLink(r'!modal_link\((?P<relative_url>[^)]*), (?P<text>[^)]*)\)'),
'>avatar')
md.inlinePatterns.add('usermention', UserMentionPattern(mention.find_mentions), '>backtick')
md.inlinePatterns.add('usergroupmention',
UserGroupMentionPattern(mention.user_group_mentions),
'>backtick')
md.inlinePatterns.add('stream', StreamPattern(STREAM_LINK_REGEX), '>backtick')
md.inlinePatterns.add(
'tex',
Tex(r'\B(?<!\$)\$\$(?P<body>[^\n_$](\\\$|[^$\n])*)\$\$(?!\$)\B'),
'>backtick')
md.inlinePatterns.add('emoji', Emoji(EMOJI_REGEX), '_end')
md.inlinePatterns.add('translate_emoticons', EmoticonTranslation(emoticon_regex), '>emoji')
md.inlinePatterns.add('unicodeemoji', UnicodeEmoji(unicode_emoji_regex), '_end')
md.inlinePatterns.add('link', AtomicLinkPattern(markdown.inlinepatterns.LINK_RE, md), '>avatar')
for (pattern, format_string, id) in self.getConfig("realm_filters"):
md.inlinePatterns.add('realm_filters/%s' % (pattern,),
RealmFilterPattern(pattern, format_string), '>link')
# A link starts at a word boundary, and ends at space, punctuation, or end-of-input.
#
# We detect a url either by the `https?://` or by building around the TLD.
# In lieu of having a recursive regex (which python doesn't support) to match
# arbitrary numbers of nested matching parenthesis, we manually build a regexp that
# can match up to six levels of nested parentheses.
# The inner_paren_contents chunk matches the innermost non-parenthesis-holding text,
# and the paren_group matches text with, optionally, a matching set of parens
inner_paren_contents = r"[^\s()\"]*"
paren_group = r"""
[^\s()\"]*? # Containing characters that won't end the URL
(?: \( %s \) # and more characters in matched parens
[^\s()\"]*? # followed by more characters
)* # zero-or-more sets of paired parens
"""
nested_paren_chunk = paren_group
for i in range(6):
nested_paren_chunk = nested_paren_chunk % (paren_group,)
nested_paren_chunk = nested_paren_chunk % (inner_paren_contents,)
tlds = '|'.join(list_of_tlds())
link_regex = r"""
(?<![^\s'"\(,:<]) # Start after whitespace or specified chars
# (Double-negative lookbehind to allow start-of-string)
(?P<url> # Main group
(?:(?: # Domain part
https?://[\w.:@-]+? # If it has a protocol, anything goes.
|(?: # Or, if not, be more strict to avoid false-positives
(?:[\w-]+\.)+ # One or more domain components, separated by dots
(?:%s) # TLDs (filled in via format from tlds-alpha-by-domain.txt)
)
)
(?:/ # A path, beginning with /
%s # zero-to-6 sets of paired parens
)?) # Path is optional
| (?:[\w.-]+\@[\w.-]+\.[\w]+) # Email is separate, since it can't have a path
%s # File links start with file:///; enable by setting ENABLE_FILE_LINKS=True
| (?:bitcoin:[13][a-km-zA-HJ-NP-Z1-9]{25,34}) # Bitcoin address pattern, see https://mokagio.github.io/tech-journal/2014/11/21/regex-bitcoin.html
)
(?= # URL must be followed by (not included in group)
[!:;\?\),\.\'\"\>]* # Optional punctuation characters
(?:\Z|\s) # followed by whitespace or end of string
)
""" % (tlds, nested_paren_chunk,
r"| (?:file://(/[^/ ]*)+/?)" if settings.ENABLE_FILE_LINKS else r"")
md.inlinePatterns.add('autolink', AutoLink(link_regex), '>link')
md.preprocessors.add('hanging_ulists',
BugdownUListPreprocessor(md),
"_begin")
md.preprocessors.add('auto_number_olist',
AutoNumberOListPreprocessor(md),
"_begin")
md.treeprocessors.add("inline_interesting_links", InlineInterestingLinkProcessor(md, self), "_end")
if settings.CAMO_URI:
md.treeprocessors.add("rewrite_to_https", InlineHttpsProcessor(md), "_end")
if self.getConfig("realm") == ZEPHYR_MIRROR_BUGDOWN_KEY:
# Disable almost all inline patterns for zephyr mirror
# users' traffic that is mirrored. Note that
# inline_interesting_links is a treeprocessor and thus is
# not removed
for k in list(md.inlinePatterns.keys()):
if k not in ["autolink"]:
del md.inlinePatterns[k]
for k in list(md.treeprocessors.keys()):
if k not in ["inline_interesting_links", "inline", "rewrite_to_https"]:
del md.treeprocessors[k]
for k in list(md.preprocessors.keys()):
if k not in ["custom_text_notifications"]:
del md.preprocessors[k]
for k in list(md.parser.blockprocessors.keys()):
if k not in ["paragraph"]:
del md.parser.blockprocessors[k]
md_engines = {} # type: Dict[Tuple[int, bool], markdown.Markdown]
realm_filter_data = {} # type: Dict[int, List[Tuple[Text, Text, int]]]
class EscapeHtml(markdown.Extension):
def extendMarkdown(self, md: markdown.Markdown, md_globals: Dict[str, Any]) -> None:
del md.preprocessors['html_block']
del md.inlinePatterns['html']
def make_md_engine(realm_filters_key: int, email_gateway: bool) -> None:
md_engine_key = (realm_filters_key, email_gateway)
if md_engine_key in md_engines:
del md_engines[md_engine_key]
realm_filters = realm_filter_data[realm_filters_key]
md_engines[md_engine_key] = markdown.Markdown(
output_format = 'html',
extensions = [
'markdown.extensions.nl2br',
'markdown.extensions.tables',
codehilite.makeExtension(
linenums=False,
guess_lang=False
),
fenced_code.makeExtension(),
EscapeHtml(),
Bugdown(realm_filters=realm_filters,
realm=realm_filters_key,
code_block_processor_disabled=email_gateway)])
def subject_links(realm_filters_key: int, subject: Text) -> List[Text]:
matches = [] # type: List[Text]
realm_filters = realm_filters_for_realm(realm_filters_key)
for realm_filter in realm_filters:
pattern = prepare_realm_pattern(realm_filter[0])
for m in re.finditer(pattern, subject):
matches += [realm_filter[1] % m.groupdict()]
return matches
def maybe_update_markdown_engines(realm_filters_key: Optional[int], email_gateway: bool) -> None:
# If realm_filters_key is None, load all filters
global realm_filter_data
if realm_filters_key is None:
all_filters = all_realm_filters()
all_filters[DEFAULT_BUGDOWN_KEY] = []
for realm_filters_key, filters in all_filters.items():
realm_filter_data[realm_filters_key] = filters
make_md_engine(realm_filters_key, email_gateway)
# Hack to ensure that getConfig("realm") is right for mirrored Zephyrs
realm_filter_data[ZEPHYR_MIRROR_BUGDOWN_KEY] = []
make_md_engine(ZEPHYR_MIRROR_BUGDOWN_KEY, False)
else:
realm_filters = realm_filters_for_realm(realm_filters_key)
if realm_filters_key not in realm_filter_data or \
realm_filter_data[realm_filters_key] != realm_filters:
# Realm filters data has changed, update `realm_filter_data` and any
# of the existing markdown engines using this set of realm filters.
realm_filter_data[realm_filters_key] = realm_filters
for email_gateway_flag in [True, False]:
if (realm_filters_key, email_gateway_flag) in md_engines:
# Update only existing engines (if any); don't create a new one.
make_md_engine(realm_filters_key, email_gateway_flag)
if (realm_filters_key, email_gateway) not in md_engines:
# Markdown engine corresponding to this key doesn't exist, so create one.
make_md_engine(realm_filters_key, email_gateway)
# We want to log Markdown parser failures, but shouldn't log the actual input
# message for privacy reasons. The compromise is to replace all alphanumeric
# characters with 'x'.
#
# We also use repr() to improve reproducibility, and to escape terminal control
# codes, which can do surprisingly nasty things.
_privacy_re = re.compile('\\w', flags=re.UNICODE)
def privacy_clean_markdown(content: Text) -> Text:
return repr(_privacy_re.sub('x', content))
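# Illustrative example (comments only, not executed):
#   privacy_clean_markdown('password: hunter2') -> "'xxxxxxxx: xxxxxxx'"
# (every \w character becomes 'x'; repr() adds the quotes and escapes control codes)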
# Filters such as UserMentionPattern need a message, but python-markdown
# provides no way to pass extra params through to a pattern. Thus, a global.
current_message = None # type: Optional[Message]
# We avoid doing DB queries in our markdown thread to avoid the overhead of
# opening a new DB connection. These connections tend to live longer than the
# threads themselves, as well.
db_data = None # type: Optional[Dict[Text, Any]]
def log_bugdown_error(msg: str) -> None:
"""We use this unusual logging approach to log the bugdown error, in
order to prevent AdminNotifyHandler from sending the sanitized
original markdown formatting into another Zulip message, which
could cause an infinite exception loop."""
logging.getLogger('').error(msg)
def get_email_info(realm_id: int, emails: Set[Text]) -> Dict[Text, FullNameInfo]:
if not emails:
return dict()
q_list = {
Q(email__iexact=email.strip().lower())
for email in emails
}
rows = UserProfile.objects.filter(
realm_id=realm_id
).filter(
functools.reduce(lambda a, b: a | b, q_list),
).values(
'id',
'email',
)
dct = {
row['email'].strip().lower(): row
for row in rows
}
return dct
def get_full_name_info(realm_id: int, full_names: Set[Text]) -> Dict[Text, FullNameInfo]:
if not full_names:
return dict()
q_list = {
Q(full_name__iexact=full_name)
for full_name in full_names
}
rows = UserProfile.objects.filter(
realm_id=realm_id,
is_active=True,
).filter(
functools.reduce(lambda a, b: a | b, q_list),
).values(
'id',
'full_name',
'email',
)
dct = {
row['full_name'].lower(): row
for row in rows
}
return dct
class MentionData:
def __init__(self, realm_id: int, content: Text) -> None:
full_names = possible_mentions(content)
self.full_name_info = get_full_name_info(realm_id, full_names)
self.user_ids = {
row['id']
for row in self.full_name_info.values()
}
user_group_names = possible_user_group_mentions(content)
self.user_group_name_info = get_user_group_name_info(realm_id, user_group_names)
group_ids = [group.id for group in self.user_group_name_info.values()]
membership = UserGroupMembership.objects.filter(user_group_id__in=group_ids)
self.user_group_members = defaultdict(list) # type: Dict[int, List[int]]
for info in membership.values('user_group_id', 'user_profile_id'):
group_id = info['user_group_id']
user_profile_id = info['user_profile_id']
self.user_group_members[group_id].append(user_profile_id)
def get_user(self, name: Text) -> Optional[FullNameInfo]:
return self.full_name_info.get(name.lower(), None)
def get_user_ids(self) -> Set[int]:
"""
Returns the user IDs that might have been mentioned by this
content. Note that because this data structure has not parsed
the message and does not know about escaping/code blocks, this
will overestimate the list of user ids.
"""
return self.user_ids
def get_user_group(self, name: Text) -> Optional[UserGroup]:
return self.user_group_name_info.get(name.lower(), None)
def get_group_members(self, user_group_id: int) -> List[int]:
return self.user_group_members.get(user_group_id, [])
def get_user_group_name_info(realm_id: int, user_group_names: Set[Text]) -> Dict[Text, UserGroup]:
if not user_group_names:
return dict()
rows = UserGroup.objects.filter(realm_id=realm_id,
name__in=user_group_names)
dct = {row.name.lower(): row for row in rows}
return dct
def get_stream_name_info(realm: Realm, stream_names: Set[Text]) -> Dict[Text, FullNameInfo]:
if not stream_names:
return dict()
q_list = {
Q(name=name)
for name in stream_names
}
rows = get_active_streams(
realm=realm,
).filter(
functools.reduce(lambda a, b: a | b, q_list),
).values(
'id',
'name',
)
dct = {
row['name']: row
for row in rows
}
return dct
def do_convert(content: Text,
message: Optional[Message]=None,
message_realm: Optional[Realm]=None,
possible_words: Optional[Set[Text]]=None,
sent_by_bot: Optional[bool]=False,
mention_data: Optional[MentionData]=None,
email_gateway: Optional[bool]=False) -> Text:
"""Convert Markdown to HTML, with Zulip-specific settings and hacks."""
# This logic is a bit convoluted, but the overall goal is to support a range of use cases:
# * Nothing is passed in other than content -> just run default options (e.g. for docs)
# * message is passed, but no realm is -> look up realm from message
# * message_realm is passed -> use that realm for bugdown purposes
if message is not None:
if message_realm is None:
message_realm = message.get_realm()
if message_realm is None:
realm_filters_key = DEFAULT_BUGDOWN_KEY
else:
realm_filters_key = message_realm.id
if (message is not None and message.sender.realm.is_zephyr_mirror_realm and
message.sending_client.name == "zephyr_mirror"):
# Use slightly customized Markdown processor for content
# delivered via zephyr_mirror
realm_filters_key = ZEPHYR_MIRROR_BUGDOWN_KEY
maybe_update_markdown_engines(realm_filters_key, email_gateway)
md_engine_key = (realm_filters_key, email_gateway)
if md_engine_key in md_engines:
_md_engine = md_engines[md_engine_key]
else:
if DEFAULT_BUGDOWN_KEY not in md_engines:
maybe_update_markdown_engines(realm_filters_key=None, email_gateway=False)
_md_engine = md_engines[(DEFAULT_BUGDOWN_KEY, email_gateway)]
# Reset the parser; otherwise it will get slower over time.
_md_engine.reset()
global current_message
current_message = message
# Pre-fetch data from the DB that is used in the bugdown thread
global db_data
if message is not None:
assert message_realm is not None # ensured above if message is not None
if possible_words is None:
possible_words = set() # Set[Text]
# Here we fetch the data structures needed to render
# mentions/avatars/stream mentions from the database, but only
# if there is syntax in the message that might use them, since
# the fetches are somewhat expensive and these types of syntax
# are uncommon enough that it's a useful optimization.
if mention_data is None:
mention_data = MentionData(message_realm.id, content)
emails = possible_avatar_emails(content)
email_info = get_email_info(message_realm.id, emails)
stream_names = possible_linked_stream_names(content)
stream_name_info = get_stream_name_info(message_realm, stream_names)
if content_has_emoji_syntax(content):
active_realm_emoji = message_realm.get_active_emoji()
else:
active_realm_emoji = dict()
db_data = {
'possible_words': possible_words,
'email_info': email_info,
'mention_data': mention_data,
'active_realm_emoji': active_realm_emoji,
'realm_uri': message_realm.uri,
'sent_by_bot': sent_by_bot,
'stream_names': stream_name_info,
'translate_emoticons': message.sender.translate_emoticons,
}
try:
# Spend at most 5 seconds rendering.
# Sometimes Python-Markdown is really slow; see
# https://trac.zulip.net/ticket/345
rendered_content = timeout(5, _md_engine.convert, content)
# Throw an exception if the content is huge; this protects the
# rest of the codebase from any bugs where we end up rendering
# something huge.
if len(rendered_content) > MAX_MESSAGE_LENGTH * 2:
raise BugdownRenderingException()
return rendered_content
except Exception:
cleaned = privacy_clean_markdown(content)
# Output error to log as well as sending a zulip and email
log_bugdown_error('Exception in Markdown parser: %sInput (sanitized) was: %s'
% (traceback.format_exc(), cleaned))
subject = "Markdown parser failure on %s" % (platform.node(),)
mail.mail_admins(
subject, "Failed message: %s\n\n%s\n\n" % (cleaned, traceback.format_exc()),
fail_silently=False)
raise BugdownRenderingException()
finally:
current_message = None
db_data = None
bugdown_time_start = 0.0
bugdown_total_time = 0.0
bugdown_total_requests = 0
def get_bugdown_time() -> float:
return bugdown_total_time
def get_bugdown_requests() -> int:
return bugdown_total_requests
def bugdown_stats_start() -> None:
global bugdown_time_start
bugdown_time_start = time.time()
def bugdown_stats_finish() -> None:
global bugdown_total_time
global bugdown_total_requests
global bugdown_time_start
bugdown_total_requests += 1
bugdown_total_time += (time.time() - bugdown_time_start)
def convert(content: Text,
message: Optional[Message]=None,
message_realm: Optional[Realm]=None,
possible_words: Optional[Set[Text]]=None,
sent_by_bot: Optional[bool]=False,
mention_data: Optional[MentionData]=None,
email_gateway: Optional[bool]=False) -> Text:
bugdown_stats_start()
ret = do_convert(content, message, message_realm,
possible_words, sent_by_bot, mention_data, email_gateway)
bugdown_stats_finish()
return ret
| 40.845926 | 162 | 0.601768 |
c0c69e88f78da77ce28f26e8384cb659a1807ba9 | 44,833 | py | Python | schematic/schemas/explorer.py | linglp/schematic | fd0308c43783ac8e367e8a5be0cc6e4bfbc44b29 | ["MIT"] | null | null | null |
import os
import string
import json
import logging
from typing import Any, Dict, Optional, Text, List
import inflection
import networkx as nx
from rdflib import Graph, Namespace, plugin, query
from networkx.algorithms.cycles import find_cycle
from networkx.readwrite import json_graph
from schematic.utils.curie_utils import (
expand_curies_in_schema,
uri2label,
extract_name_from_uri_or_curie,
)
from schematic.utils.general import find_duplicates
from schematic.utils.io_utils import load_default, load_json, load_schemaorg
from schematic.utils.schema_utils import (
load_schema_into_networkx,
node_attrs_cleanup,
class_to_node,
relationship_edges,
)
from schematic.utils.general import dict2list, unlist
from schematic.utils.viz_utils import visualize
from schematic.utils.validate_utils import (
validate_class_schema,
validate_property_schema,
validate_schema,
)
from schematic.schemas.curie import uri2curie, curie2uri
namespaces = dict(rdf=Namespace("http://www.w3.org/1999/02/22-rdf-syntax-ns#"))
logger = logging.getLogger(__name__)
class SchemaExplorer:
"""Class for exploring schema"""
def __init__(self):
self.load_default_schema()
def load_schema(self, schema):
"""Load schema and convert it to networkx graph"""
self.schema = load_json(schema)
self.schema_nx = load_schema_into_networkx(self.schema)
def export_schema(self, file_path):
with open(file_path, "w",encoding="utf8") as f:
json.dump(self.schema, f, sort_keys=True, indent=4, ensure_ascii=False)
def load_default_schema(self):
"""Load default schema, either schema.org or biothings"""
self.schema = load_default()
self.schema_nx = load_schema_into_networkx(self.schema)
def get_nx_schema(self):
return self.schema_nx
def get_edges_by_relationship(
self, class_label: str, relationship: str
) -> List[str]:
"""Get a list of out-edges of a node where the edges match a specifc type of relationship.
i.e., the edges connecting a node to its neighbors are of relationship type -- "parentOf" (set of edges to children / sub-class nodes).
Note: possible edge relationships are -- parentOf, rangeValue, requiresDependency.
Args:
class_label: the class (node) whose edges we need to look at.
relationship: the type of link(s) that the above node and its immediate neighbors share.
Returns:
List of edges that are connected to the node.
"""
edges = []
mm_graph = self.get_nx_schema()
for (u, v, key, c) in mm_graph.out_edges(class_label, data=True, keys=True):
if key == relationship:
edges.append((u, v))
return edges
def get_descendants_by_edge_type(
self,
source_node: str,
relationship: str,
connected: bool = True,
ordered: bool = False,
) -> List[str]:
"""Get all nodes that are descendants of a given source node, based on a specific type of edge / relationship type.
Args:
source_node: The node whose descendants need to be retrieved.
relationship: Edge / link relationship type with possible values same as in above docs.
connected: If True, we need to ensure that all descendant nodes are reachable from the source node, i.e., they are part of the same connected component.
If False, the descendants could be in multiple connected components.
Default value is True.
ordered: If True, the list of descendants will be topologically ordered.
If False, the list has no particular order (depends on the order in which the descendants were traversed in the subgraph).
Returns:
List of nodes that are descendants from a particular node (sorted / unsorted)
"""
mm_graph = self.get_nx_schema()
# if mm_graph.has_node(source_node):
# get all nodes that are reachable from a specified root /source node in the data model
root_descendants = nx.descendants(mm_graph, source_node)
# else:
# print("The specified source node could not be found im the Networkx graph.")
# return []
subgraph_nodes = list(root_descendants)
subgraph_nodes.append(source_node)
descendants_subgraph = mm_graph.subgraph(subgraph_nodes)
# prune the descendants subgraph so as to include only those edges that match the relationship type
rel_edges = []
for (u, v, key, c) in descendants_subgraph.edges(data=True, keys=True):
if key == relationship:
rel_edges.append((u, v))
relationship_subgraph = nx.DiGraph()
relationship_subgraph.add_edges_from(rel_edges)
descendants = relationship_subgraph.nodes()
if not descendants:
# return empty list if there are no nodes that are reachable from the source node based on this relationship type
return []
if connected and ordered:
# get the set of reachable nodes from the source node
descendants = nx.descendants(relationship_subgraph, source_node)
descendants.add(source_node)
# normally, the descendants from a node are unordered (peculiarity of nx descendants call)
# form the subgraph on descendants and order it topologically
# this assumes an acyclic subgraph
descendants = nx.topological_sort(
relationship_subgraph.subgraph(descendants)
)
elif connected:
# get the nodes that are reachable from a given source node
# after the pruning process above some nodes in the root_descendants subgraph might have become disconnected and will be omitted
descendants = nx.descendants(relationship_subgraph, source_node)
descendants.add(source_node)
elif ordered:
# sort the nodes topologically
# this requires the graph to be an acyclic graph
descendants = nx.topological_sort(relationship_subgraph)
return list(descendants)
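# Illustrative usage (comments only, not executed; 'Patient' and 'model.jsonld' are hypothetical):
#   se = SchemaExplorer()
#   se.load_schema('model.jsonld')
#   se.get_descendants_by_edge_type('Patient', 'parentOf', ordered=True)
# would return the sub-classes reachable from 'Patient' via parentOf edges, topologically sorted.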
def get_adjacent_nodes_by_relationship(
self, node: str, relationship: str
) -> List[str]:
"""Get a list of nodes that is / are adjacent to a given node, based on a relationship type.
Args:
node: the node whose edges we need to look at.
relationship: the type of link(s) that the above node and its immediate neighbors share.
Returns:
List of nodes that are adjacent to the given node.
"""
nodes = set()
mm_graph = self.get_nx_schema()
for (u, v, key, c) in mm_graph.out_edges(node, data=True, keys=True):
if key == relationship:
nodes.add(v)
return list(nodes)
def is_class_in_schema(self, class_label):
if class_label in self.schema_nx.nodes:
return True
else:
return False
def full_schema_graph(self, size=None):
edges = self.schema_nx.edges()
return visualize(edges, size=size)
def sub_schema_graph(self, source, direction, size=None):
if direction == "down":
edges = list(nx.edge_bfs(self.schema_nx, [source]))
return visualize(edges, size=size)
elif direction == "up":
paths = self.find_parent_classes(source)
edges = []
for _path in paths:
_path.append(source)
for i in range(0, len(_path) - 1):
edges.append((_path[i], _path[i + 1]))
return visualize(edges, size=size)
elif direction == "both":
paths = self.find_parent_classes(source)
edges = list(nx.edge_bfs(self.schema_nx, [source]))
for _path in paths:
_path.append(source)
for i in range(0, len(_path) - 1):
edges.append((_path[i], _path[i + 1]))
return visualize(edges, size=size)
def find_parent_classes(self, schema_class):
"""Find all parents of the class"""
digraph = self.get_digraph_by_edge_type("parentOf")
root_node = list(nx.topological_sort(digraph))[0]
# root_node = list(nx.topological_sort(self.schema_nx))[0]
paths = nx.all_simple_paths(
self.schema_nx, source=root_node, target=schema_class
)
# print(root_node)
return [_path[:-1] for _path in paths]
def find_class_specific_properties(self, schema_class):
"""Find properties specifically associated with a given class"""
schema_uri = self.schema_nx.nodes[schema_class]["uri"]
properties = []
for record in self.schema["@graph"]:
if record["@type"] == "rdf:Property":
if (
type(record["schema:domainIncludes"]) == dict
and record["schema:domainIncludes"]["@id"] == schema_uri
):
properties.append(record["rdfs:label"])
elif (
type(record["schema:domainIncludes"]) == list
and [
item
for item in record["schema:domainIncludes"]
if item["@id"] == schema_uri
]
!= []
):
properties.append(record["rdfs:label"])
return properties
def find_all_class_properties(self, schema_class, display_as_table=False):
"""Find all properties associated with a given class
# TODO : need to deal with recursive paths
"""
parents = self.find_parent_classes(schema_class)
# print(schema_class)
# print(parents)
properties = [
{
"class": schema_class,
"properties": self.find_class_specific_properties(schema_class),
}
]
for path in parents:
path.reverse()
for _parent in path:
# print(_parent)
properties.append(
{
"class": _parent,
"properties": self.find_class_specific_properties(_parent),
}
)
if not display_as_table:
return properties
else:
content = [["Property", "Expected Type", "Description", "Class"]]
for record in properties:
for _property in record["properties"]:
property_info = self.explore_property(_property)
if "range" in property_info:
content.append(
[
_property,
property_info["range"],
property_info["description"],
record["class"],
]
)
else:
content.append(
[_property, property_info["description"], record["class"]]
)
# TODO: Log content
def find_class_usages(self, schema_class):
"""Find where a given class is used as a value of a property"""
usages = []
schema_uri = self.schema_nx.nodes[schema_class]["uri"]
for record in self.schema["@graph"]:
usage = {}
if record["@type"] == "rdf:Property":
if "schema:rangeIncludes" in record:
p_range = dict2list(record["schema:rangeIncludes"])
for _doc in p_range:
if _doc["@id"] == schema_uri:
usage["property"] = record["rdfs:label"]
p_domain = dict2list(record["schema:domainIncludes"])
usage["property_used_on_class"] = unlist(
[self.uri2label(record["@id"]) for record in p_domain]
)
usage["description"] = record["rdfs:comment"]
if usage:
usages.append(usage)
return usages
def find_child_classes(self, schema_class):
"""Find schema classes that inherit from the given class"""
return unlist(list(self.schema_nx.successors(schema_class)))
def find_adjacent_child_classes(self, schema_class):
return self.get_adjacent_nodes_by_relationship(schema_class, "parentOf")
def explore_class(self, schema_class):
"""Find details about a specific schema class"""
parents = []
if "subClassOf" in self.schema_nx.nodes[schema_class]:
schema_node_val = self.schema_nx.nodes[schema_class]["subClassOf"]
parents_list = []
if isinstance(schema_node_val, dict):
parents_list.append(self.schema_nx.nodes[schema_class]["subClassOf"])
else:
parents_list = schema_node_val
for parent in parents_list:
parents.append(extract_name_from_uri_or_curie(parent["@id"]))
requires_range = []
if "rangeIncludes" in self.schema_nx.nodes[schema_class]:
schema_node_val = self.schema_nx.nodes[schema_class]["rangeIncludes"]
if isinstance(schema_node_val, dict):
subclass_list = []
subclass_list.append(
self.schema_nx.nodes[schema_class]["rangeIncludes"]
)
else:
subclass_list = schema_node_val
for range_class in subclass_list:
requires_range.append(
extract_name_from_uri_or_curie(range_class["@id"])
)
requires_dependencies = []
if "requiresDependency" in self.schema_nx.nodes[schema_class]:
schema_node_val = self.schema_nx.nodes[schema_class]["requiresDependency"]
if isinstance(schema_node_val, dict):
subclass_list = []
subclass_list.append(
self.schema_nx.nodes[schema_class]["requiresDependency"]
)
else:
subclass_list = schema_node_val
for dep_class in subclass_list:
requires_dependencies.append(
extract_name_from_uri_or_curie(dep_class["@id"])
)
requires_components = []
if "requiresComponent" in self.schema_nx.nodes[schema_class]:
schema_node_val = self.schema_nx.nodes[schema_class]["requiresComponent"]
if isinstance(schema_node_val, dict):
subclass_list = []
subclass_list.append(
self.schema_nx.nodes[schema_class]["requiresComponent"]
)
else:
subclass_list = schema_node_val
for comp_dep_class in subclass_list:
requires_components.append(
extract_name_from_uri_or_curie(comp_dep_class["@id"])
)
required = False
if "required" in self.schema_nx.nodes[schema_class]:
required = self.schema_nx.nodes[schema_class]["required"]
validation_rules = []
if "validationRules" in self.schema_nx.nodes[schema_class]:
validation_rules = self.schema_nx.nodes[schema_class]["validationRules"]
# TODO: make class_info keys here the same as keys in schema graph nodes(e.g. schema_class above); note that downstream code using explore_class would have to be updated as well (e.g. csv_2_schemaorg)
class_info = {
"properties": self.find_class_specific_properties(schema_class),
"description": self.schema_nx.nodes[schema_class]["description"],
"uri": curie2uri(self.schema_nx.nodes[schema_class]["uri"], namespaces),
#'usage': self.find_class_usages(schema_class),
"usage": "NA",
"child_classes": self.find_adjacent_child_classes(schema_class),
"subClassOf": parents,
"range": requires_range,
"dependencies": requires_dependencies,
"validation_rules": validation_rules,
"required": required,
"component_dependencies": requires_components,
"parent_classes": parents
#'parent_classes': self.find_parent_classes(schema_class)
}
if "displayName" in self.schema_nx.nodes[schema_class]:
class_info["displayName"] = self.schema_nx.nodes[schema_class][
"displayName"
]
return class_info
def get_property_label_from_display_name(self, display_name):
"""Convert a given display name string into a proper property label string"""
"""
label = ''.join(x.capitalize() or ' ' for x in display_name.split(' '))
label = label[:1].lower() + label[1:] if label else ''
"""
display_name = display_name.translate({ord(c): None for c in string.whitespace})
label = inflection.camelize(display_name.strip(), uppercase_first_letter=False)
return label
def get_class_label_from_display_name(self, display_name):
"""Convert a given display name string into a proper class label string"""
"""
label = ''.join(x.capitalize() or ' ' for x in display_name.split(' '))"""
display_name = display_name.translate({ord(c): None for c in string.whitespace})
label = inflection.camelize(display_name.strip(), uppercase_first_letter=True)
return label
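# Illustrative examples (comments only, not executed):
#   get_property_label_from_display_name('Family History') -> 'familyHistory'
#   get_class_label_from_display_name('Family History')    -> 'FamilyHistory'
# (whitespace is removed first, then inflection.camelize adjusts the first letter's case)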
def get_class_by_property(self, property_display_name):
schema_property = self.get_property_label_from_display_name(
property_display_name
)
for record in self.schema["@graph"]:
if record["@type"] == "rdf:Property":
if record["rdfs:label"] == schema_property:
p_domain = dict2list(record["schema:domainIncludes"])
return unlist(
[
self.uri2label(schema_class["@id"])
for schema_class in p_domain
]
)
return None
def uri2label(self, uri):
return uri.split(":")[1]
def explore_property(self, schema_property):
"""Find details about a specific property
TODO: refactor so that explore class and explore property reuse logic - they are *very* similar
"""
property_info = {}
for record in self.schema["@graph"]:
if record["@type"] == "rdf:Property":
if record["rdfs:label"] == schema_property:
property_info["id"] = record["rdfs:label"]
property_info["description"] = record["rdfs:comment"]
property_info["uri"] = curie2uri(record["@id"], namespaces)
p_domain = dict2list(record["schema:domainIncludes"])
property_info["domain"] = unlist(
[self.uri2label(record["@id"]) for record in p_domain]
)
if "schema:rangeIncludes" in record:
p_range = dict2list(record["schema:rangeIncludes"])
property_info["range"] = [
self.uri2label(record["@id"]) for record in p_range
]
else:
property_info["range"] = []
if "sms:required" in record:
if "sms:true" == record["sms:required"]:
property_info["required"] = True
else:
property_info["required"] = False
validation_rules = []
if "sms:validationRules" in record:
property_info["validation_rules"] = record[
"sms:validationRules"
]
if "sms:requiresDependency" in record:
p_dependencies = dict2list(record["sms:requiresDependency"])
property_info["dependencies"] = [
self.uri2label(record["@id"]) for record in p_dependencies
]
else:
property_info["dependencies"] = []
if "sms:displayName" in record:
property_info["displayName"] = record["sms:displayName"]
break
# check if properties are added multiple times
return property_info
def generate_class_template(self):
"""Generate a template for schema class"""
template = {
"@id": "uri or curie of the class",
"@type": "rdfs:Class",
"rdfs:comment": "description of the class",
"rdfs:label": "class label, should match @id",
"rdfs:subClassOf": {"@id": "parent class, could be list"},
"schema:isPartOf": {"@id": "http://schema.biothings.io"},
}
return template
def generate_property_template(self):
"""Generate a template for schema property"""
template = {
"@id": "url or curie of the property",
"@type": "rdf:Property",
"rdfs:comment": "description of the property",
"rdfs:label": "carmel case, should match @id",
"schema:domainIncludes": {
"@id": "class which use it as a property, could be list"
},
"schema:isPartOf": {"@id": "http://schema.biothings.io"},
"schema:rangeIncludes": {
"@id": "relates a property to a class that constitutes (one of) the expected type(s) for values of the property"
},
}
return template
def edit_class(self, class_info):
"""Edit an existing class into schema"""
for i, schema_class in enumerate(self.schema["@graph"]):
if schema_class["rdfs:label"] == class_info["rdfs:label"]:
validate_class_schema(class_info) # why are we doing this in a loop?
self.schema["@graph"][i] = class_info
break
# TODO: do we actually need to validate the entire schema if a class is just edited and the class passes validation?
# validate_schema(self.schema)
logger.info(f"Edited the class {class_info['rdfs:label']} successfully.")
self.schema_nx = load_schema_into_networkx(self.schema)
def update_class(self, class_info):
"""Add a new class into schema"""
# print(class_info)
validate_class_schema(class_info)
self.schema["@graph"].append(class_info)
validate_schema(self.schema)
logger.info(f"Updated the class {class_info['rdfs:label']} successfully.")
self.schema_nx = load_schema_into_networkx(self.schema)
def edit_property(self, property_info):
"""Edit an existing property into schema"""
for i, schema_property in enumerate(self.schema["@graph"]):
if schema_property["rdfs:label"] == property_info["rdfs:label"]:
validate_property_schema(property_info)
self.schema["@graph"][i] = property_info
# TODO: check if properties are added/edited multiple times (e.g. look at explore_property)
break
validate_schema(self.schema)
logger.info(f"Edited the property {property_info['rdfs:label']} successfully.")
self.schema_nx = load_schema_into_networkx(self.schema)
def update_property(self, property_info):
"""Add a new property into schema"""
validate_property_schema(property_info)
self.schema["@graph"].append(property_info)
validate_schema(self.schema)
logger.info(f"Updated the property {property_info['rdfs:label']} successfully.")
def get_digraph_by_edge_type(self, edge_type):
multi_digraph = self.schema_nx
digraph = nx.DiGraph()
for (u, v, key, c) in multi_digraph.edges(data=True, keys=True):
if key == edge_type:
digraph.add_edge(u, v)
# print(nx.find_cycle(digraph, orientation = "ignore"))
return digraph
# version of edit_class() method that directly acts on the networkx graph
def edit_schema_object_nx(self, schema_object: dict) -> None:
node_to_replace = class_to_node(class_to_convert=schema_object)
# get the networkx graph associated with the SchemaExplorer object in its current state
schema_graph_nx = self.get_nx_schema()
# outer loop to loop over all the nodes in the graph constructed from master schema
for node, data in schema_graph_nx.nodes(data=True):
            # inner loop over the single node that is to be replaced/edited in the master graph
for replace_node, replace_data in node_to_replace.nodes(data=True):
# find the node to be replaced in the graph
if node == replace_node:
# for the "comment", "required", "displayName", "validationRules" fields/keys it's okay to do a direct replacement
# without having to worry about adding/removing any associated edges
# ques. is it more expensive to do a checking operation (diff b/w fields) or a replace operation?
if (
"comment" in data and "comment" in replace_data
): # replace contents of "comment" from replacement node
schema_graph_nx.nodes[node]["comment"] = node_to_replace.nodes[
replace_node
]["comment"]
schema_graph_nx.nodes[node][
"description"
] = node_to_replace.nodes[replace_node]["description"]
if (
"required" in data and "required" in replace_data
): # replace boolean value of "required" from replacement node
schema_graph_nx.nodes[node]["required"] = node_to_replace.nodes[
replace_node
]["required"]
if (
"displayName" in data and "displayName" in replace_data
): # replace contents of "displayName" from replacement node
schema_graph_nx.nodes[node][
"displayName"
] = node_to_replace.nodes[replace_node]["displayName"]
if (
"validationRules" in data and "validationRules" in replace_data
): # replace contents of "validationRules" from replacement node
schema_graph_nx.nodes[node][
"validationRules"
] = node_to_replace.nodes[replace_node]["validationRules"]
# for the "subClassOf", "requiresDependency", "requiresComponent", "rangeIncludes" fields/keys require rejiggering
# of associated edges
# general strategy we follow for rejiggering is remove edges that existed formerly and add new edges based on contents
# of the replacement node
# "subClassOf" key related edge manipulation
if "subClassOf" in replace_data:
# if the "subClassOf" attribute already exists on the node, then remove all the "parentOf" in-edges
# associated with that node
if "subClassOf" in data:
# remove formerly existent edges from the master schema/graph
for (u, v) in list(schema_graph_nx.in_edges([node])):
# there are certain nodes which have "subClassOf" data in list format
if type(data["subClassOf"]) == list:
for _edges_to_replace in data["subClassOf"]:
edge_repl = extract_name_from_uri_or_curie(
_edges_to_replace["@id"]
)
if edge_repl == u:
try:
# we need to make sure to remove only edges that are tagged with the "parentOf" label
schema_graph_nx.remove_edges_from(
[(u, v, "parentOf")]
)
except TypeError:
pass
# there are certain nodes which have "subClassOf" data in dict format
elif type(data["subClassOf"]) == dict:
for k_id, v_curie in data["subClassOf"].items():
edge_repl = extract_name_from_uri_or_curie(
v_curie
)
if edge_repl == u:
try:
schema_graph_nx.remove_edges_from(
[(u, v, "parentOf")]
)
except TypeError:
pass
# extract node names from replacement node and use it to add edges to the master schema/graph
parents = replace_data["subClassOf"]
if type(parents) == list:
for _parent in parents:
target_node = extract_name_from_uri_or_curie(
_parent["@id"]
)
# label to be associated with "subClassOf" keys is "parentOf"
if target_node != replace_node:
# make note of the fact that we are changing in-edges here
schema_graph_nx.add_edge(
target_node, replace_node, key="parentOf"
)
elif type(parents) == dict:
for _k_parent, _v_parent in parents.items():
target_node = extract_name_from_uri_or_curie(_v_parent)
# label to be associated with "subClassOf" keys is "parentOf"
if target_node != replace_node:
# make note of the fact that we are changing in-edges here
schema_graph_nx.add_edge(
target_node, replace_node, key="parentOf"
)
# once the edges have been added, change the contents of the node
schema_graph_nx.nodes[node][
"subClassOf"
] = node_to_replace.nodes[replace_node]["subClassOf"]
# "requiresDependency" key related edge manipulation
if "requiresDependency" in replace_data:
# if the "requiresDependency" attribute already exists on the node, then remove all the "requiresDependency" in-edges
# associated with that node
if "requiresDependency" in data:
for (u, v) in list(schema_graph_nx.out_edges([node])):
# there are certain nodes which have "requiresDependency" data in list format
if type(data["requiresDependency"]) == list:
for _edges_to_replace in data["requiresDependency"]:
edge_repl = extract_name_from_uri_or_curie(
_edges_to_replace["@id"]
)
if edge_repl == v:
try:
schema_graph_nx.remove_edges_from(
[u, v, "requiresDependency"]
)
except TypeError:
pass
# there are certain nodes which have "requiresDependency" data in dict format
elif type(data["requiresDependency"]) == dict:
for k_id, v_curie in data[
"requiresDependency"
].items():
edge_repl = extract_name_from_uri_or_curie(
v_curie
)
                                        if edge_repl == v:
try:
schema_graph_nx.remove_edges_from(
[u, v, "requiresDependency"]
)
except TypeError:
pass
deps = replace_data["requiresDependency"]
if type(deps) == list:
for _dep in deps:
target_node = extract_name_from_uri_or_curie(
_dep["@id"]
)
if target_node != replace_node:
                                    # make note of the fact that edges being added here are out-edges
schema_graph_nx.add_edge(
replace_node,
target_node,
key="requiresDependency",
)
elif type(deps) == dict:
for _k_dep, _v_dep in deps.items():
target_node = extract_name_from_uri_or_curie(_v_dep)
if target_node != replace_node:
                                    # make note of the fact that edges being added here are out-edges
schema_graph_nx.add_edge(
replace_node,
target_node,
key="requiresDependency",
)
schema_graph_nx.nodes[node][
"requiresDependency"
] = node_to_replace.nodes[replace_node]["requiresDependency"]
# "requiresComponent" key related edge manipulation
if "requiresComponent" in replace_data:
if "requiresComponent" in data:
for (u, v) in list(schema_graph_nx.out_edges([node])):
# there are certain nodes which have "requiresComponent" data in list format
if type(data["requiresComponent"]) == list:
for _edges_to_replace in data["requiresComponent"]:
edge_repl = extract_name_from_uri_or_curie(
_edges_to_replace["@id"]
)
if edge_repl == v:
try:
schema_graph_nx.remove_edges_from(
[u, v, "requiresComponent"]
)
except TypeError:
pass
elif type(data["requiresComponent"]) == dict:
for k_id, v_curie in data[
"requiresComponent"
].items():
edge_repl = extract_name_from_uri_or_curie(
v_curie
)
if edge_repl == v:
try:
schema_graph_nx.remove_edges_from(
[u, v, "requiresComponent"]
)
except TypeError:
pass
comps = replace_data["requiresComponent"]
if type(comps) == list:
for _comp in comps:
target_node = extract_name_from_uri_or_curie(
_comp["@id"]
)
if target_node != replace_node:
schema_graph_nx.add_edge(
replace_node,
target_node,
key="requiresComponent",
)
elif type(comps) == dict:
                        for _k_comp, _v_comp in comps.items():
                            target_node = extract_name_from_uri_or_curie(_v_comp)
                            if target_node != replace_node:
                                # make note of the fact that edges being added here are out-edges
                                schema_graph_nx.add_edge(
                                    replace_node,
                                    target_node,
                                    key="requiresComponent",
)
schema_graph_nx.nodes[node][
"requiresComponent"
] = node_to_replace.nodes[replace_node]["requiresComponent"]
# "rangeIncludes" key related edge manipulation
if "rangeIncludes" in replace_data:
if "rangeIncludes" in data:
for (u, v) in list(schema_graph_nx.out_edges([node])):
# there are certain nodes which have "rangeIncludes" data in list format
if type(data["rangeIncludes"]) == list:
for _edges_to_replace in data["rangeIncludes"]:
edge_repl = extract_name_from_uri_or_curie(
_edges_to_replace["@id"]
)
if edge_repl == v:
try:
schema_graph_nx.remove_edges_from(
[u, v, "rangeIncludes"]
)
except TypeError:
pass
elif type(data["rangeIncludes"]) == dict:
for k_id, v_curie in data["rangeIncludes"].items():
edge_repl = extract_name_from_uri_or_curie(
v_curie
)
if edge_repl == v:
try:
schema_graph_nx.remove_edges_from(
[u, v, "rangeIncludes"]
)
except TypeError:
pass
range_inc = replace_data["rangeIncludes"]
if type(range_inc) == list:
for _rinc in range_inc:
target_node = extract_name_from_uri_or_curie(
_rinc["@id"]
)
if target_node != replace_node:
schema_graph_nx.add_edge(
replace_node, target_node, key="rangeValue"
)
elif type(range_inc) == dict:
                        for _k_rinc, _v_rinc in range_inc.items():
                            target_node = extract_name_from_uri_or_curie(_v_rinc)
                            if target_node != replace_node:
                                # make note of the fact that edges being added here are out-edges
schema_graph_nx.add_edge(
replace_node, target_node, key="rangeValue"
)
schema_graph_nx.nodes[node][
"rangeIncludes"
] = node_to_replace.nodes[replace_node]["rangeIncludes"]
        # set the networkx schema graph to the modified networkx schema
self.schema_nx = schema_graph_nx
# print("Added node {} to the graph successfully.".format(schema_object["rdfs:label"]))
# part of the code that replaces the modified class in the original JSON-LD schema (not in the data/ folder though)
for i, schema_class in enumerate(self.schema["@graph"]):
if schema_class["rdfs:label"] == schema_object["rdfs:label"]:
# validate_class_schema(schema_object) # validate that the class to be modified follows the structure for any generic class (node)
self.schema["@graph"][i] = schema_object
break
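    # Rough usage sketch (added illustration, not part of the original module),
    # assuming this class is the SchemaExplorer referenced in the comments above;
    # the node dict below is hypothetical and only shows the expected JSON-LD shape:
    #   se = SchemaExplorer()
    #   se.edit_schema_object_nx({
    #       "@id": "bts:Patient",
    #       "rdfs:label": "Patient",
    #       "rdfs:comment": "updated description",
    #       "rdfs:subClassOf": [{"@id": "bts:Thing"}],
    #   })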
# version of update_class() method that directly acts on the networkx graph
def add_schema_object_nx(self, schema_object: dict, **kwargs: dict) -> None:
node = node_attrs_cleanup(schema_object)
if "required" in node:
if "sms:true" == schema_object["sms:required"]:
node["required"] = True
else:
node["required"] = False
if "sms:validationRules" in schema_object:
node["validationRules"] = schema_object["sms:validationRules"]
else:
node["validationRules"] = []
node["uri"] = schema_object["@id"]
node["description"] = schema_object["rdfs:comment"]
# get the networkx graph associated with the SchemaExplorer object in its current state
schema_graph_nx = self.get_nx_schema()
# add node to graph
schema_graph_nx.add_node(schema_object["rdfs:label"], **node)
schema_graph_nx = relationship_edges(schema_graph_nx, schema_object, **kwargs)
        # set the networkx schema graph to the modified networkx schema
self.schema_nx = schema_graph_nx
# print("Edited node {} successfully.".format(schema_object["rdfs:label"]))
# update the JSON-LD schema after modifying the networkx graph
# validate_class_schema(schema_object)
self.schema["@graph"].append(schema_object)
# validate_schema(self.schema)
| 45.240161
| 208
| 0.507974
|
c8f412ad483655845acf8aef199301c262e81a4d
| 2,599
|
py
|
Python
|
yad2k/models/keras_darknet19.py
|
zhangbo2008/YAD2K
|
91ab1dbd6b40564fa95ffea6c146091ed70bd356
|
[
"MIT"
] | null | null | null |
yad2k/models/keras_darknet19.py
|
zhangbo2008/YAD2K
|
91ab1dbd6b40564fa95ffea6c146091ed70bd356
|
[
"MIT"
] | null | null | null |
yad2k/models/keras_darknet19.py
|
zhangbo2008/YAD2K
|
91ab1dbd6b40564fa95ffea6c146091ed70bd356
|
[
"MIT"
] | null | null | null |
"""Darknet19 Model Defined in Keras."""
import functools
from functools import partial
from keras.layers import Conv2D, MaxPooling2D
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.normalization import BatchNormalization
from keras.models import Model
from keras.regularizers import l2
from ..utils import compose
# Partial wrapper for Convolution2D with static default argument.
# Darknet is essentially a template built from Conv2D layers, so it can also be implemented with Keras.
# In short, functools.partial fixes some of a function's arguments (i.e. gives them defaults) and
# returns a new function that is simpler to call.
# Here it means _DarknetConv2D is equivalent to Conv2D with the padding argument locked to 'same'.
# For conv networks 'same' padding is generally preferred; 'valid' loses edge information.
_DarknetConv2D = partial(Conv2D, padding='same')
@functools.wraps(Conv2D)
def DarknetConv2D(*args, **kwargs):
"""Wrapper to set Darknet weight regularizer for Convolution2D."""
darknet_conv_kwargs = {'kernel_regularizer': l2(5e-4)}
darknet_conv_kwargs.update(kwargs)
return _DarknetConv2D(*args, **darknet_conv_kwargs)
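# A minimal illustration (added, not part of the original model code) of what
# functools.partial does above, using only the standard library:
#   from functools import partial
#   int_base2 = partial(int, base=2)  # 'base' is now fixed to 2
#   int_base2('1010')                 # -> 10, same as int('1010', base=2)
# _DarknetConv2D behaves the same way: every call becomes Conv2D(..., padding='same').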
def DarknetConv2D_BN_Leaky(*args, **kwargs):
"""Darknet Convolution2D followed by BatchNormalization and LeakyReLU."""
no_bias_kwargs = {'use_bias': False}
no_bias_kwargs.update(kwargs)
return compose(
DarknetConv2D(*args, **no_bias_kwargs),
BatchNormalization(),
LeakyReLU(alpha=0.1))
def bottleneck_block(outer_filters, bottleneck_filters):
"""Bottleneck block of 3x3, 1x1, 3x3 convolutions."""
return compose(
DarknetConv2D_BN_Leaky(outer_filters, (3, 3)),
DarknetConv2D_BN_Leaky(bottleneck_filters, (1, 1)),
DarknetConv2D_BN_Leaky(outer_filters, (3, 3)))
def bottleneck_x2_block(outer_filters, bottleneck_filters):
"""Bottleneck block of 3x3, 1x1, 3x3, 1x1, 3x3 convolutions."""
return compose(
bottleneck_block(outer_filters, bottleneck_filters),
DarknetConv2D_BN_Leaky(bottleneck_filters, (1, 1)),
DarknetConv2D_BN_Leaky(outer_filters, (3, 3)))
def darknet_body():
"""Generate first 18 conv layers of Darknet-19."""
return compose(
DarknetConv2D_BN_Leaky(32, (3, 3)),
MaxPooling2D(),
DarknetConv2D_BN_Leaky(64, (3, 3)),
MaxPooling2D(),
bottleneck_block(128, 64),
MaxPooling2D(),
bottleneck_block(256, 128),
MaxPooling2D(),
bottleneck_x2_block(512, 256),
MaxPooling2D(),
bottleneck_x2_block(1024, 512))
def darknet19(inputs):
"""Generate Darknet-19 model for Imagenet classification."""
body = darknet_body()(inputs)
logits = DarknetConv2D(1000, (1, 1), activation='softmax')(body)
return Model(inputs, logits)
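# Minimal usage sketch (added example, not part of the original file): build the
# ImageNet classifier for 224x224 RGB inputs. Assumes the same standalone Keras
# version targeted by the imports above.
if __name__ == '__main__':
    from keras.layers import Input
    image_input = Input(shape=(224, 224, 3))
    model = darknet19(image_input)
    model.summary()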
| 33.753247
| 77
| 0.722201
|
df89f94a205aff3dd4042bd0c04d1e349a130416
| 5,801
|
py
|
Python
|
generators/dcdc-gen/tools/dcdc_netlist.py
|
leochand101/OpenFASOC
|
0e6990e1cd81b12ce3ee971bd03eeafcea52bd5e
|
[
"Apache-2.0"
] | null | null | null |
generators/dcdc-gen/tools/dcdc_netlist.py
|
leochand101/OpenFASOC
|
0e6990e1cd81b12ce3ee971bd03eeafcea52bd5e
|
[
"Apache-2.0"
] | null | null | null |
generators/dcdc-gen/tools/dcdc_netlist.py
|
leochand101/OpenFASOC
|
0e6990e1cd81b12ce3ee971bd03eeafcea52bd5e
|
[
"Apache-2.0"
] | null | null | null |
##for HSPICE netlist
import re
import function
import os
import sys
import math
def gen_dcdc_netlist(cells, args, jsonSpec, platformConfig, srcDir):
# process the top level verilog
r_netlist=open(srcDir + "/dcdcInst.template.v","r")
lines=list(r_netlist.readlines())
w_netlist=open(srcDir + "/dcdcInst.v","w")
netmap_top=function.netmap()
netmap_top.get_net('na',cells['ff_cell'],1,1,1)
netmap_top.get_net('nb',cells['inv_cell'],1,1,1)
netmap_top.get_net('nc',cells['clkgate_cell'],1,1,1)
for line in lines:
netmap_top.printline(line,w_netlist)
# process the non-inverting clock verilog
r_netlist=open(srcDir + "/DCDC_NOV_CLKGEN.template.sv","r")
lines=list(r_netlist.readlines())
w_netlist=open(srcDir + "/DCDC_NOV_CLKGEN.sv","w")
netmap_novclkgen=function.netmap()
netmap_novclkgen.get_net('na',cells['nand2_cell'],1,1,1)
netmap_novclkgen.get_net('nb',cells['clkinv_cell'],1,1,1)
netmap_novclkgen.get_net('nc',cells['clkinv_cell'],1,1,1)
netmap_novclkgen.get_net('ne',cells['clkinv_cell'],1,1,1)
netmap_novclkgen.get_net('nf',cells['clkinv_cell'],1,1,1)
netmap_novclkgen.get_net('nd',cells['nor2_cell'],1,1,1)
for line in lines:
netmap_novclkgen.printline(line,w_netlist)
netmap_buffer=function.netmap()
netmap_buffer.get_net('nb',cells['clkinv_cell'],1,1,1)
netmap_buffer.get_net('nc',cells['clkinv_cell'],1,1,1)
r_netlist=open(srcDir + "/DCDC_BUFFER.template.sv","r")
lines=list(r_netlist.readlines())
w_netlist=open(srcDir + "/DCDC_BUFFER.sv","w")
for line in lines:
netmap_buffer.printline(line,w_netlist)
# Get the design spec & parameters from spec file
try:
Iload = float(jsonSpec['specifications']['Iload (mA)'])
except KeyError as e:
print('Error: Bad Input Specfile. \'Iload (mA)\' value is missing under \'specifications\'.')
sys.exit(1)
except ValueError as e:
print('Error: Bad Input Specfile. Please use a float value for \'Iload (mA)\' under \'specifications\'.')
sys.exit(1)
if Iload > 3.0 or Iload < 0.001:
        print('Error: Only support Iload from 0.001 ~ 3.0 now')
sys.exit(1)
try:
OutVolt = float(jsonSpec['specifications']['Output voltage (V)'])
except KeyError as e:
print('Error: Bad Input Specfile. \'Output voltage (V)\' value is missing under \'specifications\'.')
sys.exit(1)
except ValueError as e:
        print('Error: Bad Input Specfile. Please use a float value for \'Output voltage (V)\' under \'specifications\'.')
        sys.exit(1)
if OutVolt > 1.0 or OutVolt < 0.3:
print('Error: Only support OutVolt in the range [0.3, 1.0] now')
sys.exit(1)
try:
Frequency = float(jsonSpec['specifications']['Clock frequency (kHz)'])
except KeyError as e:
print('Error: Bad Input Specfile. \'Clock frequency (kHz)\' value is missing under \'specifications\'.')
sys.exit(1)
except ValueError as e:
print('Error: Bad Input Specfile. Please use a float value for \'Clock frequency (kHz)\' under \'specifications\'.')
sys.exit(1)
designName = jsonSpec['module_name']
SupplyVolt = platformConfig['platforms'][args.platform]['nominal_voltage']
print('\n\nDCDC Spec:')
print('DCDC Instance Name - \"' + designName + '\"')
print('Supply Voltage - \"' + str(SupplyVolt) + '\"')
print('Iload(mA) - \"' + str(Iload) + '\"')
print('Output voltage (V) - \"' + str(OutVolt) + '\"')
print('Frequency (kHz) - \"' + str(Frequency) + '\"')
# process the power mux configuration
# process 2:1 stage switch and cap configuration
# Technology parameter ######
if re.search('sky130',args.platform):
k_sqrt_rc = 6.1E-6
deltaV = 0.10
unit_cap_capacitance = 2E-12
unit_r_resistance = 6750
#############################
# Determine the cap and switch size
dcdc_cap_size = int((Iload * 0.001) / (2 * deltaV * Frequency * 1000) / 2 / unit_cap_capacitance)
if dcdc_cap_size == 0:
dcdc_cap_size = 1
dcdc_sw_size = int(unit_r_resistance / (k_sqrt_rc * SupplyVolt * math.sqrt(Frequency * 1000) / (Iload * 0.001)))
if dcdc_sw_size == 0:
dcdc_sw_size = 1
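    # Worked example (added illustration with assumed spec values, not from the
    # original generator): Iload = 0.5 mA, Frequency = 1000 kHz, SupplyVolt = 1.8 V
    # on sky130 (deltaV = 0.10, unit cap = 2e-12 F, unit R = 6750 ohm):
    #   dcdc_cap_size = int(0.0005 / (2 * 0.10 * 1e6) / 2 / 2e-12)       -> 625
    #   dcdc_sw_size  = int(6750 / (6.1e-6 * 1.8 * sqrt(1e6) / 0.0005))  -> 307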
# Determine Offset_y
# offset_y = 50 * int(dcdc_sw_size / (1<<(dcdc_num_stage-1))) # Eventually will need this to tune the APR settings
# if offset_y == 0:
# offset_y = 50
# Determine metals for power lines # Update
# if args.platform == 'gf12lp':
# pg_m_h = "K3"
# pg_m_v = "K2"
# pg_via_hv = "U2"
# pg_unit_cap = "H2"
# else:
# pg_m_h = "M7"
# pg_m_v = "M6"
# pg_via_hv = "VIA6"
# pg_unit_cap = "M9"
# Test Samples
#dcdc_num_stage = 2;
#dcdc_cap_size = 8;
#dcdc_sw_size = 4;
#dcdc_num_stage = 4;
#dcdc_cap_size = 48;
#dcdc_sw_size = 12;
#dcdc_num_stage = 4;
#dcdc_cap_size = 8;
#dcdc_sw_size = 4;
print('\n\n<DCDC Configuration>')
print('dcdc_cap_size: ' + str(dcdc_cap_size))
print('dcdc_sw_size: ' + str(dcdc_sw_size) + '\n\n')
# 6-stage conv Verilog Modification
with open(srcDir + '/DCDC_SIX_STAGES_CONV.template.v', 'r') as file:
filedata = file.read()
filedata = re.sub(r'parameter DCDC_CAP_SIZE = \d+;', r'parameter DCDC_CAP_SIZE = ' + str(dcdc_cap_size) + ';', filedata)
filedata = re.sub(r'parameter DCDC_SW_SIZE = \d+;', r'parameter DCDC_SW_SIZE = ' + str(dcdc_sw_size) + ';', filedata)
with open(srcDir + '/DCDC_SIX_STAGES_CONV.v', 'w') as file:
file.write(filedata)
return
| 34.945783
| 128
| 0.616101
|
5f71fb1d9bac0dd39145e51c20a9ba2e5f8f60d0
| 435
|
py
|
Python
|
eth_abi/utils/string.py
|
kclowes/eth-abi
|
d164feee62e54af81c69b7636618cf2b36dd8ce6
|
[
"MIT"
] | 61
|
2019-07-03T07:40:17.000Z
|
2022-03-06T13:30:53.000Z
|
eth_abi/utils/string.py
|
kclowes/eth-abi
|
d164feee62e54af81c69b7636618cf2b36dd8ce6
|
[
"MIT"
] | 105
|
2019-07-25T08:48:59.000Z
|
2022-03-23T03:47:34.000Z
|
eth_abi/utils/string.py
|
kclowes/eth-abi
|
d164feee62e54af81c69b7636618cf2b36dd8ce6
|
[
"MIT"
] | 61
|
2019-07-03T06:58:42.000Z
|
2022-02-16T08:50:14.000Z
|
from typing import (
Any,
)
def abbr(value: Any, limit: int = 20) -> str:
"""
Converts a value into its string representation and abbreviates that
representation based on the given length `limit` if necessary.
"""
rep = repr(value)
if len(rep) > limit:
if limit < 3:
raise ValueError('Abbreviation limit may not be less than 3')
rep = rep[:limit - 3] + '...'
return rep
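if __name__ == '__main__':
    # Added usage sketch (not part of the original module): long representations
    # are cut down to `limit` characters, including the trailing '...'.
    print(abbr('a' * 50))             # -> "'aaaaaaaaaaaaaaaa..." (20 characters, the default limit)
    print(abbr([1, 2, 3], limit=30))  # short enough, printed unchanged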
| 21.75
| 73
| 0.597701
|
072e6fc797520341c47d9f0dd007069870cb1147
| 17,420
|
py
|
Python
|
ptpip/ptpip.py
|
darkarnium/ptpip
|
c54eed4d7509ecfc6973a00496a9e80fb7473fa2
|
[
"Apache-2.0"
] | null | null | null |
ptpip/ptpip.py
|
darkarnium/ptpip
|
c54eed4d7509ecfc6973a00496a9e80fb7473fa2
|
[
"Apache-2.0"
] | null | null | null |
ptpip/ptpip.py
|
darkarnium/ptpip
|
c54eed4d7509ecfc6973a00496a9e80fb7473fa2
|
[
"Apache-2.0"
] | null | null | null |
import uuid
import time
import socket
import struct
class PtpIpConnection(object):
"""docstring for PtpIP"""
def __init__(self):
super(PtpIpConnection, self).__init__()
self.session = None
self.session_events = None
self.session_id = None
self.cmd_queue = []
self.event_queue = []
self.object_queue = []
def open(self, host='192.168.1.1', port=15740):
# Open both session, first one for for commands, second for events
self.session = self.connect(host=host, port=port)
self.send_recieve_ptpip_packet(PtpIpInitCmdReq(), self.session)
self.session_events = self.connect(host=host, port=port)
self.send_recieve_ptpip_packet(PtpIpEventReq(), self.session_events)
# 0x1002 OpenSession
ptip_cmd = PtpIpCmdRequest(cmd=0x1002, param1=struct.unpack('L', self.session_id)[0])
self.send_recieve_ptpip_packet(ptip_cmd, self.session)
def communication_thread(self):
while True:
if len(self.cmd_queue) == 0:
                # do a ping and receive a pong (same as ping) as a reply to keep the connection alive
                # couldn't get any reply to a proper PtpIpPing packet, so the device status
                # is queried instead
ptpip_packet_reply = self.send_recieve_ptpip_packet(PtpIpCmdRequest(cmd=0x90C8),
self.session)
if isinstance(ptpip_packet_reply, PtpIpCmdResponse):
time.sleep(1)
continue
else:
# get the next command from command the queue
ptip_cmd = self.cmd_queue.pop()
ptpip_packet_reply = self.send_recieve_ptpip_packet(ptip_cmd, self.session)
                if (ptpip_packet_reply.ptp_response_code == 0x2001 or \
                        ptpip_packet_reply.ptp_response_code == 0x2019):
                    print("Cmd sent successfully")
else:
print(f"cmd reply is: {ptpip_packet_reply.ptp_response_code}")
# wait 1 second before new packets are processed/send to the camera
time.sleep(1)
pass
def send_ptpip_cmd(self, ptpip_packet):
self.cmd_queue.append(ptpip_packet)
def connect(self, host='192.168.1.1', port=15740):
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
s.connect((host, port))
except socket.error as message:
if s:
s.close()
print(f"Could not open socket: {message}")
return s
def send_recieve_ptpip_packet(self, ptpip_packet, session):
if isinstance(ptpip_packet, PtpIpInitCmdReq):
self.send_data(ptpip_packet.data(), session)
# set the session id of the object if the reply is of type PtpIpInitCmdAck
ptpip_packet_reply = PtpIpPacket().factory(data=self.recieve_data(session))
if isinstance(ptpip_packet_reply, PtpIpInitCmdAck):
self.session_id = ptpip_packet_reply.session_id
elif isinstance(ptpip_packet, PtpIpEventReq):
self.send_ptpip_event_req(ptpip_packet, session)
ptpip_packet_reply = PtpIpPacket().factory(data=self.recieve_data(session))
elif isinstance(ptpip_packet, PtpIpCmdRequest) and ptpip_packet.ptp_cmd == 0x90C7:
self.send_data(ptpip_packet.data(), session)
ptpip_packet_reply = PtpIpPacket().factory(data=self.recieve_data(session))
if isinstance(ptpip_packet_reply, PtpIpStartDataPacket):
data_length = struct.unpack('I', ptpip_packet_reply.length)[0]
ptpip_packet_reply = PtpIpPacket().factory(data=self.recieve_data(session))
data = ptpip_packet_reply.data
while isinstance(ptpip_packet_reply, PtpIpDataPacket):
data = data + ptpip_packet_reply.data
ptpip_packet_reply = PtpIpPacket().factory(data=self.recieve_data(session))
if data_length == len(data):
events = PtpIpEventFactory(data).get_events()
for event in events:
self.event_queue.append(event)
ptpip_packet_reply = PtpIpPacket().factory(data=self.recieve_data(session))
elif isinstance(ptpip_packet, PtpIpCmdRequest) and ptpip_packet.ptp_cmd == 0x1009:
self.send_data(ptpip_packet.data(), session)
ptpip_packet_reply = PtpIpPacket().factory(data=self.recieve_data(session))
if isinstance(ptpip_packet_reply, PtpIpStartDataPacket):
data_length = struct.unpack('I', ptpip_packet_reply.length)[0]
ptpip_packet_reply = PtpIpPacket().factory(data=self.recieve_data(session))
data = ptpip_packet_reply.data
while isinstance(ptpip_packet_reply, PtpIpDataPacket):
data = data + ptpip_packet_reply.data
ptpip_packet_reply = PtpIpPacket().factory(data=self.recieve_data(session))
if data_length == len(data):
self.object_queue.append(PtpIpDataObject(ptpip_packet.param1, data))
ptpip_packet_reply = PtpIpPacket().factory(data=self.recieve_data(session))
else:
self.send_data(ptpip_packet.data(), session)
ptpip_packet_reply = PtpIpPacket().factory(data=self.recieve_data(session))
return ptpip_packet_reply
def send_ptpip_event_req(self, ptpip_packet, session):
# add the session id of the object itself if it is not specified in the package
if ptpip_packet.session_id is None:
ptpip_packet.session_id = self.session_id
self.send_data(ptpip_packet.data(), session)
def send_data(self, data, session):
session.send(struct.pack('I', len(data) + 4) + data)
def recieve_data(self, session):
data = session.recv(4)
(data_length,) = struct.unpack('I', data)
print(f"Packet length: {data_length}")
while (data_length) > len(data):
data += session.recv(data_length - len(data))
return data[4:]
class PtpIpPacket(object):
"""docstring for PtpIpCmd"""
def __init__(self):
super(PtpIpPacket, self).__init__()
def factory(self, data=None):
if data is None:
self.cmdtype = None
else:
print(f"Cmd Type: {struct.unpack('I', data[0:4])[0]}")
self.cmdtype = struct.unpack('I', data[0:4])[0]
if self.cmdtype == 1:
return PtpIpInitCmdReq(data[4:])
elif self.cmdtype == 2:
return PtpIpInitCmdAck(data[4:])
elif self.cmdtype == 3:
return PtpIpEventReq(data[4:])
elif self.cmdtype == 4:
return PtpIpEventAck(data[4:])
elif self.cmdtype == 5:
return PtpIpInitFail(data[4:])
elif self.cmdtype == 6:
return PtpIpCmdRequest(data[4:])
elif self.cmdtype == 7:
return PtpIpCmdResponse(data[4:])
elif self.cmdtype == 9:
return PtpIpStartDataPacket(data[4:])
elif self.cmdtype == 10:
return PtpIpDataPacket(data[4:])
elif self.cmdtype == 12:
return PtpIpEndDataPacket(data[4:])
elif self.cmdtype == 13:
return PtpIpPing(data[4:])
def data(self):
pass
class PtpIpInitCmdReq(PtpIpPacket):
"""docstring for PtpIpInitCmd"""
def __init__(self, data=None):
super(PtpIpInitCmdReq, self).__init__()
self.cmdtype = struct.pack('I', 0x01)
self.version = struct.pack('>I', 0x0100)
if data is None:
guid = uuid.uuid4()
self.guid = guid.bytes
self.hostname = socket.gethostname() + '\x00'
self.hostname = self.hostname.encode('utf-16-le')
else:
self.guid = data[0:16]
            self.hostname = data[16:]
def data(self):
return self.cmdtype + self.guid + self.hostname + self.version
class PtpIpInitCmdAck(PtpIpPacket):
"""docstring for PtpIpInitCmd"""
def __init__(self, data=None):
super(PtpIpInitCmdAck, self).__init__()
self.cmdtype = struct.pack('I', 0x02)
if data is not None:
self.session_id = data[0:4]
self.guid = data[4:20]
self.hostname = data[20:]
class PtpIpEventReq(PtpIpPacket):
"""docstring for PtpIpInitCmd"""
def __init__(self, data=None, session_id=None):
super(PtpIpEventReq, self).__init__()
self.cmdtype = struct.pack('I', 0x03)
self.session_id = None
if data is not None:
self.session_id = data[0:4]
elif session_id is not None:
self.session_id = session_id
def data(self):
if self.session_id:
return self.cmdtype + self.session_id
return self.cmdtype
class PtpIpEventAck(PtpIpPacket):
"""docstring for PtpIpInitCmd"""
def __init__(self, data=None):
super(PtpIpEventAck, self).__init__()
self.cmdtype = struct.pack('I', 0x04)
class PtpIpInitFail(PtpIpPacket):
"""docstring for PtpIpInitCmd"""
def __init__(self, data=None):
super(PtpIpInitFail, self).__init__()
self.cmdtype = struct.pack('I', 0x05)
class PtpIpCmdRequest(PtpIpPacket):
"""
Operation Code Description
0x1001 GetDeviceInfo
0x1002 OpenSession
0x1003 CloseSession
0x1004 GetStorageIDs
0x1005 GetStorageInfo
0x1006 GetNumObjects
0x1007 GetObjectHandles
0x1008 GetObjectInfo
0x1009 GetObject
0x100A GetThumb
0x100B DeleteObject
0x100C SendObjectInfo
0x100D SendObject
0x100E InitiateCapture
0x100F FormatStore
0x1014 GetDevicePropDesc
0x1015 GetDevicePropValue
0x1016 SetDevicePropValue
0x101B GetPartialObject
0x90C0 InitiateCaptureRecInSdram
0x90C1 AfDrive
0x90C2 ChangeCameraMode
0x90C3 DeleteImagesInSdram
0x90C4 GetLargeThumb
0x90C7 GetEvent
0x90C8 DeviceReady
0x90C9 SetPreWbData
0x90CA GetVendorPropCodes
0x90CB AfAndCaptureRecInSdram
0x90CC GetPicCtrlData
0x90CD SetPicCtrlData
0x90CE DeleteCustomPicCtrl
0x90CF GetPicCtrlCapability
0x9201 StartLiveView
0x9202 EndLiveView
0x9203 GetLiveViewImage
0x9204 MfDrive
0x9205 ChangeAfArea
0x9206 AfDriveCancel
0x9207 InitiateCaptureRecInMedia
0x9209 GetVendorStorageIDs
0x920A StartMovieRecInCard
0x920B EndMovieRec
0x920C TerminateCapture
0x9400 GetPartialObjectHighSpeed
0x9407 SetTransferListLock
0x9408 GetTransferList
0x9409 NotifyFileAcquisitionStart
0x940A NotifyFileAcquisitionEnd
0x940B GetSpecificSizeObject
0x9801 GetObjectPropsSupported
0x9802 GetObjectPropDesc
0x9803 GetObjectPropValue
0x9805 GetObjectPropList
"""
def __init__(self, data=None, cmd=None, param1=None, param2=None, param3=None, param4=None,
param5=None):
super(PtpIpCmdRequest, self).__init__()
self.cmdtype = struct.pack('I', 0x06)
self.unkown = struct.pack('I', 0x01)
self.ptp_cmd = cmd
self.param1 = param1
self.param2 = param2
self.param3 = param3
self.param4 = param4
self.param5 = param5
# Todo: Transaction ID generieren
self.transaction_id = struct.pack('I', 0x06)
        self.args = b''
if self.param1 is not None:
self.args = self.args + struct.pack('L', self.param1)
if self.param2 is not None:
self.args = self.args + struct.pack('L', self.param2)
if self.param3 is not None:
self.args = self.args + struct.pack('L', self.param3)
if self.param4 is not None:
self.args = self.args + struct.pack('L', self.param4)
if self.param5 is not None:
self.args = self.args + struct.pack('L', self.param5)
def data(self):
return self.cmdtype + self.unkown + struct.pack('H', self.ptp_cmd) + \
self.transaction_id + self.args
class PtpIpCmdResponse(PtpIpPacket):
"""
ResponseCode Description
0x2000 Undefined
0x2001 OK
0x2002 General Error
0x2003 Session Not Open
0x2004 Invalid TransactionID
0x2005 Operation Not Supported
0x2006 Parameter Not Supported
0x2007 Incomplete Transfer
0x2008 Invalid StorageID
0x2009 Invalid ObjectHandle
0x200A DeviceProp Not Supported
0x200B Invalid ObjectFormatCode
0x200C Store Full
0x200D Object WriteProtected
0x200E Store Read-Only
0x200F Access Denied
0x2010 No Thumbnail Present
0x2011 SelfTest Failed
0x2012 Partial Deletion
0x2013 Store Not Available
0x2014 Specification By Format Unsupported
0x2015 No Valid ObjectInfo
0x2016 Invalid Code Format
0x2017 Unknown Vendor Code
0x2018 Capture Already Terminated
0x2019 Device Busy
0x201A Invalid ParentObject
0x201B Invalid DeviceProp Format
0x201C Invalid DeviceProp Value
0x201D Invalid Parameter
0x201E Session Already Open
0x201F Transaction Cancelled
0x2020 Specification of Destination Unsupported
"""
def __init__(self, data=None):
super(PtpIpCmdResponse, self).__init__()
self.cmdtype = struct.pack('I', 0x07)
if data is not None:
self.ptp_response_code = struct.unpack('H', data[0:2])[0]
self.transaction_id = data[2:6]
self.args = data[6:]
class PtpIpStartDataPacket(PtpIpPacket):
"""docstring for Start_Data_Packet"""
def __init__(self, data=None):
self.cmdtype = struct.pack('I', 0x09)
super(PtpIpStartDataPacket, self).__init__()
if data is not None:
self.transaction_id = data[0:4]
self.length = data[4:8]
class PtpIpDataPacket(PtpIpPacket):
"""docstring for Start_Data_Packet"""
def __init__(self, data=None):
        self.cmdtype = struct.pack('I', 0x0A)  # Data Packet type is 10, matching factory() above
super(PtpIpDataPacket, self).__init__()
if data is not None:
self.transaction_id = data[0:4]
self.data = data[4:]
class PtpIpCancelTransaction(PtpIpPacket):
"""docstring for Start_Data_Packet"""
def __init__(self, data=None):
        self.cmdtype = struct.pack('I', 0x0B)  # Cancel Transaction type is 11
super(PtpIpCancelTransaction, self).__init__()
if data is not None:
self.transaction_id = data[0:4]
class PtpIpEndDataPacket(PtpIpPacket):
"""docstring for Start_Data_Packet"""
def __init__(self, data=None):
        self.cmdtype = struct.pack('I', 0x0C)  # End Data Packet type is 12, matching factory() above
super(PtpIpEndDataPacket, self).__init__()
if data is not None:
self.transaction_id = data[0:4]
print(f"transaction_id: {struct.unpack('I', self.transaction_id)[0]}")
self.data = data[4:]
class PtpIpPing(PtpIpPacket):
"""docstring for Start_Data_Packet"""
def __init__(self, data=None):
        self.cmdtype = struct.pack('I', 0x0D)  # Ping type is 13, matching factory() above
super(PtpIpPing, self).__init__()
if data is not None:
self.data = ''
def data(self):
return self.cmdtype
class PtpIpEvent(object):
"""
EventCode Description
0x4001 CancelTransaction
0x4002 ObjectAdded
0x4003 ObjectRemoved
0x4004 StoreAdded
0x4005 StoreRemoved
0x4006 DevicePropChanged
0x4007 ObjectInfoChanged
0x4008 DeviceInfoChanged
0x4009 RequestObjectTransfer
0x400A StoreFull
0x400C StorageInfoChanged
0x400D CaptureComplete
0xC101 ObjectAddedInSdram
0xC102 CaptureCompleteRecInSdram
0xC105 RecordingInterrupted
"""
def __init__(self, event_code, event_parameter):
super(PtpIpEvent, self).__init__()
self.event_code = int(event_code)
self.event_parameter = int(event_parameter)
class PtpIpEventFactory(object):
"""
This is a factory to produce an array of PtpIpEvent objects if it got passd a data reply
from a GetEvent request 0x90C7
"""
def __init__(self, data):
super(PtpIpEventFactory, self).__init__()
# create an empty array for the PtpIpEvent object which will be replied
self.events = []
# get the amount of events passed from the data passed to the factory
amount_of_events = struct.unpack('H', data[0:2])[0]
# set an counter and an offset of 2 as the first two bytes are already processed
counter = 1
offset = 2
while counter <= amount_of_events:
# get the event_code which consists of two bytes
event_code = str(struct.unpack('H', data[offset:offset+2])[0])
# get the event_parameter which consists of 4 bytes
event_parameter = str(struct.unpack('I', data[offset+2:offset+6])[0])
self.events.append(PtpIpEvent(event_code, event_parameter))
# increase the offset by 6 to get to the next event_code and event_parameter pair
offset = offset + 6
counter = counter + 1
def get_events(self):
return self.events
class PtpIpDataObject(object):
"""docstring for PtpIpDataObject"""
def __init__(self, object_handle, data):
super(PtpIpDataObject, self).__init__()
self.object_handle = object_handle
self.data = data
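if __name__ == '__main__':
    # Added usage sketch (not part of the original module). Assumes a PTP/IP
    # capable camera is reachable at the default address used in open(); the
    # final call blocks forever in the send/receive loop.
    conn = PtpIpConnection()
    conn.open(host='192.168.1.1', port=15740)         # handshake + OpenSession (0x1002)
    conn.send_ptpip_cmd(PtpIpCmdRequest(cmd=0x90C8))  # queue a DeviceReady request
    conn.communication_thread()                       # blocking send/receive loop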
| 34.701195
| 99
| 0.644259
|
c9fd7af548a2afba2d60b0cbf85155401be9c48a
| 953
|
py
|
Python
|
sql_helpers/pmpermit_sql.py
|
aashiq075/PepeBot
|
5f40f4316c84ec3875bcbcd476e10448f9214f31
|
[
"Apache-2.0"
] | 1
|
2020-08-18T07:26:43.000Z
|
2020-08-18T07:26:43.000Z
|
sql_helpers/pmpermit_sql.py
|
aashiq075/PepeBot
|
5f40f4316c84ec3875bcbcd476e10448f9214f31
|
[
"Apache-2.0"
] | null | null | null |
sql_helpers/pmpermit_sql.py
|
aashiq075/PepeBot
|
5f40f4316c84ec3875bcbcd476e10448f9214f31
|
[
"Apache-2.0"
] | null | null | null |
from sqlalchemy import Column, String
from sql_helpers import SESSION, BASE
class PMPermit(BASE):
__tablename__ = "pmpermit"
chat_id = Column(String(14), primary_key=True)
reason = Column(String(127))
def __init__(self, chat_id, reason=""):
self.chat_id = chat_id
self.reason = reason
PMPermit.__table__.create(checkfirst=True)
def is_approved(chat_id):
try:
return SESSION.query(PMPermit).filter(
PMPermit.chat_id == str(chat_id)).one()
except BaseException:
return None
finally:
SESSION.close()
def approve(chat_id, reason):
adder = PMPermit(str(chat_id), str(reason))
SESSION.add(adder)
SESSION.commit()
def disapprove(chat_id):
rem = SESSION.query(PMPermit).get(str(chat_id))
if rem:
SESSION.delete(rem)
SESSION.commit()
def get_all_approved():
rem = SESSION.query(PMPermit).all()
SESSION.close()
return rem
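if __name__ == "__main__":
    # Added usage sketch (not part of the original module). Assumes SESSION/BASE
    # from sql_helpers are already bound to a configured database.
    approve("123456789", "friend")
    print(is_approved("123456789"))   # -> PMPermit row, or None once disapproved
    disapprove("123456789")
    print(get_all_approved())         # -> remaining approved chats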
| 21.177778
| 51
| 0.660021
|
9c1d518332e1ae35c1ef2c0e68a984fbc0ec675b
| 1,170
|
py
|
Python
|
reflowreader.py
|
ljaworski88/Reflow-Oven
|
c87898f453b994c7f34ef4d977b0f915bad2a711
|
[
"MIT"
] | null | null | null |
reflowreader.py
|
ljaworski88/Reflow-Oven
|
c87898f453b994c7f34ef4d977b0f915bad2a711
|
[
"MIT"
] | null | null | null |
reflowreader.py
|
ljaworski88/Reflow-Oven
|
c87898f453b994c7f34ef4d977b0f915bad2a711
|
[
"MIT"
] | null | null | null |
import serial
import numpy
import matplotlib.pyplot as plt
from drawnow import *
setpoints = []
avgTemp = []
PIDoutput = []
tempData = serial.Serial('COM3', 115200)
plt.ion()
tempBuffer = 0
def makeFig():
plt.ylim(20, 250)
plt.xlim(0, 360)
plt.title('Reflow Controller Temp and Setpoint Data')
plt.grid(True)
plt.ylabel('Temp C')
plt.plot(setpoints, 'ro-', label='Setpoints')
plt.plot(avgTemp, 'b^-', label='Temperature')
plt.legend(loc='upper left')
plt2 = plt.twinx()
plt.ylim(0,100)
plt2.plot(PIDoutput, 'go-', label = 'PID Output')
plt2.set_ylabel('PID')
plt2.ticklabel_format(useOffset = False)
while True:
while (tempData.inWaiting()==0):
pass
    dataString = tempData.readline().decode('ascii')
    dataArray = dataString.split(',')
tempPID = float(dataArray[0])
tempSetpoint = float(dataArray[1])
tempTemp = float(dataArray[2])
PIDoutput.append(tempPID)
setpoints.append(tempSetpoint)
avgTemp.append(tempTemp)
drawnow(makeFig)
plt.pause(0.000001)
tempBuffer += 1
if(tempBuffer > 360):
PIDoutput.pop(0)
setpoints.pop(0)
avgTemp.pop(0)
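# Note (added): each serial line is expected to be a comma separated triple of
# 'PID output, setpoint, averaged temperature', e.g. '42.0,150.0,148.3'.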
| 23.877551
| 57
| 0.64188
|
f2eb59bdf4ca1889637a1707aa59d324fbe6c681
| 1,536
|
py
|
Python
|
sok/management/commands/stats.py
|
josepaiva94/sokman
|
c7848600ba3f61c55b25521e8f2c855a314cc1b2
|
[
"0BSD"
] | 4
|
2021-03-10T14:19:56.000Z
|
2021-03-23T15:43:29.000Z
|
sok/management/commands/stats.py
|
josepaiva94/sokman
|
c7848600ba3f61c55b25521e8f2c855a314cc1b2
|
[
"0BSD"
] | 2
|
2021-04-08T07:02:57.000Z
|
2021-04-09T17:30:54.000Z
|
sok/management/commands/stats.py
|
josepaiva94/sokman
|
c7848600ba3f61c55b25521e8f2c855a314cc1b2
|
[
"0BSD"
] | 1
|
2021-04-09T19:27:33.000Z
|
2021-04-09T19:27:33.000Z
|
from typing import Set
from django.core.management.base import BaseCommand
from django.db.models import Count, Q
import sok.management.commands.dblpimport as dblp
from sok.models import Publication, SearchTerm
class Command(BaseCommand):
def echo(self, msg: str, bold=True):
if bold:
msg = self.style.HTTP_INFO(msg)
self.stdout.write(msg)
# BaseCommand
def handle(self, *args, **options):
publications_found: Set[str] = set()
publications_peer_reviewed: Set[str] = set()
publications_relevant: Set[str] = set()
self.echo("Loading DBLP dump...")
all_cite_keys = dblp.get_all_cite_keys(dblp.DUMP_PATH)
for search_term in SearchTerm.objects.all():
# DBLP search result
self.echo(f"Searching DBLP for '{search_term}'")
query, results, total = dblp.PublicationResult.from_search(search_term.name, 1000)
for result in results:
if result.cite_key not in all_cite_keys:
continue
publications_found.add(result.cite_key)
if result.is_peer_reviewed:
publications_peer_reviewed.add(result.cite_key)
# Relevant publications
for publication in Publication.objects.filter(
publicationsource__search_term=search_term,
exclusion_criteria__isnull=True,
).distinct():
publications_relevant.add(publication.cite_key)
# Output
self.echo(f"Total publications: {len(publications_found):4d}", bold=True)
self.echo(f"- peer reviewed: {len(publications_peer_reviewed):4d}", bold=True)
self.echo(f"- relevant: {len(publications_relevant):4d}", bold=True)
| 30.72
| 85
| 0.744792
|
52e47ce599d2cd934de9845cdf5c9e557c7c6645
| 33,925
|
py
|
Python
|
src/plottoolbox/functions/taylor.py
|
timcera/plottoolbox
|
b5f4b634d366eb5ba244e2f1fd33a7ef0eba7298
|
[
"BSD-3-Clause"
] | null | null | null |
src/plottoolbox/functions/taylor.py
|
timcera/plottoolbox
|
b5f4b634d366eb5ba244e2f1fd33a7ef0eba7298
|
[
"BSD-3-Clause"
] | 6
|
2021-09-06T21:26:12.000Z
|
2022-03-30T11:55:56.000Z
|
src/plottoolbox/functions/taylor.py
|
timcera/plottoolbox
|
b5f4b634d366eb5ba244e2f1fd33a7ef0eba7298
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Collection of functions for the manipulation of time series."""
from __future__ import absolute_import, division, print_function
import itertools
import os
import warnings
import mando
import numpy as np
import pandas as pd
from mando.rst_text_formatter import RSTHelpFormatter
from tstoolbox import tsutils
from .. import plotutils
warnings.filterwarnings("ignore")
@mando.command("taylor", formatter_class=RSTHelpFormatter, doctype="numpy")
@tsutils.doc(plotutils.ldocstrings)
def taylor_cli(
input_ts="-",
columns=None,
start_date=None,
end_date=None,
clean=False,
skiprows=None,
index_type="datetime",
names=None,
ofilename="plot.png",
xtitle="",
ytitle="",
title="",
figsize="10,6.0",
legend=None,
legend_names=None,
subplots=False,
sharex=True,
sharey=False,
colors="auto",
linestyles="auto",
markerstyles=" ",
bar_hatchstyles="auto",
style="auto",
logx=False,
logy=False,
xaxis="arithmetic",
yaxis="arithmetic",
xlim=None,
ylim=None,
secondary_y=False,
mark_right=True,
scatter_matrix_diagonal="kde",
bootstrap_size=50,
bootstrap_samples=500,
norm_xaxis=False,
norm_yaxis=False,
lognorm_xaxis=False,
lognorm_yaxis=False,
xy_match_line="",
grid=False,
label_rotation=None,
label_skip=1,
force_freq=None,
drawstyle="default",
por=False,
invert_xaxis=False,
invert_yaxis=False,
round_index=None,
plotting_position="weibull",
prob_plot_sort_values="descending",
source_units=None,
target_units=None,
lag_plot_lag=1,
plot_styles="bright",
hlines_y=None,
hlines_xmin=None,
hlines_xmax=None,
hlines_colors=None,
hlines_linestyles="-",
vlines_x=None,
vlines_ymin=None,
vlines_ymax=None,
vlines_colors=None,
vlines_linestyles="-",
):
r"""Taylor diagram to plot goodness of fit.
"taylor" will create a taylor diagram that compares three goodness of fit
statistics on one plot. The three goodness of fit statistics calculated
and displayed are standard deviation, correlation coefficient, and centered
root mean square deviation. The data columns have to be organized as
'observed,simulated1,simulated2,simulated3,...etc.'
Parameters
----------
{input_ts}
ofilename : str
[optional, defaults to 'plot.png']
Output filename for the plot. Extension defines
the type, for example 'filename.png' will create a PNG file.
If used within Python, and `ofilename` is None will return the
Matplotlib figure that can then be changed or added to as
needed.
lag_plot_lag
[optional, default to 1]
The lag used if ``type`` "lag_plot" is chosen.
xtitle : str
[optional, default depends on ``type``]
Title of x-axis.
ytitle : str
[optional, default depends on ``type``]
Title of y-axis.
title : str
[optional, defaults to '']
Title of chart.
figsize : str
[optional, defaults to '10,6.5']
The 'width,height' of plot in inches.
legend
[optional, defaults to True]
Whether to display the legend.
legend_names : str
[optional, defaults to None]
Legend would normally use the time-series names associated with
the input data. The 'legend_names' option allows you to
override the names in the data set. You must supply a comma
separated list of strings for each time-series in the data set.
subplots
[optional, defaults to False]
Make separate subplots for each time series.
sharex
[optional, default to True]
In case subplots=True, share x axis.
sharey
[optional, default to False]
In case subplots=True, share y axis.
colors
[optional, default is 'auto']
The default 'auto' will cycle through matplotlib colors in the chosen
style.
At the command line supply a comma separated matplotlib
color codes, or within Python a list of color code strings.
Can identify colors in four different ways.
1. Use 'CN' where N is a number from 0 to 9 that gets the Nth color
from the current style.
2. Single character code from the table below.
+------+---------+
| Code | Color |
+======+=========+
| b | blue |
+------+---------+
| g | green |
+------+---------+
| r | red |
+------+---------+
| c | cyan |
+------+---------+
| m | magenta |
+------+---------+
| y | yellow |
+------+---------+
| k | black |
+------+---------+
3. Number between 0 and 1 that represents the level of gray, where 0 is
       white and 1 is black.
4. Any of the HTML color names.
+------------------+
| HTML Color Names |
+==================+
| red |
+------------------+
| burlywood |
+------------------+
| chartreuse |
+------------------+
| ...etc. |
+------------------+
Color reference:
http://matplotlib.org/api/colors_api.html
linestyles
[optional, default to 'auto']
If 'auto' will iterate through the available matplotlib line types.
Otherwise on the command line a comma separated list, or a list of
strings if using the Python API.
To not display lines use a space (' ') as the linestyle code.
    Use the separate 'colors', 'linestyles', and 'markerstyles' options instead
    of the 'style' keyword.
+---------+--------------+
| Code | Lines |
+=========+==============+
| ``-`` | solid |
+---------+--------------+
| -- | dashed |
+---------+--------------+
| -. | dash_dot |
+---------+--------------+
| : | dotted |
+---------+--------------+
| None | draw nothing |
+---------+--------------+
| ' ' | draw nothing |
+---------+--------------+
| '' | draw nothing |
+---------+--------------+
Line reference:
http://matplotlib.org/api/artist_api.html
markerstyles
[optional, default to ' ']
The default ' ' will not plot a marker. If 'auto' will iterate through
the available matplotlib marker types. Otherwise on the command line
a comma separated list, or a list of strings if using the Python API.
    Use the separate 'colors', 'linestyles', and 'markerstyles' options instead
    of the 'style' keyword.
+-------+----------------+
| Code | Markers |
+=======+================+
| . | point |
+-------+----------------+
| o | circle |
+-------+----------------+
| v | triangle down |
+-------+----------------+
| ^ | triangle up |
+-------+----------------+
| < | triangle left |
+-------+----------------+
| > | triangle right |
+-------+----------------+
| 1 | tri_down |
+-------+----------------+
| 2 | tri_up |
+-------+----------------+
| 3 | tri_left |
+-------+----------------+
| 4 | tri_right |
+-------+----------------+
| 8 | octagon |
+-------+----------------+
| s | square |
+-------+----------------+
| p | pentagon |
+-------+----------------+
| ``*`` | star |
+-------+----------------+
| h | hexagon1 |
+-------+----------------+
| H | hexagon2 |
+-------+----------------+
| ``+`` | plus |
+-------+----------------+
| x | x |
+-------+----------------+
| D | diamond |
+-------+----------------+
| d | thin diamond |
+-------+----------------+
| _ | hlines_y |
+-------+----------------+
| None | nothing |
+-------+----------------+
| ' ' | nothing |
+-------+----------------+
| '' | nothing |
+-------+----------------+
Marker reference:
http://matplotlib.org/api/markers_api.html
style
[optional, default is None]
    Still available, but if None it is replaced by the 'colors', 'linestyles',
    and 'markerstyles' options. Currently the 'style' option will override the
others.
Comma separated matplotlib style strings per time-series. Just
combine codes in 'ColorMarkerLine' order, for example 'r*--' is
a red dashed line with star marker.
bar_hatchstyles
[optional, default to "auto", only used if type equal to "bar", "barh",
"bar_stacked", and "barh_stacked"]
If 'auto' will iterate through the available matplotlib hatch types.
Otherwise on the command line a comma separated list, or a list of
strings if using the Python API.
+-----------------+-------------------+
| bar_hatchstyles | Description |
+=================+===================+
| / | diagonal hatching |
+-----------------+-------------------+
| ``\`` | back diagonal |
+-----------------+-------------------+
| ``|`` | vertical |
+-----------------+-------------------+
| - | horizontal |
+-----------------+-------------------+
| + | crossed |
+-----------------+-------------------+
| x | crossed diagonal |
+-----------------+-------------------+
| o | small circle |
+-----------------+-------------------+
| O | large circle |
+-----------------+-------------------+
| . | dots |
+-----------------+-------------------+
| * | stars |
+-----------------+-------------------+
logx
DEPRECATED: use '--xaxis="log"' instead.
logy
DEPRECATED: use '--yaxis="log"' instead.
xlim
[optional, default is based on range of x values]
Comma separated lower and upper limits for the x-axis of the
plot. For example, '--xlim 1,1000' would limit the plot from
1 to 1000, where '--xlim ,1000' would base the lower limit on
the data and set the upper limit to 1000.
ylim
[optional, default is based on range of y values]
Comma separated lower and upper limits for the y-axis of the
plot. See `xlim` for examples.
xaxis : str
[optional, default is 'arithmetic']
Defines the type of the xaxis. One of 'arithmetic', 'log'.
yaxis : str
[optional, default is 'arithmetic']
Defines the type of the yaxis. One of 'arithmetic', 'log'.
secondary_y
[optional, default is False]
Whether to plot on the secondary y-axis. If a list/tuple, which
time-series to plot on secondary y-axis.
mark_right
[optional, default is True]
When using a secondary_y axis, should the legend label the axis of the
various time-series automatically.
scatter_matrix_diagonal : str
[optional, defaults to 'kde']
If plot type is 'scatter_matrix', this specifies the plot along the
diagonal. One of 'kde' for Kernel Density Estimation or 'hist'
for a histogram.
bootstrap_size : int
[optional, defaults to 50]
The size of the random subset for 'bootstrap' plot.
bootstrap_samples
[optional, defaults to 500]
The number of random subsets of 'bootstrap_size'.
norm_xaxis
DEPRECATED: use '--type="norm_xaxis"' instead.
norm_yaxis
DEPRECATED: use '--type="norm_yaxis"' instead.
lognorm_xaxis
DEPRECATED: use '--type="lognorm_xaxis"' instead.
lognorm_yaxis
DEPRECATED: use '--type="lognorm_yaxis"' instead.
xy_match_line : str
[optional, defaults is '']
Will add a match line where x == y. Set to a line style code.
grid
[optional, default is False]
Whether to plot grid lines on the major ticks.
label_rotation : int
[optional]
Rotation for major labels for bar plots.
label_skip : int
[optional]
Skip for major labels for bar plots.
drawstyle : str
[optional, default is 'default']
'default' connects the points with lines. The
steps variants produce step-plots. 'steps' is equivalent to 'steps-pre'
and is maintained for backward-compatibility.
ACCEPTS::
['default' | 'steps' | 'steps-pre' | 'steps-mid' | 'steps-post']
por
[optional]
Plot from first good value to last good value. Strips NANs
from beginning and end.
{force_freq}
invert_xaxis
[optional, default is False]
Invert the x-axis.
invert_yaxis
[optional, default is False]
Invert the y-axis.
plotting_position : str
[optional, default is 'weibull']
{plotting_position_table}
Only used for norm_xaxis, norm_yaxis, lognorm_xaxis,
lognorm_yaxis, weibull_xaxis, and weibull_yaxis.
prob_plot_sort_values : str
[optional, default is 'descending']
How to sort the values for the probability plots.
Only used for norm_xaxis, norm_yaxis, lognorm_xaxis,
lognorm_yaxis, weibull_xaxis, and weibull_yaxis.
{columns}
{start_date}
{end_date}
{clean}
{skiprows}
{index_type}
{names}
{source_units}
{target_units}
{round_index}
plot_styles: str
[optional, default is "default"]
Set the style of the plot. One or more of Matplotlib styles "classic",
"Solarize_Light2", "bmh", "dark_background", "fast", "fivethirtyeight",
"ggplot", "grayscale", "seaborn", "seaborn-bright",
"seaborn-colorblind", "seaborn-dark", "seaborn-dark-palette",
"seaborn-darkgrid", "seaborn-deep", "seaborn-muted",
"seaborn-notebook", "seaborn-paper", "seaborn-pastel",
"seaborn-poster", "seaborn-talk", "seaborn-ticks", "seaborn-white",
"seaborn-whitegrid", "tableau-colorblind10", and
SciencePlots styles "science", "grid", "ieee", "scatter", "notebook",
"high-vis", "bright", "vibrant", "muted", and "retro".
    If multiple styles are given then each overrides some or all of the
characteristics of the previous.
Color Blind Appropriate Styles
The styles "seaborn-colorblind", "tableau-colorblind10", "bright",
"vibrant", and "muted" are all styles that are setup to be able to be
distinguished by someone with color blindness.
Black, White, and Gray Styles
The "ieee" style is appropriate for black, white, and gray, however the
"ieee" also will change the chart size to fit in a column of the "IEEE"
journal.
The "grayscale" is another style useful for photo-copyable black,
    white, and gray.
Matplotlib styles:
https://matplotlib.org/3.3.1/gallery/style_sheets/style_sheets_reference.html
SciencePlots styles:
https://github.com/garrettj403/SciencePlots
hlines_y:
[optional, defaults to None]
Number or list of y values where to place a horizontal line.
hlines_xmin:
[optional, defaults to None]
List of minimum x values to start the horizontal line. If a list must
be same length as `hlines_y`. If a single number will be used as the
minimum x values for all horizontal lines. A missing value or None
will start at the minimum x value for the entire plot.
hlines_xmax:
[optional, defaults to None]
List of maximum x values to end each horizontal line. If a list must
be same length as `hlines_y`. If a single number will be the maximum
x value for all horizontal lines. A missing value or None will end at
the maximum x value for the entire plot.
hlines_colors:
[optional, defaults to None]
List of colors for the horizontal lines. If a single color then will
be used as the color for all horizontal lines. If a list must be same
        length as `hlines_y`. If None will take from the color palette in the
current plot style.
hlines_linestyles:
[optional, defaults to None]
List of linestyles for the horizontal lines. If a single linestyle
then will be used as the linestyle for all horizontal lines. If a list
must be same length as `hlines_y`. If None will take for the standard
linestyles list.
vlines_x:
[optional, defaults to None]
List of x values where to place a vertical line.
vlines_ymin:
[optional, defaults to None]
List of minimum y values to start the vertical line. If a list must be
same length as `vlines_x`. If a single number will be used as the
        minimum y value for all vertical lines. A missing value or None will
        start at the minimum y value for the entire plot.
vlines_ymax:
[optional, defaults to None]
List of maximum y values at which to end each vertical line. If a list,
it must be the same length as `vlines_x`. If a single number, it will be
used as the maximum y value for all vertical lines. A missing value or
None will end at the maximum y value for the entire plot.
vlines_colors:
[optional, defaults to None]
List of colors for the vertical lines. If a single color, it will be
used as the color for all vertical lines. If a list, it must be the same
length as `vlines_x`. If None, the colors will be taken from the color
palette of the current plot style.
vlines_linestyles:
[optional, defaults to None]
List of linestyles for the vertical lines. If a single linestyle, it will
be used as the linestyle for all vertical lines. If a list, it must be the
same length as `vlines_x`. If None, the linestyles will be taken from the
standard linestyles list.
"""
plt = taylor(
input_ts=input_ts,
columns=columns,
start_date=start_date,
end_date=end_date,
clean=clean,
skiprows=skiprows,
index_type=index_type,
names=names,
ofilename=ofilename,
xtitle=xtitle,
ytitle=ytitle,
title=title,
figsize=figsize,
legend=legend,
legend_names=legend_names,
subplots=subplots,
sharex=sharex,
sharey=sharey,
colors=colors,
linestyles=linestyles,
markerstyles=markerstyles,
bar_hatchstyles=bar_hatchstyles,
style=style,
logx=logx,
logy=logy,
xaxis=xaxis,
yaxis=yaxis,
xlim=xlim,
ylim=ylim,
secondary_y=secondary_y,
mark_right=mark_right,
scatter_matrix_diagonal=scatter_matrix_diagonal,
bootstrap_size=bootstrap_size,
bootstrap_samples=bootstrap_samples,
norm_xaxis=norm_xaxis,
norm_yaxis=norm_yaxis,
lognorm_xaxis=lognorm_xaxis,
lognorm_yaxis=lognorm_yaxis,
xy_match_line=xy_match_line,
grid=grid,
label_rotation=label_rotation,
label_skip=label_skip,
force_freq=force_freq,
drawstyle=drawstyle,
por=por,
invert_xaxis=invert_xaxis,
invert_yaxis=invert_yaxis,
round_index=round_index,
plotting_position=plotting_position,
prob_plot_sort_values=prob_plot_sort_values,
source_units=source_units,
target_units=target_units,
lag_plot_lag=lag_plot_lag,
plot_styles=plot_styles,
hlines_y=hlines_y,
hlines_xmin=hlines_xmin,
hlines_xmax=hlines_xmax,
hlines_colors=hlines_colors,
hlines_linestyles=hlines_linestyles,
vlines_x=vlines_x,
vlines_ymin=vlines_ymin,
vlines_ymax=vlines_ymax,
vlines_colors=vlines_colors,
vlines_linestyles=vlines_linestyles,
)
# @tsutils.validator(
# ofilename=[str, ["pass", []], 1],
# type=[str, ["domain", ["taylor",],], 1,],
# lag_plot_lag=[int, ["range", [1, None]], 1],
# xtitle=[str, ["pass", []], 1],
# ytitle=[str, ["pass", []], 1],
# title=[str, ["pass", []], 1],
# figsize=[float, ["range", [0, None]], 2],
# legend=[bool, ["domain", [True, False]], 1],
# legend_names=[str, ["pass", []], 1],
# subplots=[bool, ["domain", [True, False]], 1],
# sharex=[bool, ["domain", [True, False]], 1],
# sharey=[bool, ["domain", [True, False]], 1],
# colors=[str, ["pass", []], None],
# linestyles=[str, ["domain", ["auto", None, "", " ", " "] + plotutils.LINE_LIST], None],
# markerstyles=[str, ["domain", ["auto", None, "", " ", " "] + plotutils.MARKER_LIST], None],
# bar_hatchstyles=[str, ["domain", ["auto", None, "", " ", " "] + plotutils.HATCH_LIST], None],
# style=[str, ["pass", []], None],
# xlim=[float, ["pass", []], 2],
# ylim=[float, ["pass", []], 2],
# xaxis=[str, ["domain", ["arithmetic", "log"]], 1],
# yaxis=[str, ["domain", ["arithmetic", "log"]], 1],
# secondary_y=[bool, ["domain", [True, False]], 1],
# mark_right=[bool, ["domain", [True, False]], 1],
# scatter_matrix_diagonal=[str, ["domain", ["kde", "hist"]], 1],
# bootstrap_size=[int, ["range", [0, None]], 1],
# xy_match_line=[str, ["pass", []], 1],
# grid=[bool, ["domain", [True, False]], 1],
# label_rotation=[float, ["pass", []], 1],
# label_skip=[int, ["range", [1, None]], 1],
# drawstyle=[str, ["pass", []], 1],
# por=[bool, ["domain", [True, False]], 1],
# invert_xaxis=[bool, ["domain", [True, False]], 1],
# invert_yaxis=[bool, ["domain", [True, False]], 1],
# plotting_position=[
# str,
# [
# "domain",
# ["weibull", "benard", "tukey", "gumbel", "hazen", "cunnane", "california"],
# ],
# 1,
# ],
# prob_plot_sort_values=[str, ["domain", ["ascending", "descending"]], 1],
# plot_styles=[
# str,
# [
# "domain",
# [
# "classic",
# "Solarize_Light2",
# "bmh",
# "dark_background",
# "fast",
# "fivethirtyeight",
# "ggplot",
# "grayscale",
# "seaborn",
# "seaborn-bright",
# "seaborn-colorblind",
# "seaborn-dark",
# "seaborn-dark-palette",
# "seaborn-darkgrid",
# "seaborn-deep",
# "seaborn-muted",
# "seaborn-notebook",
# "seaborn-paper",
# "seaborn-pastel",
# "seaborn-poster",
# "seaborn-talk",
# "seaborn-ticks",
# "seaborn-white",
# "seaborn-whitegrid",
# "tableau-colorblind10",
# "science",
# "grid",
# "ieee",
# "scatter",
# "notebook",
# "high-vis",
# "bright",
# "vibrant",
# "muted",
# "retro",
# ],
# ],
# None,
# ],
# hlines_y=[float, ["pass", []], None],
# hlines_xmin=[float, ["pass", []], None],
# hlines_xmax=[float, ["pass", []], None],
# hlines_colors=[str, ["pass", []], None],
# hlines_linestyles=[
# str,
# ["domain", ["auto", None, "", " ", " "] + plotutils.LINE_LIST],
# None,
# ],
# vlines_x=[float, ["pass", []], None],
# vlines_ymin=[float, ["pass", []], None],
# vlines_ymax=[float, ["pass", []], None],
# vlines_colors=[str, ["pass", []], None],
# vlines_linestyles=[
# str,
# ["domain", ["auto", None, "", " ", " "] + plotutils.LINE_LIST],
# None,
# ],
# )
def taylor(
input_ts="-",
columns=None,
start_date=None,
end_date=None,
clean=False,
skiprows=None,
index_type="datetime",
names=None,
ofilename="plot.png",
xtitle="",
ytitle="",
title="",
figsize="10,6.0",
legend=None,
legend_names=None,
subplots=False,
sharex=True,
sharey=False,
colors="auto",
linestyles="auto",
markerstyles=" ",
bar_hatchstyles="auto",
style="auto",
logx=False,
logy=False,
xaxis="arithmetic",
yaxis="arithmetic",
xlim=None,
ylim=None,
secondary_y=False,
mark_right=True,
scatter_matrix_diagonal="kde",
bootstrap_size=50,
bootstrap_samples=500,
norm_xaxis=False,
norm_yaxis=False,
lognorm_xaxis=False,
lognorm_yaxis=False,
xy_match_line="",
grid=False,
label_rotation=None,
label_skip=1,
force_freq=None,
drawstyle="default",
por=False,
invert_xaxis=False,
invert_yaxis=False,
round_index=None,
plotting_position="weibull",
prob_plot_sort_values="descending",
source_units=None,
target_units=None,
lag_plot_lag=1,
plot_styles="bright",
hlines_y=None,
hlines_xmin=None,
hlines_xmax=None,
hlines_colors=None,
hlines_linestyles="-",
vlines_x=None,
vlines_ymin=None,
vlines_ymax=None,
vlines_colors=None,
vlines_linestyles="-",
**kwds,
):
r"""Plot data."""
# Need to work around some old option defaults with the implementation of
# mando
legend = bool(legend == "" or legend == "True" or legend is None)
type = "taylor"
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
from matplotlib.ticker import FixedLocator
tsd = tsutils.common_kwds(
input_ts,
skiprows=skiprows,
names=names,
index_type=index_type,
start_date=start_date,
end_date=end_date,
pick=columns,
round_index=round_index,
dropna="all",
source_units=source_units,
target_units=target_units,
clean=clean,
por=por,
)
tsd, lnames = plotutils.check(type, tsd, legend_names)
# This is to help pretty print the frequency
try:
try:
pltfreq = str(tsd.index.freq, "utf-8").lower()
except TypeError:
pltfreq = str(tsd.index.freq).lower()
if pltfreq.split(" ")[0][1:] == "1":
beginstr = 3
else:
beginstr = 1
if pltfreq == "none":
short_freq = ""
else:
# short freq string (day) OR (2 day)
short_freq = "({})".format(pltfreq[beginstr:-1])
except AttributeError:
short_freq = ""
if colors == "auto":
colors = None
else:
colors = tsutils.make_list(colors)
if linestyles == "auto":
linestyles = plotutils.LINE_LIST
else:
linestyles = tsutils.make_list(linestyles)
if bar_hatchstyles == "auto":
bar_hatchstyles = plotutils.HATCH_LIST
else:
bar_hatchstyles = tsutils.make_list(bar_hatchstyles)
if markerstyles == "auto":
markerstyles = plotutils.MARKER_LIST
else:
markerstyles = tsutils.make_list(markerstyles)
if markerstyles is None:
markerstyles = " "
if style != "auto":
nstyle = tsutils.make_list(style)
if len(nstyle) != len(tsd.columns):
raise ValueError(
tsutils.error_wrapper(
"""
You have to have the same number of style strings as time-series to plot.
You supplied '{}' for style which has {} style strings,
but you have {} time-series.
""".format(
style, len(nstyle), len(tsd.columns)
)
)
)
colors = []
markerstyles = []
linestyles = []
for st in nstyle:
colors.append(st[0])
if len(st) == 1:
markerstyles.append(" ")
linestyles.append("-")
continue
if st[1] in plotutils.MARKER_LIST:
markerstyles.append(st[1])
try:
linestyles.append(st[2:])
except IndexError:
linestyles.append(" ")
else:
markerstyles.append(" ")
linestyles.append(st[1:])
if linestyles is None:
linestyles = [" "]
else:
linestyles = [" " if i in [" ", None] else i for i in linestyles]
markerstyles = [" " if i is None else i for i in markerstyles]
if colors is not None:
icolors = itertools.cycle(colors)
else:
icolors = None
imarkerstyles = itertools.cycle(markerstyles)
ilinestyles = itertools.cycle(linestyles)
# Only for bar, barh, bar_stacked, and barh_stacked.
ibar_hatchstyles = itertools.cycle(bar_hatchstyles)
if (
logx is True
or logy is True
or norm_xaxis is True
or norm_yaxis is True
or lognorm_xaxis is True
or lognorm_yaxis is True
):
warnings.warn(
"""
*
* The --logx, --logy, --norm_xaxis, --norm_yaxis, --lognorm_xaxis, and
* --lognorm_yaxis options are deprecated.
*
* For --logx use --xaxis="log"
* For --logy use --yaxis="log"
* For --norm_xaxis use --type="norm_xaxis"
* For --norm_yaxis use --type="norm_yaxis"
* For --lognorm_xaxis use --type="lognorm_xaxis"
* For --lognorm_yaxis use --type="lognorm_yaxis"
*
"""
)
if xaxis == "log":
logx = True
if yaxis == "log":
logy = True
xlim = plotutils.know_your_limits(xlim, axis=xaxis)
ylim = plotutils.know_your_limits(ylim, axis=yaxis)
plot_styles = tsutils.make_list(plot_styles) + ["no-latex"]
style_loc = os.path.join(
os.path.dirname(__file__), os.pardir, "SciencePlots_styles"
)
plot_styles = [
os.path.join(style_loc, i + ".mplstyle")
if os.path.exists(os.path.join(style_loc, i + ".mplstyle"))
else i
for i in plot_styles
]
plt.style.use(plot_styles)
figsize = tsutils.make_list(figsize, n=2)
_, ax = plt.subplots(figsize=figsize)
if type in ["taylor"]:
from ..skill_metrics import centered_rms_dev, taylor_diagram
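# Added comment: the block below computes, for every non-reference column,
# the three statistics that a Taylor diagram displays, using the first
# column as the reference series: standard deviation, Pearson correlation
# coefficient, and centered RMS difference.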
ref = tsd.iloc[:, 0]
std = [np.std(ref)]
ccoef = [1.0]
crmsd = [0.0]
for col in range(1, len(tsd.columns)):
std.append(np.std(tsd.iloc[:, col]))
ccoef.append(np.corrcoef(tsd.iloc[:, col], ref)[0][1])
crmsd.append(centered_rms_dev(tsd.iloc[:, col].values, ref.values))
taylor_diagram(np.array(std), np.array(crmsd), np.array(ccoef))
if hlines_y is not None:
hlines_y = tsutils.make_list(hlines_y)
hlines_xmin = tsutils.make_list(hlines_xmin)
hlines_xmax = tsutils.make_list(hlines_xmax)
hlines_colors = tsutils.make_list(hlines_colors)
hlines_linestyles = tsutils.make_list(hlines_linestyles)
nxlim = ax.get_xlim()
if hlines_xmin is None:
hlines_xmin = nxlim[0]
if hlines_xmax is None:
hlines_xmax = nxlim[1]
if vlines_x is not None:
vlines_x = tsutils.make_list(vlines_x)
vlines_ymin = tsutils.make_list(vlines_ymin)
vlines_ymax = tsutils.make_list(vlines_ymax)
vlines_colors = tsutils.make_list(vlines_colors)
vlines_linestyles = tsutils.make_list(vlines_linestyles)
nylim = ax.get_ylim()
if vlines_ymin is None:
vlines_ymin = nylim[0]
if vlines_ymax is None:
vlines_ymax = nylim[1]
if type in [
"time",
"xy",
"bar",
"bar_stacked",
"histogram",
"norm_xaxis",
"lognorm_xaxis",
"weibull_xaxis",
"norm_yaxis",
"lognorm_yaxis",
"weibull_yaxis",
]:
if hlines_y is not None:
if type in ["norm_yaxis", "lognorm_yaxis", "weibull_yaxis"]:
hlines_y = ppf(tsutils.make_list(hlines_y))
plt.hlines(
hlines_y,
hlines_xmin,
hlines_xmax,
colors=hlines_colors,
linestyles=hlines_linestyles,
)
if vlines_x is not None:
if type in ["norm_xaxis", "lognorm_xaxis", "weibull_xaxis"]:
vlines_x = ppf(tsutils.make_list(vlines_x))
plt.vlines(
vlines_x,
vlines_ymin,
vlines_ymax,
colors=vlines_colors,
linestyles=vlines_linestyles,
)
plt.xlabel(xtitle)
plt.ylabel(ytitle)
if invert_xaxis is True:
plt.gca().invert_xaxis()
if invert_yaxis is True:
plt.gca().invert_yaxis()
plt.grid(grid)
plt.title(title)
plt.tight_layout()
if ofilename is not None:
plt.savefig(ofilename)
return plt
taylor.__doc__ = taylor_cli.__doc__
| 30.209261
| 100
| 0.535947
|
e63d3398c5efe725fc2418654a53b43435a2fa35
| 469
|
py
|
Python
|
stdlib/tkinter/canvas_colours.py
|
janbodnar/Python-Course
|
51705ab5a2adef52bcdb99a800e94c0d67144a38
|
[
"BSD-2-Clause"
] | 13
|
2017-08-22T12:26:07.000Z
|
2021-07-29T16:13:50.000Z
|
stdlib/tkinter/canvas_colours.py
|
janbodnar/Python-Course
|
51705ab5a2adef52bcdb99a800e94c0d67144a38
|
[
"BSD-2-Clause"
] | 1
|
2021-02-08T10:24:33.000Z
|
2021-02-08T10:24:33.000Z
|
stdlib/tkinter/canvas_colours.py
|
janbodnar/Python-Course
|
51705ab5a2adef52bcdb99a800e94c0d67144a38
|
[
"BSD-2-Clause"
] | 17
|
2018-08-13T11:10:33.000Z
|
2021-07-29T16:14:02.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import tkinter
root = tkinter.Tk()
root.title('Canvas colours')
canvas = tkinter.Canvas(root)
canvas.create_rectangle(30, 10, 120, 80,
outline="#fb0", fill="#fb0")
canvas.create_rectangle(150, 10, 240, 80,
outline="#f50", fill="#f50")
canvas.create_rectangle(270, 10, 370, 80,
outline="#05f", fill="#05f")
canvas.pack(fill=tkinter.BOTH, expand=1)
root.geometry("400x100+300+300")
root.mainloop()
| 23.45
| 44
| 0.656716
|
5d5d58edca7be3d1cea0b30452ccc498ebf5aad1
| 2,230
|
py
|
Python
|
fallfromgrace/process.py
|
bjornedstrom/fall-from-grace
|
b190969e838535f67d46aaf8358069a59c4edfd7
|
[
"MIT"
] | 1
|
2015-01-08T20:19:33.000Z
|
2015-01-08T20:19:33.000Z
|
fallfromgrace/process.py
|
bjornedstrom/fall-from-grace
|
b190969e838535f67d46aaf8358069a59c4edfd7
|
[
"MIT"
] | null | null | null |
fallfromgrace/process.py
|
bjornedstrom/fall-from-grace
|
b190969e838535f67d46aaf8358069a59c4edfd7
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2012 Björn Edström <be@bjrn.se>
# See LICENSE for details.
import logging
import psutil
log = logging.getLogger('fall-from-grace')
def get_pids():
"""Yields a snapshot of currently active process ids."""
for proc in psutil.get_process_list():
yield proc.pid
def get_cmdline(pid):
"""Returns the cmdline of the given pid."""
try:
proc = psutil.Process(pid)
return ' '.join(proc.cmdline)
except psutil.NoSuchProcess:
return None
except Exception, e:
log.warning('process exception for pid %s: %s', pid, e)
return None
def get_memory_usage(pid):
"""Returns a dict with memory usage information for the given
pid. The dict has the following keys:
"vmem": virtual memory usage,
"rmem": residential memory usage.
"""
proc = psutil.Process(pid)
usage = {}
usage['rmem'], usage['vmem'] = proc.get_memory_info()
return usage
def get_parent_pids(pid):
"""Returns a list of parent pids, up to pid 1.
"""
pids = []
while True:
proc = psutil.Process(pid)
pids.append(proc.ppid)
if proc.ppid <= 1:
break
pid = proc.ppid
return pids
def get_snapshot():
"""Returns a snapshot of currently running processes.
Specifically, returns (tree, cmdlines) where tree is a dict from
pid to a set of children, and cmdlines is a dict from pid to
cmdline.
"""
tree = {}
cmdlines = {}
for proc in psutil.get_process_list():
try:
ppid = proc.ppid
except Exception, e:
continue
try:
cmdline = ' '.join(proc.cmdline)
except Exception, e:
continue
cmdlines[proc.pid] = cmdline
if ppid not in tree:
tree[ppid] = set()
if proc.pid not in tree:
tree[proc.pid] = set()
tree[ppid].add(proc.pid)
return tree, cmdlines
def walk_children(tree, pid):
"""Yields all children of pid given in tree (returned from
get_snapshot above).
"""
for cpid in tree[pid]:
yield cpid
for ccpid in walk_children(tree, cpid):
yield ccpid
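# Added illustrative usage (comment only; assumes pid 1 appears in the snapshot):
#
#     tree, cmdlines = get_snapshot()
#     for child_pid in walk_children(tree, 1):
#         print(child_pid, cmdlines.get(child_pid))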
| 22.989691
| 68
| 0.595964
|
27d3d751f560f3e894b875862c9276e484ee8a5a
| 2,378
|
py
|
Python
|
reconcile/utils/jump_host.py
|
mmclanerh/qontract-reconcile
|
57f3d5a38e6811843c234754df083d7bb35787bb
|
[
"Apache-2.0"
] | null | null | null |
reconcile/utils/jump_host.py
|
mmclanerh/qontract-reconcile
|
57f3d5a38e6811843c234754df083d7bb35787bb
|
[
"Apache-2.0"
] | null | null | null |
reconcile/utils/jump_host.py
|
mmclanerh/qontract-reconcile
|
57f3d5a38e6811843c234754df083d7bb35787bb
|
[
"Apache-2.0"
] | null | null | null |
import tempfile
import shutil
import os
import reconcile.utils.gql as gql
from reconcile.utils.secret_reader import SecretReader
from reconcile.exceptions import FetchResourceError
class HTTPStatusCodeError(Exception):
def __init__(self, msg):
super(HTTPStatusCodeError, self).__init__(
"HTTP status code error: " + str(msg)
)
class JumpHostBase(object):
def __init__(self, jh, settings=None):
self.hostname = jh['hostname']
self.user = jh['user']
self.port = 22 if jh['port'] is None else jh['port']
secret_reader = SecretReader(settings=settings)
self.identity = secret_reader.read(jh['identity'])
self.init_identity_file()
def init_identity_file(self):
self._identity_dir = tempfile.mkdtemp()
identity_file = self._identity_dir + '/id'
with open(identity_file, 'w') as f:
f.write(self.identity.decode('utf-8'))
os.chmod(identity_file, 0o600)
self.identity_file = identity_file
def cleanup(self):
shutil.rmtree(self._identity_dir)
class JumpHostSSH(JumpHostBase):
def __init__(self, jh, settings=None):
JumpHostBase.__init__(self, jh, settings=settings)
self.known_hosts = self.get_known_hosts(jh)
self.init_known_hosts_file()
def get_known_hosts(self, jh):
known_hosts_path = jh['knownHosts']
gqlapi = gql.get_api()
try:
known_hosts = gqlapi.get_resource(known_hosts_path)
except gql.GqlGetResourceError as e:
raise FetchResourceError(str(e))
return known_hosts['content']
def init_known_hosts_file(self):
known_hosts_file = self._identity_dir + '/known_hosts'
with open(known_hosts_file, 'w') as f:
f.write(self.known_hosts)
os.chmod(known_hosts_file, 0o600)
self.known_hosts_file = known_hosts_file
def get_ssh_base_cmd(self):
user_host = '{}@{}'.format(self.user, self.hostname)
return [
'ssh',
'-o', 'ControlMaster=auto',
'-o', 'ControlPath=/tmp/controlmaster-%r@%h:%p',
'-o', 'ControlPersist=600',
'-o', 'StrictHostKeyChecking=yes',
'-o', 'UserKnownHostsFile={}'.format(self.known_hosts_file),
'-i', self.identity_file, '-p', str(self.port), user_host]
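# Added illustrative usage (comment only; 'jh' and 'settings' are hypothetical
# inputs of the shapes this class already expects):
#
#     jump_host = JumpHostSSH(jh, settings=settings)
#     cmd = jump_host.get_ssh_base_cmd() + ['hostname']
#     # run cmd with subprocess, then call jump_host.cleanup() to remove the
#     # temporary identity and known_hosts files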
| 31.706667
| 72
| 0.639193
|
d6ac7a53c614230976e6647e6a0be7dd93a47b07
| 29
|
py
|
Python
|
Demo_XRD_patterns_from_dpp/ds_section/__init__.py
|
SHDShim/PMatRes
|
92440c11f2723861dbb82cecdc321fcef9de4443
|
[
"Apache-2.0"
] | 15
|
2017-09-02T13:55:35.000Z
|
2022-03-26T08:20:16.000Z
|
Demo_XRD_patterns_from_dpp/ds_section/__init__.py
|
SHDShim/PMatRes
|
92440c11f2723861dbb82cecdc321fcef9de4443
|
[
"Apache-2.0"
] | null | null | null |
Demo_XRD_patterns_from_dpp/ds_section/__init__.py
|
SHDShim/PMatRes
|
92440c11f2723861dbb82cecdc321fcef9de4443
|
[
"Apache-2.0"
] | 2
|
2018-05-16T13:32:08.000Z
|
2019-06-16T08:09:38.000Z
|
from .section import Section
| 14.5
| 28
| 0.827586
|
29749cbb666379c2927f30cfea9ca63030159c1c
| 401
|
py
|
Python
|
python/paddle/trainer_config_helpers/tests/configs/test_cost_layers_with_weight.py
|
unisound-ail/Paddle
|
e1f57bfd66245f78d04f47d670cba5592d5734b2
|
[
"Apache-2.0"
] | 1
|
2016-10-23T09:31:38.000Z
|
2016-10-23T09:31:38.000Z
|
python/paddle/trainer_config_helpers/tests/configs/test_cost_layers_with_weight.py
|
adnywei/Paddle
|
e1f57bfd66245f78d04f47d670cba5592d5734b2
|
[
"Apache-2.0"
] | 3
|
2016-10-22T16:06:11.000Z
|
2016-11-07T06:30:37.000Z
|
python/paddle/trainer_config_helpers/tests/configs/test_cost_layers_with_weight.py
|
adnywei/Paddle
|
e1f57bfd66245f78d04f47d670cba5592d5734b2
|
[
"Apache-2.0"
] | null | null | null |
from paddle.trainer_config_helpers import *
settings(
learning_rate=1e-4,
batch_size=1000
)
data = data_layer(name='input', size=300)
lbl = data_layer(name='label', size=1)
wt = data_layer(name='weight', size=1)
fc = fc_layer(input=data, size=10, act=SoftmaxActivation())
outputs(classification_cost(input=fc, label=lbl, weight=wt),
regression_cost(input=fc, label=lbl, weight=wt))
| 26.733333
| 60
| 0.72818
|
702fb5b60045d8292da5c9c9484ea71a43f474ed
| 479
|
py
|
Python
|
src/app_registry/__init__.py
|
YoussefMabrouk/big-map-registry
|
8943514755f268ac39b57430572702825a1dead1
|
[
"MIT"
] | 3
|
2021-02-03T14:09:15.000Z
|
2021-09-07T17:48:36.000Z
|
src/app_registry/__init__.py
|
YoussefMabrouk/big-map-registry
|
8943514755f268ac39b57430572702825a1dead1
|
[
"MIT"
] | 6
|
2021-01-28T15:31:39.000Z
|
2022-02-18T08:53:01.000Z
|
src/app_registry/__init__.py
|
YoussefMabrouk/big-map-registry
|
8943514755f268ac39b57430572702825a1dead1
|
[
"MIT"
] | 11
|
2021-01-28T14:58:30.000Z
|
2022-03-22T14:06:45.000Z
|
# -*- coding: utf-8 -*-
"""Manage a registry of applications."""
from .core import AppRegistryData
from .core import AppRegistrySchemas
from .metadata import generate_apps_meta
from .version import __version__
from .web import build_from_config
from .web import build_html
from .web import write_schemas
__all__ = [
"AppRegistryData",
"AppRegistrySchemas",
"__version__",
"build_from_config",
"build_html",
"generate_apps_meta",
"write_schemas",
]
| 21.772727
| 40
| 0.734864
|
16e85830ffa51ec428951570cc7a038f3d10c873
| 1,560
|
py
|
Python
|
python/paddle/fluid/tests/unittests/test_lod_rank_table.py
|
jerrywgz/Paddle
|
85c4912755b783dd7554a9d6b9dae4a7e40371bc
|
[
"Apache-2.0"
] | 1
|
2018-08-06T11:55:16.000Z
|
2018-08-06T11:55:16.000Z
|
python/paddle/fluid/tests/unittests/test_lod_rank_table.py
|
jerrywgz/Paddle
|
85c4912755b783dd7554a9d6b9dae4a7e40371bc
|
[
"Apache-2.0"
] | 3
|
2017-07-15T14:20:08.000Z
|
2019-05-06T03:16:54.000Z
|
python/paddle/fluid/tests/unittests/test_lod_rank_table.py
|
jerrywgz/Paddle
|
85c4912755b783dd7554a9d6b9dae4a7e40371bc
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle.fluid.layers import data
from paddle.fluid.layers.control_flow import lod_rank_table
from paddle.fluid.executor import Executor
import paddle.fluid.core as core
import numpy
import unittest
class TestLoDRankTable(unittest.TestCase):
def test_lod_rank_table(self):
x = data(name='x', shape=[100])
cpu = core.CPUPlace()
rank_table = lod_rank_table(x=x, level=1)
rank_table.persistable = True
exe = Executor(cpu)
scope = core.Scope()
tensor = core.LoDTensor()
tensor.set(numpy.random.random(size=(17, 100)), cpu)
tensor.set_recursive_sequence_lengths(
[[1, 2], [5, 1, 1], [3, 1, 5, 1, 3, 3, 1]])
exe.run(scope=scope, feed={'x': tensor})
var = scope.find_var(rank_table.name)
table = var.get_lod_rank_table()
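# Added note: the level-1 sequence lengths set above are [5, 1, 1]; the rank
# table presumably orders sequences by length in descending order, which is
# why the (index, length) pairs [(0, 5), (1, 1), (2, 1)] are expected below.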
self.assertEqual([(0, 5), (1, 1), (2, 1)], table.items())
if __name__ == '__main__':
unittest.main()
| 35.454545
| 74
| 0.685256
|
f8fdd8eefad72fa36cb6ec87a85a3757b17f7439
| 952
|
py
|
Python
|
results/neural_nets/testdata/make_zzz3_subsets.py
|
k-ivey/FastSK
|
3316f8078a516e808c2c4fe7ed3fdc8db808fc11
|
[
"Apache-2.0"
] | 13
|
2020-04-23T21:25:51.000Z
|
2021-11-19T23:56:17.000Z
|
results/neural_nets/testdata/make_zzz3_subsets.py
|
k-ivey/FastSK
|
3316f8078a516e808c2c4fe7ed3fdc8db808fc11
|
[
"Apache-2.0"
] | 3
|
2020-08-24T22:15:50.000Z
|
2021-05-11T12:42:14.000Z
|
results/neural_nets/testdata/make_zzz3_subsets.py
|
k-ivey/FastSK
|
3316f8078a516e808c2c4fe7ed3fdc8db808fc11
|
[
"Apache-2.0"
] | 10
|
2020-04-24T09:27:19.000Z
|
2021-06-16T21:05:46.000Z
|
import random
file = "testdata/ZZZ3.train.fasta"
labels, sequences = [], []
# load the data
with open(file, "r") as f:
label_line = True
for line in f:
line = line.strip()
if label_line:
labels.append(line)
label_line = False
else:
sequences.append(line)
label_line = True
assert len(labels) == len(sequences)
num_sample = len(labels)
subset_sizes = []
for i in range(1, 11):
size = i * 1000
if size > num_sample:
size = num_sample
zipped = list(zip(labels, sequences))
random.shuffle(zipped)
labels_, sequences_ = zip(*zipped)
labels_, sequences_ = labels_[:size], sequences_[:size]
assert len(labels_) == len(sequences_)
assert len(labels_) <= num_sample
with open("testdata/ZZZ3_{}.train.fasta".format(i), "w+") as f:
for (label, seq) in zip(labels_, sequences_):
f.write(label + "\n" + seq + "\n")
| 25.72973
| 67
| 0.597689
|
d7cc9f3c8098bfb5100ef4c2afebf3c41f3a25c8
| 5,679
|
py
|
Python
|
src/app/voltdb/voltdb_src/lib/python/voltcli/voltdb.d/collect.py
|
OpenMPDK/SMDK
|
8f19d32d999731242cb1ab116a4cb445d9993b15
|
[
"BSD-3-Clause"
] | 44
|
2022-03-16T08:32:31.000Z
|
2022-03-31T16:02:35.000Z
|
src/app/voltdb/voltdb_src/lib/python/voltcli/voltdb.d/collect.py
|
H2O0Lee/SMDK
|
eff49bc17a55a83ea968112feb2e2f2ea18c4ff5
|
[
"BSD-3-Clause"
] | 1
|
2022-03-29T02:30:28.000Z
|
2022-03-30T03:40:46.000Z
|
src/app/voltdb/voltdb_src/lib/python/voltcli/voltdb.d/collect.py
|
H2O0Lee/SMDK
|
eff49bc17a55a83ea968112feb2e2f2ea18c4ff5
|
[
"BSD-3-Clause"
] | 18
|
2022-03-19T04:41:04.000Z
|
2022-03-31T03:32:12.000Z
|
# This file is part of VoltDB.
# Copyright (C) 2008-2021 VoltDB Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with VoltDB. If not, see <http://www.gnu.org/licenses/>.
import sys, os, subprocess
from voltcli import utility
collect_help = ('Collect logs on the current node for problem analysis')
dir_spec_help = ('root directory for the database. The default is the current working directory.')
output_help = ('file name to store collect data in compressed format. \'-\' means standard output. '
'The default is the \'voltdb_collect_<hostname or IP>.zip\' in the current working directory.')
@VOLT.Command(
description = collect_help,
options = (
VOLT.StringOption (None, '--prefix', 'prefix',
'file name prefix for uniquely identifying collection. (Deprecated. Please use --output).',
default = ''),
VOLT.PathOption('-o', '--output', 'output', output_help, default=''),
VOLT.BooleanOption(None, '--dry-run', 'dryrun',
'list the log files without collecting them.',
default = False),
VOLT.BooleanOption(None, '--skip-heap-dump', 'skipheapdump',
'exclude heap dump file from collection.',
default = True),
VOLT.IntegerOption(None, '--days', 'days',
'number of days of files to collect (files included are log, crash files), Current day value is 1.',
default = 7),
VOLT.PathOption('-D', '--dir', 'directory_spec', dir_spec_help, default=''),
VOLT.BooleanOption('-f', '--force', 'force', 'Overwrite the existing file.', default = False),
),
arguments = (
VOLT.PathArgument('voltdbroot', 'the voltdbroot path. (Deprecated. Please use --dir).', absolute = True, optional=True, default=None)
)
)
def collect(runner):
if int(runner.opts.days) == 0:
utility.abort(' \'0\' is invalid entry for option --days')
process_voltdbroot_args(runner)
process_outputfile_args(runner)
runner.args.extend(['--dryrun=' + str(runner.opts.dryrun), '--skipheapdump=' + str(runner.opts.skipheapdump),
'--days=' + str(runner.opts.days), '--force=' + str(runner.opts.force)])
runner.java_execute('org.voltdb.utils.Collector', None, *runner.args)
def process_voltdbroot_args(runner) :
if (runner.opts.directory_spec) and (runner.opts.voltdbroot):
utility.abort('Cannot specify both --dir and command line argument. Please use --dir option.')
os.environ['PATH'] += os.pathsep + os.pathsep.join(s for s in sys.path if os.path.join('voltdb', 'bin') in s)
# If database directory is given, derive voltdbroot path to store results of systemcheck in voltdbroot directory
if runner.opts.directory_spec:
if os.path.isdir(runner.opts.directory_spec) and os.access(runner.opts.directory_spec, os.R_OK|os.W_OK|os.X_OK):
voltdbrootDir = os.path.join(runner.opts.directory_spec, 'voltdbroot')
else:
utility.abort('Specified database directory is not valid', runner.opts.directory_spec)
elif runner.opts.voltdbroot:
utility.warning('Specifying voltdbroot directory using command argument is deprecated. Consider using --dir '
'option to specify database directory.');
voltdbrootDir = runner.opts.voltdbroot
else:
voltdbrootDir = os.path.join(os.getcwd(), 'voltdbroot')
runner.args.extend(['--voltdbroot=' + voltdbrootDir])
performSystemCheck(runner, voltdbrootDir)
def performSystemCheck(runner, dirPath):
if os.path.isdir(dirPath) and os.access(dirPath, os.R_OK|os.W_OK|os.X_OK):
checkFD = os.open(os.path.join(dirPath, 'systemcheck'), os.O_WRONLY|os.O_CREAT|os.O_TRUNC)
checkOutput = os.fdopen(checkFD, 'w')
subprocess.call('voltdb check', stdout=checkOutput, shell=True)
checkOutput.close()
else:
if runner.opts.directory_spec:
utility.abort('Invalid database directory ' + runner.opts.directory_spec +
'. Specify valid database directory using --dir option.')
elif runner.opts.voltdbroot:
utility.abort('Invalid voltdbroot path ' + runner.opts.voltdbroot +
'. Specify valid database directory using --dir option.')
else:
utility.abort('Invalid database directory ' + os.getcwd() +
'. Specify valid database directory using --dir option.')
def process_outputfile_args(runner):
if runner.opts.output and runner.opts.prefix:
utility.abort('Cannot specify both --output and --prefix. Please use --output option.')
if runner.opts.output:
runner.args.extend(['--outputFile=' + runner.opts.output])
elif runner.opts.prefix:
utility.warning('Specifying prefix for outputfile name is deprecated. Consider using --output option to specify'
' output file name.')
runner.args.extend(['--prefix=' + runner.opts.prefix])
| 50.256637
| 141
| 0.654164
|
4a4052cfa3350413da3bba8b3b22bb65908eb550
| 12,139
|
py
|
Python
|
packages/Python/lldbsuite/test/lang/c/stepping/TestStepAndBreakpoints.py
|
nathawes/swift-lldb
|
3cbf7470e0f9191ec1fc1c69ce8048c1dc64ec77
|
[
"Apache-2.0"
] | 2
|
2019-05-24T14:10:24.000Z
|
2019-05-24T14:27:38.000Z
|
packages/Python/lldbsuite/test/lang/c/stepping/TestStepAndBreakpoints.py
|
DalavanCloud/lldb
|
e913eaf2468290fb94c767d474d611b41a84dd69
|
[
"Apache-2.0"
] | null | null | null |
packages/Python/lldbsuite/test/lang/c/stepping/TestStepAndBreakpoints.py
|
DalavanCloud/lldb
|
e913eaf2468290fb94c767d474d611b41a84dd69
|
[
"Apache-2.0"
] | null | null | null |
"""Test stepping over vrs. hitting breakpoints & subsequent stepping in various forms."""
from __future__ import print_function
import os
import time
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestCStepping(TestBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Find the line numbers that we will step to in main:
self.main_source = "main.c"
@add_test_categories(['pyapi', 'basic_process'])
@expectedFailureAll(oslist=['freebsd'], bugnumber='llvm.org/pr17932')
@expectedFailureAll(oslist=["linux"], bugnumber="llvm.org/pr14437")
@expectedFailureAll(oslist=["windows"], bugnumber="llvm.org/pr24777")
def test_and_python_api(self):
"""Test stepping over vrs. hitting breakpoints & subsequent stepping in various forms."""
self.build()
exe = self.getBuildArtifact("a.out")
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
self.main_source_spec = lldb.SBFileSpec(self.main_source)
breakpoints_to_disable = []
break_1_in_main = target.BreakpointCreateBySourceRegex(
'// frame select 2, thread step-out while stopped at .c.1..',
self.main_source_spec)
self.assertTrue(break_1_in_main, VALID_BREAKPOINT)
breakpoints_to_disable.append(break_1_in_main)
break_in_a = target.BreakpointCreateBySourceRegex(
'// break here to stop in a before calling b', self.main_source_spec)
self.assertTrue(break_in_a, VALID_BREAKPOINT)
breakpoints_to_disable.append(break_in_a)
break_in_b = target.BreakpointCreateBySourceRegex(
'// thread step-out while stopped at .c.2..', self.main_source_spec)
self.assertTrue(break_in_b, VALID_BREAKPOINT)
breakpoints_to_disable.append(break_in_b)
break_in_c = target.BreakpointCreateBySourceRegex(
'// Find the line number of function .c. here.', self.main_source_spec)
self.assertTrue(break_in_c, VALID_BREAKPOINT)
breakpoints_to_disable.append(break_in_c)
# Now launch the process, and do not stop at entry point.
process = target.LaunchSimple(
None, None, self.get_process_working_directory())
self.assertTrue(process, PROCESS_IS_VALID)
# The stop reason of the thread should be breakpoint.
threads = lldbutil.get_threads_stopped_at_breakpoint(
process, break_1_in_main)
if len(threads) != 1:
self.fail("Failed to stop at first breakpoint in main.")
thread = threads[0]
# Get the stop id and for fun make sure it increases:
old_stop_id = process.GetStopID()
# Now step over, which should cause us to hit the breakpoint in "a"
thread.StepOver()
# The stop reason of the thread should be breakpoint.
threads = lldbutil.get_threads_stopped_at_breakpoint(
process, break_in_a)
if len(threads) != 1:
self.fail("Failed to stop at breakpoint in a.")
# Check that the stop ID increases:
new_stop_id = process.GetStopID()
self.assertTrue(
new_stop_id > old_stop_id,
"Stop ID increases monotonically.")
thread = threads[0]
# Step over, and we should hit the breakpoint in b:
thread.StepOver()
threads = lldbutil.get_threads_stopped_at_breakpoint(
process, break_in_b)
if len(threads) != 1:
self.fail("Failed to stop at breakpoint in b.")
thread = threads[0]
# Now try running some function, and make sure that we still end up in the same place
# and with the same stop reason.
frame = thread.GetFrameAtIndex(0)
current_line = frame.GetLineEntry().GetLine()
current_file = frame.GetLineEntry().GetFileSpec()
current_bp = []
current_bp.append(thread.GetStopReasonDataAtIndex(0))
current_bp.append(thread.GetStopReasonDataAtIndex(1))
stop_id_before_expression = process.GetStopID()
stop_id_before_including_expressions = process.GetStopID(True)
frame.EvaluateExpression("(int) printf (print_string)")
frame = thread.GetFrameAtIndex(0)
self.assertTrue(
current_line == frame.GetLineEntry().GetLine(),
"The line stayed the same after expression.")
self.assertTrue(
current_file == frame.GetLineEntry().GetFileSpec(),
"The file stayed the same after expression.")
self.assertTrue(
thread.GetStopReason() == lldb.eStopReasonBreakpoint,
"We still say we stopped for a breakpoint.")
self.assertTrue(thread.GetStopReasonDataAtIndex(0) == current_bp[
0] and thread.GetStopReasonDataAtIndex(1) == current_bp[1], "And it is the same breakpoint.")
# Also make sure running the expression didn't change the public stop id
# but did change if we are asking for expression stops as well.
stop_id_after_expression = process.GetStopID()
stop_id_after_including_expressions = process.GetStopID(True)
self.assertTrue(
stop_id_before_expression == stop_id_after_expression,
"Expression calling doesn't change stop ID")
self.assertTrue(
stop_id_after_including_expressions > stop_id_before_including_expressions,
"Stop ID including expressions increments over expression call.")
# Do the same thing with an expression that's going to crash, and make
# sure we are still unchanged.
frame.EvaluateExpression("((char *) 0)[0] = 'a'")
frame = thread.GetFrameAtIndex(0)
self.assertTrue(
current_line == frame.GetLineEntry().GetLine(),
"The line stayed the same after expression.")
self.assertTrue(
current_file == frame.GetLineEntry().GetFileSpec(),
"The file stayed the same after expression.")
self.assertTrue(
thread.GetStopReason() == lldb.eStopReasonBreakpoint,
"We still say we stopped for a breakpoint.")
self.assertTrue(thread.GetStopReasonDataAtIndex(0) == current_bp[
0] and thread.GetStopReasonDataAtIndex(1) == current_bp[1], "And it is the same breakpoint.")
# Now continue and make sure we just complete the step:
# Disable all our breakpoints first - sometimes the compiler puts two line table entries in for the
# breakpoint a "b" and we don't want to hit that.
for bkpt in breakpoints_to_disable:
bkpt.SetEnabled(False)
process.Continue()
self.assertTrue(thread.GetFrameAtIndex(0).GetFunctionName() == "a")
self.assertTrue(thread.GetStopReason() == lldb.eStopReasonPlanComplete)
# And one more time should get us back to main:
process.Continue()
self.assertTrue(thread.GetFrameAtIndex(0).GetFunctionName() == "main")
self.assertTrue(thread.GetStopReason() == lldb.eStopReasonPlanComplete)
# Now make sure we can call a function, break in the called function,
# then have "continue" get us back out again:
frame = thread.GetFrameAtIndex(0)
frame = thread.GetFrameAtIndex(0)
current_line = frame.GetLineEntry().GetLine()
current_file = frame.GetLineEntry().GetFileSpec()
break_in_b.SetEnabled(True)
options = lldb.SBExpressionOptions()
options.SetIgnoreBreakpoints(False)
options.SetFetchDynamicValue(False)
options.SetUnwindOnError(False)
frame.EvaluateExpression("b (4)", options)
threads = lldbutil.get_threads_stopped_at_breakpoint(
process, break_in_b)
if len(threads) != 1:
self.fail("Failed to stop at breakpoint in b when calling b.")
thread = threads[0]
# So do a step over here to make sure we can still do that:
thread.StepOver()
# See that we are still in b:
func_name = thread.GetFrameAtIndex(0).GetFunctionName()
self.assertTrue(
func_name == "b",
"Should be in 'b', were in %s" %
(func_name))
# Okay, now if we continue, we will finish off our function call and we
# should end up back in "a" as if nothing had happened:
process.Continue()
self.assertTrue(thread.GetFrameAtIndex(
0).GetLineEntry().GetLine() == current_line)
self.assertTrue(thread.GetFrameAtIndex(
0).GetLineEntry().GetFileSpec() == current_file)
# Now we are going to test step in targeting a function:
break_in_b.SetEnabled(False)
break_before_complex_1 = target.BreakpointCreateBySourceRegex(
'// Stop here to try step in targeting b.', self.main_source_spec)
self.assertTrue(break_before_complex_1, VALID_BREAKPOINT)
break_before_complex_2 = target.BreakpointCreateBySourceRegex(
'// Stop here to try step in targeting complex.', self.main_source_spec)
self.assertTrue(break_before_complex_2, VALID_BREAKPOINT)
break_before_complex_3 = target.BreakpointCreateBySourceRegex(
'// Stop here to step targeting b and hitting breakpoint.', self.main_source_spec)
self.assertTrue(break_before_complex_3, VALID_BREAKPOINT)
break_before_complex_4 = target.BreakpointCreateBySourceRegex(
'// Stop here to make sure bogus target steps over.', self.main_source_spec)
self.assertTrue(break_before_complex_4, VALID_BREAKPOINT)
threads = lldbutil.continue_to_breakpoint(
process, break_before_complex_1)
self.assertTrue(len(threads) == 1)
thread = threads[0]
break_before_complex_1.SetEnabled(False)
thread.StepInto("b")
self.assertTrue(thread.GetFrameAtIndex(0).GetFunctionName() == "b")
# Now continue out and stop at the next call to complex. This time
# step all the way into complex:
threads = lldbutil.continue_to_breakpoint(
process, break_before_complex_2)
self.assertTrue(len(threads) == 1)
thread = threads[0]
break_before_complex_2.SetEnabled(False)
thread.StepInto("complex")
self.assertTrue(thread.GetFrameAtIndex(
0).GetFunctionName() == "complex")
# Now continue out and stop at the next call to complex. This time
# enable breakpoints in a and c and then step targeting b:
threads = lldbutil.continue_to_breakpoint(
process, break_before_complex_3)
self.assertTrue(len(threads) == 1)
thread = threads[0]
break_before_complex_3.SetEnabled(False)
break_at_start_of_a = target.BreakpointCreateByName('a')
break_at_start_of_c = target.BreakpointCreateByName('c')
thread.StepInto("b")
threads = lldbutil.get_stopped_threads(
process, lldb.eStopReasonBreakpoint)
self.assertTrue(len(threads) == 1)
thread = threads[0]
stop_break_id = thread.GetStopReasonDataAtIndex(0)
self.assertTrue(stop_break_id == break_at_start_of_a.GetID()
or stop_break_id == break_at_start_of_c.GetID())
break_at_start_of_a.SetEnabled(False)
break_at_start_of_c.SetEnabled(False)
process.Continue()
self.assertTrue(thread.GetFrameAtIndex(0).GetFunctionName() == "b")
# Now continue out and stop at the next call to complex. This time
# enable breakpoints in a and c and then step targeting b:
threads = lldbutil.continue_to_breakpoint(
process, break_before_complex_4)
self.assertTrue(len(threads) == 1)
thread = threads[0]
break_before_complex_4.SetEnabled(False)
thread.StepInto("NoSuchFunction")
self.assertTrue(thread.GetFrameAtIndex(0).GetFunctionName() == "main")
| 40.598662
| 117
| 0.66274
|
fb8b41513a1598cdfa3b338945bbc49a3b4973aa
| 2,849
|
py
|
Python
|
kNN_regression.py
|
marcos-delcueto/Tutorial_kNN_custom_metric
|
00abafcd474a21dcc72f39aa7aeb104ebdc500ed
|
[
"MIT"
] | 4
|
2020-11-29T14:52:45.000Z
|
2021-07-25T02:45:39.000Z
|
kNN_regression.py
|
marcos-delcueto/Tutorial_kNN_custom_metric
|
00abafcd474a21dcc72f39aa7aeb104ebdc500ed
|
[
"MIT"
] | null | null | null |
kNN_regression.py
|
marcos-delcueto/Tutorial_kNN_custom_metric
|
00abafcd474a21dcc72f39aa7aeb104ebdc500ed
|
[
"MIT"
] | 2
|
2021-03-01T05:58:44.000Z
|
2021-07-25T02:45:42.000Z
|
#!/usr/bin/env python3
# Marcos del Cueto
# Import necessary libraries
import math
import pandas as pd
from sklearn.neighbors import KNeighborsRegressor, DistanceMetric
from sklearn.model_selection import LeaveOneOut
from sklearn.metrics import mean_squared_error
from scipy.stats import pearsonr
### Function just used to test custom metrics ###
def mimic_minkowski(X1,X2):
distance=0.0 # Initialize distance
for i in range(len(X1)): # For each element in X1 (in this case it is just 1D)
distance=distance+(X1[i]-X2[i])**2 # Make sum of squared differences
distance=math.sqrt(distance) # Calculate final distance as sqrt of previous sum
return distance
### Function just used to test custom metrics ###
def custom_metric(X1,X2):
diff = X1[0]-X2[0] # Calculate Day difference between X1 and X2
diff = 360/365*diff # Transforms Day difference to angle difference
diff = diff * math.pi/180 # Transform degree to radians
distance = math.sqrt(1-math.cos(diff)) # Calculate distance in polar coordinates
return distance
###################### MAIN CODE ######################
# Read data
db_file='dataset.csv' # Name of csv file with dataset
df=pd.read_csv(db_file,index_col=0) # Read dataset into a dataframe
X = df['Day'].values # Assign 'Day' descriptor
y = df['T (deg C)'].values # Assign 'T (deg C)' target property
# kNN regression
Neighbors=[1,2,3,4,5,6,7,8,9,10] # Specify number of neighbors k used in grid search
# Grid search loop
for k in Neighbors:
# Initialize lists
y_predicted=[]
y_real=[]
# Specify options for kNN regression
kNNreg = KNeighborsRegressor(n_neighbors=k, weights='distance', metric=custom_metric)
# Leave-one-out loop
for train_index, test_index in LeaveOneOut().split(X):
# Assign train/test values
X_train,X_test=X[train_index],X[test_index]
y_train,y_test=y[train_index],y[test_index]
X_train = X_train.reshape(-1, 1)
X_test = X_test.reshape(-1, 1)
# Predict data
y_pred = kNNreg.fit(X_train, y_train.ravel()).predict(X_test)
# Append data of each leave-one-out iteration
y_predicted.append(y_pred.tolist())
y_real.append(y_test.tolist())
# Flatten lists with real and predicted values
y_real = [item for dummy in y_real for item in dummy ]
y_predicted = [item for dummy in y_predicted for item in dummy ]
# Calculate r and rmse metrics
r, _ = pearsonr(y_real, y_predicted)
rmse = math.sqrt(mean_squared_error(y_real, y_predicted))
# Print results for each k value in the grid search
print('Neighbors: %i. r: %.3f. rmse: %.3f' %(k, r, rmse))
| 47.483333
| 97
| 0.648649
|
e39e13d1cbdb966b75acf17bf5d784bb836df7d1
| 1,243
|
py
|
Python
|
test/proj4/proj-regression-EPSG-3857-7.py
|
dvuckovic/magics-test
|
bd8baf97b0db986f6adf63700d3cf77bbcbad2f2
|
[
"Apache-2.0"
] | 7
|
2019-03-19T09:32:41.000Z
|
2022-02-07T13:20:33.000Z
|
test/proj4/proj-regression-EPSG-3857-7.py
|
dvuckovic/magics-test
|
bd8baf97b0db986f6adf63700d3cf77bbcbad2f2
|
[
"Apache-2.0"
] | 2
|
2021-03-30T05:37:20.000Z
|
2021-08-17T13:58:04.000Z
|
test/proj4/proj-regression-EPSG-3857-7.py
|
dvuckovic/magics-test
|
bd8baf97b0db986f6adf63700d3cf77bbcbad2f2
|
[
"Apache-2.0"
] | 5
|
2019-03-19T10:43:46.000Z
|
2021-09-09T14:28:39.000Z
|
from Magics.macro import *
import os
def plot_area(epsg, llx, lly, urx, ury):
img = os.path.basename(__file__).split('.')[0]
title = "Projection {} : [{:.2f}, {:.2f}, {:.2f}, {:.2f}]".format(epsg, llx, lly, urx, ury)
#Setting output
png = output(
output_formats = ['png'],
output_name = img,
output_name_first_page_number = 'off')
#Setting the geographical area
area = mmap(
subpage_lower_left_latitude = lly,
subpage_lower_left_longitude = llx,
subpage_map_projection = epsg,
subpage_upper_right_latitude = ury,
subpage_upper_right_longitude = urx,
subpage_map_area_definition = "corners"
)
#Setting the coastlines
background = mcoast(
map_coastline_land_shade = 'on',
map_coastline_resolution = "medium",
map_coastline_land_shade_colour = 'cream')
#Picking the grib metadata
title = mtext(
text_lines = [title],
text_justification = 'left',
text_font_size = 0.6,
text_colour = 'charcoal')
#Plotting
plot(png,area,background,title,)
plot_area("EPSG:3857", -43.78390547590436, -83.07181387234992, 33.69286169167958, -5.726166399790497 )
| 28.906977
| 103
| 0.629928
|
1163f6ee9ab0d2067c7e9dbd03dd41d863ec8b88
| 34,043
|
py
|
Python
|
test/functional/test_framework/test_node.py
|
who-biz/chipschain
|
9e8c1a20de529dc38837d39c809681827e82e2b4
|
[
"MIT"
] | null | null | null |
test/functional/test_framework/test_node.py
|
who-biz/chipschain
|
9e8c1a20de529dc38837d39c809681827e82e2b4
|
[
"MIT"
] | 4
|
2021-07-08T06:03:31.000Z
|
2021-12-08T02:42:23.000Z
|
test/functional/test_framework/test_node.py
|
who-biz/chipschain
|
9e8c1a20de529dc38837d39c809681827e82e2b4
|
[
"MIT"
] | 3
|
2021-07-14T23:56:56.000Z
|
2021-09-23T16:02:36.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2017-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Class for bitcoind node under test"""
import contextlib
import decimal
import errno
from enum import Enum
import http.client
import json
import logging
import os
import re
import subprocess
import tempfile
import time
import urllib.parse
import collections
import shlex
import sys
from .authproxy import JSONRPCException
from .descriptors import descsum_create
from .p2p import P2P_SUBVERSION
from .util import (
MAX_NODES,
assert_equal,
append_config,
delete_cookie_file,
get_auth_cookie,
get_rpc_proxy,
rpc_url,
wait_until_helper,
p2p_port,
EncodeDecimal,
)
BITCOIND_PROC_WAIT_TIMEOUT = 60
class FailedToStartError(Exception):
"""Raised when a node fails to start correctly."""
class ErrorMatch(Enum):
FULL_TEXT = 1
FULL_REGEX = 2
PARTIAL_REGEX = 3
class TestNode():
"""A class for representing a bitcoind node under test.
This class contains:
- state about the node (whether it's running, etc)
- a Python subprocess.Popen object representing the running process
- an RPC connection to the node
- one or more P2P connections to the node
To make things easier for the test writer, any unrecognised messages will
be dispatched to the RPC connection."""
def __init__(self, i, datadir, *, chain, rpchost, timewait, timeout_factor, bitcoind, bitcoin_cli, coverage_dir, cwd, extra_conf=None, extra_args=None, use_cli=False, start_perf=False, use_valgrind=False, version=None, descriptors=False):
"""
Kwargs:
start_perf (bool): If True, begin profiling the node with `perf` as soon as
the node starts.
"""
self.index = i
self.p2p_conn_index = 1
self.datadir = datadir
self.bitcoinconf = os.path.join(self.datadir, "chips.conf")
self.stdout_dir = os.path.join(self.datadir, "stdout")
self.stderr_dir = os.path.join(self.datadir, "stderr")
self.chain = chain
self.rpchost = rpchost
self.rpc_timeout = timewait
self.binary = bitcoind
self.coverage_dir = coverage_dir
self.cwd = cwd
self.descriptors = descriptors
if extra_conf is not None:
append_config(datadir, extra_conf)
# Most callers will just need to add extra args to the standard list below.
# For those callers that need more flexibility, they can just set the args property directly.
# Note that common args are set in the config file (see initialize_datadir)
self.extra_args = extra_args
self.version = version
# Configuration for logging is set as command-line args rather than in the bitcoin.conf file.
# This means that starting a bitcoind using the temp dir to debug a failed test won't
# spam debug.log.
self.args = [
self.binary,
"-datadir=" + self.datadir,
"-logtimemicros",
"-debug",
"-debugexclude=libevent",
"-debugexclude=leveldb",
"-uacomment=testnode%d" % i,
]
if use_valgrind:
default_suppressions_file = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"..", "..", "..", "contrib", "valgrind.supp")
suppressions_file = os.getenv("VALGRIND_SUPPRESSIONS_FILE",
default_suppressions_file)
self.args = ["valgrind", "--suppressions={}".format(suppressions_file),
"--gen-suppressions=all", "--exit-on-first-error=yes",
"--error-exitcode=1", "--quiet"] + self.args
if self.version_is_at_least(190000):
self.args.append("-logthreadnames")
if self.version_is_at_least(219900):
self.args.append("-logsourcelocations")
self.cli = TestNodeCLI(bitcoin_cli, self.datadir)
self.use_cli = use_cli
self.start_perf = start_perf
self.running = False
self.process = None
self.rpc_connected = False
self.rpc = None
self.url = None
self.log = logging.getLogger('TestFramework.node%d' % i)
self.cleanup_on_exit = True # Whether to kill the node when this object goes away
# Cache perf subprocesses here by their data output filename.
self.perf_subprocesses = {}
self.p2ps = []
self.timeout_factor = timeout_factor
AddressKeyPair = collections.namedtuple('AddressKeyPair', ['address', 'key'])
PRIV_KEYS = [
# address , privkey
AddressKeyPair('mjTkW3DjgyZck4KbiRusZsqTgaYTxdSz6z', 'cVpF924EspNh8KjYsfhgY96mmxvT6DgdWiTYMtMjuM74hJaU5psW'),
AddressKeyPair('msX6jQXvxiNhx3Q62PKeLPrhrqZQdSimTg', 'cUxsWyKyZ9MAQTaAhUQWJmBbSvHMwSmuv59KgxQV7oZQU3PXN3KE'),
AddressKeyPair('mnonCMyH9TmAsSj3M59DsbH8H63U3RKoFP', 'cTrh7dkEAeJd6b3MRX9bZK8eRmNqVCMH3LSUkE3dSFDyzjU38QxK'),
AddressKeyPair('mqJupas8Dt2uestQDvV2NH3RU8uZh2dqQR', 'cVuKKa7gbehEQvVq717hYcbE9Dqmq7KEBKqWgWrYBa2CKKrhtRim'),
AddressKeyPair('msYac7Rvd5ywm6pEmkjyxhbCDKqWsVeYws', 'cQDCBuKcjanpXDpCqacNSjYfxeQj8G6CAtH1Dsk3cXyqLNC4RPuh'),
AddressKeyPair('n2rnuUnwLgXqf9kk2kjvVm8R5BZK1yxQBi', 'cQakmfPSLSqKHyMFGwAqKHgWUiofJCagVGhiB4KCainaeCSxeyYq'),
AddressKeyPair('myzuPxRwsf3vvGzEuzPfK9Nf2RfwauwYe6', 'cQMpDLJwA8DBe9NcQbdoSb1BhmFxVjWD5gRyrLZCtpuF9Zi3a9RK'),
AddressKeyPair('mumwTaMtbxEPUswmLBBN3vM9oGRtGBrys8', 'cSXmRKXVcoouhNNVpcNKFfxsTsToY5pvB9DVsFksF1ENunTzRKsy'),
AddressKeyPair('mpV7aGShMkJCZgbW7F6iZgrvuPHjZjH9qg', 'cSoXt6tm3pqy43UMabY6eUTmR3eSUYFtB2iNQDGgb3VUnRsQys2k'),
AddressKeyPair('mq4fBNdckGtvY2mijd9am7DRsbRB4KjUkf', 'cN55daf1HotwBAgAKWVgDcoppmUNDtQSfb7XLutTLeAgVc3u8hik'),
AddressKeyPair('mpFAHDjX7KregM3rVotdXzQmkbwtbQEnZ6', 'cT7qK7g1wkYEMvKowd2ZrX1E5f6JQ7TM246UfqbCiyF7kZhorpX3'),
AddressKeyPair('mzRe8QZMfGi58KyWCse2exxEFry2sfF2Y7', 'cPiRWE8KMjTRxH1MWkPerhfoHFn5iHPWVK5aPqjW8NxmdwenFinJ'),
]
def get_deterministic_priv_key(self):
"""Return a deterministic priv key in base58, that only depends on the node's index"""
assert len(self.PRIV_KEYS) == MAX_NODES
return self.PRIV_KEYS[self.index]
def _node_msg(self, msg: str) -> str:
"""Return a modified msg that identifies this node by its index as a debugging aid."""
return "[node %d] %s" % (self.index, msg)
def _raise_assertion_error(self, msg: str):
"""Raise an AssertionError with msg modified to identify this node."""
raise AssertionError(self._node_msg(msg))
def __del__(self):
# Ensure that we don't leave any bitcoind processes lying around after
# the test ends
if self.process and self.cleanup_on_exit:
# Should only happen on test failure
# Avoid using logger, as that may have already been shutdown when
# this destructor is called.
print(self._node_msg("Cleaning up leftover process"))
self.process.kill()
def __getattr__(self, name):
"""Dispatches any unrecognised messages to the RPC connection or a CLI instance."""
if self.use_cli:
return getattr(RPCOverloadWrapper(self.cli, True, self.descriptors), name)
else:
assert self.rpc_connected and self.rpc is not None, self._node_msg("Error: no RPC connection")
return getattr(RPCOverloadWrapper(self.rpc, descriptors=self.descriptors), name)
def start(self, extra_args=None, *, cwd=None, stdout=None, stderr=None, **kwargs):
"""Start the node."""
if extra_args is None:
extra_args = self.extra_args
# Add a new stdout and stderr file each time bitcoind is started
if stderr is None:
stderr = tempfile.NamedTemporaryFile(dir=self.stderr_dir, delete=False)
if stdout is None:
stdout = tempfile.NamedTemporaryFile(dir=self.stdout_dir, delete=False)
self.stderr = stderr
self.stdout = stdout
if cwd is None:
cwd = self.cwd
# Delete any existing cookie file -- if such a file exists (eg due to
# unclean shutdown), it will get overwritten anyway by bitcoind, and
# potentially interfere with our attempt to authenticate
delete_cookie_file(self.datadir, self.chain)
# add environment variable LIBC_FATAL_STDERR_=1 so that libc errors are written to stderr and not the terminal
subp_env = dict(os.environ, LIBC_FATAL_STDERR_="1")
self.process = subprocess.Popen(self.args + extra_args, env=subp_env, stdout=stdout, stderr=stderr, cwd=cwd, **kwargs)
self.running = True
self.log.debug("bitcoind started, waiting for RPC to come up")
if self.start_perf:
self._start_perf()
def wait_for_rpc_connection(self):
"""Sets up an RPC connection to the bitcoind process. Returns False if unable to connect."""
# Poll at a rate of four times per second
poll_per_s = 4
for _ in range(poll_per_s * self.rpc_timeout):
if self.process.poll() is not None:
raise FailedToStartError(self._node_msg(
'chipsd exited with status {} during initialization'.format(self.process.returncode)))
try:
rpc = get_rpc_proxy(
rpc_url(self.datadir, self.index, self.chain, self.rpchost),
self.index,
timeout=self.rpc_timeout // 2, # Shorter timeout to allow for one retry in case of ETIMEDOUT
coveragedir=self.coverage_dir,
)
rpc.getblockcount()
# If the call to getblockcount() succeeds then the RPC connection is up
if self.version_is_at_least(190000):
# getmempoolinfo.loaded is available since commit
# bb8ae2c (version 0.19.0)
wait_until_helper(lambda: rpc.getmempoolinfo()['loaded'], timeout_factor=self.timeout_factor)
# Wait for the node to finish reindex, block import, and
# loading the mempool. Usually importing happens fast or
# even "immediate" when the node is started. However, there
# is no guarantee and sometimes ThreadImport might finish
# later. This is going to cause intermittent test failures,
# because generally the tests assume the node is fully
# ready after being started.
#
# For example, the node will reject block messages from p2p
# when it is still importing with the error "Unexpected
# block message received"
#
# The wait is done here to make tests as robust as possible
# and prevent racy tests and intermittent failures as much
# as possible. Some tests might not need this, but the
# overhead is trivial, and the added guarantees are worth
# the minimal performance cost.
self.log.debug("RPC successfully started")
if self.use_cli:
return
self.rpc = rpc
self.rpc_connected = True
self.url = self.rpc.url
return
except JSONRPCException as e: # Initialization phase
# -28 RPC in warmup
# -342 Service unavailable, RPC server started but is shutting down due to error
if e.error['code'] != -28 and e.error['code'] != -342:
raise # unknown JSON RPC exception
except ConnectionResetError:
# This might happen when the RPC server is in warmup, but shut down before the call to getblockcount
# succeeds. Try again to properly raise the FailedToStartError
pass
except OSError as e:
if e.errno == errno.ETIMEDOUT:
pass # Treat identical to ConnectionResetError
elif e.errno == errno.ECONNREFUSED:
pass # Port not yet open?
else:
raise # unknown OS error
except ValueError as e: # cookie file not found and no rpcuser or rpcpassword; bitcoind is still starting
if "No RPC credentials" not in str(e):
raise
time.sleep(1.0 / poll_per_s)
self._raise_assertion_error("Unable to connect to chipsd after {}s".format(self.rpc_timeout))
def wait_for_cookie_credentials(self):
"""Ensures auth cookie credentials can be read, e.g. for testing CLI with -rpcwait before RPC connection is up."""
self.log.debug("Waiting for cookie credentials")
# Poll at a rate of four times per second.
poll_per_s = 4
for _ in range(poll_per_s * self.rpc_timeout):
try:
get_auth_cookie(self.datadir, self.chain)
self.log.debug("Cookie credentials successfully retrieved")
return
except ValueError: # cookie file not found and no rpcuser or rpcpassword; bitcoind is still starting
pass # so we continue polling until RPC credentials are retrieved
time.sleep(1.0 / poll_per_s)
self._raise_assertion_error("Unable to retrieve cookie credentials after {}s".format(self.rpc_timeout))
def generate(self, nblocks, maxtries=1000000):
self.log.debug("TestNode.generate() dispatches `generate` call to `generatetoaddress`")
return self.generatetoaddress(nblocks=nblocks, address=self.get_deterministic_priv_key().address, maxtries=maxtries)
def get_wallet_rpc(self, wallet_name):
if self.use_cli:
return RPCOverloadWrapper(self.cli("-rpcwallet={}".format(wallet_name)), True, self.descriptors)
else:
assert self.rpc_connected and self.rpc, self._node_msg("RPC not connected")
wallet_path = "wallet/{}".format(urllib.parse.quote(wallet_name))
return RPCOverloadWrapper(self.rpc / wallet_path, descriptors=self.descriptors)
def version_is_at_least(self, ver):
return self.version is None or self.version >= ver
def stop_node(self, expected_stderr='', *, wait=0, wait_until_stopped=True):
"""Stop the node."""
if not self.running:
return
self.log.debug("Stopping node")
try:
# Do not use wait argument when testing older nodes, e.g. in feature_backwards_compatibility.py
if self.version_is_at_least(180000):
self.stop(wait=wait)
else:
self.stop()
except http.client.CannotSendRequest:
self.log.exception("Unable to stop node.")
# If there are any running perf processes, stop them.
for profile_name in tuple(self.perf_subprocesses.keys()):
self._stop_perf(profile_name)
# Check that stderr is as expected
self.stderr.seek(0)
stderr = self.stderr.read().decode('utf-8').strip()
if stderr != expected_stderr:
raise AssertionError("Unexpected stderr {} != {}".format(stderr, expected_stderr))
self.stdout.close()
self.stderr.close()
del self.p2ps[:]
if wait_until_stopped:
self.wait_until_stopped()
def is_node_stopped(self):
"""Checks whether the node has stopped.
Returns True if the node has stopped. False otherwise.
This method is responsible for freeing resources (self.process)."""
if not self.running:
return True
return_code = self.process.poll()
if return_code is None:
return False
# process has stopped. Assert that it didn't return an error code.
assert return_code == 0, self._node_msg(
"Node returned non-zero exit code (%d) when stopping" % return_code)
self.running = False
self.process = None
self.rpc_connected = False
self.rpc = None
self.log.debug("Node stopped")
return True
def wait_until_stopped(self, timeout=BITCOIND_PROC_WAIT_TIMEOUT):
wait_until_helper(self.is_node_stopped, timeout=timeout, timeout_factor=self.timeout_factor)
@contextlib.contextmanager
def assert_debug_log(self, expected_msgs, unexpected_msgs=None, timeout=2):
if unexpected_msgs is None:
unexpected_msgs = []
time_end = time.time() + timeout * self.timeout_factor
debug_log = os.path.join(self.datadir, self.chain, 'debug.log')
with open(debug_log, encoding='utf-8') as dl:
dl.seek(0, 2)
prev_size = dl.tell()
yield
while True:
found = True
with open(debug_log, encoding='utf-8') as dl:
dl.seek(prev_size)
log = dl.read()
print_log = " - " + "\n - ".join(log.splitlines())
for unexpected_msg in unexpected_msgs:
if re.search(re.escape(unexpected_msg), log, flags=re.MULTILINE):
self._raise_assertion_error('Unexpected message "{}" partially matches log:\n\n{}\n\n'.format(unexpected_msg, print_log))
for expected_msg in expected_msgs:
if re.search(re.escape(expected_msg), log, flags=re.MULTILINE) is None:
found = False
if found:
return
if time.time() >= time_end:
break
time.sleep(0.05)
self._raise_assertion_error('Expected messages "{}" does not partially match log:\n\n{}\n\n'.format(str(expected_msgs), print_log))
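# A minimal usage sketch for assert_debug_log (node handle and log strings are illustrative,
# not taken from this file):
#     with node.assert_debug_log(expected_msgs=["Loaded best chain"], unexpected_msgs=["ERROR"]):
#         node.getblockcount()
# The body of the `with` block runs first; on exit, the lines appended to debug.log are
# scanned until every expected message is found (or the timeout expires), and any
# unexpected message immediately raises an assertion error.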
@contextlib.contextmanager
def profile_with_perf(self, profile_name: str):
"""
Context manager that allows easy profiling of node activity using `perf`.
See `test/functional/README.md` for details on perf usage.
Args:
profile_name: This string will be appended to the
profile data filename generated by perf.
"""
subp = self._start_perf(profile_name)
yield
if subp:
self._stop_perf(profile_name)
def _start_perf(self, profile_name=None):
"""Start a perf process to profile this node.
Returns the subprocess running perf."""
subp = None
def test_success(cmd):
return subprocess.call(
# shell=True required for pipe use below
cmd, shell=True,
stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL) == 0
if not sys.platform.startswith('linux'):
self.log.warning("Can't profile with perf; only available on Linux platforms")
return None
if not test_success('which perf'):
self.log.warning("Can't profile with perf; must install perf-tools")
return None
if not test_success('readelf -S {} | grep .debug_str'.format(shlex.quote(self.binary))):
self.log.warning(
"perf output won't be very useful without debug symbols compiled into chipsd")
output_path = tempfile.NamedTemporaryFile(
dir=self.datadir,
prefix="{}.perf.data.".format(profile_name or 'test'),
delete=False,
).name
cmd = [
'perf', 'record',
'-g', # Record the callgraph.
'--call-graph', 'dwarf', # Compatibility for gcc's --fomit-frame-pointer.
'-F', '101', # Sampling frequency in Hz.
'-p', str(self.process.pid),
'-o', output_path,
]
subp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.perf_subprocesses[profile_name] = subp
return subp
def _stop_perf(self, profile_name):
"""Stop (and pop) a perf subprocess."""
subp = self.perf_subprocesses.pop(profile_name)
output_path = subp.args[subp.args.index('-o') + 1]
subp.terminate()
subp.wait(timeout=10)
stderr = subp.stderr.read().decode()
if 'Consider tweaking /proc/sys/kernel/perf_event_paranoid' in stderr:
self.log.warning(
"perf couldn't collect data! Try "
"'sudo sysctl -w kernel.perf_event_paranoid=-1'")
else:
report_cmd = "perf report -i {}".format(output_path)
self.log.info("See perf output by running '{}'".format(report_cmd))
def assert_start_raises_init_error(self, extra_args=None, expected_msg=None, match=ErrorMatch.FULL_TEXT, *args, **kwargs):
"""Attempt to start the node and expect it to raise an error.
extra_args: extra arguments to pass through to bitcoind
expected_msg: regex that stderr should match when bitcoind fails
Will throw if bitcoind starts without an error.
Will throw if an expected_msg is provided and it does not match bitcoind's stderr."""
with tempfile.NamedTemporaryFile(dir=self.stderr_dir, delete=False) as log_stderr, \
tempfile.NamedTemporaryFile(dir=self.stdout_dir, delete=False) as log_stdout:
try:
self.start(extra_args, stdout=log_stdout, stderr=log_stderr, *args, **kwargs)
self.wait_for_rpc_connection()
self.stop_node()
self.wait_until_stopped()
except FailedToStartError as e:
self.log.debug('chipsd failed to start: %s', e)
self.running = False
self.process = None
# Check stderr for expected message
if expected_msg is not None:
log_stderr.seek(0)
stderr = log_stderr.read().decode('utf-8').strip()
if match == ErrorMatch.PARTIAL_REGEX:
if re.search(expected_msg, stderr, flags=re.MULTILINE) is None:
self._raise_assertion_error(
'Expected message "{}" does not partially match stderr:\n"{}"'.format(expected_msg, stderr))
elif match == ErrorMatch.FULL_REGEX:
if re.fullmatch(expected_msg, stderr) is None:
self._raise_assertion_error(
'Expected message "{}" does not fully match stderr:\n"{}"'.format(expected_msg, stderr))
elif match == ErrorMatch.FULL_TEXT:
if expected_msg != stderr:
self._raise_assertion_error(
'Expected message "{}" does not fully match stderr:\n"{}"'.format(expected_msg, stderr))
except subprocess.TimeoutExpired:
self.process.kill()
self.running = False
self.process = None
assert_msg = f'chipsd should have exited within {self.rpc_timeout}s '
if expected_msg is None:
assert_msg += "with an error"
else:
assert_msg += "with expected error " + expected_msg
self._raise_assertion_error(assert_msg)
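# Typical call shape (the flag and message below are placeholders, not guaranteed options):
#     node.assert_start_raises_init_error(
#         extra_args=["-someinvalidflag"],
#         expected_msg="Error parsing command line arguments",
#         match=ErrorMatch.PARTIAL_REGEX,
#     )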
def add_p2p_connection(self, p2p_conn, *, wait_for_verack=True, **kwargs):
"""Add an inbound p2p connection to the node.
This method adds the p2p connection to the self.p2ps list and also
returns the connection to the caller."""
if 'dstport' not in kwargs:
kwargs['dstport'] = p2p_port(self.index)
if 'dstaddr' not in kwargs:
kwargs['dstaddr'] = '127.0.0.1'
p2p_conn.peer_connect(**kwargs, net=self.chain, timeout_factor=self.timeout_factor)()
self.p2ps.append(p2p_conn)
p2p_conn.wait_until(lambda: p2p_conn.is_connected, check_connected=False)
if wait_for_verack:
# Wait for the node to send us the version and verack
p2p_conn.wait_for_verack()
# At this point we have sent our version message and received the version and verack, however the full node
# has not yet received the verack from us (in reply to their version). So, the connection is not yet fully
# established (fSuccessfullyConnected).
#
# This shouldn't lead to any issues when sending messages, since the verack will be in-flight before the
# message we send. However, it might lead to races where we are expecting to receive a message. E.g. a
# transaction that will be added to the mempool as soon as we return here.
#
# So syncing here is redundant when we only want to send a message, but the cost is low (a few milliseconds)
# in comparison to the upside of making tests less fragile and unexpected intermittent errors less likely.
p2p_conn.sync_with_ping()
# Consistency check that Bitcoin Core has received our user agent string. This checks the
# node's newest peer. It could be racy if another Bitcoin Core node has connected since we opened
# our connection, but we don't expect that to happen.
assert_equal(self.getpeerinfo()[-1]['subver'], P2P_SUBVERSION)
return p2p_conn
def add_outbound_p2p_connection(self, p2p_conn, *, p2p_idx, connection_type="outbound-full-relay", **kwargs):
"""Add an outbound p2p connection from node. Either
full-relay("outbound-full-relay") or
block-relay-only("block-relay-only") connection.
This method adds the p2p connection to the self.p2ps list and returns
the connection to the caller.
"""
def addconnection_callback(address, port):
self.log.debug("Connecting to %s:%d %s" % (address, port, connection_type))
self.addconnection('%s:%d' % (address, port), connection_type)
p2p_conn.peer_accept_connection(connect_cb=addconnection_callback, connect_id=p2p_idx + 1, net=self.chain, timeout_factor=self.timeout_factor, **kwargs)()
p2p_conn.wait_for_connect()
self.p2ps.append(p2p_conn)
p2p_conn.wait_for_verack()
p2p_conn.sync_with_ping()
return p2p_conn
def num_test_p2p_connections(self):
"""Return number of test framework p2p connections to the node."""
return len([peer for peer in self.getpeerinfo() if peer['subver'] == P2P_SUBVERSION])
def disconnect_p2ps(self):
"""Close all p2p connections to the node."""
for p in self.p2ps:
p.peer_disconnect()
del self.p2ps[:]
wait_until_helper(lambda: self.num_test_p2p_connections() == 0, timeout_factor=self.timeout_factor)
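# Hedged usage sketch for the p2p helpers above (P2PInterface lives in the test framework's
# p2p module; the exact class used is up to the test):
#     peer = node.add_p2p_connection(P2PInterface())
#     peer.sync_with_ping()
#     node.disconnect_p2ps()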
class TestNodeCLIAttr:
def __init__(self, cli, command):
self.cli = cli
self.command = command
def __call__(self, *args, **kwargs):
return self.cli.send_cli(self.command, *args, **kwargs)
def get_request(self, *args, **kwargs):
return lambda: self(*args, **kwargs)
def arg_to_cli(arg):
if isinstance(arg, bool):
return str(arg).lower()
elif arg is None:
return 'null'
elif isinstance(arg, dict) or isinstance(arg, list):
return json.dumps(arg, default=EncodeDecimal)
else:
return str(arg)
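# Illustrative conversions performed by arg_to_cli (example values only):
#     arg_to_cli(True)        -> "true"
#     arg_to_cli(None)        -> "null"
#     arg_to_cli({"n": 1})    -> '{"n": 1}'
#     arg_to_cli(5)           -> "5"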
class TestNodeCLI():
"""Interface to bitcoin-cli for an individual node"""
def __init__(self, binary, datadir):
self.options = []
self.binary = binary
self.datadir = datadir
self.input = None
self.log = logging.getLogger('TestFramework.chipscli')
def __call__(self, *options, input=None):
# TestNodeCLI is callable with bitcoin-cli command-line options
cli = TestNodeCLI(self.binary, self.datadir)
cli.options = [str(o) for o in options]
cli.input = input
return cli
def __getattr__(self, command):
return TestNodeCLIAttr(self, command)
def batch(self, requests):
results = []
for request in requests:
try:
results.append(dict(result=request()))
except JSONRPCException as e:
results.append(dict(error=e))
return results
def send_cli(self, command=None, *args, **kwargs):
"""Run bitcoin-cli command. Deserializes returned string as python object."""
pos_args = [arg_to_cli(arg) for arg in args]
named_args = [str(key) + "=" + arg_to_cli(value) for (key, value) in kwargs.items()]
assert not (pos_args and named_args), "Cannot use positional arguments and named arguments in the same bitcoin-cli call"
p_args = [self.binary, "-datadir=" + self.datadir] + self.options
if named_args:
p_args += ["-named"]
if command is not None:
p_args += [command]
p_args += pos_args + named_args
self.log.debug("Running chips-cli {}".format(p_args[2:]))
process = subprocess.Popen(p_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
cli_stdout, cli_stderr = process.communicate(input=self.input)
returncode = process.poll()
if returncode:
match = re.match(r'error code: ([-0-9]+)\nerror message:\n(.*)', cli_stderr)
if match:
code, message = match.groups()
raise JSONRPCException(dict(code=int(code), message=message))
# Ignore cli_stdout, raise with cli_stderr
raise subprocess.CalledProcessError(returncode, self.binary, output=cli_stderr)
try:
return json.loads(cli_stdout, parse_float=decimal.Decimal)
except (json.JSONDecodeError, decimal.InvalidOperation):
return cli_stdout.rstrip("\n")
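# Rough call flow when a test drives RPCs through the CLI (wallet name is illustrative):
#     node.cli.getblockchaininfo()              # __getattr__ -> TestNodeCLIAttr -> send_cli("getblockchaininfo")
#     node.cli("-rpcwallet=w1").getbalance()    # __call__ returns a new CLI bound to the extra options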
class RPCOverloadWrapper():
def __init__(self, rpc, cli=False, descriptors=False):
self.rpc = rpc
self.is_cli = cli
self.descriptors = descriptors
def __getattr__(self, name):
return getattr(self.rpc, name)
def createwallet(self, wallet_name, disable_private_keys=None, blank=None, passphrase='', avoid_reuse=None, descriptors=None, load_on_startup=None, external_signer=None):
if descriptors is None:
descriptors = self.descriptors
return self.__getattr__('createwallet')(wallet_name, disable_private_keys, blank, passphrase, avoid_reuse, descriptors, load_on_startup, external_signer)
def importprivkey(self, privkey, label=None, rescan=None):
wallet_info = self.getwalletinfo()
if 'descriptors' not in wallet_info or ('descriptors' in wallet_info and not wallet_info['descriptors']):
return self.__getattr__('importprivkey')(privkey, label, rescan)
desc = descsum_create('combo(' + privkey + ')')
req = [{
'desc': desc,
'timestamp': 0 if rescan else 'now',
'label': label if label else ''
}]
import_res = self.importdescriptors(req)
if not import_res[0]['success']:
raise JSONRPCException(import_res[0]['error'])
def addmultisigaddress(self, nrequired, keys, label=None, address_type=None):
wallet_info = self.getwalletinfo()
if 'descriptors' not in wallet_info or ('descriptors' in wallet_info and not wallet_info['descriptors']):
return self.__getattr__('addmultisigaddress')(nrequired, keys, label, address_type)
cms = self.createmultisig(nrequired, keys, address_type)
req = [{
'desc': cms['descriptor'],
'timestamp': 0,
'label': label if label else ''
}]
import_res = self.importdescriptors(req)
if not import_res[0]['success']:
raise JSONRPCException(import_res[0]['error'])
return cms
def importpubkey(self, pubkey, label=None, rescan=None):
wallet_info = self.getwalletinfo()
if 'descriptors' not in wallet_info or ('descriptors' in wallet_info and not wallet_info['descriptors']):
return self.__getattr__('importpubkey')(pubkey, label, rescan)
desc = descsum_create('combo(' + pubkey + ')')
req = [{
'desc': desc,
'timestamp': 0 if rescan else 'now',
'label': label if label else ''
}]
import_res = self.importdescriptors(req)
if not import_res[0]['success']:
raise JSONRPCException(import_res[0]['error'])
def importaddress(self, address, label=None, rescan=None, p2sh=None):
wallet_info = self.getwalletinfo()
if 'descriptors' not in wallet_info or ('descriptors' in wallet_info and not wallet_info['descriptors']):
return self.__getattr__('importaddress')(address, label, rescan, p2sh)
is_hex = False
try:
int(address, 16)
is_hex = True
desc = descsum_create('raw(' + address + ')')
except ValueError:
desc = descsum_create('addr(' + address + ')')
reqs = [{
'desc': desc,
'timestamp': 0 if rescan else 'now',
'label': label if label else ''
}]
if is_hex and p2sh:
reqs.append({
'desc': descsum_create('p2sh(raw(' + address + '))'),
'timestamp': 0 if rescan else 'now',
'label': label if label else ''
})
import_res = self.importdescriptors(reqs)
for res in import_res:
if not res['success']:
raise JSONRPCException(res['error'])
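# The wrappers above emulate legacy wallet RPCs on descriptor wallets by translating each
# import into an importdescriptors request. Rough sketch (key and wallet name are placeholders):
#     wrpc = node.get_wallet_rpc("w1")
#     wrpc.importprivkey("cPrivKeyPlaceholder")   # routed through importdescriptors([{"desc": "combo(...)", ...}])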
| 44.911609
| 242
| 0.624798
|
6c9228a0472b2a563b3c8f6ecefe529eebc3e4c6
| 75
|
py
|
Python
|
inheritance/exercise/project_need_for_speed/family_car.py
|
ivan-yosifov88/python_oop
|
82b210e427cb80dbab3b9a5c3fceab431ee60164
|
[
"MIT"
] | 1
|
2021-05-21T20:28:55.000Z
|
2021-05-21T20:28:55.000Z
|
inheritance/exercise/project_need_for_speed/family_car.py
|
ivan-yosifov88/python_oop
|
82b210e427cb80dbab3b9a5c3fceab431ee60164
|
[
"MIT"
] | null | null | null |
inheritance/exercise/project_need_for_speed/family_car.py
|
ivan-yosifov88/python_oop
|
82b210e427cb80dbab3b9a5c3fceab431ee60164
|
[
"MIT"
] | null | null | null |
from project_need_for_speed.car import Car
class FamilyCar(Car):
pass
| 15
| 42
| 0.786667
|
b2dd9895b6ee701b92b4c64071246b5f5883a113
| 1,386
|
py
|
Python
|
frag6_mf0long.py
|
bluhm/frag6-regress
|
1f2dbb1030e61764bc5f1e1d46f6f743e6c095ed
|
[
"0BSD"
] | 1
|
2017-09-13T14:54:02.000Z
|
2017-09-13T14:54:02.000Z
|
frag6_mf0long.py
|
bluhm/frag6-regress
|
1f2dbb1030e61764bc5f1e1d46f6f743e6c095ed
|
[
"0BSD"
] | null | null | null |
frag6_mf0long.py
|
bluhm/frag6-regress
|
1f2dbb1030e61764bc5f1e1d46f6f743e6c095ed
|
[
"0BSD"
] | null | null | null |
#!/usr/local/bin/python2.7
print "ping6 fragment that overlaps longer than the last fragment with m=0"
# |---------|
# |XXXX|
# |----|
import os
from addr import *
from scapy.all import *
pid=os.getpid()
eid=pid & 0xffff
payload="ABCDEFGHIJKLMNOP"
dummy="01234567"
packet=IPv6(src=LOCAL_ADDR6, dst=REMOTE_ADDR6)/ \
ICMPv6EchoRequest(id=eid, data=payload)
frag=[]
fid=pid & 0xffffffff
frag.append(IPv6ExtHdrFragment(nh=58, id=fid, offset=1)/str(packet)[48:64])
frag.append(IPv6ExtHdrFragment(nh=58, id=fid, offset=3)/dummy)
frag.append(IPv6ExtHdrFragment(nh=58, id=fid, m=1)/str(packet)[40:48])
eth=[]
for f in frag:
pkt=IPv6(src=LOCAL_ADDR6, dst=REMOTE_ADDR6)/f
eth.append(Ether(src=LOCAL_MAC, dst=REMOTE_MAC)/pkt)
if os.fork() == 0:
time.sleep(1)
sendp(eth, iface=LOCAL_IF)
os._exit(0)
ans=sniff(iface=LOCAL_IF, timeout=3, filter=
"ip6 and src "+REMOTE_ADDR6+" and dst "+LOCAL_ADDR6+" and icmp6")
for a in ans:
if a and a.type == ETH_P_IPV6 and \
ipv6nh[a.payload.nh] == 'ICMPv6' and \
icmp6types[a.payload.payload.type] == 'Echo Reply':
id=a.payload.payload.id
print "id=%#x" % (id)
if id != eid:
print "WRONG ECHO REPLY ID"
exit(2)
data=a.payload.payload.data
print "payload=%s" % (data)
if data == payload:
print "ECHO REPLY"
exit(1)
print "PAYLOAD!=%s" % (payload)
exit(2)
print "no echo reply"
exit(0)
| 25.666667
| 75
| 0.670996
|
7eb62ea3512f422dda2d75ab52a83fdf54ac1f68
| 216
|
py
|
Python
|
fastapi_admin/enums.py
|
myWorkshop123/fastapi-admin
|
e2f23ae63f45d2b9c74c85a34f5d71d1039a2913
|
[
"Apache-2.0"
] | 636
|
2020-06-03T09:59:03.000Z
|
2022-03-31T23:02:26.000Z
|
fastapi_admin/enums.py
|
changzhi777/fastapi-admin
|
6f50cede91a3a06faa6601addaaeb3eaa0c753a1
|
[
"Apache-2.0"
] | 60
|
2021-05-01T12:52:44.000Z
|
2022-02-22T06:58:30.000Z
|
fastapi_admin/enums.py
|
changzhi777/fastapi-admin
|
6f50cede91a3a06faa6601addaaeb3eaa0c753a1
|
[
"Apache-2.0"
] | 114
|
2021-04-25T16:21:10.000Z
|
2022-03-25T17:30:23.000Z
|
from enum import Enum
class StrEnum(str, Enum):
def __str__(self):
return self.value
class Method(StrEnum):
GET = "GET"
POST = "POST"
DELETE = "DELETE"
PUT = "PUT"
PATCH = "PATCH"
| 14.4
| 25
| 0.583333
|
5958ffb88518876b4ae2d10ccf364587a1165170
| 4,063
|
py
|
Python
|
src/util/Notice.py
|
Denon/redis-monitor
|
db78e2c7c13e636c4071ae3df23ac43d7288e36f
|
[
"MIT"
] | 2
|
2016-07-29T03:53:51.000Z
|
2017-01-03T06:23:27.000Z
|
src/util/Notice.py
|
Denon/redis-monitor
|
db78e2c7c13e636c4071ae3df23ac43d7288e36f
|
[
"MIT"
] | null | null | null |
src/util/Notice.py
|
Denon/redis-monitor
|
db78e2c7c13e636c4071ae3df23ac43d7288e36f
|
[
"MIT"
] | null | null | null |
import threading
import re
import traceback
import smtplib
from email.mime.text import MIMEText
from email.header import Header
from datetime import datetime
from datetime import timedelta
import multiprocessing
from setting import settings
DEFAULT_MONITOR_PARAM = {
"used_memory_peak_human": "< 1K"
}
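# Each monitor_param entry is "<operator> <threshold>", e.g. {"used_memory_peak_human": "< 1K"}:
# monitor() raises an alert when the reported value no longer satisfies the operator
# (see _compare inside monitor() below).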
class EmailNotice(multiprocessing.Process):
def __init__(self, data_queue, settings=None):
super(EmailNotice, self).__init__()
self.queue = data_queue
self.settings = settings or {}
self.smtp_server = self.settings.get("email", {}).get("server", "")
self.user = self.settings.get("email", {}).get("user", "")
self.password = self.settings.get("email", {}).get("password", "")
self.monitor_param = self.settings.get("monitor_param") or DEFAULT_MONITOR_PARAM
self.senddelta = timedelta(hours=self.settings.get("email", {}).get("senddelta", 0))
self.send_history = {}
self._stop = threading.Event()
def connect(self):
try:
self.s = smtplib.SMTP_SSL(self.smtp_server, 465)
self.s.login(self.user, self.password)
except smtplib.SMTPException:
self.s = None
def _time_check(self, key):
now = datetime.now()
if not self.send_history.get(key) or now - self.send_history.get(key) >= self.senddelta:
return True
else:
return False
def monitor(self, data):
def _compare(v1, v2, operator):
pattern = re.compile(r"\d*")
v1 = int(pattern.match(v1).group() or 0)
v2 = int(pattern.match(v2).group() or 0)
if operator == "<" and v1 > v2:
return True
elif operator == ">" and v1 < v2:
return True
elif operator == "=" and v1 != v2:
return True
else:
return False
alert_messages = []
if data.get("connection_error"):
if self._time_check("connection_error"):
now = datetime.now()
msg = data.get("error_message")
alert_messages.append(msg)
self.send_history["connection_error"] = now
else:
for key, value in self.monitor_param.items():
operator, alert_value = value.split(" ")
if _compare(data.get(key), alert_value, operator):
now = datetime.now()
if self._time_check(key):
msg = "{}, expected:{}, current:{}".format(key, value, data.get(key))
alert_messages.append(msg)
self.send_history[key] = now
return '\n'.join(alert_messages)
def send(self, text):
try:
if self.s:
msg = MIMEText(text)
msg['From'] = self.user
msg['Subject'] = Header("redis-monitor", 'utf-8')
self.s.sendmail(self.user, self.settings.get("email", {}).get("to_addr"), msg.as_string())
else:
# todo add log later
# print(text)
pass
except Exception:
traceback.print_exc()
def main_loop(self):
while True:
try:
data = self.queue.get()
alert_message = self.monitor(data)
if alert_message:
self.send(alert_message)
except Exception:
traceback.print_exc()
def run(self):
self.connect()
self.main_loop()
return
def run_notice(data_queue):
try:
notice = EmailNotice(data_queue, settings=settings)
notice.daemon = True
notice.start()
return notice
except Exception:
traceback.print_exc()
if __name__ == "__main__":
data_queue = multiprocessing.Queue()
notice = EmailNotice(data_queue, settings=settings)
data_queue.put({"used_memory_peak_human": "100K"})
data_queue.put({"used_memory_peak_human": "100K"})
notice.start()
| 34.432203
| 106
| 0.5587
|
1fe498563b633c76d11e0a27ce2138ba56e1c74d
| 2,879
|
py
|
Python
|
vis_movie.py
|
RuthAngus/astro-viz
|
d49de667cff6d22a1b049fd2268ed9255d9f7dbd
|
[
"MIT"
] | null | null | null |
vis_movie.py
|
RuthAngus/astro-viz
|
d49de667cff6d22a1b049fd2268ed9255d9f7dbd
|
[
"MIT"
] | null | null | null |
vis_movie.py
|
RuthAngus/astro-viz
|
d49de667cff6d22a1b049fd2268ed9255d9f7dbd
|
[
"MIT"
] | null | null | null |
# Make frames of exoplanet orbits.
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
plotpar = {'axes.labelsize': 18,
'font.size': 10,
'legend.fontsize': 18,
'xtick.labelsize': 18,
'ytick.labelsize': 18,
'text.usetex': True}
plt.rcParams.update(plotpar)
plt.rcParams['axes.facecolor'] = 'black'
def get_orbit(a, radius, period, frames, xcenter=0, ycenter=0,
slow_factor=30):
"""
frames = number of snapshots to generate for this planet.
The angular step per frame scales with 1/period, so a long-period planet
gets more snapshots per orbit and a short-period planet gets fewer:
snapshots per orbit are proportional to the period (i.e. to 1/angular
frequency), stretched further by slow_factor.
"""
random_phase = np.random.uniform(0, 2*np.pi)
x = a * np.cos(np.pi/(period*slow_factor) * np.arange(frames)
+ random_phase)
y = a * np.sin(np.pi/(period*slow_factor) * np.arange(frames)
+ random_phase)
return x, y
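# Minimal sketch of calling get_orbit (values made up for illustration):
#     x, y = get_orbit(a=1.0, radius=1.0, period=365.0, frames=800)
#     # x and y are arrays of `frames` points tracing a circle of radius `a` with a random phase.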
if __name__ == "__main__":
# Read in the data
df = pd.read_csv("planets.csv", skiprows=69)
# Select only transiting planets.
m = []
for i, method in enumerate(list(df.pl_discmethod.values)):
if method == "Transit":
m.append(i)
df = df.iloc[np.array(m)]
R_j = 6.991e7
radius_ratio = R_j/6.371e6 # Convert to Earth radii
nframes = 800
assert nframes % 2 == 0, "nframes must be an even number"
nstar = 800
a_s = df.pl_orbsmax.values #* 1.496e11 / R_j
radii = df.pl_radj.values * radius_ratio
periods = df.pl_orbper.values
xpositions = np.zeros((nstar, nframes))
ypositions = np.zeros((nstar, nframes))
for star in range(nstar):
x, y = get_orbit(a_s[star], radii[star], periods[star], nframes)
xpositions[star, :] = x
ypositions[star, :] = y
lim_min = -np.logspace(-2, np.log10(.5), nframes)
lim_max = np.logspace(-2, np.log10(.5), nframes)
for pos in range(nframes):
plt.clf()
plt.figure(figsize=(16, 9))
for star in range(nstar):
plt.scatter(xpositions[star, pos], ypositions[star, pos],
s=radii[star]*1./(lim_max[pos]-lim_min[pos]),
c="w", alpha=.5)
plt.xlim(lim_min[pos], lim_max[pos])
plt.ylim(lim_min[pos], lim_max[pos])
plt.gca().set_aspect('equal', adjustable='box')
plt.savefig("planet_cloud_movie/frame_{}".format(str(pos).zfill(4)))
plt.close()
# Make the movie file
import os
# framerate = 30
framerate = 20
quality = 25
os.system("/Applications/ffmpeg -r {0} -f image2 -s 1920x1080 -i "\
"planet_cloud_movie/frame_%04d.png -vcodec libx264 -crf {1} "\
"-pix_fmt yuv420p planet_cloud_movie.mp4".format(framerate,
quality))
| 33.870588
| 76
| 0.589441
|
173d3565c62e2da51456ac84fa1e55533870f6ac
| 151
|
py
|
Python
|
entrypoint.py
|
crbaker/actions-twister
|
9a197651b14ba5a0e143838743310518eef17bb2
|
[
"MIT"
] | null | null | null |
entrypoint.py
|
crbaker/actions-twister
|
9a197651b14ba5a0e143838743310518eef17bb2
|
[
"MIT"
] | null | null | null |
entrypoint.py
|
crbaker/actions-twister
|
9a197651b14ba5a0e143838743310518eef17bb2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import sys
import requests
twist_url = sys.argv[1]
content = sys.argv[2]
requests.post(twist_url, json = {"content": content})
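# Example invocation as a GitHub Action entrypoint (URL and text are placeholders):
#     python entrypoint.py https://twist.example/integrations/incoming_webhook "Build finished"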
| 16.777778
| 53
| 0.721854
|
bd4fc0b5e5e4f28cb834a116e1f8ab5ed5f68413
| 4,303
|
py
|
Python
|
userbot/modules/getmusic.py
|
Metamor-phosis/UserButt
|
05ed15916639bd40765169482ca712968876c58e
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
userbot/modules/getmusic.py
|
Metamor-phosis/UserButt
|
05ed15916639bd40765169482ca712968876c58e
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
userbot/modules/getmusic.py
|
Metamor-phosis/UserButt
|
05ed15916639bd40765169482ca712968876c58e
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 1
|
2020-12-20T08:16:08.000Z
|
2020-12-20T08:16:08.000Z
|
# Copyright (C) 2020 Aidil Aryanto.
# All rights reserved.
#
# Licensed under the Raphielscape Public License, Version 1.d (the "License");
# you may not use this file except in compliance with the License.
#
import datetime
import asyncio
from telethon import events
from telethon.errors.rpcerrorlist import YouBlockedUserError
from telethon.tl.functions.account import UpdateNotifySettingsRequest
from userbot import bot, CMD_HELP
from userbot.events import register
@register(outgoing=True, pattern="^.netease(?: |$)(.*)")
async def _(event):
if event.fwd_from:
return
song = event.pattern_match.group(1)
chat = "@WooMaiBot"
link = f"/netease {song}"
await event.edit("```Getting Your Music```")
async with bot.conversation(chat) as conv:
await asyncio.sleep(2)
await event.edit("`Downloading...Please wait`")
try:
msg = await conv.send_message(link)
response = await conv.get_response()
respond = await conv.get_response()
""" - don't spam notif - """
await bot.send_read_acknowledge(conv.chat_id)
except YouBlockedUserError:
await event.reply("```Please unblock @WooMaiBot and try again```")
return
await event.edit("`Sending Your Music...`")
await asyncio.sleep(3)
await bot.send_file(event.chat_id, respond)
await event.client.delete_messages(conv.chat_id,
[msg.id, response.id, respond.id])
await event.delete()
@register(outgoing=True, pattern="^.sdd(?: |$)(.*)")
async def _(event):
if event.fwd_from:
return
d_link = event.pattern_match.group(1)
if ".com" not in d_link:
await event.edit("` I need a link to download something pro.`**(._.)**")
else:
await event.edit("**Initiating Download!**")
chat = "@DeezLoadBot"
async with bot.conversation(chat) as conv:
try:
msg_start = await conv.send_message("/start")
response = await conv.get_response()
r = await conv.get_response()
msg = await conv.send_message(d_link)
details = await conv.get_response()
song = await conv.get_response()
""" - don't spam notif - """
await bot.send_read_acknowledge(conv.chat_id)
except YouBlockedUserError:
await event.edit("**Error:** `unblock` @DeezLoadBot `and retry!`")
return
await bot.send_file(event.chat_id, song, caption=details.text)
await event.client.delete_messages(conv.chat_id,
[msg_start.id, response.id, r.id, msg.id, details.id, song.id])
await event.delete()
@register(outgoing=True, pattern="^.smd(?: |$)(.*)")
async def _(event):
if event.fwd_from:
return
link = event.pattern_match.group(1)
chat = "@SpotifyMusicDownloaderBot"
await event.edit("```Getting Your Music```")
async with bot.conversation(chat) as conv:
await asyncio.sleep(2)
await event.edit("`Downloading music taking some times, Stay Tuned.....`")
try:
response = conv.wait_event(events.NewMessage(incoming=True,from_users=752979930))
msg = await bot.send_message(chat, link)
respond = await response
res = conv.wait_event(events.NewMessage(incoming=True,from_users=752979930))
r = await res
""" - don't spam notif - """
await bot.send_read_acknowledge(conv.chat_id)
except YouBlockedUserError:
await event.reply("```Please unblock @SpotifyMusicDownloaderBot and try again```")
return
await bot.forward_messages(event.chat_id, respond.message)
await event.client.delete_messages(conv.chat_id,
[msg.id, r.id, respond.id])
await event.delete()
CMD_HELP.update({
"getmusic":
"`.netease <Artist - Song Title>`"
"\nUsage: Download music with @WooMaiBot"
"\n\n`.sdd <Spotify/Deezer Link>`"
"\nUsage: Download music from Spotify or Deezer"
"\n\n`.smd <Artist - Song Title>`"
"\nUsage: Download music from Spotify"
})
| 40.980952
| 108
| 0.609342
|
fb7ccf1545d8c1efa9113c4b36716b5efca9580c
| 4,511
|
py
|
Python
|
sdk/containerregistry/azure-containerregistry/tests/test_container_repository_client.py
|
romahamu/azure-sdk-for-python
|
a57c9f73b9121f79d317e1679b81fd460d6a25b8
|
[
"MIT"
] | 1
|
2021-04-05T17:38:42.000Z
|
2021-04-05T17:38:42.000Z
|
sdk/containerregistry/azure-containerregistry/tests/test_container_repository_client.py
|
romahamu/azure-sdk-for-python
|
a57c9f73b9121f79d317e1679b81fd460d6a25b8
|
[
"MIT"
] | null | null | null |
sdk/containerregistry/azure-containerregistry/tests/test_container_repository_client.py
|
romahamu/azure-sdk-for-python
|
a57c9f73b9121f79d317e1679b81fd460d6a25b8
|
[
"MIT"
] | 1
|
2021-12-18T20:01:22.000Z
|
2021-12-18T20:01:22.000Z
|
# coding=utf-8
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import functools
import os
import pytest
from devtools_testutils import AzureTestCase, PowerShellPreparer
from azure.containerregistry import (
ContainerRepositoryClient,
ContainerRegistryClient,
RepositoryProperties,
ContentPermissions,
TagProperties,
TagOrderBy,
)
from azure.core.paging import ItemPaged
from testcase import ContainerRegistryTestClass
acr_preparer = functools.partial(
PowerShellPreparer,
"containerregistry",
containerregistry_baseurl="fake_url.azurecr.io",
)
class TestContainerRepositoryClient(AzureTestCase, ContainerRegistryTestClass):
repository = "hello-world"
@pytest.mark.live_test_only
@acr_preparer()
def test_get_attributes(self, containerregistry_baseurl):
client = self.create_repository_client(containerregistry_baseurl, self.repository)
repo_attribs = client.get_properties()
assert repo_attribs is not None
assert repo_attribs.content_permissions is not None
@pytest.mark.live_test_only
@acr_preparer()
def test_get_properties(self, containerregistry_baseurl):
reg_client = self.create_repository_client(containerregistry_baseurl, "hello-world")
properties = reg_client.get_properties()
assert isinstance(properties, RepositoryProperties)
assert properties.name == "hello-world"
assert properties.registry == containerregistry_baseurl
assert properties.content_permissions is not None
assert isinstance(properties.content_permissions, ContentPermissions)
@pytest.mark.skip("Pending")
@acr_preparer()
def test_get_registry_artifact_properties(self, containerregistry_baseurl):
reg_client = self.create_repository_client(containerregistry_baseurl, "hello-world")
digest = "sha256:90659bf80b44ce6be8234e6ff90a1ac34acbeb826903b02cfa0da11c82cbc042"
tag = "first"
properties = reg_client.get_registry_artifact_properties(digest)
first_properties = reg_client.get_registry_artifact_properties(tag)
self.assert_registry_artifact(properties, digest)
self.assert_registry_artifact(first_properties, tag)
@pytest.mark.live_test_only
@acr_preparer()
def test_get_tag(self, containerregistry_baseurl):
client = self.create_repository_client(containerregistry_baseurl, self.repository)
tag = client.get_tag_properties("latest")
self.assert_tag(tag)
@pytest.mark.live_test_only
@acr_preparer()
def test_list_tags(self, containerregistry_baseurl):
client = self.create_repository_client(containerregistry_baseurl, self.repository)
tags = client.list_tags()
assert isinstance(tags, ItemPaged)
count = 0
for tag in tags:
count += 1
print(tag)
assert count > 0
@pytest.mark.live_test_only
@acr_preparer()
def test_list_tags_descending(self, containerregistry_baseurl):
client = self.create_repository_client(containerregistry_baseurl, self.repository)
# TODO: This is giving time in ascending order
tags = client.list_tags(order_by=TagOrderBy.LAST_UPDATE_TIME_DESCENDING)
assert isinstance(tags, ItemPaged)
last_updated_on = None
count = 0
for tag in tags:
print(tag.last_updated_on)
# if last_updated_on:
# assert tag.last_updated_on < last_updated_on
last_updated_on = tag.last_updated_on
count += 1
# print(tag)
assert count > 0
@pytest.mark.skip("List pending")
@acr_preparer()
def test_list_registry_artifacts(self, containerregistry_baseurl):
client = self.create_repository_client(containerregistry_baseurl, self.repository)
repo_attribs = client.list_registry_artifacts()
print(repo_attribs)
@pytest.mark.skip("Don't want to delete right now")
@acr_preparer()
def test_delete_repository(self, containerregistry_baseurl):
client = self.create_repository_client(containerregistry_baseurl, self.repository)
client.delete()
reg_client = self.create_registry_client(containerregistry_baseurl)
repo_count = 0
for repo in reg_client.list_repositories():
repo_count += 1
assert repo_count == 0
| 32.927007
| 92
| 0.711594
|
c7a54192a28f9fcda762cd932ecfd0307755366d
| 389
|
py
|
Python
|
Main.py
|
matthiasamberg/TigerJython---The-fantastic-Elevator-Game
|
9ede94676b5e0fcd034c049eb0f700eff041c09a
|
[
"Apache-2.0"
] | 1
|
2020-12-14T10:26:19.000Z
|
2020-12-14T10:26:19.000Z
|
Main.py
|
matthiasamberg/TigerJython---The-fantastic-Elevator-Game
|
9ede94676b5e0fcd034c049eb0f700eff041c09a
|
[
"Apache-2.0"
] | null | null | null |
Main.py
|
matthiasamberg/TigerJython---The-fantastic-Elevator-Game
|
9ede94676b5e0fcd034c049eb0f700eff041c09a
|
[
"Apache-2.0"
] | null | null | null |
import globalvars
from gamestate import *
import random
# this is the code from Beispiel3.py
# The elevator travels to a randomly chosen floor
def play():
obersterStock = getNumFloors() - 1
zufaelligerStock = random.randint(0, obersterStock)
setElevatorDestination(zufaelligerStock)
#------------ please keep this line (but otherwise ignore it)
execfile ("functions.py")
| 20.473684
| 61
| 0.737789
|
dc6ececbeeb4c08a0114e1d19dba2ae3a875ff64
| 17,474
|
py
|
Python
|
aldryn_search/tests.py
|
nephila/aldryn-search
|
3163f9abefc44882216e4003f19b45d431540bbd
|
[
"BSD-3-Clause"
] | null | null | null |
aldryn_search/tests.py
|
nephila/aldryn-search
|
3163f9abefc44882216e4003f19b45d431540bbd
|
[
"BSD-3-Clause"
] | 1
|
2020-04-09T10:50:31.000Z
|
2020-04-10T08:21:08.000Z
|
aldryn_search/tests.py
|
nephila/aldryn-search
|
3163f9abefc44882216e4003f19b45d431540bbd
|
[
"BSD-3-Clause"
] | null | null | null |
from django.template import Template
from django.test import TestCase
from cms.api import create_page, add_plugin
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from cms.models.placeholdermodel import Placeholder
from cms.models import CMSPlugin, Title
from aldryn_search.search_indexes import TitleIndex
from .helpers import get_plugin_index_data, get_request
from haystack import connections
from haystack.constants import DEFAULT_ALIAS
class FakeTemplateLoader(object):
is_usable = True
def __init__(self, name, dirs):
pass
def __iter__(self):
yield self.__class__
yield "{{baz}}"
class NotIndexedPlugin(CMSPluginBase):
model = CMSPlugin
plugin_content = 'rendered plugin content'
render_template = Template(plugin_content)
def render(self, context, instance, placeholder):
return context
plugin_pool.register_plugin(NotIndexedPlugin)
class HiddenPlugin(CMSPluginBase):
model = CMSPlugin
plugin_content = 'never search for this content'
render_template = Template(plugin_content)
def render(self, context, instance, placeholder):
return context
plugin_pool.register_plugin(HiddenPlugin)
class BaseTestCase(TestCase):
def setUp(self):
pass
def get_title_index(self):
search_conn = connections[DEFAULT_ALIAS]
unified_index = search_conn.get_unified_index()
index = unified_index.get_index(Title)
return index
class PluginIndexingTests(BaseTestCase):
def setUp(self):
self.index = TitleIndex()
self.request = get_request(language='en')
def get_plugin(self):
instance = CMSPlugin(
language='en',
plugin_type="NotIndexedPlugin",
placeholder=Placeholder(id=1235)
)
instance.cmsplugin_ptr = instance
instance.pk = 1234 # otherwise plugin_meta_context_processor() crashes
return instance
def test_plugin_indexing_is_enabled_by_default(self):
cms_plugin = self.get_plugin()
indexed_content = self.index.get_plugin_search_text(cms_plugin, self.request)
self.assertEqual(NotIndexedPlugin.plugin_content, indexed_content)
def test_plugin_indexing_can_be_disabled_on_model(self):
cms_plugin = self.get_plugin()
cms_plugin.search_fulltext = False
indexed_content = self.index.get_plugin_search_text(cms_plugin, self.request)
self.assertEqual('', indexed_content)
def test_plugin_indexing_can_be_disabled_on_plugin(self):
NotIndexedPlugin.search_fulltext = False
try:
self.assertEqual('', self.index.get_plugin_search_text(self.get_plugin(), self.request))
finally:
del NotIndexedPlugin.search_fulltext
def test_page_title_is_indexed_using_prepare(self):
"""This tests the indexing path way used by update_index mgmt command"""
page = create_page(title="home", template="page.html", language="en")
index = self.get_title_index()
title = Title.objects.get(pk=page.title_set.all()[0].pk)
index.index_queryset(DEFAULT_ALIAS) # initialises index._backend_alias
indexed = index.prepare(title)
self.assertEqual('home', indexed['title'])
self.assertEqual('home', indexed['text'])
def test_page_title_is_indexed_using_update_object(self):
"""This tests the indexing path way used by the RealTimeSignalProcessor"""
page = create_page(title="home", template="page.html", language="en")
index = self.get_title_index()
title = Title.objects.get(pk=page.title_set.all()[0].pk)
index.update_object(title, using=DEFAULT_ALIAS)
indexed = index.prepared_data
self.assertEqual('home', indexed['title'])
self.assertEqual('home', indexed['text'])
class PluginFilterIndexingTests(BaseTestCase):
def test_page_title_is_indexed_using_prepare_with_filter_option(self):
"""This tests the indexing path way used by update_index mgmt command"""
page = create_page(title="test_page", reverse_id='testpage', template="test.html", language="en")
plugin = add_plugin(page.placeholders.get(slot='content'), NotIndexedPlugin, 'en')
plugin2 = add_plugin(page.placeholders.get(slot='hidden_content'), HiddenPlugin, 'en')
index = self.get_title_index()
title = Title.objects.get(pk=page.title_set.all()[0].pk)
index.index_queryset(DEFAULT_ALIAS) # initialises index._backend_alias
indexed = index.prepare(title)
self.assertEqual('test_page', indexed['title'])
self.assertEqual('test_page rendered plugin content', indexed['text'])
def test_page_title_is_indexed_using_update_object_with_filter_option(self):
"""This tests the indexing path way used by the RealTimeSignalProcessor"""
page = create_page(title="test_page", reverse_id='testpage', template="test.html", language="en")
plugin = add_plugin(page.placeholders.get(slot='content'), NotIndexedPlugin, 'en')
plugin2 = add_plugin(page.placeholders.get(slot='hidden_content'), HiddenPlugin, 'en')
index = self.get_title_index()
title = Title.objects.get(pk=page.title_set.all()[0].pk)
index.update_object(title, using=DEFAULT_ALIAS)
indexed = index.prepared_data
self.assertEqual('test_page', indexed['title'])
self.assertEqual('test_page rendered plugin content', indexed['text'])
class PluginExcludeAndFilterIndexingTests2(BaseTestCase):
def test_page_title_is_indexed_using_prepare_with_excluding_filter_option2(self):
"""This tests the indexing path way used by update_index mgmt command"""
page = create_page(title="test_page2", reverse_id='testpage2', template="test.html", language="en")
plugin = add_plugin(page.placeholders.get(slot='content'), NotIndexedPlugin, 'en')
plugin2 = add_plugin(page.placeholders.get(slot='hidden_content'), HiddenPlugin, 'en')
index = self.get_title_index()
title = Title.objects.get(pk=page.title_set.all()[0].pk)
index.index_queryset(DEFAULT_ALIAS) # initialises index._backend_alias
indexed = index.prepare(title)
self.assertEqual('test_page2', indexed['title'])
self.assertEqual('test_page2 rendered plugin content never search for this content', indexed['text'])
def test_page_title_is_indexed_using_update_object_with_excluding_filter_option2(self):
"""This tests the indexing path way used by the RealTimeSignalProcessor"""
page = create_page(title="test_page2", reverse_id='testpage2', template="test.html", language="en")
plugin = add_plugin(page.placeholders.get(slot='content'), NotIndexedPlugin, 'en')
plugin2 = add_plugin(page.placeholders.get(slot='hidden_content'), HiddenPlugin, 'en')
index = self.get_title_index()
title = Title.objects.get(pk=page.title_set.all()[0].pk)
index.update_object(title, using=DEFAULT_ALIAS)
indexed = index.prepared_data
self.assertEqual('test_page2', indexed['title'])
self.assertEqual('test_page2 rendered plugin content never search for this content', indexed['text'])
class PluginExcludeAndFilterIndexingTests3(BaseTestCase):
def test_page_title_is_indexed_using_prepare_with_excluding_filter_option3(self):
"""This tests the indexing path way used by update_index mgmt command"""
page = create_page(title="test_page3", reverse_id='testpage3', template="test.html", language="en")
plugin = add_plugin(page.placeholders.get(slot='content'), NotIndexedPlugin, 'en')
plugin2 = add_plugin(page.placeholders.get(slot='hidden_content'), HiddenPlugin, 'en')
index = self.get_title_index()
title = Title.objects.get(pk=page.title_set.all()[0].pk)
index.index_queryset(DEFAULT_ALIAS) # initialises index._backend_alias
indexed = index.prepare(title)
self.assertEqual('test_page3', indexed['title'])
self.assertEqual('test_page3', indexed['text'])
def test_page_title_is_indexed_using_update_object_with_excluding_filter_option3(self):
"""This tests the indexing path way used by the RealTimeSignalProcessor"""
page = create_page(title="test_page3", reverse_id='testpage3', template="test.html", language="en")
plugin = add_plugin(page.placeholders.get(slot='content'), NotIndexedPlugin, 'en')
plugin2 = add_plugin(page.placeholders.get(slot='hidden_content'), HiddenPlugin, 'en')
index = self.get_title_index()
title = Title.objects.get(pk=page.title_set.all()[0].pk)
index.update_object(title, using=DEFAULT_ALIAS)
indexed = index.prepared_data
self.assertEqual('test_page3', indexed['title'])
self.assertEqual('test_page3', indexed['text'])
class PluginExcludeAndFilterIndexingTests4(BaseTestCase):
def test_page_title_is_indexed_using_prepare_with_excluding_filter_option4(self):
"""This tests the indexing path way used by update_index mgmt command"""
page = create_page(title="test_page4", reverse_id='testpage4', template="test.html", language="en")
plugin = add_plugin(page.placeholders.get(slot='content'), NotIndexedPlugin, 'en')
plugin2 = add_plugin(page.placeholders.get(slot='hidden_content'), HiddenPlugin, 'en')
index = self.get_title_index()
title = Title.objects.get(pk=page.title_set.all()[0].pk)
index.index_queryset(DEFAULT_ALIAS) # initialises index._backend_alias
indexed = index.prepare(title)
self.assertEqual('test_page4', indexed['title'])
self.assertEqual('test_page4 rendered plugin content', indexed['text'])
def test_page_title_is_indexed_using_update_object_with_excluding_filter_option4(self):
"""This tests the indexing path way used by the RealTimeSignalProcessor"""
page = create_page(title="test_page4", reverse_id='testpage4', template="test.html", language="en")
plugin = add_plugin(page.placeholders.get(slot='content'), NotIndexedPlugin, 'en')
plugin2 = add_plugin(page.placeholders.get(slot='hidden_content'), HiddenPlugin, 'en')
index = self.get_title_index()
title = Title.objects.get(pk=page.title_set.all()[0].pk)
index.update_object(title, using=DEFAULT_ALIAS)
indexed = index.prepared_data
self.assertEqual('test_page4', indexed['title'])
self.assertEqual('test_page4 rendered plugin content', indexed['text'])
class PluginExcludeAndFilterIndexingTests5(BaseTestCase):
def test_page_title_is_indexed_using_prepare_with_excluding_filter_option5(self):
"""This tests the indexing path way used by update_index mgmt command"""
page = create_page(title="test_page5", reverse_id='testpage5', template="test.html", language="en")
plugin = add_plugin(page.placeholders.get(slot='content'), NotIndexedPlugin, 'en')
plugin2 = add_plugin(page.placeholders.get(slot='hidden_content'), HiddenPlugin, 'en')
index = self.get_title_index()
title = Title.objects.get(pk=page.title_set.all()[0].pk)
index.index_queryset(DEFAULT_ALIAS) # initialises index._backend_alias
indexed = index.prepare(title)
self.assertEqual('test_page5', indexed['title'])
self.assertEqual('test_page5 never search for this content', indexed['text'])
def test_page_title_is_indexed_using_update_object_with_excluding_filter_option5(self):
"""This tests the indexing path way used by the RealTimeSignalProcessor"""
page = create_page(title="test_page5", reverse_id='testpage5', template="test.html", language="en")
plugin = add_plugin(page.placeholders.get(slot='content'), NotIndexedPlugin, 'en')
plugin2 = add_plugin(page.placeholders.get(slot='hidden_content'), HiddenPlugin, 'en')
index = self.get_title_index()
title = Title.objects.get(pk=page.title_set.all()[0].pk)
index.update_object(title, using=DEFAULT_ALIAS)
indexed = index.prepared_data
self.assertEqual('test_page5', indexed['title'])
self.assertEqual('test_page5 never search for this content', indexed['text'])
class PluginExcludeAndFilterIndexingTests6(BaseTestCase):
def test_page_title_is_indexed_using_prepare_with_excluding_filter_option6(self):
"""This tests the indexing path way used by update_index mgmt command"""
page = create_page(title="test_page6", reverse_id='testpage6', template="test.html", language="en")
plugin = add_plugin(page.placeholders.get(slot='content'), NotIndexedPlugin, 'en')
plugin2 = add_plugin(page.placeholders.get(slot='hidden_content'), HiddenPlugin, 'en')
index = self.get_title_index()
title = Title.objects.get(pk=page.title_set.all()[0].pk)
index.index_queryset(DEFAULT_ALIAS) # initialises index._backend_alias
indexed = index.prepare(title)
self.assertEqual('test_page6', indexed['title'])
self.assertEqual('test_page6 rendered plugin content never search for this content', indexed['text'])
def test_page_title_is_indexed_using_update_object_with_excluding_filter_option6(self):
"""This tests the indexing path way used by the RealTimeSignalProcessor"""
page = create_page(title="test_page6", reverse_id='testpage6', template="test.html", language="en")
plugin = add_plugin(page.placeholders.get(slot='content'), NotIndexedPlugin, 'en')
plugin2 = add_plugin(page.placeholders.get(slot='hidden_content'), HiddenPlugin, 'en')
index = self.get_title_index()
title = Title.objects.get(pk=page.title_set.all()[0].pk)
index.update_object(title, using=DEFAULT_ALIAS)
indexed = index.prepared_data
self.assertEqual('test_page6', indexed['title'])
self.assertEqual('test_page6 rendered plugin content never search for this content', indexed['text'])
class PluginExcludeAndFilterIndexingTests7(BaseTestCase):
def test_page_title_is_indexed_using_prepare_with_excluding_filter_option7(self):
"""This tests the indexing path way used by update_index mgmt command"""
page = create_page(title="test_page7", reverse_id='testpage7', template="test.html", language="en")
plugin = add_plugin(page.placeholders.get(slot='content'), NotIndexedPlugin, 'en')
plugin2 = add_plugin(page.placeholders.get(slot='hidden_content'), HiddenPlugin, 'en')
index = self.get_title_index()
title = Title.objects.get(pk=page.title_set.all()[0].pk)
index.index_queryset(DEFAULT_ALIAS) # initialises index._backend_alias
indexed = index.prepare(title)
self.assertEqual('test_page7', indexed['title'])
self.assertEqual('test_page7 never search for this content', indexed['text'])
def test_page_title_is_indexed_using_update_object_with_excluding_filter_option7(self):
"""This tests the indexing path way used by the RealTimeSignalProcessor"""
page = create_page(title="test_page7", reverse_id='testpage7', template="test.html", language="en")
plugin = add_plugin(page.placeholders.get(slot='content'), NotIndexedPlugin, 'en')
plugin2 = add_plugin(page.placeholders.get(slot='hidden_content'), HiddenPlugin, 'en')
index = self.get_title_index()
title = Title.objects.get(pk=page.title_set.all()[0].pk)
index.update_object(title, using=DEFAULT_ALIAS)
indexed = index.prepared_data
self.assertEqual('test_page7', indexed['title'])
self.assertEqual('test_page7 never search for this content', indexed['text'])
class PluginExcludeAndFilterIndexingTests8(BaseTestCase):
def test_page_title_is_indexed_using_prepare_with_excluding_filter_option8(self):
"""This tests the indexing path way used by update_index mgmt command"""
page = create_page(title="test_page8", reverse_id='testpage8', template="test.html", language="en")
plugin = add_plugin(page.placeholders.get(slot='content'), NotIndexedPlugin, 'en')
plugin2 = add_plugin(page.placeholders.get(slot='hidden_content'), HiddenPlugin, 'en')
index = self.get_title_index()
title = Title.objects.get(pk=page.title_set.all()[0].pk)
index.index_queryset(DEFAULT_ALIAS) # initialises index._backend_alias
indexed = index.prepare(title)
self.assertEqual('test_page8', indexed['title'])
self.assertEqual('test_page8 rendered plugin content never search for this content', indexed['text'])
def test_page_title_is_indexed_using_update_object_with_excluding_filter_option8(self):
"""This tests the indexing path way used by the RealTimeSignalProcessor"""
page = create_page(title="test_page8", reverse_id='testpage8', template="test.html", language="en")
plugin = add_plugin(page.placeholders.get(slot='content'), NotIndexedPlugin, 'en')
plugin2 = add_plugin(page.placeholders.get(slot='hidden_content'), HiddenPlugin, 'en')
index = self.get_title_index()
title = Title.objects.get(pk=page.title_set.all()[0].pk)
index.update_object(title, using=DEFAULT_ALIAS)
indexed = index.prepared_data
self.assertEqual('test_page8', indexed['title'])
self.assertEqual('test_page8 rendered plugin content never search for this content', indexed['text'])
| 52.951515
| 109
| 0.717294
|
f932544f1d30db2e9d7f616e24bdda7c2d619f49
| 3,263
|
py
|
Python
|
src/saxpy.py
|
Idein/qmkl6
|
ac3e631ca89293c562db24b78576eef66426ab65
|
[
"BSD-3-Clause"
] | 38
|
2020-09-24T08:46:33.000Z
|
2022-03-23T01:40:03.000Z
|
src/saxpy.py
|
Idein/qmkl6
|
ac3e631ca89293c562db24b78576eef66426ab65
|
[
"BSD-3-Clause"
] | 3
|
2021-02-03T08:33:55.000Z
|
2021-08-24T00:04:05.000Z
|
src/saxpy.py
|
Idein/qmkl6
|
ac3e631ca89293c562db24b78576eef66426ab65
|
[
"BSD-3-Clause"
] | 6
|
2020-09-24T13:40:19.000Z
|
2021-06-08T19:35:43.000Z
|
import sys
from videocore6.assembler import assemble, qpu
@qpu
def qpu_saxpy(asm, *, num_qpus, unroll_shift, code_offset,
align_cond=lambda pos: True):
g = globals()
for i, v in enumerate(['length', 'coef', 'x', 'x_inc', 'y_src', 'y_dst',
'y_inc', 'qpu_num']):
g[f'reg_{v}'] = rf[i]
nop(sig=ldunifrf(reg_length))
nop(sig=ldunifrf(reg_coef))
nop(sig=ldunifrf(reg_x))
nop(sig=ldunifrf(reg_x_inc))
nop(sig=ldunifrf(reg_y_src))
nop(sig=ldunifrf(reg_y_inc))
if num_qpus == 1:
num_qpus_shift = 0
mov(reg_qpu_num, 0)
elif num_qpus == 8:
num_qpus_shift = 3
tidx(r0)
shr(r0, r0, 2)
band(reg_qpu_num, r0, 0b1111)
else:
raise Exception('num_qpus must be 1 or 8')
# addr += 4 * (thread_num + 16 * qpu_num) * inc
shl(r0, reg_qpu_num, 4)
eidx(r1)
add(r0, r0, r1)
shl(r0, r0, 2)
umul24(r1, r0, reg_x_inc)
add(reg_x, reg_x, r1).umul24(r1, r0, reg_y_inc)
add(reg_y_src, reg_y_src, r1).add(reg_y_dst, reg_y_src, r1)
# inc *= 4 * 16 * num_qpus
mov(r0, 1)
shl(r0, r0, 6 + num_qpus_shift)
umul24(reg_x_inc, reg_x_inc, r0)
umul24(reg_y_inc, reg_y_inc, r0)
# length /= 16 * 4 * num_qpus * unroll
shr(reg_length, reg_length, 4 + 2 + num_qpus_shift + unroll_shift)
nop(sig=thrsw)
nop()
nop()
while not align_cond(code_offset + len(asm)):
nop()
with loop as l:
unroll = 1 << unroll_shift
for i in range(4):
mov(tmua, reg_x).add(reg_x, reg_x, reg_x_inc)
mov(tmua, reg_y_src).add(reg_y_src, reg_y_src, reg_y_inc)
for j in range(unroll - 1):
for i in range(4):
nop(sig=ldtmu(r0))
fmul(r0, r0, reg_coef, sig=ldtmu(r1))
fadd(tmud, r0, r1)
mov(tmua, reg_y_dst).add(reg_y_dst, reg_y_dst, reg_y_inc)
mov(tmua, reg_x).add(reg_x, reg_x, reg_x_inc)
mov(tmua, reg_y_src).add(reg_y_src, reg_y_src, reg_y_inc)
for i in range(2):
nop(sig=ldtmu(r0))
fmul(r0, r0, reg_coef, sig=ldtmu(r1))
fadd(tmud, r0, r1).mov(r2, 1)
mov(tmua, reg_y_dst).add(reg_y_dst, reg_y_dst, reg_y_inc)
nop(sig=ldtmu(r0))
fmul(r0, r0, reg_coef, sig=ldtmu(r1))
fadd(tmud, r0, r1).sub(reg_length, reg_length, 1, cond='pushz')
mov(tmua, reg_y_dst).add(reg_y_dst, reg_y_dst, reg_y_inc)
nop(sig=ldtmu(r0))
l.b(cond='na0')
fmul(r0, r0, reg_coef, sig=ldtmu(r1)) # delay slot
fadd(tmud, r0, r1) # delay slot
mov(tmua, reg_y_dst).add(reg_y_dst, reg_y_dst, reg_y_inc) # delay slot
barrierid(syncb, sig=thrsw)
nop()
nop()
nop(sig=thrsw)
nop(sig=thrsw)
nop()
nop()
nop(sig=thrsw)
nop()
nop()
nop()
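# The kernel above streams x and y through the TMU and computes y = coef * x + y on vectors
# of 16 floats at a time, strided across QPUs. A hypothetical invocation of this generator
# (arguments: num_qpus unroll_shift code_offset) prints the assembled instruction words as
# C array initializers:
#     python saxpy.py 8 1 0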
def main():
num_qpus, unroll_shift, code_offset = map(int, sys.argv[1:])
for insn in assemble(qpu_saxpy, num_qpus=num_qpus,
unroll_shift=unroll_shift, code_offset=code_offset):
print(f'UINT64_C({insn:#018x}),')
if __name__ == '__main__':
main()
| 27.191667
| 79
| 0.559301
|
79a8ed34fc24ba926a8d1ec8476d08f5c88b06dd
| 862
|
py
|
Python
|
django_rt/event.py
|
jhannington/django-rt
|
55abd4dbe7ab015039043a724f99aeaced557f37
|
[
"BSD-3-Clause"
] | 1
|
2016-01-04T22:11:08.000Z
|
2016-01-04T22:11:08.000Z
|
django_rt/event.py
|
jhannington/django-rt
|
55abd4dbe7ab015039043a724f99aeaced557f37
|
[
"BSD-3-Clause"
] | null | null | null |
django_rt/event.py
|
jhannington/django-rt
|
55abd4dbe7ab015039043a724f99aeaced557f37
|
[
"BSD-3-Clause"
] | null | null | null |
from django.utils import timezone
from django_rt.utils import SerializableObject
class ResourceEvent(SerializableObject):
def __init__(self, data=None, time=None, event_type=None):
assert data or event_type
self.data = data
self.event_type = event_type
if time:
self.time = time
else:
# Default to current time
self.time = timezone.now()
def serialize(self):
obj = {
'time': self.time
}
if self.data:
obj['data'] = self.data
if self.event_type:
obj['type'] = self.event_type
return obj
@classmethod
def deserialize(cls, data):
return cls(
data=data.get('data', None),
time=data.get('time', None),
event_type=data.get('type', None),
)
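# Example round trip (illustrative values only):
#   event = ResourceEvent(data={'count': 1}, event_type='update')
#   payload = event.serialize()
#   # payload == {'time': <aware datetime>, 'data': {'count': 1}, 'type': 'update'}
#   restored = ResourceEvent.deserialize(payload)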
| 23.297297
| 62
| 0.553364
|
58807db5d7c0f83fd643d86f7ae2eb39de232484
| 2,338
|
py
|
Python
|
examples/acquiring_data/downloading_hmi.py
|
johan12345/sunpy
|
56e1ab0c2c992f99e0fe3e6bff468b731a51228c
|
[
"BSD-2-Clause"
] | 2
|
2020-01-03T16:39:28.000Z
|
2020-04-24T15:12:08.000Z
|
examples/acquiring_data/downloading_hmi.py
|
johan12345/sunpy
|
56e1ab0c2c992f99e0fe3e6bff468b731a51228c
|
[
"BSD-2-Clause"
] | 1
|
2020-05-11T13:38:56.000Z
|
2020-05-11T13:38:56.000Z
|
examples/acquiring_data/downloading_hmi.py
|
johan12345/sunpy
|
56e1ab0c2c992f99e0fe3e6bff468b731a51228c
|
[
"BSD-2-Clause"
] | null | null | null |
"""
===========================================
Downloading and plotting an HMI magnetogram
===========================================
This example shows how to download HMI magnetogram data with Fido and make a plot.
"""
import matplotlib.pyplot as plt
import sunpy.map
from sunpy.net import Fido
from sunpy.net import attrs as a
###############################################################################
# To download the required data, we use `sunpy.net.Fido`, a downloader client,
# to query the Virtual Solar Observatory to acquire HMI data.
result = Fido.search(a.Time('2020/01/20 00:00:00', '2020/01/20 00:01:00'),
a.Instrument.hmi, a.Physobs.los_magnetic_field)
###############################################################################
# Now we can see what results we obtained from our search.
print(result)
###############################################################################
# The following shows how to download the results. If we
# don't provide a path it will download the file into the sunpy data directory.
# The output provides the path of the downloaded files.
downloaded_file = Fido.fetch(result)
print(downloaded_file)
###############################################################################
# Now load it into a map and plot it.
# We see that solar North is pointed down instead of up in this image, which is
# indicated by the coordinates (that range from positive to negative, rather
# than negative to positive).
hmi_map = sunpy.map.Map(downloaded_file[0])
fig = plt.figure()
hmi_map.plot()
plt.show()
###############################################################################
# Now rotate the image such that solar North is pointed up.
# We have to do this because the HMI instrument is mounted upside-down
# relative to the AIA instrument on the SDO satellite, which means most
# of the images are taken with solar North pointed up.
# The roll angle of the instrument is reported in the FITS header
# keyword `CROTA2` (see Figure 17 of
# `Couvidat et al. (2016) <https://dx.doi.org/10.1007/s11207-016-0957-3>`_,
# which states that "the nominal CROTA2 for HMI is ≈179.93").
#
# The order keyword, below, specifies the type of interpolation;
# in this case, 3 refers to bi-cubic.
hmi_rotated = hmi_map.rotate(order=3)
hmi_rotated.plot()
plt.show()
| 41.75
| 84
| 0.592387
|
dd1be5d260dafbc7d260929e8328bc4b26e2b577
| 108
|
py
|
Python
|
Python3/POO/exemplo_superclasse.py
|
arthursiq5/programacao-progressiva
|
2ec91602a6f37c93b99e6a92239045cd1c4cef6b
|
[
"MIT"
] | null | null | null |
Python3/POO/exemplo_superclasse.py
|
arthursiq5/programacao-progressiva
|
2ec91602a6f37c93b99e6a92239045cd1c4cef6b
|
[
"MIT"
] | null | null | null |
Python3/POO/exemplo_superclasse.py
|
arthursiq5/programacao-progressiva
|
2ec91602a6f37c93b99e6a92239045cd1c4cef6b
|
[
"MIT"
] | null | null | null |
class A:
def spam(self):
print(1)
class B(A):
def spam(self):
print(2)
super().spam()
B().spam()
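# For reference, B().spam() prints 2 and then 1: B.spam runs first and
# super().spam() dispatches to the inherited A.spam.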
| 9
| 16
| 0.564815
|
e4c2e43cc8f1c69f1152efd275820b16bab2810e
| 2,944
|
py
|
Python
|
experiments/training_script.py
|
ameroueh/oaz
|
7cf192b02adaa373b7b93bedae3ef67886ea53af
|
[
"MIT"
] | 8
|
2021-03-18T16:06:42.000Z
|
2022-03-09T10:42:44.000Z
|
experiments/training_script.py
|
ameroueh/oaz
|
7cf192b02adaa373b7b93bedae3ef67886ea53af
|
[
"MIT"
] | null | null | null |
experiments/training_script.py
|
ameroueh/oaz
|
7cf192b02adaa373b7b93bedae3ef67886ea53af
|
[
"MIT"
] | null | null | null |
import argparse
import logging
import os
import sys
import toml
from logzero import setup_logger
from pathlib import Path
from pyoaz.training import Trainer
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
# Useful for RTX cards
# os.environ["TF_FORCE_GPU_ALLOW_GROWTH"] = "true"
# turn on C++ logging
# os.environ["OAZ_LOGGING"] = "true"
os.environ["OAZ_LOGGING"] = "false"
def overwrite_config(configuration, args_dict):
for key, config_stage in configuration.items():
try:
config_stage.update(
{
k: v
for k, v in args_dict.items()
if k in config_stage and v is not None
}
)
except AttributeError:
pass
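# Example (hypothetical values): with configuration = {"training": {"n_generations": 5}}
# and args_dict = {"n_generations": 10, "unused": None}, overwrite_config updates the
# "training" stage to {"n_generations": 10}; None values and keys not already present
# in a stage are ignored.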
def setup_logging(logfile=None, debug_mode=False):
logger = setup_logger(logfile=logfile)
if debug_mode:
logger.level = logging.DEBUG
else:
logger.level = logging.INFO
return logger
def main(args):
if args.cpu:
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = ""
configuration = toml.load(args.configuration_path)
overwrite_config(configuration, vars(args))
save_path = Path(configuration["save"]["save_path"])
save_path.mkdir(exist_ok=True)
logger = setup_logging(
logfile=save_path / "logs.log", debug_mode=args.debug_mode,
)
trainer = Trainer(configuration, load_path=args.load_path, logger=logger)
try:
trainer.train(debug_mode=args.debug_mode)
trainer.save()
except KeyboardInterrupt:
while True:
print(
"\nKeyboard interrupt detected. Would you like to save "
"the current model? y/n"
)
ans = input()
if ans in ["y", "Y", "yes"]:
trainer.save()
sys.exit()
elif ans in ["n", "N", "no"]:
sys.exit()
else:
print("Invalid input, try again")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--configuration_path",
required=True,
help="path to configuration file.",
)
parser.add_argument(
"--load_path",
required=False,
help="path to from which to load the model. By default, this is None "
"which means the script will create a model from scratch.",
default=None,
)
parser.add_argument(
"--save_path",
required=False,
help="path to which the model will be saved.",
)
parser.add_argument(
"--n_generations",
type=int,
required=False,
help="Number of generations for which to train. Default is 5",
)
parser.add_argument("--debug_mode", action="store_true")
parser.add_argument("--cpu", action="store_true")
args = parser.parse_args()
main(args)
| 25.162393
| 78
| 0.594429
|
f6e9c2d73215fd245c31bd17fb54b2e8af766994
| 33,494
|
py
|
Python
|
parlai/scripts/train_model.py
|
Uzornd/ParlAI
|
5c07877f493db75847029ed9906aabc48c4d9f54
|
[
"MIT"
] | 1
|
2021-07-03T06:21:04.000Z
|
2021-07-03T06:21:04.000Z
|
parlai/scripts/train_model.py
|
Uzornd/ParlAI
|
5c07877f493db75847029ed9906aabc48c4d9f54
|
[
"MIT"
] | null | null | null |
parlai/scripts/train_model.py
|
Uzornd/ParlAI
|
5c07877f493db75847029ed9906aabc48c4d9f54
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Training script for ParlAI.
The standard way to train a model. After training, also computes
validation and test error.
The user must provide a model (with `--model`) and a task (with
`--task`).
## Examples
```shell
parlai train_model --model ir_baseline --task dialog_babi:Task:1 --model-file /tmp/model
parlai train_model --model seq2seq --task babi:Task10k:1 --model-file '/tmp/model' --batchsize 32 --learningrate 0.5
```
""" # noqa: E501
# TODO List:
# * More logging (e.g. to files), make things prettier.
import json
import numpy as np
import signal
from typing import Tuple
from parlai.core.metrics import Metric
from parlai.core.agents import create_agent, create_agent_from_shared
from parlai.core.exceptions import StopTrainException
from parlai.core.logs import TensorboardLogger, WandbLogger
from parlai.core.metrics import (
aggregate_named_reports,
aggregate_unnamed_reports,
dict_report,
)
from parlai.core.opt import Opt
from parlai.core.params import ParlaiParser, print_announcements
from parlai.core.worlds import create_task, World
from parlai.scripts.build_dict import build_dict, setup_args as setup_dict_args
from parlai.utils.distributed import (
sync_object,
is_primary_worker,
all_gather_list,
is_distributed,
num_workers,
)
from parlai.utils.misc import Timer, nice_report
from parlai.core.script import ParlaiScript, register_script
import parlai.utils.logging as logging
from parlai.utils.io import PathManager
def _num_else_inf(opt: Opt, key: str, distributed_warn=False):
if opt[key] > 0:
if distributed_warn and is_distributed():
nicekey = '--' + key.replace('_', '-')
logging.warning(
f'Using {nicekey} in distributed mode can lead to slowdowns. '
'See https://github.com/facebookresearch/ParlAI/pull/3379 for more info.'
)
value = opt[key]
else:
value = float('inf')
return value
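# Example: opt['num_epochs'] == 7.5 yields 7.5, while opt['num_epochs'] == -1 yields
# float('inf'); a non-positive setting means "no limit" for that counter.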
def setup_args(parser=None) -> ParlaiParser:
"""
Build the ParlAI parser, adding command line args if necessary.
:param ParlaiParser parser:
Preexisting parser to append options to. Will be created if needed.
:returns:
the ParlaiParser with CLI options added.
"""
if parser is None:
parser = ParlaiParser(True, True, 'Train a model')
train = parser.add_argument_group('Training Loop Arguments')
train.add_argument(
'-et',
'--evaltask',
help='task to use for valid/test (defaults to the one used for training)',
)
train.add_argument(
'--eval-batchsize',
type=int,
hidden=True,
help='Eval time batch size (defaults to same as -bs)',
)
train.add_argument(
'--eval-dynamic-batching', # FIXME: see https://github.com/facebookresearch/ParlAI/issues/3367
default=None,
type='nonestr',
choices={None, 'off', 'full', 'batchsort'},
help=(
'Set dynamic batching at evaluation time. Set to off for '
'train-only dynamic batching. Set to none (default) to use same '
'setting as --dynamic-batching.'
),
)
train.add_argument(
'--num-workers',
default=0,
type=int,
help='Number of background workers (training only)',
)
train.add_argument('--display-examples', type='bool', default=False, hidden=True)
train.add_argument('-eps', '--num-epochs', type=float, default=-1)
train.add_argument('-ttim', '--max-train-time', type=float, default=-1)
train.add_argument(
'-tstep',
'--max-train-steps',
'--max-lr-steps',
type=int,
default=-1,
help='End training after n model updates',
)
train.add_argument('-ltim', '--log-every-n-secs', type=float, default=-1)
train.add_argument(
'-lstep',
'--log-every-n-steps',
type=int,
default=50,
help='Log every n training steps',
)
train.add_argument(
'-vtim',
'--validation-every-n-secs',
type=float,
default=-1,
help='Validate every n seconds. Saves model to model_file '
'(if set) whenever best val metric is found',
)
train.add_argument(
'-vstep',
'--validation-every-n-steps',
type=int,
default=-1,
help='Validate every n training steps. Saves model to model_file '
'(if set) whenever best val metric is found',
)
train.add_argument(
'-stim',
'--save-every-n-secs',
type=float,
default=-1,
help='Saves the model to model_file.checkpoint after '
'every n seconds (default -1, never).',
)
train.add_argument(
'-sval',
'--save-after-valid',
type='bool',
default=False,
help='Saves the model to model_file.checkpoint after '
'every validation (default %(default)s).',
)
train.add_argument(
'-veps',
'--validation-every-n-epochs',
type=float,
default=-1,
help='Validate every n epochs. Saves model to model_file '
'(if set) whenever best val metric is found',
)
train.add_argument(
'-vme',
'--validation-max-exs',
type=int,
default=-1,
hidden=True,
help='max examples to use during validation (default -1 uses all)',
)
train.add_argument(
'--short-final-eval',
default=False,
hidden=True,
type='bool',
help='If true, obeys --validation-max-exs in the final '
'validation and test evaluations.',
)
train.add_argument(
'-vp',
'--validation-patience',
type=int,
default=10,
help=(
'number of iterations of validation where result'
' does not improve before we stop training'
),
)
train.add_argument(
'-vmt',
'--validation-metric',
default='accuracy',
help='key into report table for selecting best validation',
)
train.add_argument(
'-vmm',
'--validation-metric-mode',
type=str,
choices=['max', 'min'],
help='how to optimize validation metric (max or min)',
)
train.add_argument(
'-vcut',
'--validation-cutoff',
type=float,
default=1.0,
hidden=True,
help='value at which training will stop if exceeded by metric',
)
train.add_argument(
'-lfc',
'--load-from-checkpoint',
type='bool',
default=True,
hidden=True,
help='load model from checkpoint if available',
)
train.add_argument(
'-vshare',
'--validation-share-agent',
default=False,
hidden=True,
help='use a shared copy of the agent for validation. '
'this will eventually default to True, but '
'currently defaults to False.',
)
train.add_argument(
'-mcs',
'--metrics',
type=str,
default='default',
        help='list of metrics to show/compute, e.g. all, default, '
        'or give a list split by , like '
        'ppl,f1,accuracy,hits@1,rouge,bleu. '
        'The rouge metrics will be computed as rouge-1, rouge-2 and rouge-l',
)
train.add_argument(
'-micro',
'--aggregate-micro',
type='bool',
default=False,
help='Report micro-averaged metrics instead of macro averaged metrics.',
recommended=False,
)
TensorboardLogger.add_cmdline_args(parser, partial_opt=None)
WandbLogger.add_cmdline_args(parser, partial_opt=None)
parser = setup_dict_args(parser)
return parser
def load_eval_worlds(agent, opt, datatype):
"""
Create a new eval world for the agent and the given opt.
Overrides the datatype options for doing this. Handles some magic
overrides of other special options for the training script.
:param Agent agent:
The model being trained.
:param Opt opt:
The global CLI opts.
:param string datatype:
The new datatype.
"""
if 'stream' in opt['datatype']:
datatype += ':stream'
opt = opt.copy()
opt['datatype'] = datatype
if opt.get('evaltask'):
# if a different eval task is specified, use it.
opt['task'] = opt['evaltask']
if opt.get('eval_batchsize'):
# override eval time batchsize
opt['batchsize'] = opt['eval_batchsize']
if opt.get('eval_dynamic_batching'):
# FIXME: see issue tracked in https://github.com/facebookresearch/ParlAI/issues/3367
# override eval time dynamic batching settings
eval_dyn_batch = (
None
if opt['eval_dynamic_batching'] == 'off'
else opt['eval_dynamic_batching']
)
opt['dynamic_batching'] = eval_dyn_batch
tasks = opt['task'].split(',')
worlds = []
# possibly load agent
if opt.get('validation_share_agent', False):
valid_agent = create_agent_from_shared(agent.share())
else:
valid_agent = agent
# create worlds
for task in tasks:
task_opt = opt.copy() # copy opt since we edit the task
task_opt['task'] = task
valid_world = create_task(task_opt, valid_agent)
worlds.append(valid_world)
return worlds
class TrainLoop:
"""
TrainLoop contains the core training loop logic.
"""
def __init__(self, opt):
# if python is called from a non-interactive shell, like a bash script,
        # it will by default ignore SIGINTs, and KeyboardInterrupt exceptions are
# not produced. This line brings them back
signal.signal(signal.SIGINT, signal.default_int_handler)
# Possibly load from checkpoint
trainstats_suffix = '.trainstats' # we might load training statistics from here
if (
opt['load_from_checkpoint']
and opt.get('model_file')
and PathManager.exists(opt['model_file'] + '.checkpoint')
):
opt['init_model'] = opt['model_file'] + '.checkpoint'
trainstats_suffix = '.checkpoint.trainstats'
# Possibly build a dictionary (not all models do this).
if not (opt.get('dict_file') or opt.get('model_file')):
raise RuntimeError(
'WARNING: For train_model, please specify either a '
'model_file or dict_file.'
)
if 'dict_file' in opt:
if opt['dict_file'] is None and opt.get('model_file'):
opt['dict_file'] = opt['model_file'] + '.dict'
logging.info("building dictionary first...")
build_dict(opt, skip_if_built=True)
# Create model and assign it to the specified task
self.agent = create_agent(opt)
self.agent.opt.log()
self.world = create_task(opt, self.agent)
# set up timers
self.train_time = Timer()
self.validate_time = Timer()
self.log_time = Timer()
self.save_time = Timer()
self.parleys = 0
self._train_steps = 0
self._last_log_steps = 0
self.update_freq = opt.get('update_freq', 1)
self.max_num_epochs = _num_else_inf(opt, 'num_epochs', distributed_warn=True)
self.max_train_time = _num_else_inf(
opt, 'max_train_time', distributed_warn=True
)
self.max_train_steps = _num_else_inf(opt, 'max_train_steps')
self.log_every_n_secs = _num_else_inf(
opt, 'log_every_n_secs', distributed_warn=True
)
self.log_every_n_steps = _num_else_inf(opt, 'log_every_n_steps')
self.val_every_n_secs = _num_else_inf(
opt, 'validation_every_n_secs', distributed_warn=True
)
self.val_every_n_epochs = _num_else_inf(
opt, 'validation_every_n_epochs', distributed_warn=True
)
self.val_every_n_steps = _num_else_inf(opt, 'validation_every_n_steps')
self.save_every_n_secs = _num_else_inf(
opt, 'save_every_n_secs', distributed_warn=True
)
# smart defaults for --validation-metric-mode
if opt['validation_metric'] in {'loss', 'ppl', 'mean_rank'}:
opt['validation_metric_mode'] = 'min'
elif opt['validation_metric'] in {'accuracy', 'hits@1', 'hits@5', 'f1', 'bleu'}:
opt['validation_metric_mode'] = 'max'
if opt.get('validation_metric_mode') is None:
opt['validation_metric_mode'] = 'max'
self.last_valid_epoch = 0
self._last_valid_steps = 0
self.valid_optim = 1 if opt['validation_metric_mode'] == 'max' else -1
self.train_reports = []
self.valid_reports = []
self.best_valid = None
self.impatience = 0
self.saved = False
self.valid_worlds = None
self.opt = opt
# we may have been preempted, make sure we note that amount
self._preempted_epochs = 0.0
if opt.get('model_file') and PathManager.exists(
opt['model_file'] + trainstats_suffix
):
# looks like we were preempted. make sure we load up our total
# training stats, etc
with PathManager.open(opt['model_file'] + trainstats_suffix) as ts:
obj = json.load(ts)
self.parleys = obj.get('parleys', 0)
self._preempted_epochs = obj.get('total_epochs', 0)
self.train_time.total = obj.get('train_time', 0)
self._train_steps = obj.get('train_steps', 0)
self.impatience = obj.get('impatience', 0)
self.valid_reports = obj.get('valid_reports', [])
if self.valid_reports:
self.last_valid_epoch = self.valid_reports[-1].get(
'total_epochs', 0.0
)
self.train_reports = obj.get('train_reports', [])
if 'best_valid' in obj:
self.best_valid = obj['best_valid']
else:
# old method
if opt.get('model_file') and PathManager.exists(
opt['model_file'] + '.best_valid'
):
with PathManager.open(
opt['model_file'] + ".best_valid", 'r'
) as f:
x = f.readline()
self.best_valid = float(x)
f.close()
if opt['tensorboard_log'] and is_primary_worker():
self.tb_logger = TensorboardLogger(opt)
if opt['wandb_log'] and is_primary_worker():
model = self.agent.model if hasattr(self.agent, 'model') else None
self.wb_logger = WandbLogger(opt, model)
def save_model(self, suffix=None):
"""
Save the model to disk, possibly with a suffix.
"""
if not is_primary_worker():
# never do IO as a non-primary worker
return
if not self.opt.get('model_file'):
# nothing to save to, just exit
return
fn = self.opt['model_file']
if suffix:
fn += suffix
while True:
# don't ever let a ctrl-c interrupt saving
try:
self.agent.save(fn)
self._save_train_stats(suffix)
break
except KeyboardInterrupt:
pass
def _save_train_stats(self, suffix=None):
fn = self.opt['model_file']
if suffix:
fn += suffix
fn += '.trainstats'
with PathManager.open(fn, 'w') as f:
json.dump(
{
'parleys': self.parleys,
'train_time': self.train_time.time(),
'train_steps': self._train_steps,
'total_epochs': self._total_epochs,
'train_reports': self.train_reports,
'valid_reports': self.valid_reports,
'best_valid': self.best_valid,
'impatience': self.impatience,
},
f,
indent=4,
)
def validate(self):
"""
Perform a validation run, checking whether we should stop training.
:return: boolean indicating whether training should stop
:rtype: bool
"""
opt = self.opt
if self.valid_worlds is None:
# we need to load the world now
self.valid_worlds = load_eval_worlds(self.agent, opt, 'valid')
# run evaluation on valid set
valid_report = self._run_eval(
self.valid_worlds, opt, 'valid', opt['validation_max_exs']
)
v = dict_report(valid_report)
v['train_time'] = self.train_time.time()
v['parleys'] = self.parleys
v['train_steps'] = self._train_steps
v['total_exs'] = self._total_exs
v['total_epochs'] = self._total_epochs
self.valid_reports.append(v)
# logging
if opt['tensorboard_log'] and is_primary_worker():
valid_report['total_exs'] = self._total_exs
self.tb_logger.log_metrics('valid', self.parleys, valid_report)
# flush on a validation
self.tb_logger.flush()
if opt['wandb_log'] and is_primary_worker():
valid_report['total_exs'] = self._total_exs
self.wb_logger.log_metrics('valid', self.parleys, valid_report)
# send valid metrics to agent if the agent wants them
if hasattr(self.agent, 'receive_metrics'):
self.agent.receive_metrics(valid_report)
# check which metric to look at
new_valid = valid_report[opt['validation_metric']]
if isinstance(new_valid, Metric):
new_valid = new_valid.value()
# check if this is the best validation so far
if (
self.best_valid is None
or self.valid_optim * new_valid > self.valid_optim * self.best_valid
):
logging.success(
'new best {}: {:.4g}{}'.format(
opt['validation_metric'],
new_valid,
' (previous best was {:.4g})'.format(self.best_valid)
if self.best_valid is not None
else '',
)
)
self.best_valid = new_valid
self.impatience = 0
if opt.get('model_file') and is_primary_worker():
logging.info(f"saving best valid model: {opt['model_file']}")
self.save_model()
self.saved = True
if (
opt['validation_metric_mode'] == 'max'
and self.best_valid >= opt['validation_cutoff']
) or (
opt['validation_metric_mode'] == 'min'
and self.best_valid <= opt['validation_cutoff']
):
logging.info('task solved! stopping.')
return True
else:
self.impatience += 1
logging.report(
'did not beat best {}: {} impatience: {}'.format(
opt['validation_metric'], round(self.best_valid, 4), self.impatience
)
)
self.validate_time.reset()
# saving
if (
opt.get('model_file')
and opt.get('save_after_valid')
and is_primary_worker()
):
logging.info(f"saving model checkpoint: {opt['model_file']}.checkpoint")
self.save_model('.checkpoint')
# check if we are out of patience
if (
opt['validation_patience'] > 0
and self.impatience >= opt['validation_patience']
):
logging.info('ran out of patience! stopping training.')
return True
return False
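    # The impatience bookkeeping above means that, for example, with --validation-patience 10
    # training stops after 10 consecutive validations without a new best metric.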
def _run_single_eval(self, opt, valid_world, max_exs):
# run evaluation on a single world
valid_world.reset()
cnt = 0
max_cnt = max_exs if max_exs > 0 else float('inf')
while not valid_world.epoch_done() and cnt < max_cnt:
valid_world.parley()
if cnt == 0 and opt['display_examples']:
print(valid_world.display() + '\n~~')
print(valid_world.report())
cnt = valid_world.report().get('exs') or 0
valid_report = valid_world.report()
if opt.get('validation_share_agent', False):
valid_world.reset() # make sure world doesn't remember valid data
return valid_report
def _run_eval(self, valid_worlds, opt, datatype, max_exs=-1, write_log=False):
"""
Eval on validation/test data.
:param valid_world:
list of the pre-created validation worlds.
:param opt:
the options that specific the task, eval_task, etc
:param datatype:
the datatype to use, such as "valid" or "test"
:param bool write_log:
specifies to write metrics to file if the model_file is set
:param int max_exs:
limits the number of examples if max_exs > 0
"""
logging.info(f'running eval: {datatype}')
timer = Timer()
reports = []
max_exs_per_worker = max_exs / (len(valid_worlds) * num_workers())
for v_world in valid_worlds:
task_report = self._run_single_eval(opt, v_world, max_exs_per_worker)
reports.append(task_report)
tasks = [world.getID() for world in valid_worlds]
named_reports = dict(zip(tasks, reports))
report = aggregate_named_reports(
named_reports, micro_average=self.opt.get('aggregate_micro', False)
)
# get the results from all workers
report = self._sync_metrics(report)
metrics = f'{datatype}:\n{nice_report(report)}\n'
logging.info(f'eval completed in {timer.time():.2f}s')
logging.report(metrics)
# write to file
if write_log and opt.get('model_file') and is_primary_worker():
# Write out metrics
with PathManager.open(opt['model_file'] + '.' + datatype, 'a') as f:
f.write(f'{metrics}\n')
return report
def _sync_metrics(self, metrics):
"""
Sync training metrics across workers.
A handful of special cases are handled as exceptions, and the remaining metrics
are simply averaged across workers.
"""
if not is_distributed():
# nothing special needed
return metrics
all_versions = all_gather_list(metrics)
return aggregate_unnamed_reports(all_versions)
def _compute_eta(
self, epochs_completed: float, time_elapsed: float, steps_taken: int
):
"""
Compute the estimated seconds remaining in training.
:param float epochs_completed: number of epochs already completed.
        :param float time_elapsed: total time spent already, in seconds.
        :param int steps_taken: number of training steps already taken.
        :return: ETA in seconds, or None if not computable
"""
# start off with no estimate
eta = None
# Determine time_left and num_epochs
max_epochs = self.opt.get('num_epochs', 0)
if max_epochs > 0 and epochs_completed > 0:
epoch_progress = epochs_completed / max_epochs
eta = (1 - epoch_progress) * time_elapsed / epoch_progress
max_training_time = self.opt.get('max_training_time', -1)
if max_training_time > 0:
time_left = max_training_time - time_elapsed
if eta is None or time_left < eta:
eta = time_left
max_train_steps = self.opt.get('max_train_steps', -1)
if max_train_steps > 0 and steps_taken > 0:
steps_progress = steps_taken / max_train_steps
eta = (1 - steps_progress) * time_elapsed / steps_progress
return eta
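    # Worked example (illustrative numbers): with num_epochs=4, epochs_completed=1 and
    # time_elapsed=600s, epoch_progress is 0.25, so eta = (1 - 0.25) * 600 / 0.25 = 1800s.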
def _get_time(self, world: World) -> Tuple[float, float, float]:
"""
Return train, log, and validate timing.
If relying on the time for validation/logging/max train time purposes,
we sync and return primary worker's time.
Otherwise, it's not super relevant what we do here.
**SIDE EFFECT**: Update _total_epochs trained.
:param world:
current running world
:return (train, log, valid):
return time for each of train, log, and validation
"""
if (
self.max_train_time < float('inf')
or self.log_every_n_secs < float('inf')
or self.val_every_n_secs < float('inf')
or self.val_every_n_epochs < float('inf')
or self.max_num_epochs < float('inf')
):
self._total_epochs = self._preempted_epochs + sum(
all_gather_list(world.get_total_epochs())
)
train_time, log_time, validate_time = sync_object(
(
self.train_time.time(),
self.log_time.time(),
self.validate_time.time(),
)
)
else:
train_time, log_time, validate_time = (
self.train_time.time(),
self.log_time.time(),
self.validate_time.time(),
)
self._total_epochs = self._preempted_epochs + (
num_workers() * world.get_total_epochs()
)
return train_time, log_time, validate_time
def log(self):
"""
Output a training log entry.
"""
opt = self.opt
if opt['display_examples']:
print(self.world.display() + '\n~~')
logs = []
# get report
train_report = self.world.report()
train_report = self._sync_metrics(train_report)
self.world.reset_metrics()
train_report_trainstats = dict_report(train_report)
train_report_trainstats['total_epochs'] = self._total_epochs
train_report_trainstats['total_exs'] = self._total_exs
train_report_trainstats['parleys'] = self.parleys
train_report_trainstats['train_steps'] = self._train_steps
train_report_trainstats['train_time'] = self.train_time.time()
self.train_reports.append(train_report_trainstats)
# time elapsed
logs.append(f'time:{self.train_time.time():.0f}s')
logs.append(f'total_exs:{self._total_exs}')
logs.append(f'total_steps:{self._train_steps}')
if self._total_epochs >= 0:
# only if it's unbounded
logs.append(f'epochs:{self._total_epochs:.2f}')
time_left = self._compute_eta(
self._total_epochs, self.train_time.time(), self._train_steps
)
if time_left is not None:
logs.append(f'time_left:{max(0,time_left):.0f}s')
log = '{}\n{}\n'.format(' '.join(logs), nice_report(train_report))
logging.info(log)
self.log_time.reset()
self._last_log_steps = 0
if opt['tensorboard_log'] and is_primary_worker():
self.tb_logger.log_metrics('train', self.parleys, train_report)
if opt['wandb_log'] and is_primary_worker():
self.wb_logger.log_metrics('train', self.parleys, train_report)
return train_report
def train_steps(self):
"""
Core training loop.
Yields a metrics dict with each log.
"""
logging.info('training...')
opt = self.opt
world = self.world
with world:
while True:
# do one example / batch of examples
try:
world.parley()
except StopTrainException as e:
logging.info(f"Stopping from {e}")
break
self.parleys += 1
self._train_steps = self.parleys // self.update_freq
self._last_log_steps += 1 / self.update_freq
# the following additionally updates self._total_epochs
train_time, log_time, validate_time = self._get_time(world)
# get the total training examples done, compute epochs
exs_per_epoch = world.num_examples()
self._total_exs = int(np.round(self._total_epochs * exs_per_epoch))
# check counters and timers
if self._total_epochs >= self.max_num_epochs:
yield self.log()
logging.info(
f'num_epochs completed:{self.max_num_epochs} time elapsed:{train_time}s'
)
break
if train_time > self.max_train_time:
logging.info(f'max_train_time elapsed:{train_time}s')
break
if self._train_steps >= self.max_train_steps:
logging.info(
f'max_train_steps elapsed:{self._train_steps} '
f'time elapsed:{train_time}s'
)
break
if (
log_time > self.log_every_n_secs
or self._last_log_steps >= self.log_every_n_steps
):
yield self.log()
if (
validate_time > self.val_every_n_secs
or self._total_epochs - self.last_valid_epoch
>= self.val_every_n_epochs
or self._train_steps - self._last_valid_steps
>= self.val_every_n_steps
):
try:
# log before we validate
if self._last_log_steps:
yield self.log()
world.reset_metrics()
stop_training = self.validate()
except StopTrainException:
break
# reset the log time because we logged right before validating
self.log_time.reset()
self.last_valid_epoch = self._total_epochs
self._last_valid_steps = self._train_steps
if stop_training:
break
# make sure metrics are clean before we log
world.reset_metrics()
if (
self.save_time.time() > self.save_every_n_secs
and opt.get('model_file')
and is_primary_worker()
):
logging.info(
f"saving model checkpoint: {opt['model_file']}.checkpoint"
)
if opt['tensorboard_log'] and is_primary_worker():
self.tb_logger.flush()
self.save_model('.checkpoint')
self.save_time.reset()
if not self.saved and is_primary_worker():
# save agent
self.save_model()
        # there's a rare edge case where we never saved the model, and we try
        # to reload it. This sync_object ensures all workers wait for the primary
        # worker to finish flushing before loading from disk.
sync_object(None)
if opt.get('model_file'):
# clean up all our memory, just to make sure we don't OOM on GPU when
# reloading the world
del world
del self.world
del self.agent
del self.valid_worlds
# reload best validation model
self.agent = create_agent(opt)
def train(self):
"""
Perform a training run.
:return: tuple of reports (validation_report, test_report)
"""
opt = self.opt
for _train_log in self.train_steps():
# we've already done what we need in these
pass
# perform final validation/testing
valid_worlds = load_eval_worlds(self.agent, opt, 'valid')
max_exs = opt['validation_max_exs'] if opt.get('short_final_eval') else -1
v_report = self._run_eval(valid_worlds, opt, 'valid', max_exs, write_log=True)
test_worlds = load_eval_worlds(self.agent, opt, 'test')
t_report = self._run_eval(test_worlds, opt, 'test', max_exs, write_log=True)
if opt['wandb_log'] and is_primary_worker():
self.wb_logger.log_final('valid', v_report)
self.wb_logger.log_final('test', t_report)
self.wb_logger.finish()
if valid_worlds:
for valid_world in valid_worlds:
valid_world.shutdown()
if test_worlds:
for test_world in test_worlds:
test_world.shutdown()
print_announcements(opt)
return v_report, t_report
@register_script('train_model', aliases=['tm', 'train'])
class TrainModel(ParlaiScript):
@classmethod
def setup_args(cls):
return setup_args()
def run(self):
self.train_loop = TrainLoop(self.opt)
return self.train_loop.train()
if __name__ == '__main__':
TrainModel.main()
| 35.594049
| 116
| 0.574521
|
8159414b0b5798cb7af900e1ffe7ad8a0d95133f
| 5,925
|
py
|
Python
|
threedi_api_client/openapi/models/organisation.py
|
nens/threedi-api-client
|
43b0eb1bd47310b1783f87f6ad8bfbfe0fb4d90a
|
[
"BSD-3-Clause"
] | null | null | null |
threedi_api_client/openapi/models/organisation.py
|
nens/threedi-api-client
|
43b0eb1bd47310b1783f87f6ad8bfbfe0fb4d90a
|
[
"BSD-3-Clause"
] | 16
|
2021-05-31T09:52:04.000Z
|
2022-03-14T16:07:19.000Z
|
threedi_api_client/openapi/models/organisation.py
|
nens/threedi-api-client
|
43b0eb1bd47310b1783f87f6ad8bfbfe0fb4d90a
|
[
"BSD-3-Clause"
] | null | null | null |
# coding: utf-8
"""
3Di API
3Di simulation API (latest stable version: v3) Framework release: 2.9.0 3Di core release: 2.2.2 deployed on: 11:01AM (UTC) on January 11, 2022 # noqa: E501
The version of the OpenAPI document: v3
Contact: info@nelen-schuurmans.nl
Generated by: https://openapi-generator.tech
"""
import logging
import pprint
import re # noqa: F401
import six
from threedi_api_client.openapi.configuration import Configuration
logger = logging.getLogger(__name__)
class Organisation(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'url': 'str',
'unique_id': 'str',
'name': 'str'
}
attribute_map = {
'url': 'url',
'unique_id': 'unique_id',
'name': 'name'
}
def __init__(self, url=None, unique_id=None, name=None, local_vars_configuration=None): # noqa: E501
"""Organisation - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._url = None
self._unique_id = None
self._name = None
self.discriminator = None
if url is not None:
self.url = url
self.unique_id = unique_id
self.name = name
@property
def url(self):
"""Gets the url of this Organisation. # noqa: E501
:return: The url of this Organisation. # noqa: E501
:rtype: str
"""
return self._url
@url.setter
def url(self, url):
"""Sets the url of this Organisation.
:param url: The url of this Organisation. # noqa: E501
:type: str
"""
self._url = url
@property
def unique_id(self):
"""Gets the unique_id of this Organisation. # noqa: E501
:return: The unique_id of this Organisation. # noqa: E501
:rtype: str
"""
return self._unique_id
@unique_id.setter
def unique_id(self, unique_id):
"""Sets the unique_id of this Organisation.
:param unique_id: The unique_id of this Organisation. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and unique_id is None: # noqa: E501
raise ValueError("Invalid value for `unique_id`, must not be `None`") # noqa: E501
if (self.local_vars_configuration.client_side_validation and
unique_id is not None and len(unique_id) > 32):
raise ValueError("Invalid value for `unique_id`, length must be less than or equal to `32`") # noqa: E501
if (self.local_vars_configuration.client_side_validation and
unique_id is not None and len(unique_id) < 1):
raise ValueError("Invalid value for `unique_id`, length must be greater than or equal to `1`") # noqa: E501
self._unique_id = unique_id
@property
def name(self):
"""Gets the name of this Organisation. # noqa: E501
:return: The name of this Organisation. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this Organisation.
:param name: The name of this Organisation. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
if (self.local_vars_configuration.client_side_validation and
name is not None and len(name) > 255):
raise ValueError("Invalid value for `name`, length must be less than or equal to `255`") # noqa: E501
if (self.local_vars_configuration.client_side_validation and
name is not None and len(name) < 1):
raise ValueError("Invalid value for `name`, length must be greater than or equal to `1`") # noqa: E501
self._name = name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Organisation):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, Organisation):
return True
return self.to_dict() != other.to_dict()
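# Example usage (illustrative values only):
#   org = Organisation(url='https://api.example.test/v3/organisations/1/',
#                      unique_id='61f5fe29c1d942d9b47ff8b6c5b53a42', name='Example')
#   org.to_dict()  # {'url': ..., 'unique_id': ..., 'name': 'Example'}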
| 31.184211
| 166
| 0.589536
|
6256cf05cba907dc073aa2a239b01a7e67139fde
| 493
|
py
|
Python
|
2021/11/day11.py
|
caryhooper/adventofcode
|
70d142780a7d8c742c97448fb2287abe735c2f9c
|
[
"MIT"
] | null | null | null |
2021/11/day11.py
|
caryhooper/adventofcode
|
70d142780a7d8c742c97448fb2287abe735c2f9c
|
[
"MIT"
] | null | null | null |
2021/11/day11.py
|
caryhooper/adventofcode
|
70d142780a7d8c742c97448fb2287abe735c2f9c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
#Advent of Code Day 11 "Dumbo Octopus"
import sys
sys.path.append("..")
import submarine
sub = submarine.Submarine()
#Part1
file = open("input.txt","r")
nav = sub.navigator.octopus_navigator
nav.initialize_octopi(file.readlines())
file.close()
while nav.steps != 100:
nav.move_one_step()
print(nav.grid)
print(f"The number of flashes was {nav.flash_count}")
#Part 2
while nav.first_simultaenous_flash_step == 0:
nav.move_one_step()
print(nav.grid)
| 18.961538
| 53
| 0.720081
|
ac253bddabf4e46021811ca44ae8b066bbfdde6d
| 592
|
py
|
Python
|
app/__init__.py
|
mary-wan/News
|
14a5e322f2d89c1615b75d1738d1f7a821224965
|
[
"MIT"
] | null | null | null |
app/__init__.py
|
mary-wan/News
|
14a5e322f2d89c1615b75d1738d1f7a821224965
|
[
"MIT"
] | null | null | null |
app/__init__.py
|
mary-wan/News
|
14a5e322f2d89c1615b75d1738d1f7a821224965
|
[
"MIT"
] | null | null | null |
from flask import Flask
from flask_bootstrap import Bootstrap  # initialise Bootstrap
from config import config_options
bootstrap = Bootstrap()
def create_app(config_name):
# Initializing application
app = Flask(__name__)
# Creating the app configurations
app.config.from_object(config_options[config_name])
# Initializing flask extensions
bootstrap.init_app(app)
from .main import main as main_blueprint
app.register_blueprint(main_blueprint)
# setting config
from .request import configure_request
configure_request(app)
return app
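# Example usage (assumes 'development' is a key defined in config_options in config.py):
#   app = create_app('development')
#   app.run()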
| 19.733333
| 60
| 0.756757
|
6c04efdc7eef1efcd89ef131a52e7f785e8aa511
| 711
|
py
|
Python
|
prototypes/ssh_gdb_expl_pwn_tools.py
|
brerodrigues/exploit_drafts
|
643bca8d13b44f99205fe614beda59c040a431c2
|
[
"MIT"
] | 1
|
2022-01-01T20:33:44.000Z
|
2022-01-01T20:33:44.000Z
|
prototypes/ssh_gdb_expl_pwn_tools.py
|
brerodrigues/exploit_drafts
|
643bca8d13b44f99205fe614beda59c040a431c2
|
[
"MIT"
] | null | null | null |
prototypes/ssh_gdb_expl_pwn_tools.py
|
brerodrigues/exploit_drafts
|
643bca8d13b44f99205fe614beda59c040a431c2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from pwn import *
get_flag = True
chall_name = 'babymem'
def run_with_gdb(p):
    # Attach gdb with a breakpoint on main; the caller continues the exploit afterwards.
    gdb.attach(p,'''
    b *main''')
def run_exploit(p, debug):
if debug:
run_with_gdb(p)
payload_size = '1'
overflow = 'A' * 1
print(p.recv(timeout=2).decode())
p.sendline(payload_size)
print(p.recv(timeout=2).decode())
p.sendline(overflow)
p.interactive()
if get_flag:
ssh_connection = ssh(host='ssh.college',user='hacker')
process_full_path = '/challenge/' + chall_name
p = ssh_connection.run(process_full_path)
run_exploit(p, False)
ssh_connection.close()
else:
p = process('./' + chall_name)
run_exploit(p, False)
| 21.545455
| 58
| 0.644163
|
7264f6b633427e9ea9d50f4a8b28c0358370ed27
| 1,229
|
py
|
Python
|
utils/transforms.py
|
garvm7/transunet_pytorch
|
277c42d182ab9606607b0db782f0d00b55f06760
|
[
"MIT"
] | null | null | null |
utils/transforms.py
|
garvm7/transunet_pytorch
|
277c42d182ab9606607b0db782f0d00b55f06760
|
[
"MIT"
] | null | null | null |
utils/transforms.py
|
garvm7/transunet_pytorch
|
277c42d182ab9606607b0db782f0d00b55f06760
|
[
"MIT"
] | null | null | null |
import cv2
import torch
import random
import numpy as np
def flip_horizontal(img, mask):
img = np.flip(img, axis=1)
mask = np.flip(mask, axis=1)
return img, mask
def rotate(img, mask, angle_abs=5):
h, w, _ = img.shape
angle = random.choice([angle_abs, -angle_abs])
M = cv2.getRotationMatrix2D((h, w), angle, 1.0)
img = cv2.warpAffine(img, M, (h, w), flags=cv2.INTER_CUBIC)
mask = cv2.warpAffine(mask, M, (h, w), flags=cv2.INTER_CUBIC)
mask = np.expand_dims(mask, axis=-1)
return img, mask
class RandomAugmentation:
augmentations = [flip_horizontal, rotate]
def __init__(self, max_augment_count):
if max_augment_count <= len(self.augmentations):
self.max_augment_count = max_augment_count
else:
self.max_augment_count = len(self.augmentations)
def __call__(self, sample):
img, mask = sample['image'], sample['label']
augmentation_count = random.randint(0, self.max_augment_count)
selected_augmentations = random.sample(self.augmentations, k=augmentation_count)
for augmentation in selected_augmentations:
img, mask = augmentation(img, mask)
return {'img': img, 'mask': mask}
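# Example usage (illustrative shapes): with sample = {'image': img_hwc, 'label': mask_hw1},
# RandomAugmentation(max_augment_count=2)(sample) applies between 0 and 2 of the
# augmentations (horizontal flip and/or rotation) and returns a dict keyed
# 'img'/'mask' (note: the output keys differ from the input keys).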
| 29.261905
| 88
| 0.664768
|
5df9c3cdcd80dc5522ebfd390020557a9938e8bf
| 380
|
py
|
Python
|
queryset_client/tests/base/tests/__init__.py
|
pulina/tastypie-queryset-client
|
1d65ee387e256b78c44fd8be57fa64ca798ae2da
|
[
"MIT"
] | null | null | null |
queryset_client/tests/base/tests/__init__.py
|
pulina/tastypie-queryset-client
|
1d65ee387e256b78c44fd8be57fa64ca798ae2da
|
[
"MIT"
] | 1
|
2016-12-22T10:55:44.000Z
|
2016-12-22T10:55:44.000Z
|
queryset_client/tests/base/tests/__init__.py
|
pulina/tastypie-queryset-client
|
1d65ee387e256b78c44fd8be57fa64ca798ae2da
|
[
"MIT"
] | 5
|
2015-04-27T11:50:28.000Z
|
2019-01-10T06:39:57.000Z
|
import warnings
warnings.simplefilter('ignore', Warning)
from .count import *
from .create import *
from .exists import *
from .foreignkey import *
from .get_or_create import *
from .latest import *
from .lazy import *
from .len import *
from .many_to_many import *
from .model import *
from .order_by import *
from .paginator import *
from .slice import *
from .strict import *
| 21.111111
| 40
| 0.75
|
fd6128a735e6e4719d1d198d23c02b6aa131a5c6
| 18,894
|
py
|
Python
|
model/code/main.py
|
xinyadu/doc_event_role
|
d43cbba14d433f009e9bef9ceff31c5de1cb5127
|
[
"MIT"
] | 47
|
2020-07-06T07:32:18.000Z
|
2022-02-14T08:15:04.000Z
|
model/code/main.py
|
zhichao-stone/doc_event_role
|
d43cbba14d433f009e9bef9ceff31c5de1cb5127
|
[
"MIT"
] | 4
|
2020-08-11T06:44:11.000Z
|
2021-09-02T03:26:16.000Z
|
model/code/main.py
|
zhichao-stone/doc_event_role
|
d43cbba14d433f009e9bef9ceff31c5de1cb5127
|
[
"MIT"
] | 13
|
2020-07-06T09:16:38.000Z
|
2021-12-16T06:37:54.000Z
|
from __future__ import print_function
import time
import sys
import argparse
import random
import torch
import gc
import torch.nn as nn
import torch.optim as optim
import numpy as np
from utils.metric import get_ner_fmeasure
from model.seqlabel import SeqLabel
# from model.sentclassifier import SentClassifier
from utils.data import Data
# from eval import get_macro_avg
from eval_no_duplicate import get_macro_avg
try:
import cPickle as pickle
except ImportError:
import pickle
seed_num = 42
random.seed(seed_num)
torch.manual_seed(seed_num)
np.random.seed(seed_num)
torch.cuda.manual_seed_all(seed_num)
torch.backends.cudnn.deterministic=True
def data_initialization(data):
data.initial_feature_alphabets()
data.build_alphabet(data.train_dir)
data.build_alphabet(data.dev_dir)
data.build_alphabet(data.test_dir)
data.fix_alphabet()
def predict_check(pred_variable, gold_variable, mask_variable):
"""
input:
pred_variable (batch_size, sent_len): pred tag result, in numpy format
gold_variable (batch_size, sent_len): gold result variable
mask_variable (batch_size, sent_len): mask variable
"""
pred = pred_variable.cpu().data.numpy()
gold = gold_variable.cpu().data.numpy()
mask = mask_variable.cpu().data.numpy()
overlaped = (pred == gold)
right_token = np.sum(overlaped * mask)
total_token = mask.sum()
# print("right: %s, total: %s"%(right_token, total_token))
return right_token, total_token
def recover_label(pred_variable, gold_variable, mask_variable, label_alphabet, word_recover, sentence_classification=False):
"""
input:
pred_variable (batch_size, sent_len): pred tag result
gold_variable (batch_size, sent_len): gold result variable
mask_variable (batch_size, sent_len): mask variable
"""
pred_variable = pred_variable[word_recover]
gold_variable = gold_variable[word_recover]
mask_variable = mask_variable[word_recover]
batch_size = gold_variable.size(0)
if sentence_classification:
pred_tag = pred_variable.cpu().data.numpy().tolist()
gold_tag = gold_variable.cpu().data.numpy().tolist()
pred_label = [label_alphabet.get_instance(pred) for pred in pred_tag]
gold_label = [label_alphabet.get_instance(gold) for gold in gold_tag]
else:
seq_len = gold_variable.size(1)
mask = mask_variable.cpu().data.numpy()
pred_tag = pred_variable.cpu().data.numpy()
gold_tag = gold_variable.cpu().data.numpy()
batch_size = mask.shape[0]
pred_label = []
gold_label = []
for idx in range(batch_size):
pred = [label_alphabet.get_instance(pred_tag[idx][idy]) for idy in range(seq_len) if mask[idx][idy] != 0]
gold = [label_alphabet.get_instance(gold_tag[idx][idy]) for idy in range(seq_len) if mask[idx][idy] != 0]
assert(len(pred)==len(gold))
pred_label.append(pred)
gold_label.append(gold)
return pred_label, gold_label
def recover_nbest_label(pred_variable, mask_variable, label_alphabet, word_recover):
"""
input:
pred_variable (batch_size, sent_len, nbest): pred tag result
mask_variable (batch_size, sent_len): mask variable
word_recover (batch_size)
output:
nbest_pred_label list: [batch_size, nbest, each_seq_len]
"""
# exit(0)
pred_variable = pred_variable[word_recover]
mask_variable = mask_variable[word_recover]
batch_size = pred_variable.size(0)
seq_len = pred_variable.size(1)
nbest = pred_variable.size(2)
mask = mask_variable.cpu().data.numpy()
pred_tag = pred_variable.cpu().data.numpy()
batch_size = mask.shape[0]
pred_label = []
for idx in range(batch_size):
pred = []
for idz in range(nbest):
each_pred = [label_alphabet.get_instance(pred_tag[idx][idy][idz]) for idy in range(seq_len) if mask[idx][idy] != 0]
pred.append(each_pred)
pred_label.append(pred)
return pred_label
def lr_decay(optimizer, epoch, decay_rate, init_lr):
lr = init_lr/(1+decay_rate*epoch)
print(" Learning rate is set as:", lr)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return optimizer
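# Example: with init_lr=0.015 and decay_rate=0.05, epoch 0 keeps lr=0.015 and
# epoch 10 gives lr = 0.015 / (1 + 0.05 * 10) = 0.01.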
def evaluate(data, model, name, nbest=None):
if name == "train":
instances = data.train_Ids
elif name == "dev":
instance_texts, instances = data.dev_texts, data.dev_Ids
elif name == 'test':
instance_texts, instances = data.test_texts, data.test_Ids
elif name == 'raw':
instance_texts, instances = data.raw_texts, data.raw_Ids
else:
print("Error: wrong evaluate name,", name)
exit(1)
right_token = 0
whole_token = 0
nbest_pred_results = []
pred_results = []
gold_results = []
sequences, doc_ids = [], []
## set model in eval model
model.eval()
batch_size = data.HP_batch_size
start_time = time.time()
train_num = len(instances)
total_batch = train_num//batch_size+1
for batch_id in range(total_batch):
start = batch_id*batch_size
end = (batch_id+1)*batch_size
if end > train_num:
end = train_num
instance = instances[start:end]
instance_text = instance_texts[start:end]
if not instance:
continue
batch_word, batch_wordlen, batch_wordrecover, list_sent_words_tensor, batch_label, mask = batchify_sequence_labeling_with_label(instance, data.HP_gpu, False)
tag_seq = model(batch_word, batch_wordlen, list_sent_words_tensor, mask)
pred_label, gold_label = recover_label(tag_seq, batch_label, mask, data.label_alphabet, batch_wordrecover, data.sentence_classification)
pred_results += pred_label
gold_results += gold_label
sequences += [item[0] for item in instance_text]
doc_ids += [item[-1] for item in instance_text]
# import ipdb; ipdb.set_trace()
decode_time = time.time() - start_time
speed = len(instances)/decode_time
# acc, p, r, f = get_ner_fmeasure(gold_results, pred_results, data.tagScheme)
p, r, f = get_macro_avg(sequences, pred_results, doc_ids)
return speed, p, r, f, pred_results
def batchify_sequence_labeling_with_label(input_batch_list, gpu, if_train=True):
"""
        input: list of instances of various lengths: [[words, sent_words, labels], [words, sent_words, labels], ...]
            words: word ids for one sequence. (batch_size, sent_len)
            sent_words: word ids split per sentence for one sequence.
            labels: label ids for one sequence. (batch_size, sent_len)
output:
zero padding for word and char, with their batch length
word_seq_tensor: (batch_size, max_sent_len) Variable
word_seq_lengths: (batch_size,1) Tensor
label_seq_tensor: (batch_size, max_sent_len)
mask: (batch_size, max_sent_len)
"""
batch_size = len(input_batch_list)
words = [sent[0] for sent in input_batch_list]
sent_words = [sent[1] for sent in input_batch_list]
labels = [sent[2] for sent in input_batch_list]
word_seq_lengths = torch.LongTensor(list(map(len, words)))
max_seq_len = word_seq_lengths.max().item()
word_seq_tensor = torch.zeros((batch_size, max_seq_len), requires_grad = if_train).long()
label_seq_tensor = torch.zeros((batch_size, max_seq_len), requires_grad = if_train).long()
mask = torch.zeros((batch_size, max_seq_len), requires_grad = if_train).byte()
for idx, (seq, label, seqlen) in enumerate(zip(words, labels, word_seq_lengths)):
seqlen = seqlen.item()
word_seq_tensor[idx, :seqlen] = torch.LongTensor(seq)
label_seq_tensor[idx, :seqlen] = torch.LongTensor(label)
mask[idx, :seqlen] = torch.Tensor([1]*seqlen)
word_seq_lengths, word_perm_idx = word_seq_lengths.sort(0, descending=True)
word_seq_tensor = word_seq_tensor[word_perm_idx]
label_seq_tensor = label_seq_tensor[word_perm_idx]
mask = mask[word_perm_idx]
_, word_seq_recover = word_perm_idx.sort(0, descending=False)
list_sent_words_tensor = []
for sent_words_one_example in sent_words:
one_example_list = []
for sent in sent_words_one_example:
sent_tensor = torch.zeros((1, len(sent)), requires_grad = if_train).long()
sent_tensor[0, :len(sent)] = torch.LongTensor(sent)
if gpu:
one_example_list.append(sent_tensor.cuda())
else:
one_example_list.append(sent_tensor)
list_sent_words_tensor.append(one_example_list)
word_perm_idx = word_perm_idx.data.numpy().tolist()
list_sent_words_tensor_perm = []
for idx in word_perm_idx:
list_sent_words_tensor_perm.append(list_sent_words_tensor[idx])
if gpu:
word_seq_tensor = word_seq_tensor.cuda()
word_seq_lengths = word_seq_lengths.cuda()
word_seq_recover = word_seq_recover.cuda()
label_seq_tensor = label_seq_tensor.cuda()
mask = mask.cuda()
return word_seq_tensor, word_seq_lengths, word_seq_recover, list_sent_words_tensor_perm, label_seq_tensor, mask
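# Shape sketch (illustrative): for a batch of 2 sequences with 7 and 5 tokens,
# word_seq_tensor and label_seq_tensor are zero-padded to (2, 7), mask marks the
# valid positions, the batch is re-ordered by descending length, and
# word_seq_recover gives the permutation back to the original order.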
def train(data):
print("Training model...")
data.show_data_summary()
save_data_name = data.model_dir +".dset"
data.save(save_data_name)
if data.sentence_classification:
model = SentClassifier(data)
else:
model = SeqLabel(data)
if data.optimizer.lower() == "sgd":
optimizer = optim.SGD(model.parameters(), lr=data.HP_lr, momentum=data.HP_momentum,weight_decay=data.HP_l2)
elif data.optimizer.lower() == "adagrad":
optimizer = optim.Adagrad(model.parameters(), lr=data.HP_lr, weight_decay=data.HP_l2)
elif data.optimizer.lower() == "adadelta":
optimizer = optim.Adadelta(model.parameters(), lr=data.HP_lr, weight_decay=data.HP_l2)
elif data.optimizer.lower() == "rmsprop":
optimizer = optim.RMSprop(model.parameters(), lr=data.HP_lr, weight_decay=data.HP_l2)
elif data.optimizer.lower() == "adam":
optimizer = optim.Adam(model.parameters(), lr=data.HP_lr, weight_decay=data.HP_l2)
else:
print("Optimizer illegal: %s"%(data.optimizer))
exit(1)
best_dev = -10
# data.HP_iteration = 1
## start training
for idx in range(data.HP_iteration):
epoch_start = time.time()
temp_start = epoch_start
print("\nEpoch: %s/%s" %(idx,data.HP_iteration))
if data.optimizer == "SGD":
optimizer = lr_decay(optimizer, idx, data.HP_lr_decay, data.HP_lr)
instance_count = 0
sample_id = 0
sample_loss = 0
total_loss = 0
right_token = 0
whole_token = 0
random.shuffle(data.train_Ids)
print("Shuffle: first input word list:", data.train_Ids[0][0])
## set model in train model
model.train()
model.zero_grad()
batch_size = data.HP_batch_size
batch_id = 0
train_num = len(data.train_Ids)
total_batch = train_num//batch_size+1
for batch_id in range(total_batch):
start = batch_id*batch_size
end = (batch_id+1)*batch_size
if end >train_num:
end = train_num
instance = data.train_Ids[start:end]
if not instance:
continue
batch_word, batch_wordlen, batch_wordrecover, list_sent_words_tensor, batch_label, mask = batchify_sequence_labeling_with_label(instance, data.HP_gpu, True)
instance_count += 1
loss, tag_seq = model.calculate_loss(batch_word, batch_wordlen, list_sent_words_tensor, batch_label, mask)
right, whole = predict_check(tag_seq, batch_label, mask)
right_token += right
whole_token += whole
# print("loss:",loss.item())
sample_loss += loss.item()
total_loss += loss.item()
if end%500 == 0:
temp_time = time.time()
temp_cost = temp_time - temp_start
temp_start = temp_time
print(" Instance: %s; Time: %.2fs; loss: %.4f; acc: %s/%s=%.4f"%(end, temp_cost, sample_loss, right_token, whole_token,(right_token+0.)/whole_token))
if sample_loss > 1e8 or str(sample_loss) == "nan":
print("ERROR: LOSS EXPLOSION (>1e8) ! PLEASE SET PROPER PARAMETERS AND STRUCTURE! EXIT....")
exit(1)
sys.stdout.flush()
sample_loss = 0
loss.backward()
optimizer.step()
model.zero_grad()
temp_time = time.time()
temp_cost = temp_time - temp_start
print(" Instance: %s; Time: %.2fs; loss: %.4f; acc: %s/%s=%.4f"%(end, temp_cost, sample_loss, right_token, whole_token,(right_token+0.)/whole_token))
epoch_finish = time.time()
epoch_cost = epoch_finish - epoch_start
print("Epoch: %s training finished. Time: %.2fs, speed: %.2fst/s, total loss: %s"%(idx, epoch_cost, train_num/epoch_cost, total_loss))
print("totalloss:", total_loss)
if total_loss > 1e8 or str(total_loss) == "nan":
print("ERROR: LOSS EXPLOSION (>1e8) ! PLEASE SET PROPER PARAMETERS AND STRUCTURE! EXIT....")
exit(1)
# continue
speed, p, r, f, _ = evaluate(data, model, "dev")
dev_finish = time.time()
dev_cost = dev_finish - epoch_finish
# if data.seg:
current_score = f
print("Dev: time: %.2fs, speed: %.2fst/s; p: %.4f, r: %.4f, f: %.4f"%(dev_cost, speed, p, r, f))
# else:
# current_score = acc
# print("Dev: time: %.2fs speed: %.2fst/s; acc: %.4f"%(dev_cost, speed, acc))
if current_score > best_dev:
# if data.seg:
print("!!!Exceed previous best f score:", best_dev)
# else:
# print("!!!Exceed previous best acc score:", best_dev)
model_name = data.model_dir +'.'+ str(idx) + ".model"
print("Save current best model in file:", model_name)
torch.save(model.state_dict(), data.model_dir + '.' + 'best' + ".model")
best_dev = current_score
model_name = data.model_dir +'.'+ str(idx) + ".model"
torch.save(model.state_dict(), model_name)
# ## decode test
# speed, acc, p, r, f, _,_ = evaluate(data, model, "test")
# test_finish = time.time()
# test_cost = test_finish - dev_finish
# if data.seg:
# print("Test: time: %.2fs, speed: %.2fst/s; acc: %.4f, p: %.4f, r: %.4f, f: %.4f"%(test_cost, speed, acc, p, r, f))
# else:
# print("Test: time: %.2fs, speed: %.2fst/s; acc: %.4f"%(test_cost, speed, acc))
gc.collect()
def load_model_decode(data, name):
print("Load Model from file: ", data.model_dir)
if data.sentence_classification:
model = SentClassifier(data)
else:
model = SeqLabel(data)
# model = SeqModel(data)
## load model need consider if the model trained in GPU and load in CPU, or vice versa
# if not gpu:
# model.load_state_dict(torch.load(model_dir))
# # model.load_state_dict(torch.load(model_dir), map_location=lambda storage, loc: storage)
# # model = torch.load(model_dir, map_location=lambda storage, loc: storage)
# else:
# model.load_state_dict(torch.load(model_dir))
# # model = torch.load(model_dir)
model.load_state_dict(torch.load(data.load_model_dir))
# print("Decode %s data, nbest: %s ..."%(name, data.nbest))
start_time = time.time()
speed, p, r, f, pred_results = evaluate(data, model, name, data.nbest)
end_time = time.time()
time_cost = end_time - start_time
# if data.seg:
# print("%s: time:%.2fs, speed:%.2fst/s; acc: %.4f, p: %.4f, r: %.4f, f: %.4f"%(name, time_cost, speed, acc, p, r, f))
# else:
# print("%s: time:%.2fs, speed:%.2fst/s; acc: %.4f"%(name, time_cost, speed, acc))
return pred_results
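# Illustrative sketch (not part of the original NCRF++ script): the commented
# block above discusses loading a model that was trained on GPU onto a
# CPU-only machine. One hedged way to do that with PyTorch is to pass
# map_location to torch.load; the helper name below is hypothetical and is
# never called by this script.
def load_state_dict_on_cpu(model_path):
    """Load a saved state dict onto the CPU regardless of where it was saved."""
    return torch.load(model_path, map_location=lambda storage, loc: storage)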
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Tuning with NCRF++')
# parser.add_argument('--status', choices=['train', 'decode'], help='update algorithm', default='train')
parser.add_argument('--config', help='Configuration File', default='None')
parser.add_argument('--wordemb', help='Embedding for words', default='None')
parser.add_argument('--charemb', help='Embedding for chars', default='None')
parser.add_argument('--status', choices=['train', 'decode'], help='update algorithm', default='train')
parser.add_argument('--savemodel', default="data/model/saved_model.lstmcrf.")
parser.add_argument('--savedset', help='Dir of saved data setting')
parser.add_argument('--train', default="data/conll03/train.bmes")
parser.add_argument('--dev', default="data/conll03/dev.bmes" )
parser.add_argument('--test', default="data/conll03/test.bmes")
parser.add_argument('--seg', default="True")
parser.add_argument('--raw')
parser.add_argument('--loadmodel')
parser.add_argument('--output')
args = parser.parse_args()
data = Data()
data.HP_gpu = torch.cuda.is_available()
if args.config == 'None':
data.train_dir = args.train
data.dev_dir = args.dev
data.test_dir = args.test
data.model_dir = args.savemodel
data.dset_dir = args.savedset
print("Save dset directory:",data.dset_dir)
save_model_dir = args.savemodel
data.word_emb_dir = args.wordemb
data.char_emb_dir = args.charemb
if args.seg.lower() == 'true':
data.seg = True
else:
data.seg = False
print("Seed num:",seed_num)
else:
data.read_config(args.config)
# data.show_data_summary()
status = data.status.lower()
print("Seed num:",seed_num)
if status == 'train':
print("MODEL: train")
data_initialization(data)
data.generate_instance('train')
data.generate_instance('dev')
data.generate_instance('test')
data.build_pretrain_emb()
train(data)
print("\n\n\nMODEL: decode")
data.load(data.dset_dir)
decode_results = load_model_decode(data, 'test')
data.write_decoded_results(decode_results, 'test')
elif status == 'decode':
print("MODEL: decode")
data.load(data.dset_dir)
data.read_config(args.config)
print(data.raw_dir)
data.generate_instance('raw')
print("nbest: %s"%(data.nbest))
decode_results = load_model_decode(data, 'raw')
data.write_decoded_results(decode_results, 'raw')
else:
print("Invalid argument! Please use valid arguments! (train/test/decode)")
| 40.285714
| 173
| 0.643379
|
00b12da890974bced0894f872f15a0eb0808ecfd
| 5,903
|
py
|
Python
|
rest-service/manager_rest/storage/relationships.py
|
TS-at-WS/cloudify-manager
|
3e062e8dec16c89d2ab180d0b761cbf76d3f7ddc
|
[
"Apache-2.0"
] | null | null | null |
rest-service/manager_rest/storage/relationships.py
|
TS-at-WS/cloudify-manager
|
3e062e8dec16c89d2ab180d0b761cbf76d3f7ddc
|
[
"Apache-2.0"
] | null | null | null |
rest-service/manager_rest/storage/relationships.py
|
TS-at-WS/cloudify-manager
|
3e062e8dec16c89d2ab180d0b761cbf76d3f7ddc
|
[
"Apache-2.0"
] | null | null | null |
#########
# Copyright (c) 2013 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .models_base import db
def foreign_key(foreign_key_column,
nullable=False,
index=True,
primary_key=False,
ondelete='CASCADE'):
"""Return a ForeignKey object with the relevant
:param foreign_key_column: Unique id column in the parent table
:param nullable: Should the column be allowed to remain empty
:param index: Should the column be indexed
:param primary_key: Mark column as a primary key column
:param ondelete: If a record in the parent table is deleted, the ondelete
action will affect the corresponding records in the child table
"""
return db.Column(
db.ForeignKey(foreign_key_column, ondelete=ondelete),
nullable=nullable,
index=index,
primary_key=primary_key,
)
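# Illustrative sketch (not part of the original module): declaring a child
# model's foreign-key column with the helper above. The table and column
# names are invented for the example; the function is never called by the
# application.
def _example_foreign_key_column():
    return foreign_key('example_parents._storage_id', nullable=True)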
def one_to_many_relationship(child_class,
parent_class,
foreign_key_column,
parent_class_primary_key='_storage_id',
backreference=None,
cascade='all',
**relationship_kwargs):
"""Return a one-to-many SQL relationship object
Meant to be used from inside the *child* object
:param parent_class: Class of the parent table
:param child_class: Class of the child table
:param foreign_key_column: The column of the foreign key
:param parent_class_primary_key: The name of the parent's primary key
:param backreference: The name to give to the reference to the child
:param cascade: in what cases to cascade changes from parent to child
"""
backreference = backreference or child_class.__tablename__
parent_primary_key = getattr(parent_class, parent_class_primary_key)
return db.relationship(
parent_class,
primaryjoin=lambda: parent_primary_key == foreign_key_column,
# The following line makes sure that when the *parent* is
# deleted, all its connected children are deleted as well
backref=db.backref(backreference, cascade=cascade),
**relationship_kwargs
)
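# Illustrative sketch (not part of the original module): wiring a one-to-many
# relationship from a hypothetical child model to its parent. In practice this
# is called from the child class (often via a declared_attr), passing the
# child class itself, the parent class, and the foreign-key column declared
# with foreign_key() above. The parent class is assumed to expose a
# _storage_id primary-key column (the helper's default); all names are invented.
def _example_one_to_many(child_cls, parent_cls, fk_column):
    return one_to_many_relationship(child_cls, parent_cls, fk_column,
                                    backreference='children')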
def many_to_many_relationship(current_class, other_class, table_prefix=None,
primary_key_tuple=False, **relationship_kwargs):
"""Return a many-to-many SQL relationship object
Notes:
1. The backreference name is the current table's table name
2. This method creates a new helper table in the DB
:param current_class: The class of the table we're connecting from
:param other_class: The class of the table we're connecting to
:param table_prefix: Custom prefix for the helper table name and the
backreference name
:param primary_key_tuple: Whether to make the two foreign keys a composite
primary key, which will also index them.
"""
current_table_name = current_class.__tablename__
current_column_name = '{0}_id'.format(current_table_name[:-1])
current_foreign_key = '{0}.{1}'.format(
current_table_name,
current_class.unique_id()
)
other_table_name = other_class.__tablename__
other_column_name = '{0}_id'.format(other_table_name[:-1])
other_foreign_key = '{0}.{1}'.format(
other_table_name,
other_class.unique_id()
)
helper_table_name = '{0}_{1}'.format(
current_table_name,
other_table_name
)
backref_name = current_table_name
if table_prefix:
helper_table_name = '{0}_{1}'.format(table_prefix, helper_table_name)
backref_name = '{0}_{1}'.format(table_prefix, backref_name)
secondary_table = get_secondary_table(
helper_table_name,
current_column_name,
other_column_name,
current_foreign_key,
other_foreign_key,
primary_key_tuple
)
return db.relationship(
other_class,
secondary=secondary_table,
backref=db.backref(backref_name),
**relationship_kwargs
)
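# Illustrative sketch (not part of the original module): a many-to-many
# relationship between two hypothetical model classes. Both classes are
# assumed to define __tablename__ and a unique_id() classmethod, as the helper
# above requires; the prefix is arbitrary and the function is never called by
# the application.
def _example_many_to_many(current_cls, other_cls):
    return many_to_many_relationship(current_cls, other_cls,
                                     table_prefix='example',
                                     primary_key_tuple=True)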
def get_secondary_table(helper_table_name,
first_column_name,
second_column_name,
first_foreign_key,
second_foreign_key,
primary_key_tuple):
"""Create a helper table for a many-to-many relationship
:param helper_table_name: The name of the table
:param first_column_name: The name of the first column in the table
:param second_column_name: The name of the second column in the table
:param first_foreign_key: The string representing the first foreign key,
for example `blueprint._storage_id`, or `tenants.id`
:param second_foreign_key: The string representing the second foreign key
:param primary_key_tuple: Whether to make the two foreign keys a composite
primary key, which will also index them.
:return: A Table object
"""
return db.Table(
helper_table_name,
db.Column(
first_column_name,
db.Integer,
db.ForeignKey(first_foreign_key),
primary_key=primary_key_tuple
),
db.Column(
second_column_name,
db.Integer,
db.ForeignKey(second_foreign_key),
primary_key=primary_key_tuple
)
)
| 37.360759
| 78
| 0.666102
|
1b057820f42fd904b0a8eace723d3ffdf926de2d
| 1,063
|
py
|
Python
|
skynet/wallet/wallet_info.py
|
hulatang/skynet-blockchain
|
d7d6f7ec84731c13b9d6d307bb171cf0e266be82
|
[
"Apache-2.0"
] | 7
|
2021-09-07T02:14:15.000Z
|
2022-03-27T06:42:35.000Z
|
skynet/wallet/wallet_info.py
|
hulatang/skynet-blockchain
|
d7d6f7ec84731c13b9d6d307bb171cf0e266be82
|
[
"Apache-2.0"
] | 1
|
2021-10-21T16:38:56.000Z
|
2021-11-15T13:03:15.000Z
|
skynet/wallet/wallet_info.py
|
hulatang/skynet-blockchain
|
d7d6f7ec84731c13b9d6d307bb171cf0e266be82
|
[
"Apache-2.0"
] | 3
|
2021-10-21T07:17:40.000Z
|
2022-03-16T12:57:09.000Z
|
from dataclasses import dataclass
from typing import List
from skynet.util.ints import uint8, uint32
from skynet.util.streamable import Streamable, streamable
@dataclass(frozen=True)
@streamable
class WalletInfo(Streamable):
"""
This object represents the wallet data as it is stored in DB.
ID: The main (Standard) wallet is stored at index 1; every wallet created afterwards gets an auto-incremented id.
Name: can be a user provided or default generated name. (can be modified)
Type: is specified during wallet creation and should never be changed.
Data: this field is intended for storing any wallet-specific information required by it.
(RL wallet stores origin_id, admin/user pubkey, rate limit, etc.)
This data should be a JSON-encoded string.
"""
id: uint32
name: str
type: uint8 # WalletType(type)
data: str
@dataclass(frozen=True)
@streamable
class WalletInfoBackup(Streamable):
"""
Used for transforming list of WalletInfo objects into bytes.
"""
wallet_list: List[WalletInfo]
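# Illustrative sketch (not part of the original module): constructing a
# WalletInfo record and bundling it into a WalletInfoBackup. The field values
# are invented; the data field holds a JSON-encoded string as described in the
# docstring above. The function is never called by the application.
def _example_wallet_backup() -> WalletInfoBackup:
    import json

    info = WalletInfo(
        id=uint32(1),          # the main (Standard) wallet lives at index 1
        name="My Wallet",
        type=uint8(0),         # assumed to map to the standard wallet type
        data=json.dumps({}),   # wallet-specific data, JSON encoded
    )
    return WalletInfoBackup(wallet_list=[info])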
| 30.371429
| 109
| 0.735654
|
4f7084b11b786053a15c17657dada906d129d0bf
| 3,226
|
py
|
Python
|
tests/format/optionarch.py
|
doraskayo/buildstream
|
1c72d4342ae7df360808de22c5e49f55dbb6bec6
|
[
"Apache-2.0"
] | null | null | null |
tests/format/optionarch.py
|
doraskayo/buildstream
|
1c72d4342ae7df360808de22c5e49f55dbb6bec6
|
[
"Apache-2.0"
] | null | null | null |
tests/format/optionarch.py
|
doraskayo/buildstream
|
1c72d4342ae7df360808de22c5e49f55dbb6bec6
|
[
"Apache-2.0"
] | null | null | null |
# Pylint doesn't play well with fixtures and dependency injection from pytest
# pylint: disable=redefined-outer-name
import os
import pytest
from buildstream import _yaml
from buildstream.exceptions import ErrorDomain, LoadErrorReason
from buildstream.testing.runcli import cli # pylint: disable=unused-import
from tests.testutils import override_platform_uname
# Project directory
DATA_DIR = os.path.dirname(os.path.realpath(__file__))
@pytest.mark.datafiles(DATA_DIR)
@pytest.mark.parametrize(
"machine,value,expected",
[
# Test explicitly provided arches
("arm", "aarch32", "Army"),
("arm", "aarch64", "Aarchy"),
# Test automatically derived arches
("arm", None, "Army"),
("aarch64", None, "Aarchy"),
# Test that explicitly provided arches don't error out
# when the `uname` reported arch is not supported
("i386", "aarch32", "Army"),
("x86_64", "aarch64", "Aarchy"),
],
)
def test_conditional(cli, datafiles, machine, value, expected):
with override_platform_uname(machine=machine):
project = os.path.join(datafiles.dirname, datafiles.basename, "option-arch")
bst_args = []
if value is not None:
bst_args += ["--option", "machine_arch", value]
bst_args += ["show", "--deps", "none", "--format", "%{vars}", "element.bst"]
result = cli.run(project=project, silent=True, args=bst_args)
result.assert_success()
loaded = _yaml.load_data(result.output)
assert loaded.get_str("result") == expected
@pytest.mark.datafiles(DATA_DIR)
def test_unsupported_arch(cli, datafiles):
with override_platform_uname(machine="x86_64"):
project = os.path.join(datafiles.dirname, datafiles.basename, "option-arch")
result = cli.run(
project=project, silent=True, args=["show", "--deps", "none", "--format", "%{vars}", "element.bst"]
)
result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.INVALID_DATA)
@pytest.mark.datafiles(DATA_DIR)
def test_alias(cli, datafiles):
with override_platform_uname(machine="arm"):
project = os.path.join(datafiles.dirname, datafiles.basename, "option-arch-alias")
result = cli.run(
project=project, silent=True, args=["show", "--deps", "none", "--format", "%{vars}", "element.bst"]
)
result.assert_success()
@pytest.mark.datafiles(DATA_DIR)
def test_unknown_host_arch(cli, datafiles):
with override_platform_uname(machine="x86_128"):
project = os.path.join(datafiles.dirname, datafiles.basename, "option-arch")
result = cli.run(
project=project, silent=True, args=["show", "--deps", "none", "--format", "%{vars}", "element.bst"]
)
result.assert_main_error(ErrorDomain.PLATFORM, None)
@pytest.mark.datafiles(DATA_DIR)
def test_unknown_project_arch(cli, datafiles):
project = os.path.join(datafiles.dirname, datafiles.basename, "option-arch-unknown")
result = cli.run(
project=project, silent=True, args=["show", "--deps", "none", "--format", "%{vars}", "element.bst"]
)
result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.INVALID_DATA)
| 33.957895
| 111
| 0.66522
|
523b2e6862e62158dc67328b20624d958ad05699
| 7,512
|
py
|
Python
|
budgetportal/webflow/views.py
|
Lunga001/datamanager
|
ebe9ad9db2ee7011855f1249c46d9d1bf6f4c4d1
|
[
"MIT"
] | 3
|
2019-08-31T03:08:22.000Z
|
2020-04-03T13:09:20.000Z
|
budgetportal/webflow/views.py
|
Lunga001/datamanager
|
ebe9ad9db2ee7011855f1249c46d9d1bf6f4c4d1
|
[
"MIT"
] | 97
|
2019-04-16T07:54:38.000Z
|
2022-02-10T07:25:48.000Z
|
budgetportal/webflow/views.py
|
OpenUpSA/budget-portal
|
879c5875b1d438b9287c38d6730c86be69051ac5
|
[
"MIT"
] | 14
|
2019-04-23T09:48:17.000Z
|
2021-04-13T17:48:40.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import csv
import json
import urllib.parse
from slugify import slugify
from budgetportal import models
from budgetportal.csv_gen import Echo
from budgetportal.infra_projects.charts import time_series_data
from budgetportal.json_encoder import JSONEncoder
from django.forms.models import model_to_dict
from django.http.response import StreamingHttpResponse
from django.shortcuts import get_object_or_404, render
from django.urls import reverse
from drf_haystack.filters import (
HaystackFacetFilter,
HaystackFilter,
HaystackOrderingFilter,
)
from drf_haystack.mixins import FacetMixin
from drf_haystack.viewsets import HaystackViewSet
from rest_framework.decorators import action
from rest_framework.generics import RetrieveAPIView
from .serializers import (
InfaProjectCSVSnapshotSerializer,
InfraProjectCSVSerializer,
InfraProjectFacetSerializer,
InfraProjectSerializer,
)
def infrastructure_project_list(request):
context = {
"page_title": "Infrastructure project search - vulekamali",
"page_description": "Find infrastructure projects run by national and provincial government.",
"page_data_json": "null",
}
return render(request, "webflow/infrastructure-search-template.html", context)
def infrastructure_project_detail(request, id, slug):
project = get_object_or_404(models.InfraProject, pk=int(id))
snapshot = project.project_snapshots.latest()
page_data = {"project": model_to_dict(snapshot)}
page_data["project"]["irm_snapshot"] = str(snapshot.irm_snapshot)
page_data["project"]["csv_download_url"] = project.csv_download_url
snapshot_list = list(project.project_snapshots.all())
page_data["time_series_chart"] = time_series_data(snapshot_list)
department = models.Department.get_in_latest_government(
snapshot.department, snapshot.government
)
page_data["department_url"] = department.get_url_path() if department else None
page_data["province_depts_url"] = (
"/%s/departments?province=%s&sphere=provincial"
% (models.FinancialYear.get_latest_year().slug, slugify(snapshot.province),)
)
page_data[
"latest_snapshot_financial_year"
] = snapshot.irm_snapshot.sphere.financial_year.slug
context = {
"project": project,
"page_data_json": json.dumps(
page_data, cls=JSONEncoder, sort_keys=True, indent=4
),
"page_title": "%s, %s Infrastructure projects - vulekamali"
% (snapshot.name, snapshot.province),
"page_description": "Infrastructure project by the %s %s department."
% (snapshot.government_label, snapshot.department),
}
return render(request, "webflow/detail_infrastructure-projects.html", context)
class InfraProjectCSVGeneratorMixIn:
def generate_csv_response(self, response_results, filename="export.csv"):
response = StreamingHttpResponse(
streaming_content=self._generate_rows(response_results),
content_type="text/csv",
)
response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
return response
def _generate_rows(self, response_results):
headers = InfraProjectCSVSerializer.Meta.fields
writer = csv.DictWriter(Echo(), fieldnames=headers)
yield writer.writerow({h: h for h in headers})
for row in response_results:
yield writer.writerow(row)
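# Illustrative note (not part of the original module): the mixin above streams
# CSV rows lazily by pointing csv.DictWriter at an Echo pseudo-buffer, which
# (as in the usual Django streaming-CSV pattern) is assumed to simply return
# whatever is written to it, so rows are never accumulated in memory. A
# minimal standalone sketch of the same idea, with invented field names; it is
# never called by the application.
def _example_csv_row_stream(rows, fieldnames=("name", "province")):
    writer = csv.DictWriter(Echo(), fieldnames=list(fieldnames))
    yield writer.writerow({name: name for name in fieldnames})
    for row in rows:
        yield writer.writerow(row)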
class InfaProjectCSVDownload(RetrieveAPIView, InfraProjectCSVGeneratorMixIn):
queryset = models.InfraProject.objects.prefetch_related("project_snapshots")
serializer_class = InfaProjectCSVSnapshotSerializer
def get(self, request, *args, **kwargs):
project = get_object_or_404(self.queryset, id=int(kwargs["id"]))
serializer = self.serializer_class(
project.project_snapshots.iterator(), many=True
)
filename = "{}.csv".format(project.get_slug())
return self.generate_csv_response(serializer.data, filename=filename)
class InfraProjectFacetFilter(HaystackFacetFilter):
def filter_queryset(self, request, queryset, view, *args, **kwargs):
queryset = super(InfraProjectFacetFilter, self).filter_queryset(
request, queryset, view, *args, **kwargs
)
text_query = request.query_params.get("q", None)
if text_query:
queryset = queryset.filter(text=text_query)
return queryset
class InfraProjectFilter(HaystackFilter):
def filter_queryset(self, request, queryset, view, *args, **kwargs):
queryset = super(InfraProjectFilter, self).filter_queryset(
request, queryset, view, *args, **kwargs
)
text_query = request.query_params.get("q", None)
if text_query:
queryset = queryset.filter(text=text_query)
return queryset
class InfraProjectSearchView(
FacetMixin, HaystackViewSet, InfraProjectCSVGeneratorMixIn
):
# `index_models` is an optional list of which models you would like to include
# in the search result. You might have several models indexed, and this provides
# a way to filter out those of no interest for this particular view.
# (Translates to `SearchQuerySet().models(*index_models)` behind the scenes.)
# index_models = [Location]
serializer_class = InfraProjectSerializer
csv_serializer_class = InfraProjectCSVSerializer
filter_backends = [InfraProjectFilter, HaystackOrderingFilter]
facet_serializer_class = InfraProjectFacetSerializer
facet_filter_backends = [InfraProjectFacetFilter]
ordering_fields = [
"name",
"estimated_total_project_cost",
"status_order",
"estimated_completion_date",
]
def list(self, request, *args, **kwargs):
csv_download_params = self._get_csv_query_params(request.query_params)
response = super().list(request, *args, **kwargs)
if isinstance(response.data, dict):
response.data["csv_download_url"] = reverse(
"infrastructure-project-api-csv"
)
if csv_download_params:
response.data["csv_download_url"] += "?{}".format(csv_download_params)
return response
@action(detail=False, methods=["get"])
def get_csv(self, request, *args, **kwargs):
self.serializer_class = self.csv_serializer_class
self.pagination_class = None
response = super().list(request, *args, **kwargs)
return self.generate_csv_response(
response.data, filename=self._get_filename(request.query_params)
)
def _get_csv_query_params(self, original_query_params):
csv_download_params = original_query_params.copy()
csv_download_params.pop("fields", None)
csv_download_params.pop("limit", None)
csv_download_params.pop("offset", None)
return urllib.parse.urlencode(csv_download_params)
def _get_filename(self, query_params):
keys_to_check = (
"government_label",
"sector",
"province",
"department",
"status",
"primary_founding_source",
"q",
)
extension = "csv"
filename = "infrastructure-projects"
for key in keys_to_check:
if query_params.get(key):
filename += "-{}-{}".format(key, slugify(query_params[key]))
return "{}.{}".format(filename, extension)
| 37.939394
| 102
| 0.700213
|
c8264f4fdeecae2e7bf8ea387882805c0ce13f70
| 162
|
py
|
Python
|
multi-armed-bandits/submission/bandit.py
|
harshsiloiya98/CS747-Assignments
|
a2c6ddecc0c5fef829aec80fa6490cd50d12a816
|
[
"MIT"
] | null | null | null |
multi-armed-bandits/submission/bandit.py
|
harshsiloiya98/CS747-Assignments
|
a2c6ddecc0c5fef829aec80fa6490cd50d12a816
|
[
"MIT"
] | null | null | null |
multi-armed-bandits/submission/bandit.py
|
harshsiloiya98/CS747-Assignments
|
a2c6ddecc0c5fef829aec80fa6490cd50d12a816
|
[
"MIT"
] | null | null | null |
import numpy as np
# simulates a Bernoulli bandit arm
def pull_arm(prob):
if (np.random.rand() <= prob):
return 1.
else:
return 0.
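# Illustrative sketch (not part of the original file): estimating the
# empirical mean reward of one Bernoulli arm by pulling it repeatedly. The
# probability and number of pulls are arbitrary example values.
def estimate_arm_mean(prob, num_pulls=1000):
    rewards = [pull_arm(prob) for _ in range(num_pulls)]
    return float(np.mean(rewards))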
| 20.25
| 35
| 0.580247
|
42b76f93f0479af0dd1e9305a9f40e29139ff379
| 4,025
|
py
|
Python
|
crosshair/examples/check_examples_test.py
|
pschanely/CrossHair
|
520ed990a370eb916cae9a7c688cbd0119ac03e3
|
[
"MIT"
] | 785
|
2019-09-28T14:47:48.000Z
|
2022-03-24T15:04:03.000Z
|
crosshair/examples/check_examples_test.py
|
pschanely/CrossHair
|
520ed990a370eb916cae9a7c688cbd0119ac03e3
|
[
"MIT"
] | 111
|
2019-10-19T14:43:03.000Z
|
2022-03-29T07:23:32.000Z
|
crosshair/examples/check_examples_test.py
|
pschanely/CrossHair
|
520ed990a370eb916cae9a7c688cbd0119ac03e3
|
[
"MIT"
] | 36
|
2018-05-12T03:31:29.000Z
|
2022-03-02T14:07:16.000Z
|
"""Run functional tests of the tool on all the examples."""
import argparse
import os
import pathlib
from pathlib import Path
import re
import shlex
import subprocess
import sys
from typing import Iterable
from typing import List
import pytest
def extract_linenums(text: str) -> List[int]:
r"""
Pull ordered line numbers out of crosshair output.
>>> extract_linenums("foo:34:bar\nfoo:64:bar\n")
[34, 64]
"""
return list(map(int, re.compile(r":(\d+)\:").findall(text)))
def find_examples() -> Iterable[Path]:
examples_dir = pathlib.Path(os.path.realpath(__file__)).parent
for path in sorted(examples_dir.glob("**/*.py")):
if path.stem != "__init__":
yield path
def main(argv: List[str]) -> int:
"""Execute the main routine."""
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
"--overwrite", help="If set, overwrite the golden files", action="store_true"
)
args = parser.parse_args(argv)
overwrite = bool(args.overwrite)
success = True
for pth in find_examples():
success &= run_on_file(pth, overwrite)
if not success:
print("One or more functional tests failed. Please see above.")
return 1
print("The functional tests passed.")
return 0
def run_on_file(pth: Path, overwrite: bool) -> bool:
# opts = ["--per_condition_timeout=0.75"]
# if kind == "hypothesis":
# opts = [
# "--per_condition_timeout=5.0",
# "--per_path_timeout=5.0"
# ]
cmd = [
sys.executable,
"-m",
"crosshair",
"check",
str(pth),
]
cmd_as_string = " ".join(shlex.quote(part) for part in cmd)
print(f"Running: {cmd_as_string}")
proc = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
encoding="utf-8",
)
stdout, stderr = proc.communicate()
assert isinstance(stdout, str)
assert isinstance(stderr, str)
# We see empty output if, and only if, the process succeeds:
if (proc.returncode == 0) != (stdout == "" and stderr == ""):
print(
f"The return code does not correspond to the output.\n\n"
f"The command was:\n"
f"{cmd_as_string}\n\n"
f"The return code was: {proc.returncode}\n"
f"The captured stdout was:\n"
f"{stdout}\n\n"
f"The captured stderr:\n"
f"{stderr}\n\n"
)
return False
expected_stdout_pth = pth.parent / (pth.stem + ".out")
##
# Replace the absolute path to the examples directory
# with a place holder to make these tests machine agnostic.
##
path_re = re.compile(r"^.*[/\\]([_\w]+\.py):", re.MULTILINE)
stdout, _ = path_re.subn(r"\1:", stdout)
if overwrite:
if expected_stdout_pth.exists():
expected_stdout_pth.unlink()
if stdout:
expected_stdout_pth.write_text(stdout)
else:
if expected_stdout_pth.exists():
expected_stdout = expected_stdout_pth.read_text()
else:
expected_stdout = ""
# We only check line numbers, as error messages aren't stable.
if extract_linenums(expected_stdout) != extract_linenums(stdout):
print(
f"The output was different than expected.\n\n"
f"The command was:\n"
f"{cmd_as_string}\n\n"
f"The captured stdout was:\n"
f"{stdout}\n\n"
f"The expected stdout:\n"
f"{expected_stdout}\n\n"
)
return False
return True
@pytest.mark.parametrize("path", find_examples(), ids=lambda p: "_".join(p.parts[-3:]))
def test_examples(path: Path):
# TODO: "unable to meet precondition" and non-deterministic problems aren't
# surfaced. Reconsider.
assert run_on_file(path, overwrite=False)
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
| 27.568493
| 87
| 0.59528
|
0b3b3442aa53183e0bd293e0fee994d4d8ac0542
| 63,399
|
py
|
Python
|
release/stubs.min/System/Windows/Controls/Primitives_parts/ScrollBar.py
|
YKato521/ironpython-stubs
|
b1f7c580de48528490b3ee5791b04898be95a9ae
|
[
"MIT"
] | null | null | null |
release/stubs.min/System/Windows/Controls/Primitives_parts/ScrollBar.py
|
YKato521/ironpython-stubs
|
b1f7c580de48528490b3ee5791b04898be95a9ae
|
[
"MIT"
] | null | null | null |
release/stubs.min/System/Windows/Controls/Primitives_parts/ScrollBar.py
|
YKato521/ironpython-stubs
|
b1f7c580de48528490b3ee5791b04898be95a9ae
|
[
"MIT"
] | null | null | null |
class ScrollBar(
RangeBase,
IResource,
IAnimatable,
IInputElement,
IFrameworkInputElement,
ISupportInitialize,
IHaveResources,
IQueryAmbient,
):
"""
Represents a control that provides a scroll bar that has a sliding System.Windows.Controls.Primitives.Thumb whose position corresponds to a value.
ScrollBar()
"""
def AddLogicalChild(self, *args):
"""
AddLogicalChild(self: FrameworkElement,child: object)
Adds the provided object to the logical tree of this element.
child: Child element to be added.
"""
pass
def AddVisualChild(self, *args):
"""
AddVisualChild(self: Visual,child: Visual)
Defines the parent-child relationship between two visuals.
child: The child visual object to add to parent visual.
"""
pass
def ArrangeCore(self, *args):
"""
ArrangeCore(self: FrameworkElement,finalRect: Rect)
Implements System.Windows.UIElement.ArrangeCore(System.Windows.Rect) (defined as virtual in
System.Windows.UIElement) and seals the implementation.
finalRect: The final area within the parent that this element should use to arrange itself and its children.
"""
pass
def ArrangeOverride(self, *args):
"""
ArrangeOverride(self: Control,arrangeBounds: Size) -> Size
Called to arrange and size the content of a System.Windows.Controls.Control object.
arrangeBounds: The computed size that is used to arrange the content.
Returns: The size of the control.
"""
pass
def GetLayoutClip(self, *args):
"""
GetLayoutClip(self: FrameworkElement,layoutSlotSize: Size) -> Geometry
Returns a geometry for a clipping mask. The mask applies if the layout system attempts to
arrange an element that is larger than the available display space.
layoutSlotSize: The size of the part of the element that does visual presentation.
Returns: The clipping geometry.
"""
pass
def GetTemplateChild(self, *args):
"""
GetTemplateChild(self: FrameworkElement,childName: str) -> DependencyObject
Returns the named element in the visual tree of an instantiated
System.Windows.Controls.ControlTemplate.
childName: Name of the child to find.
Returns: The requested element. May be null if no element of the requested name exists.
"""
pass
def GetUIParentCore(self, *args):
"""
GetUIParentCore(self: FrameworkElement) -> DependencyObject
Returns an alternative logical parent for this element if there is no visual parent.
Returns: Returns something other than null whenever a WPF framework-level implementation of this method
has a non-visual parent connection.
"""
pass
def GetVisualChild(self, *args):
"""
GetVisualChild(self: FrameworkElement,index: int) -> Visual
Overrides System.Windows.Media.Visual.GetVisualChild(System.Int32),and returns a child at the
specified index from a collection of child elements.
index: The zero-based index of the requested child element in the collection.
Returns: The requested child element. This should not return null; if the provided index is out of range,
an exception is thrown.
"""
pass
def HitTestCore(self, *args):
"""
HitTestCore(self: UIElement,hitTestParameters: GeometryHitTestParameters) -> GeometryHitTestResult
Implements
System.Windows.Media.Visual.HitTestCore(System.Windows.Media.GeometryHitTestParameters) to
supply base element hit testing behavior (returning System.Windows.Media.GeometryHitTestResult).
hitTestParameters: Describes the hit test to perform,including the initial hit point.
Returns: Results of the test,including the evaluated geometry.
HitTestCore(self: UIElement,hitTestParameters: PointHitTestParameters) -> HitTestResult
Implements System.Windows.Media.Visual.HitTestCore(System.Windows.Media.PointHitTestParameters)
to supply base element hit testing behavior (returning System.Windows.Media.HitTestResult).
hitTestParameters: Describes the hit test to perform,including the initial hit point.
Returns: Results of the test,including the evaluated point.
"""
pass
def MeasureCore(self, *args):
"""
MeasureCore(self: FrameworkElement,availableSize: Size) -> Size
Implements basic measure-pass layout system behavior for System.Windows.FrameworkElement.
availableSize: The available size that the parent element can give to the child elements.
Returns: The desired size of this element in layout.
"""
pass
def MeasureOverride(self, *args):
"""
MeasureOverride(self: Control,constraint: Size) -> Size
Called to remeasure a control.
constraint: The maximum size that the method can return.
Returns: The size of the control,up to the maximum specified by constraint.
"""
pass
def OnAccessKey(self, *args):
"""
OnAccessKey(self: UIElement,e: AccessKeyEventArgs)
Provides class handling for when an access key that is meaningful for this element is invoked.
e: The event data to the access key event. The event data reports which key was invoked,and
indicate whether the System.Windows.Input.AccessKeyManager object that controls the sending of
these events also sent this access key invocation to other elements.
"""
pass
def OnApplyTemplate(self):
"""
OnApplyTemplate(self: ScrollBar)
Creates the visual tree for the System.Windows.Controls.Primitives.ScrollBar.
"""
pass
def OnChildDesiredSizeChanged(self, *args):
"""
OnChildDesiredSizeChanged(self: UIElement,child: UIElement)
Supports layout behavior when a child element is resized.
child: The child element that is being resized.
"""
pass
def OnContextMenuClosing(self, *args):
"""
OnContextMenuClosing(self: ScrollBar,e: ContextMenuEventArgs)
Provides class handling for the System.Windows.FrameworkElement.ContextMenuClosing event that
occurs when the System.Windows.Controls.ContextMenu for a
System.Windows.Controls.Primitives.ScrollBar closes.
e: The event data.
"""
pass
def OnContextMenuOpening(self, *args):
"""
OnContextMenuOpening(self: ScrollBar,e: ContextMenuEventArgs)
Provides class handling for the System.Windows.FrameworkElement.ContextMenuOpening event that
occurs when the System.Windows.Controls.ContextMenu for a
System.Windows.Controls.Primitives.ScrollBar opens.
e: The event data.
"""
pass
def OnCreateAutomationPeer(self, *args):
"""
OnCreateAutomationPeer(self: ScrollBar) -> AutomationPeer
Creates an System.Windows.Automation.Peers.AutomationPeer for this
System.Windows.Controls.Primitives.ScrollBar control.
Returns: A System.Windows.Automation.Peers.ScrollBarAutomationPeer for the
System.Windows.Controls.Primitives.ScrollBar control.
"""
pass
def OnDpiChanged(self, *args):
""" OnDpiChanged(self: Visual,oldDpi: DpiScale,newDpi: DpiScale) """
pass
def OnDragEnter(self, *args):
"""
OnDragEnter(self: UIElement,e: DragEventArgs)
Invoked when an unhandled System.Windows.DragDrop.DragEnter attached event reaches an element in
its route that is derived from this class. Implement this method to add class handling for this
event.
e: The System.Windows.DragEventArgs that contains the event data.
"""
pass
def OnDragLeave(self, *args):
"""
OnDragLeave(self: UIElement,e: DragEventArgs)
Invoked when an unhandled System.Windows.DragDrop.DragLeave attached event reaches an element in
its route that is derived from this class. Implement this method to add class handling for this
event.
e: The System.Windows.DragEventArgs that contains the event data.
"""
pass
def OnDragOver(self, *args):
"""
OnDragOver(self: UIElement,e: DragEventArgs)
Invoked when an unhandled System.Windows.DragDrop.DragOver attached event reaches an element in
its route that is derived from this class. Implement this method to add class handling for this
event.
e: The System.Windows.DragEventArgs that contains the event data.
"""
pass
def OnDrop(self, *args):
"""
OnDrop(self: UIElement,e: DragEventArgs)
Invoked when an unhandled System.Windows.DragDrop.DragEnter attached event reaches an element in
its route that is derived from this class. Implement this method to add class handling for this
event.
e: The System.Windows.DragEventArgs that contains the event data.
"""
pass
def OnGiveFeedback(self, *args):
"""
OnGiveFeedback(self: UIElement,e: GiveFeedbackEventArgs)
Invoked when an unhandled System.Windows.DragDrop.GiveFeedback attached event reaches an element
in its route that is derived from this class. Implement this method to add class handling for
this event.
e: The System.Windows.GiveFeedbackEventArgs that contains the event data.
"""
pass
def OnGotFocus(self, *args):
"""
OnGotFocus(self: FrameworkElement,e: RoutedEventArgs)
Invoked whenever an unhandled System.Windows.UIElement.GotFocus event reaches this element in
its route.
e: The System.Windows.RoutedEventArgs that contains the event data.
"""
pass
def OnGotKeyboardFocus(self, *args):
"""
OnGotKeyboardFocus(self: UIElement,e: KeyboardFocusChangedEventArgs)
Invoked when an unhandled System.Windows.Input.Keyboard.GotKeyboardFocus attached event reaches
an element in its route that is derived from this class. Implement this method to add class
handling for this event.
e: The System.Windows.Input.KeyboardFocusChangedEventArgs that contains the event data.
"""
pass
def OnGotMouseCapture(self, *args):
"""
OnGotMouseCapture(self: UIElement,e: MouseEventArgs)
Invoked when an unhandled System.Windows.Input.Mouse.GotMouseCapture attached event reaches an
element in its route that is derived from this class. Implement this method to add class
handling for this event.
e: The System.Windows.Input.MouseEventArgs that contains the event data.
"""
pass
def OnGotStylusCapture(self, *args):
"""
OnGotStylusCapture(self: UIElement,e: StylusEventArgs)
Invoked when an unhandled System.Windows.Input.Stylus.GotStylusCapture attached event reaches an
element in its route that is derived from this class. Implement this method to add class
handling for this event.
e: The System.Windows.Input.StylusEventArgs that contains the event data.
"""
pass
def OnGotTouchCapture(self, *args):
"""
OnGotTouchCapture(self: UIElement,e: TouchEventArgs)
Provides class handling for the System.Windows.UIElement.GotTouchCapture routed event that
occurs when a touch is captured to this element.
e: A System.Windows.Input.TouchEventArgs that contains the event data.
"""
pass
def OnInitialized(self, *args):
"""
OnInitialized(self: FrameworkElement,e: EventArgs)
Raises the System.Windows.FrameworkElement.Initialized event. This method is invoked whenever
System.Windows.FrameworkElement.IsInitialized is set to true internally.
e: The System.Windows.RoutedEventArgs that contains the event data.
"""
pass
def OnIsKeyboardFocusedChanged(self, *args):
"""
OnIsKeyboardFocusedChanged(self: UIElement,e: DependencyPropertyChangedEventArgs)
Invoked when an unhandled System.Windows.UIElement.IsKeyboardFocusedChanged event is raised on
this element. Implement this method to add class handling for this event.
e: The System.Windows.DependencyPropertyChangedEventArgs that contains the event data.
"""
pass
def OnIsKeyboardFocusWithinChanged(self, *args):
"""
OnIsKeyboardFocusWithinChanged(self: UIElement,e: DependencyPropertyChangedEventArgs)
Invoked just before the System.Windows.UIElement.IsKeyboardFocusWithinChanged event is raised by
this element. Implement this method to add class handling for this event.
e: A System.Windows.DependencyPropertyChangedEventArgs that contains the event data.
"""
pass
def OnIsMouseCapturedChanged(self, *args):
"""
OnIsMouseCapturedChanged(self: UIElement,e: DependencyPropertyChangedEventArgs)
Invoked when an unhandled System.Windows.UIElement.IsMouseCapturedChanged event is raised on
this element. Implement this method to add class handling for this event.
e: The System.Windows.DependencyPropertyChangedEventArgs that contains the event data.
"""
pass
def OnIsMouseCaptureWithinChanged(self, *args):
"""
OnIsMouseCaptureWithinChanged(self: UIElement,e: DependencyPropertyChangedEventArgs)
Invoked when an unhandled System.Windows.UIElement.IsMouseCaptureWithinChanged event is raised
on this element. Implement this method to add class handling for this event.
e: A System.Windows.DependencyPropertyChangedEventArgs that contains the event data.
"""
pass
def OnIsMouseDirectlyOverChanged(self, *args):
"""
OnIsMouseDirectlyOverChanged(self: UIElement,e: DependencyPropertyChangedEventArgs)
Invoked when an unhandled System.Windows.UIElement.IsMouseDirectlyOverChanged event is raised on
this element. Implement this method to add class handling for this event.
e: The System.Windows.DependencyPropertyChangedEventArgs that contains the event data.
"""
pass
def OnIsStylusCapturedChanged(self, *args):
"""
OnIsStylusCapturedChanged(self: UIElement,e: DependencyPropertyChangedEventArgs)
Invoked when an unhandled System.Windows.UIElement.IsStylusCapturedChanged event is raised on
this element. Implement this method to add class handling for this event.
e: A System.Windows.DependencyPropertyChangedEventArgs that contains the event data.
"""
pass
def OnIsStylusCaptureWithinChanged(self, *args):
"""
OnIsStylusCaptureWithinChanged(self: UIElement,e: DependencyPropertyChangedEventArgs)
Invoked when an unhandled System.Windows.UIElement.IsStylusCaptureWithinChanged event is raised
on this element. Implement this method to add class handling for this event.
e: The System.Windows.DependencyPropertyChangedEventArgs that contains the event data.
"""
pass
def OnIsStylusDirectlyOverChanged(self, *args):
"""
OnIsStylusDirectlyOverChanged(self: UIElement,e: DependencyPropertyChangedEventArgs)
Invoked when an unhandled System.Windows.UIElement.IsStylusDirectlyOverChanged event is raised
on this element. Implement this method to add class handling for this event.
e: The System.Windows.DependencyPropertyChangedEventArgs that contains the event data.
"""
pass
def OnKeyDown(self, *args):
"""
OnKeyDown(self: UIElement,e: KeyEventArgs)
Invoked when an unhandled System.Windows.Input.Keyboard.KeyDown attached event reaches an
element in its route that is derived from this class. Implement this method to add class
handling for this event.
e: The System.Windows.Input.KeyEventArgs that contains the event data.
"""
pass
def OnKeyUp(self, *args):
"""
OnKeyUp(self: UIElement,e: KeyEventArgs)
Invoked when an unhandled System.Windows.Input.Keyboard.KeyUp attached event reaches an element
in its route that is derived from this class. Implement this method to add class handling for
this event.
e: The System.Windows.Input.KeyEventArgs that contains the event data.
"""
pass
def OnLostFocus(self, *args):
"""
OnLostFocus(self: UIElement,e: RoutedEventArgs)
Raises the System.Windows.UIElement.LostFocus routed event by using the event data that is
provided.
e: A System.Windows.RoutedEventArgs that contains event data. This event data must contain the
identifier for the System.Windows.UIElement.LostFocus event.
"""
pass
def OnLostKeyboardFocus(self, *args):
"""
OnLostKeyboardFocus(self: UIElement,e: KeyboardFocusChangedEventArgs)
Invoked when an unhandled System.Windows.Input.Keyboard.LostKeyboardFocus attached event reaches
an element in its route that is derived from this class. Implement this method to add class
handling for this event.
e: The System.Windows.Input.KeyboardFocusChangedEventArgs that contains event data.
"""
pass
def OnLostMouseCapture(self, *args):
"""
OnLostMouseCapture(self: UIElement,e: MouseEventArgs)
Invoked when an unhandled System.Windows.Input.Mouse.LostMouseCapture attached event reaches an
element in its route that is derived from this class. Implement this method to add class
handling for this event.
e: The System.Windows.Input.MouseEventArgs that contains event data.
"""
pass
def OnLostStylusCapture(self, *args):
"""
OnLostStylusCapture(self: UIElement,e: StylusEventArgs)
Invoked when an unhandled System.Windows.Input.Stylus.LostStylusCapture attached event reaches
an element in its route that is derived from this class. Implement this method to add class
handling for this event.
e: The System.Windows.Input.StylusEventArgs that contains event data.
"""
pass
def OnLostTouchCapture(self, *args):
"""
OnLostTouchCapture(self: UIElement,e: TouchEventArgs)
Provides class handling for the System.Windows.UIElement.LostTouchCapture routed event that
occurs when this element loses a touch capture.
e: A System.Windows.Input.TouchEventArgs that contains the event data.
"""
pass
def OnManipulationBoundaryFeedback(self, *args):
"""
OnManipulationBoundaryFeedback(self: UIElement,e: ManipulationBoundaryFeedbackEventArgs)
Called when the System.Windows.UIElement.ManipulationBoundaryFeedback event occurs.
e: The data for the event.
"""
pass
def OnManipulationCompleted(self, *args):
"""
OnManipulationCompleted(self: UIElement,e: ManipulationCompletedEventArgs)
Called when the System.Windows.UIElement.ManipulationCompleted event occurs.
e: The data for the event.
"""
pass
def OnManipulationDelta(self, *args):
"""
OnManipulationDelta(self: UIElement,e: ManipulationDeltaEventArgs)
Called when the System.Windows.UIElement.ManipulationDelta event occurs.
e: The data for the event.
"""
pass
def OnManipulationInertiaStarting(self, *args):
"""
OnManipulationInertiaStarting(self: UIElement,e: ManipulationInertiaStartingEventArgs)
Called when the System.Windows.UIElement.ManipulationInertiaStarting event occurs.
e: The data for the event.
"""
pass
def OnManipulationStarted(self, *args):
"""
OnManipulationStarted(self: UIElement,e: ManipulationStartedEventArgs)
Called when the System.Windows.UIElement.ManipulationStarted event occurs.
e: The data for the event.
"""
pass
def OnManipulationStarting(self, *args):
"""
OnManipulationStarting(self: UIElement,e: ManipulationStartingEventArgs)
Provides class handling for the System.Windows.UIElement.ManipulationStarting routed event that
occurs when the manipulation processor is first created.
e: A System.Windows.Input.ManipulationStartingEventArgs that contains the event data.
"""
pass
def OnMaximumChanged(self, *args):
"""
OnMaximumChanged(self: RangeBase,oldMaximum: float,newMaximum: float)
Called when the System.Windows.Controls.Primitives.RangeBase.Maximum property changes.
oldMaximum: Old value of the System.Windows.Controls.Primitives.RangeBase.Maximum property.
newMaximum: New value of the System.Windows.Controls.Primitives.RangeBase.Maximum property.
"""
pass
def OnMinimumChanged(self, *args):
"""
OnMinimumChanged(self: RangeBase,oldMinimum: float,newMinimum: float)
Called when the System.Windows.Controls.Primitives.RangeBase.Minimum property changes.
oldMinimum: Old value of the System.Windows.Controls.Primitives.RangeBase.Minimum property.
newMinimum: New value of the System.Windows.Controls.Primitives.RangeBase.Minimum property.
"""
pass
def OnMouseDoubleClick(self, *args):
"""
OnMouseDoubleClick(self: Control,e: MouseButtonEventArgs)
Raises the System.Windows.Controls.Control.MouseDoubleClick routed event.
e: The event data.
"""
pass
def OnMouseDown(self, *args):
"""
OnMouseDown(self: UIElement,e: MouseButtonEventArgs)
Invoked when an unhandled System.Windows.Input.Mouse.MouseDown attached event reaches an element
in its route that is derived from this class. Implement this method to add class handling for
this event.
e: The System.Windows.Input.MouseButtonEventArgs that contains the event data. This event data
reports details about the mouse button that was pressed and the handled state.
"""
pass
def OnMouseEnter(self, *args):
"""
OnMouseEnter(self: UIElement,e: MouseEventArgs)
Invoked when an unhandled System.Windows.Input.Mouse.MouseEnter attached event is raised on this
element. Implement this method to add class handling for this event.
e: The System.Windows.Input.MouseEventArgs that contains the event data.
"""
pass
def OnMouseLeave(self, *args):
"""
OnMouseLeave(self: UIElement,e: MouseEventArgs)
Invoked when an unhandled System.Windows.Input.Mouse.MouseLeave attached event is raised on this
element. Implement this method to add class handling for this event.
e: The System.Windows.Input.MouseEventArgs that contains the event data.
"""
pass
def OnMouseLeftButtonDown(self, *args):
"""
OnMouseLeftButtonDown(self: UIElement,e: MouseButtonEventArgs)
Invoked when an unhandled System.Windows.UIElement.MouseLeftButtonDown routed event is raised on
this element. Implement this method to add class handling for this event.
e: The System.Windows.Input.MouseButtonEventArgs that contains the event data. The event data
reports that the left mouse button was pressed.
"""
pass
def OnMouseLeftButtonUp(self, *args):
"""
OnMouseLeftButtonUp(self: UIElement,e: MouseButtonEventArgs)
Invoked when an unhandled System.Windows.UIElement.MouseLeftButtonUp routed event reaches an
element in its route that is derived from this class. Implement this method to add class
handling for this event.
e: The System.Windows.Input.MouseButtonEventArgs that contains the event data. The event data
reports that the left mouse button was released.
"""
pass
def OnMouseMove(self, *args):
"""
OnMouseMove(self: UIElement,e: MouseEventArgs)
Invoked when an unhandled System.Windows.Input.Mouse.MouseMove attached event reaches an element
in its route that is derived from this class. Implement this method to add class handling for
this event.
e: The System.Windows.Input.MouseEventArgs that contains the event data.
"""
pass
def OnMouseRightButtonDown(self, *args):
"""
OnMouseRightButtonDown(self: UIElement,e: MouseButtonEventArgs)
Invoked when an unhandled System.Windows.UIElement.MouseRightButtonDown routed event reaches an
element in its route that is derived from this class. Implement this method to add class
handling for this event.
e: The System.Windows.Input.MouseButtonEventArgs that contains the event data. The event data
reports that the right mouse button was pressed.
"""
pass
def OnMouseRightButtonUp(self, *args):
"""
OnMouseRightButtonUp(self: UIElement,e: MouseButtonEventArgs)
Invoked when an unhandled System.Windows.UIElement.MouseRightButtonUp routed event reaches an
element in its route that is derived from this class. Implement this method to add class
handling for this event.
e: The System.Windows.Input.MouseButtonEventArgs that contains the event data. The event data
reports that the right mouse button was released.
"""
pass
def OnMouseUp(self, *args):
"""
OnMouseUp(self: UIElement,e: MouseButtonEventArgs)
Invoked when an unhandled System.Windows.Input.Mouse.MouseUp routed event reaches an element in
its route that is derived from this class. Implement this method to add class handling for this
event.
e: The System.Windows.Input.MouseButtonEventArgs that contains the event data. The event data
reports that the mouse button was released.
"""
pass
def OnMouseWheel(self, *args):
"""
OnMouseWheel(self: UIElement,e: MouseWheelEventArgs)
Invoked when an unhandled System.Windows.Input.Mouse.MouseWheel attached event reaches an
element in its route that is derived from this class. Implement this method to add class
handling for this event.
e: The System.Windows.Input.MouseWheelEventArgs that contains the event data.
"""
pass
def OnPreviewDragEnter(self, *args):
"""
OnPreviewDragEnter(self: UIElement,e: DragEventArgs)
Invoked when an unhandled System.Windows.DragDrop.PreviewDragEnter attached event reaches an
element in its route that is derived from this class. Implement this method to add class
handling for this event.
e: The System.Windows.DragEventArgs that contains the event data.
"""
pass
def OnPreviewDragLeave(self, *args):
"""
OnPreviewDragLeave(self: UIElement,e: DragEventArgs)
Invoked when an unhandled System.Windows.DragDrop.PreviewDragLeave attached event reaches an
element in its route that is derived from this class. Implement this method to add class
handling for this event.
e: The System.Windows.DragEventArgs that contains the event data.
"""
pass
def OnPreviewDragOver(self, *args):
"""
OnPreviewDragOver(self: UIElement,e: DragEventArgs)
Invoked when an unhandled System.Windows.DragDrop.PreviewDragOver attached event reaches an
element in its route that is derived from this class. Implement this method to add class
handling for this event.
e: The System.Windows.DragEventArgs that contains the event data.
"""
pass
def OnPreviewDrop(self, *args):
"""
OnPreviewDrop(self: UIElement,e: DragEventArgs)
Invoked when an unhandled System.Windows.DragDrop.PreviewDrop attached event reaches an element
in its route that is derived from this class. Implement this method to add class handling for
this event.
e: The System.Windows.DragEventArgs that contains the event data.
"""
pass
def OnPreviewGiveFeedback(self, *args):
"""
OnPreviewGiveFeedback(self: UIElement,e: GiveFeedbackEventArgs)
Invoked when an unhandled System.Windows.DragDrop.PreviewGiveFeedback attached event reaches an
element in its route that is derived from this class. Implement this method to add class
handling for this event.
e: The System.Windows.GiveFeedbackEventArgs that contains the event data.
"""
pass
def OnPreviewGotKeyboardFocus(self, *args):
"""
OnPreviewGotKeyboardFocus(self: UIElement,e: KeyboardFocusChangedEventArgs)
Invoked when an unhandled System.Windows.Input.Keyboard.PreviewGotKeyboardFocus attached event
reaches an element in its route that is derived from this class. Implement this method to add
class handling for this event.
e: The System.Windows.Input.KeyboardFocusChangedEventArgs that contains the event data.
"""
pass
def OnPreviewKeyDown(self, *args):
"""
OnPreviewKeyDown(self: UIElement,e: KeyEventArgs)
Invoked when an unhandled System.Windows.Input.Keyboard.PreviewKeyDown attached event reaches an
element in its route that is derived from this class. Implement this method to add class
handling for this event.
e: The System.Windows.Input.KeyEventArgs that contains the event data.
"""
pass
def OnPreviewKeyUp(self, *args):
"""
OnPreviewKeyUp(self: UIElement,e: KeyEventArgs)
Invoked when an unhandled System.Windows.Input.Keyboard.PreviewKeyUp attached event reaches an
element in its route that is derived from this class. Implement this method to add class
handling for this event.
e: The System.Windows.Input.KeyEventArgs that contains the event data.
"""
pass
def OnPreviewLostKeyboardFocus(self, *args):
"""
OnPreviewLostKeyboardFocus(self: UIElement,e: KeyboardFocusChangedEventArgs)
Invoked when an unhandled System.Windows.Input.Keyboard.PreviewKeyDown attached event reaches an
element in its route that is derived from this class. Implement this method to add class
handling for this event.
e: The System.Windows.Input.KeyboardFocusChangedEventArgs that contains the event data.
"""
pass
def OnPreviewMouseDoubleClick(self, *args):
"""
OnPreviewMouseDoubleClick(self: Control,e: MouseButtonEventArgs)
Raises the System.Windows.Controls.Control.PreviewMouseDoubleClick routed event.
e: The event data.
"""
pass
def OnPreviewMouseDown(self, *args):
"""
OnPreviewMouseDown(self: UIElement,e: MouseButtonEventArgs)
Invoked when an unhandled System.Windows.Input.Mouse.PreviewMouseDown attached routed event
reaches an element in its route that is derived from this class. Implement this method to add
class handling for this event.
e: The System.Windows.Input.MouseButtonEventArgs that contains the event data. The event data
reports that one or more mouse buttons were pressed.
"""
pass
def OnPreviewMouseLeftButtonDown(self, *args):
"""
OnPreviewMouseLeftButtonDown(self: ScrollBar,e: MouseButtonEventArgs)
Provides class handling for the System.Windows.UIElement.PreviewMouseLeftButtonDown event.
e: The event data.
"""
pass
def OnPreviewMouseLeftButtonUp(self, *args):
"""
OnPreviewMouseLeftButtonUp(self: UIElement,e: MouseButtonEventArgs)
Invoked when an unhandled System.Windows.UIElement.PreviewMouseLeftButtonUp routed event reaches
an element in its route that is derived from this class. Implement this method to add class
handling for this event.
e: The System.Windows.Input.MouseButtonEventArgs that contains the event data. The event data
reports that the left mouse button was released.
"""
pass
def OnPreviewMouseMove(self, *args):
"""
OnPreviewMouseMove(self: UIElement,e: MouseEventArgs)
Invoked when an unhandled System.Windows.Input.Mouse.PreviewMouseMove attached event reaches an
element in its route that is derived from this class. Implement this method to add class
handling for this event.
e: The System.Windows.Input.MouseEventArgs that contains the event data.
"""
pass
def OnPreviewMouseRightButtonDown(self, *args):
"""
OnPreviewMouseRightButtonDown(self: UIElement,e: MouseButtonEventArgs)
Invoked when an unhandled System.Windows.UIElement.PreviewMouseRightButtonDown routed event
reaches an element in its route that is derived from this class. Implement this method to add
class handling for this event.
e: The System.Windows.Input.MouseButtonEventArgs that contains the event data. The event data
reports that the right mouse button was pressed.
"""
pass
def OnPreviewMouseRightButtonUp(self, *args):
"""
OnPreviewMouseRightButtonUp(self: ScrollBar,e: MouseButtonEventArgs)
Provides class handling for the System.Windows.UIElement.PreviewMouseRightButtonUp event.
e: The event data.
"""
pass
def OnPreviewMouseUp(self, *args):
"""
OnPreviewMouseUp(self: UIElement,e: MouseButtonEventArgs)
Invoked when an unhandled System.Windows.Input.Mouse.PreviewMouseUp attached event reaches an
element in its route that is derived from this class. Implement this method to add class
handling for this event.
e: The System.Windows.Input.MouseButtonEventArgs that contains the event data. The event data
reports that one or more mouse buttons were released.
"""
pass
def OnPreviewMouseWheel(self, *args):
"""
OnPreviewMouseWheel(self: UIElement,e: MouseWheelEventArgs)
Invoked when an unhandled System.Windows.Input.Mouse.PreviewMouseWheel attached event reaches an
element in its route that is derived from this class. Implement this method to add class
handling for this event.
e: The System.Windows.Input.MouseWheelEventArgs that contains the event data.
"""
pass
def OnPreviewQueryContinueDrag(self, *args):
"""
OnPreviewQueryContinueDrag(self: UIElement,e: QueryContinueDragEventArgs)
Invoked when an unhandled System.Windows.DragDrop.PreviewQueryContinueDrag attached event
reaches an element in its route that is derived from this class. Implement this method to add
class handling for this event.
e: The System.Windows.QueryContinueDragEventArgs that contains the event data.
"""
pass
def OnPreviewStylusButtonDown(self, *args):
"""
OnPreviewStylusButtonDown(self: UIElement,e: StylusButtonEventArgs)
Invoked when an unhandled System.Windows.Input.Stylus.PreviewStylusButtonDown attached event
reaches an element in its route that is derived from this class. Implement this method to add
class handling for this event.
e: The System.Windows.Input.StylusButtonEventArgs that contains the event data.
"""
pass
def OnPreviewStylusButtonUp(self, *args):
"""
OnPreviewStylusButtonUp(self: UIElement,e: StylusButtonEventArgs)
Invoked when an unhandled System.Windows.Input.Stylus.PreviewStylusButtonUp attached event
reaches an element in its route that is derived from this class. Implement this method to add
class handling for this event.
e: The System.Windows.Input.StylusButtonEventArgs that contains the event data.
"""
pass
def OnPreviewStylusDown(self, *args):
"""
OnPreviewStylusDown(self: UIElement,e: StylusDownEventArgs)
Invoked when an unhandled System.Windows.Input.Stylus.PreviewStylusDown attached event reaches
an element in its route that is derived from this class. Implement this method to add class
handling for this event.
e: The System.Windows.Input.StylusDownEventArgs that contains the event data.
"""
pass
def OnPreviewStylusInAirMove(self, *args):
"""
OnPreviewStylusInAirMove(self: UIElement,e: StylusEventArgs)
Invoked when an unhandled System.Windows.Input.Stylus.PreviewStylusInAirMove attached event
reaches an element in its route that is derived from this class. Implement this method to add
class handling for this event.
e: The System.Windows.Input.StylusEventArgs that contains the event data.
"""
pass
def OnPreviewStylusInRange(self, *args):
"""
OnPreviewStylusInRange(self: UIElement,e: StylusEventArgs)
Invoked when an unhandled System.Windows.Input.Stylus.PreviewStylusInRange attached event
reaches an element in its route that is derived from this class. Implement this method to add
class handling for this event.
e: The System.Windows.Input.StylusEventArgs that contains the event data.
"""
pass
def OnPreviewStylusMove(self, *args):
"""
OnPreviewStylusMove(self: UIElement,e: StylusEventArgs)
Invoked when an unhandled System.Windows.Input.Stylus.PreviewStylusMove attached event reaches
an element in its route that is derived from this class. Implement this method to add class
handling for this event.
e: The System.Windows.Input.StylusEventArgs that contains the event data.
"""
pass
def OnPreviewStylusOutOfRange(self, *args):
"""
OnPreviewStylusOutOfRange(self: UIElement,e: StylusEventArgs)
Invoked when an unhandled System.Windows.Input.Stylus.PreviewStylusOutOfRange attached event
reaches an element in its route that is derived from this class. Implement this method to add
class handling for this event.
e: The System.Windows.Input.StylusEventArgs that contains the event data.
"""
pass
def OnPreviewStylusSystemGesture(self, *args):
"""
OnPreviewStylusSystemGesture(self: UIElement,e: StylusSystemGestureEventArgs)
Invoked when an unhandled System.Windows.Input.Stylus.PreviewStylusSystemGesture attached event
reaches an element in its route that is derived from this class. Implement this method to add
class handling for this event.
e: The System.Windows.Input.StylusSystemGestureEventArgs that contains the event data.
"""
pass
def OnPreviewStylusUp(self, *args):
"""
OnPreviewStylusUp(self: UIElement,e: StylusEventArgs)
Invoked when an unhandled System.Windows.Input.Stylus.PreviewStylusUp attached event reaches an
element in its route that is derived from this class. Implement this method to add class
handling for this event.
e: The System.Windows.Input.StylusEventArgs that contains the event data.
"""
pass
def OnPreviewTextInput(self, *args):
"""
OnPreviewTextInput(self: UIElement,e: TextCompositionEventArgs)
Invoked when an unhandled System.Windows.Input.TextCompositionManager.PreviewTextInput attached
event reaches an element in its route that is derived from this class. Implement this method to
add class handling for this event.
e: The System.Windows.Input.TextCompositionEventArgs that contains the event data.
"""
pass
def OnPreviewTouchDown(self, *args):
"""
OnPreviewTouchDown(self: UIElement,e: TouchEventArgs)
Provides class handling for the System.Windows.UIElement.PreviewTouchDown routed event that
occurs when a touch presses this element.
e: A System.Windows.Input.TouchEventArgs that contains the event data.
"""
pass
def OnPreviewTouchMove(self, *args):
"""
OnPreviewTouchMove(self: UIElement,e: TouchEventArgs)
Provides class handling for the System.Windows.UIElement.PreviewTouchMove routed event that
occurs when a touch moves while inside this element.
e: A System.Windows.Input.TouchEventArgs that contains the event data.
"""
pass
def OnPreviewTouchUp(self, *args):
"""
OnPreviewTouchUp(self: UIElement,e: TouchEventArgs)
Provides class handling for the System.Windows.UIElement.PreviewTouchUp routed event that occurs
when a touch is released inside this element.
e: A System.Windows.Input.TouchEventArgs that contains the event data.
"""
pass
def OnPropertyChanged(self, *args):
"""
OnPropertyChanged(self: FrameworkElement,e: DependencyPropertyChangedEventArgs)
Invoked whenever the effective value of any dependency property on this
System.Windows.FrameworkElement has been updated. The specific dependency property that changed
is reported in the arguments parameter. Overrides
System.Windows.DependencyObject.OnPropertyChanged(System.Windows.DependencyPropertyChangedEventAr
gs).
e: The event data that describes the property that changed,as well as old and new values.
"""
pass
def OnQueryContinueDrag(self, *args):
"""
OnQueryContinueDrag(self: UIElement,e: QueryContinueDragEventArgs)
Invoked when an unhandled System.Windows.DragDrop.QueryContinueDrag attached event reaches an
element in its route that is derived from this class. Implement this method to add class
handling for this event.
e: The System.Windows.QueryContinueDragEventArgs that contains the event data.
"""
pass
def OnQueryCursor(self, *args):
"""
OnQueryCursor(self: UIElement,e: QueryCursorEventArgs)
Invoked when an unhandled System.Windows.Input.Mouse.QueryCursor attached event reaches an
element in its route that is derived from this class. Implement this method to add class
handling for this event.
e: The System.Windows.Input.QueryCursorEventArgs that contains the event data.
"""
pass
def OnRender(self, *args):
"""
OnRender(self: UIElement,drawingContext: DrawingContext)
When overridden in a derived class,participates in rendering operations that are directed by
the layout system. The rendering instructions for this element are not used directly when this
method is invoked,and are instead preserved for later asynchronous use by layout and drawing.
drawingContext: The drawing instructions for a specific element. This context is provided to the layout system.
"""
pass
def OnRenderSizeChanged(self, *args):
"""
OnRenderSizeChanged(self: FrameworkElement,sizeInfo: SizeChangedInfo)
Raises the System.Windows.FrameworkElement.SizeChanged event,using the specified information as
part of the eventual event data.
sizeInfo: Details of the old and new size involved in the change.
"""
pass
def OnStyleChanged(self, *args):
"""
OnStyleChanged(self: FrameworkElement,oldStyle: Style,newStyle: Style)
Invoked when the style in use on this element changes,which will invalidate the layout.
oldStyle: The old style.
newStyle: The new style.
"""
pass
def OnStylusButtonDown(self, *args):
"""
OnStylusButtonDown(self: UIElement,e: StylusButtonEventArgs)
Invoked when an unhandled System.Windows.Input.Stylus.StylusButtonDown attached event reaches an
element in its route that is derived from this class. Implement this method to add class
handling for this event.
e: The System.Windows.Input.StylusButtonEventArgs that contains the event data.
"""
pass
def OnStylusButtonUp(self, *args):
"""
OnStylusButtonUp(self: UIElement,e: StylusButtonEventArgs)
Invoked when an unhandled System.Windows.Input.Stylus.StylusButtonUp attached event reaches an
element in its route that is derived from this class. Implement this method to add class
handling for this event.
e: The System.Windows.Input.StylusButtonEventArgs that contains the event data.
"""
pass
def OnStylusDown(self, *args):
"""
OnStylusDown(self: UIElement,e: StylusDownEventArgs)
Invoked when an unhandled System.Windows.Input.Stylus.StylusDown attached event reaches an
element in its route that is derived from this class. Implement this method to add class
handling for this event.
e: The System.Windows.Input.StylusDownEventArgs that contains the event data.
"""
pass
def OnStylusEnter(self, *args):
"""
OnStylusEnter(self: UIElement,e: StylusEventArgs)
Invoked when an unhandled System.Windows.Input.Stylus.StylusEnter attached event is raised by
this element. Implement this method to add class handling for this event.
e: The System.Windows.Input.StylusEventArgs that contains the event data.
"""
pass
def OnStylusInAirMove(self, *args):
"""
OnStylusInAirMove(self: UIElement,e: StylusEventArgs)
Invoked when an unhandled System.Windows.Input.Stylus.StylusInAirMove attached event reaches an
element in its route that is derived from this class. Implement this method to add class
handling for this event.
e: The System.Windows.Input.StylusEventArgs that contains the event data.
"""
pass
def OnStylusInRange(self, *args):
"""
OnStylusInRange(self: UIElement,e: StylusEventArgs)
Invoked when an unhandled System.Windows.Input.Stylus.StylusInRange attached event reaches an
element in its route that is derived from this class. Implement this method to add class
handling for this event.
e: The System.Windows.Input.StylusEventArgs that contains the event data.
"""
pass
def OnStylusLeave(self, *args):
"""
OnStylusLeave(self: UIElement,e: StylusEventArgs)
Invoked when an unhandled System.Windows.Input.Stylus.StylusLeave attached event is raised by
this element. Implement this method to add class handling for this event.
e: The System.Windows.Input.StylusEventArgs that contains the event data.
"""
pass
def OnStylusMove(self, *args):
"""
OnStylusMove(self: UIElement,e: StylusEventArgs)
Invoked when an unhandled System.Windows.Input.Stylus.StylusMove attached event reaches an
element in its route that is derived from this class. Implement this method to add class
handling for this event.
e: The System.Windows.Input.StylusEventArgs that contains the event data.
"""
pass
def OnStylusOutOfRange(self, *args):
"""
OnStylusOutOfRange(self: UIElement,e: StylusEventArgs)
Invoked when an unhandled System.Windows.Input.Stylus.StylusOutOfRange attached event reaches an
element in its route that is derived from this class. Implement this method to add class
handling for this event.
e: The System.Windows.Input.StylusEventArgs that contains the event data.
"""
pass
def OnStylusSystemGesture(self, *args):
"""
OnStylusSystemGesture(self: UIElement,e: StylusSystemGestureEventArgs)
Invoked when an unhandled System.Windows.Input.Stylus.StylusSystemGesture attached event reaches
an element in its route that is derived from this class. Implement this method to add class
handling for this event.
e: The System.Windows.Input.StylusSystemGestureEventArgs that contains the event data.
"""
pass
def OnStylusUp(self, *args):
"""
OnStylusUp(self: UIElement,e: StylusEventArgs)
Invoked when an unhandled System.Windows.Input.Stylus.StylusUp attached event reaches an element
in its route that is derived from this class. Implement this method to add class handling for
this event.
e: The System.Windows.Input.StylusEventArgs that contains the event data.
"""
pass
def OnTemplateChanged(self, *args):
"""
OnTemplateChanged(self: Control,oldTemplate: ControlTemplate,newTemplate: ControlTemplate)
Called whenever the control's template changes.
oldTemplate: The old template.
newTemplate: The new template.
"""
pass
def OnTextInput(self, *args):
"""
OnTextInput(self: UIElement,e: TextCompositionEventArgs)
Invoked when an unhandled System.Windows.Input.TextCompositionManager.TextInput attached event
reaches an element in its route that is derived from this class. Implement this method to add
class handling for this event.
e: The System.Windows.Input.TextCompositionEventArgs that contains the event data.
"""
pass
def OnToolTipClosing(self, *args):
"""
OnToolTipClosing(self: FrameworkElement,e: ToolTipEventArgs)
Invoked whenever an unhandled System.Windows.FrameworkElement.ToolTipClosing routed event
reaches this class in its route. Implement this method to add class handling for this event.
e: Provides data about the event.
"""
pass
def OnToolTipOpening(self, *args):
"""
OnToolTipOpening(self: FrameworkElement,e: ToolTipEventArgs)
Invoked whenever the System.Windows.FrameworkElement.ToolTipOpening routed event reaches this
class in its route. Implement this method to add class handling for this event.
e: Provides data about the event.
"""
pass
def OnTouchDown(self, *args):
"""
OnTouchDown(self: UIElement,e: TouchEventArgs)
Provides class handling for the System.Windows.UIElement.TouchDown routed event that occurs when
a touch presses inside this element.
e: A System.Windows.Input.TouchEventArgs that contains the event data.
"""
pass
def OnTouchEnter(self, *args):
"""
OnTouchEnter(self: UIElement,e: TouchEventArgs)
Provides class handling for the System.Windows.UIElement.TouchEnter routed event that occurs
when a touch moves from outside to inside the bounds of this element.
e: A System.Windows.Input.TouchEventArgs that contains the event data.
"""
pass
def OnTouchLeave(self, *args):
"""
OnTouchLeave(self: UIElement,e: TouchEventArgs)
Provides class handling for the System.Windows.UIElement.TouchLeave routed event that occurs
when a touch moves from inside to outside the bounds of this System.Windows.UIElement.
e: A System.Windows.Input.TouchEventArgs that contains the event data.
"""
pass
def OnTouchMove(self, *args):
"""
OnTouchMove(self: UIElement,e: TouchEventArgs)
Provides class handling for the System.Windows.UIElement.TouchMove routed event that occurs when
a touch moves while inside this element.
e: A System.Windows.Input.TouchEventArgs that contains the event data.
"""
pass
def OnTouchUp(self, *args):
"""
OnTouchUp(self: UIElement,e: TouchEventArgs)
Provides class handling for the System.Windows.UIElement.TouchUp routed event that occurs when a
touch is released inside this element.
e: A System.Windows.Input.TouchEventArgs that contains the event data.
"""
pass
def OnValueChanged(self, *args):
"""
OnValueChanged(self: RangeBase,oldValue: float,newValue: float)
Raises the System.Windows.Controls.Primitives.RangeBase.ValueChanged routed event.
oldValue: Old value of the System.Windows.Controls.Primitives.RangeBase.Value property
newValue: New value of the System.Windows.Controls.Primitives.RangeBase.Value property
"""
pass
def OnVisualChildrenChanged(self, *args):
"""
OnVisualChildrenChanged(self: Visual,visualAdded: DependencyObject,visualRemoved: DependencyObject)
Called when the System.Windows.Media.VisualCollection of the visual object is modified.
visualAdded: The System.Windows.Media.Visual that was added to the collection
visualRemoved: The System.Windows.Media.Visual that was removed from the collection
"""
pass
def OnVisualParentChanged(self, *args):
"""
OnVisualParentChanged(self: FrameworkElement,oldParent: DependencyObject)
Invoked when the parent of this element in the visual tree is changed. Overrides
System.Windows.UIElement.OnVisualParentChanged(System.Windows.DependencyObject).
oldParent: The old parent element. May be null to indicate that the element did not have a visual parent
previously.
"""
pass
def ParentLayoutInvalidated(self, *args):
"""
ParentLayoutInvalidated(self: FrameworkElement,child: UIElement)
Supports incremental layout implementations in specialized subclasses of
System.Windows.FrameworkElement.
System.Windows.FrameworkElement.ParentLayoutInvalidated(System.Windows.UIElement) is invoked
when a child element has invalidated a property that is marked in metadata as affecting the
parent's measure or arrange passes during layout.
child: The child element reporting the change.
"""
pass
def RemoveLogicalChild(self, *args):
"""
RemoveLogicalChild(self: FrameworkElement,child: object)
Removes the provided object from this element's logical tree. System.Windows.FrameworkElement
updates the affected logical tree parent pointers to keep in sync with this deletion.
child: The element to remove.
"""
pass
def RemoveVisualChild(self, *args):
"""
RemoveVisualChild(self: Visual,child: Visual)
Removes the parent-child relationship between two visuals.
child: The child visual object to remove from the parent visual.
"""
pass
def ShouldSerializeProperty(self, *args):
"""
ShouldSerializeProperty(self: DependencyObject,dp: DependencyProperty) -> bool
Returns a value that indicates whether serialization processes should serialize the value for
the provided dependency property.
dp: The identifier for the dependency property that should be serialized.
Returns: true if the dependency property that is supplied should be value-serialized; otherwise,false.
"""
pass
    def __init__(self, *args):
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
def __str__(self, *args):
pass
DefaultStyleKey = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets or sets the key to use to reference the style for this control,when theme styles are used or defined.
"""
HandlesScrolling = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets a value that indicates whether a control supports scrolling.
"""
HasEffectiveKeyboardFocus = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
InheritanceBehavior = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets or sets the scope limits for property value inheritance,resource key lookup,and RelativeSource FindAncestor lookup.
"""
IsEnabledCore = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets a value that indicates whether the System.Windows.Controls.Primitives.ScrollBar is enabled.
"""
LogicalChildren = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets an enumerator for logical child elements of this element.
"""
Orientation = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets or sets whether the System.Windows.Controls.Primitives.ScrollBar is displayed horizontally or vertically.
Get: Orientation(self: ScrollBar) -> Orientation
Set: Orientation(self: ScrollBar)=value
"""
StylusPlugIns = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets a collection of all stylus plug-in (customization) objects associated with this element.
"""
Track = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets the System.Windows.Controls.Primitives.Track for a System.Windows.Controls.Primitives.ScrollBar control.
Get: Track(self: ScrollBar) -> Track
"""
ViewportSize = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets or sets the amount of the scrollable content that is currently visible.
Get: ViewportSize(self: ScrollBar) -> float
Set: ViewportSize(self: ScrollBar)=value
"""
VisualBitmapEffect = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets or sets the System.Windows.Media.Effects.BitmapEffect value for the System.Windows.Media.Visual.
"""
VisualBitmapEffectInput = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets or sets the System.Windows.Media.Effects.BitmapEffectInput value for the System.Windows.Media.Visual.
"""
VisualBitmapScalingMode = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets or sets the System.Windows.Media.BitmapScalingMode for the System.Windows.Media.Visual.
"""
VisualCacheMode = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets or sets a cached representation of the System.Windows.Media.Visual.
"""
VisualChildrenCount = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets the number of visual child elements within this element.
"""
VisualClearTypeHint = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets or sets the System.Windows.Media.ClearTypeHint that determines how ClearType is rendered in the System.Windows.Media.Visual.
"""
VisualClip = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets or sets the clip region of the System.Windows.Media.Visual as a System.Windows.Media.Geometry value.
"""
VisualEdgeMode = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets or sets the edge mode of the System.Windows.Media.Visual as an System.Windows.Media.EdgeMode value.
"""
VisualEffect = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets or sets the bitmap effect to apply to the System.Windows.Media.Visual.
"""
VisualOffset = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets or sets the offset value of the visual object.
"""
VisualOpacity = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets or sets the opacity of the System.Windows.Media.Visual.
"""
VisualOpacityMask = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets or sets the System.Windows.Media.Brush value that represents the opacity mask of the System.Windows.Media.Visual.
"""
VisualParent = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets the visual tree parent of the visual object.
"""
VisualScrollableAreaClip = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets or sets a clipped scrollable area for the System.Windows.Media.Visual.
"""
VisualTextHintingMode = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets or sets the System.Windows.Media.TextHintingMode of the System.Windows.Media.Visual.
"""
VisualTextRenderingMode = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets or sets the System.Windows.Media.TextRenderingMode of the System.Windows.Media.Visual.
"""
VisualTransform = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets or sets the System.Windows.Media.Transform value for the System.Windows.Media.Visual.
"""
VisualXSnappingGuidelines = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets or sets the x-coordinate (vertical) guideline collection.
"""
VisualYSnappingGuidelines = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets or sets the y-coordinate (horizontal) guideline collection.
"""
DeferScrollToHorizontalOffsetCommand = None
DeferScrollToVerticalOffsetCommand = None
LineDownCommand = None
LineLeftCommand = None
LineRightCommand = None
LineUpCommand = None
OrientationProperty = None
PageDownCommand = None
PageLeftCommand = None
PageRightCommand = None
PageUpCommand = None
Scroll = None
ScrollEvent = None
ScrollHereCommand = None
ScrollToBottomCommand = None
ScrollToEndCommand = None
ScrollToHomeCommand = None
ScrollToHorizontalOffsetCommand = None
ScrollToLeftEndCommand = None
ScrollToRightEndCommand = None
ScrollToTopCommand = None
ScrollToVerticalOffsetCommand = None
ViewportSizeProperty = None
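# Illustrative usage sketch (added; not part of the generated stub). Assuming an
# IronPython session in which the WPF assemblies can be loaded, the members
# documented above could be exercised roughly like this:
#
#   import clr
#   clr.AddReference("PresentationFramework")
#   from System.Windows.Controls import Orientation
#   from System.Windows.Controls.Primitives import ScrollBar
#
#   bar = ScrollBar()
#   bar.Orientation = Orientation.Vertical    # Orientation property described above
#   bar.Minimum, bar.Maximum = 0.0, 100.0     # range members inherited from RangeBase
#   bar.ViewportSize = 10.0                   # portion of the content currently visible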
| 25.553809
| 221
| 0.685374
|
f96acb003b870816c3036cac6aee0cc37e123b78
| 2,119
|
py
|
Python
|
model/pytorch/dffml_model_pytorch/pytorch_accuracy_scorer.py
|
yopknopixx/dffml
|
7f295bb01b235a915c9f8015564b97a708cd5325
|
[
"MIT"
] | null | null | null |
model/pytorch/dffml_model_pytorch/pytorch_accuracy_scorer.py
|
yopknopixx/dffml
|
7f295bb01b235a915c9f8015564b97a708cd5325
|
[
"MIT"
] | null | null | null |
model/pytorch/dffml_model_pytorch/pytorch_accuracy_scorer.py
|
yopknopixx/dffml
|
7f295bb01b235a915c9f8015564b97a708cd5325
|
[
"MIT"
] | null | null | null |
from dffml.feature.feature import Features
import os
import torch
from dffml.base import config
from dffml.source.source import Sources
from dffml.util.entrypoint import entrypoint
from dffml.model import ModelNotTrained, ModelContext
from dffml.accuracy import (
AccuracyScorer,
AccuracyContext,
)
@config
class PytorchAccuracyConfig:
pass
class PytorchAccuracyContext(AccuracyContext):
"""
Accuracy Scorer for Pytorch Network Models
"""
async def score(
self, mctx: ModelContext, sctx: Sources, *features: Features
):
        if not os.path.isfile(mctx.model_path):
raise ModelNotTrained("Train model before assessing for accuracy.")
dataset, size = await mctx.dataset_generator(sctx)
dataloader = torch.utils.data.DataLoader(
dataset,
batch_size=mctx.parent.config.batch_size,
shuffle=True,
num_workers=4,
)
mctx._model.eval()
if mctx.classifications:
running_corrects = 0
for inputs, labels in dataloader:
inputs = inputs.to(mctx.device)
labels = labels.to(mctx.device)
with torch.set_grad_enabled(False):
outputs = mctx._model(inputs)
_, preds = torch.max(outputs, 1)
running_corrects += torch.sum(preds == labels.data)
acc = running_corrects.double() / size
else:
running_loss = 0.0
for inputs, labels in dataloader:
                inputs = inputs.to(mctx.device)
                labels = labels.to(mctx.device)
                with torch.set_grad_enabled(False):
                    outputs = mctx._model(inputs)
                    # compare predictions against targets, as in the classification branch
                    loss = mctx.criterion(outputs, labels)
running_loss += loss.item() * inputs.size(0)
total_loss = running_loss / size
acc = 1.0 - total_loss
return acc
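# Worked example of the regression branch above (added for illustration): with a
# summed per-sample loss of 25.0 over size = 100 records, total_loss = 0.25 and the
# reported score is acc = 1.0 - 0.25 = 0.75; the classification branch instead
# reports the plain fraction of correctly predicted records.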
@entrypoint("pytorchscore")
class PytorchAccuracy(AccuracyScorer):
CONFIG = PytorchAccuracyConfig
CONTEXT = PytorchAccuracyContext
| 27.166667
| 79
| 0.612081
|
ad1e53a64e378ac2b384523fe38c8c7ba81c28e9
| 2,029
|
py
|
Python
|
convert_utils.py
|
dparrini/anafas2atp
|
018ca31de43e516c49b477fb83dde1d264fd6194
|
[
"MIT"
] | null | null | null |
convert_utils.py
|
dparrini/anafas2atp
|
018ca31de43e516c49b477fb83dde1d264fd6194
|
[
"MIT"
] | null | null | null |
convert_utils.py
|
dparrini/anafas2atp
|
018ca31de43e516c49b477fb83dde1d264fd6194
|
[
"MIT"
] | null | null | null |
"""
MIT License
Copyright (c) 2019 David Rodrigues Parrini
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Utility functions for field conversion.
"""
def try_int(intstr):
"""
Try converting a string into int. Trims empty space.
"""
try:
num = int(intstr.strip())
except ValueError:
num = 0
return num
def try_float(floatstr):
"""
Try converting a string into a float. Trims empty space.
"""
try:
num = float(floatstr.strip())
except ValueError:
num = 0.0
return num
def try_anafas_float(floatstr):
"""
Try converting a string into a float. Trims empty space and checks whether
there is a decimal separator. When the separator is omitted, two implied decimal
places are assumed (Anafas' default), i.e. the parsed number is divided by 100.
"""
try:
num = float(floatstr.strip())
# checks if the decimal separator was omitted
thereIsDot = not (floatstr.find(".") == -1)
if not thereIsDot:
num = num / 100.0
except ValueError:
num = 0.0
return num
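# Illustrative self-check (added; the expected values follow directly from the
# helpers above).
if __name__ == "__main__":
    assert try_int(" 42 ") == 42
    assert try_int("oops") == 0          # unparsable input falls back to 0
    assert try_float(" 1.5 ") == 1.5
    # no decimal separator: "1234" is read with two implied decimals -> 12.34
    assert try_anafas_float("1234") == 12.34
    # an explicit separator is taken at face value
    assert try_anafas_float("12.34") == 12.34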
| 28.577465
| 80
| 0.740266
|
e3d6706fa91d12ab02d52fb92bdbc05024bd5959
| 9,951
|
py
|
Python
|
roles/openshift_health_checker/test/etcd_imagedata_size_test.py
|
KoteikinyDrova/openshift-ansible
|
3db2bb10c0ad5e7ed702bfccdec03562533e8539
|
[
"Apache-2.0"
] | 1
|
2019-03-13T10:14:35.000Z
|
2019-03-13T10:14:35.000Z
|
roles/openshift_health_checker/test/etcd_imagedata_size_test.py
|
KoteikinyDrova/openshift-ansible
|
3db2bb10c0ad5e7ed702bfccdec03562533e8539
|
[
"Apache-2.0"
] | 3
|
2016-12-01T23:01:36.000Z
|
2016-12-02T00:16:48.000Z
|
roles/openshift_health_checker/test/etcd_imagedata_size_test.py
|
KoteikinyDrova/openshift-ansible
|
3db2bb10c0ad5e7ed702bfccdec03562533e8539
|
[
"Apache-2.0"
] | 4
|
2018-10-27T00:29:24.000Z
|
2022-01-07T07:39:51.000Z
|
import pytest
from collections import namedtuple
from openshift_checks.etcd_imagedata_size import EtcdImageDataSize, OpenShiftCheckException
from etcdkeysize import check_etcd_key_size
def fake_etcd_client(root):
fake_nodes = dict()
fake_etcd_node(root, fake_nodes)
clientclass = namedtuple("client", ["read"])
return clientclass(lambda key, recursive: fake_etcd_result(fake_nodes[key]))
def fake_etcd_result(fake_node):
resultclass = namedtuple("result", ["leaves"])
if not fake_node.dir:
return resultclass([fake_node])
return resultclass(fake_node.leaves)
def fake_etcd_node(node, visited):
min_req_fields = ["dir", "key"]
fields = list(node)
leaves = []
if node["dir"] and node.get("leaves"):
for leaf in node["leaves"]:
leaves.append(fake_etcd_node(leaf, visited))
if len(set(min_req_fields) - set(fields)) > 0:
raise ValueError("fake etcd nodes require at least {} fields.".format(min_req_fields))
if node.get("leaves"):
node["leaves"] = leaves
nodeclass = namedtuple("node", fields)
nodeinst = nodeclass(**node)
visited[nodeinst.key] = nodeinst
return nodeinst
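# Example of the fakes above (added for illustration): fake_etcd_client mimics just
# enough of the python-etcd client API for check_etcd_key_size, e.g.
#
#   client = fake_etcd_client({"dir": False, "key": "/", "value": "1234"})
#   result = client.read("/", recursive=True)   # result.leaves holds the fake node(s)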
@pytest.mark.parametrize('ansible_mounts,extra_words', [
([], ['none']), # empty ansible_mounts
([{'mount': '/mnt'}], ['/mnt']), # missing relevant mount paths
])
def test_cannot_determine_available_mountpath(ansible_mounts, extra_words):
task_vars = dict(
ansible_mounts=ansible_mounts,
)
check = EtcdImageDataSize(fake_execute_module, task_vars)
with pytest.raises(OpenShiftCheckException) as excinfo:
check.run()
for word in 'determine valid etcd mountpath'.split() + extra_words:
assert word in str(excinfo.value)
@pytest.mark.parametrize('ansible_mounts,tree,size_limit,should_fail,extra_words', [
(
# test that default image size limit evals to 1/2 * (total size in use)
[{
'mount': '/',
'size_available': 40 * 10**9,
'size_total': 80 * 10**9,
}],
{"dir": False, "key": "/", "value": "1234"},
None,
False,
[],
),
(
[{
'mount': '/',
'size_available': 40 * 10**9,
'size_total': 48 * 10**9,
}],
{"dir": False, "key": "/", "value": "1234"},
None,
False,
[],
),
(
# set max size limit for image data to be below total node value
# total node value is defined as the sum of the value field
# from every node
[{
'mount': '/',
'size_available': 40 * 10**9,
'size_total': 48 * 10**9,
}],
{"dir": False, "key": "/", "value": "12345678"},
7,
True,
["exceeds the maximum recommended limit", "0.00 GB"],
),
(
[{
'mount': '/',
'size_available': 48 * 10**9 - 1,
'size_total': 48 * 10**9,
}],
{"dir": False, "key": "/", "value": "1234"},
None,
True,
["exceeds the maximum recommended limit", "0.00 GB"],
)
])
def test_check_etcd_key_size_calculates_correct_limit(ansible_mounts, tree, size_limit, should_fail, extra_words):
def execute_module(module_name, module_args, *_):
if module_name != "etcdkeysize":
return {
"changed": False,
}
client = fake_etcd_client(tree)
s, limit_exceeded = check_etcd_key_size(client, tree["key"], module_args["size_limit_bytes"])
return {"size_limit_exceeded": limit_exceeded}
task_vars = dict(
etcd_max_image_data_size_bytes=size_limit,
ansible_mounts=ansible_mounts,
openshift=dict(
master=dict(etcd_hosts=["localhost"]),
common=dict(config_base="/var/lib/origin")
)
)
if size_limit is None:
task_vars.pop("etcd_max_image_data_size_bytes")
check = EtcdImageDataSize(execute_module, task_vars).run()
if should_fail:
assert check["failed"]
for word in extra_words:
assert word in check["msg"]
else:
assert not check.get("failed", False)
@pytest.mark.parametrize('ansible_mounts,tree,root_path,expected_size,extra_words', [
(
[{
'mount': '/',
'size_available': 40 * 10**9,
'size_total': 80 * 10**9,
}],
# test recursive size check on tree with height > 1
{
"dir": True,
"key": "/",
"leaves": [
{"dir": False, "key": "/foo1", "value": "1234"},
{"dir": False, "key": "/foo2", "value": "1234"},
{"dir": False, "key": "/foo3", "value": "1234"},
{"dir": False, "key": "/foo4", "value": "1234"},
{
"dir": True,
"key": "/foo5",
"leaves": [
{"dir": False, "key": "/foo/bar1", "value": "56789"},
{"dir": False, "key": "/foo/bar2", "value": "56789"},
{"dir": False, "key": "/foo/bar3", "value": "56789"},
{
"dir": True,
"key": "/foo/bar4",
"leaves": [
{"dir": False, "key": "/foo/bar/baz1", "value": "123"},
{"dir": False, "key": "/foo/bar/baz2", "value": "123"},
]
},
]
},
]
},
"/",
37,
[],
),
(
[{
'mount': '/',
'size_available': 40 * 10**9,
'size_total': 80 * 10**9,
}],
# test correct sub-tree size calculation
{
"dir": True,
"key": "/",
"leaves": [
{"dir": False, "key": "/foo1", "value": "1234"},
{"dir": False, "key": "/foo2", "value": "1234"},
{"dir": False, "key": "/foo3", "value": "1234"},
{"dir": False, "key": "/foo4", "value": "1234"},
{
"dir": True,
"key": "/foo5",
"leaves": [
{"dir": False, "key": "/foo/bar1", "value": "56789"},
{"dir": False, "key": "/foo/bar2", "value": "56789"},
{"dir": False, "key": "/foo/bar3", "value": "56789"},
{
"dir": True,
"key": "/foo/bar4",
"leaves": [
{"dir": False, "key": "/foo/bar/baz1", "value": "123"},
{"dir": False, "key": "/foo/bar/baz2", "value": "123"},
]
},
]
},
]
},
"/foo5",
21,
[],
),
(
[{
'mount': '/',
'size_available': 40 * 10**9,
'size_total': 80 * 10**9,
}],
# test that a non-existing key is handled correctly
{
"dir": False,
"key": "/",
"value": "1234",
},
"/missing",
0,
[],
),
(
[{
'mount': '/',
'size_available': 40 * 10**9,
'size_total': 80 * 10**9,
}],
# test etcd cycle handling
{
"dir": True,
"key": "/",
"leaves": [
{"dir": False, "key": "/foo1", "value": "1234"},
{"dir": False, "key": "/foo2", "value": "1234"},
{"dir": False, "key": "/foo3", "value": "1234"},
{"dir": False, "key": "/foo4", "value": "1234"},
{
"dir": True,
"key": "/",
"leaves": [
{"dir": False, "key": "/foo1", "value": "1"},
],
},
]
},
"/",
16,
[],
),
])
def test_etcd_key_size_check_calculates_correct_size(ansible_mounts, tree, root_path, expected_size, extra_words):
def execute_module(module_name, module_args, *_):
if module_name != "etcdkeysize":
return {
"changed": False,
}
client = fake_etcd_client(tree)
size, limit_exceeded = check_etcd_key_size(client, root_path, module_args["size_limit_bytes"])
assert size == expected_size
return {
"size_limit_exceeded": limit_exceeded,
}
task_vars = dict(
ansible_mounts=ansible_mounts,
openshift=dict(
master=dict(etcd_hosts=["localhost"]),
common=dict(config_base="/var/lib/origin")
)
)
check = EtcdImageDataSize(execute_module, task_vars).run()
assert not check.get("failed", False)
def test_etcdkeysize_module_failure():
def execute_module(module_name, *_):
if module_name != "etcdkeysize":
return {
"changed": False,
}
return {
"rc": 1,
"module_stderr": "failure",
}
task_vars = dict(
ansible_mounts=[{
'mount': '/',
'size_available': 40 * 10**9,
'size_total': 80 * 10**9,
}],
openshift=dict(
master=dict(etcd_hosts=["localhost"]),
common=dict(config_base="/var/lib/origin")
)
)
check = EtcdImageDataSize(execute_module, task_vars).run()
assert check["failed"]
    for word in "Failed to retrieve stats".split():
        assert word in check["msg"]
def fake_execute_module(*args):
raise AssertionError('this function should not be called')
| 30.246201
| 114
| 0.467591
|
eda5fe12568fc196f3b44d54fbecc4de24145848
| 7,054
|
py
|
Python
|
deep_sort/sort/tracker.py
|
GlassyWing/sort_torch
|
3d4c809dee6ad35b2f7d5a4b75f9e30b07722441
|
[
"Apache-2.0"
] | null | null | null |
deep_sort/sort/tracker.py
|
GlassyWing/sort_torch
|
3d4c809dee6ad35b2f7d5a4b75f9e30b07722441
|
[
"Apache-2.0"
] | null | null | null |
deep_sort/sort/tracker.py
|
GlassyWing/sort_torch
|
3d4c809dee6ad35b2f7d5a4b75f9e30b07722441
|
[
"Apache-2.0"
] | null | null | null |
import torch
from . import kalman_filter
from . import linear_assignment
from . import iou_matching
from .track import Track
class Tracker:
"""
This is the multi-target tracker.
Parameters
----------
metric : nn_matching.NearestNeighborDistanceMetric
A distance metric for measurement-to-track association.
max_age : int
Maximum number of consecutive misses before a track is deleted.
n_init : int
Number of consecutive detections before the track is confirmed. The
track state is set to `Deleted` if a miss occurs within the first
`n_init` frames.
Attributes
----------
metric : nn_matching.NearestNeighborDistanceMetric
The distance metric used for measurement to track association.
max_age : int
Maximum number of consecutive misses before a track is deleted.
n_init : int
Number of frames that a track remains in initialization phase.
kf : kalman_filter.KalmanFilter
A Kalman filter to filter target trajectories in image space.
tracks : List[Track]
The list of active tracks at the current time step.
"""
def __init__(self, metric, max_iou_distance=0.7, max_age=70, n_init=3, use_cuda=False):
self.metric = metric
self.max_iou_distance = max_iou_distance
self.max_age = max_age
self.n_init = n_init
self.device = "cuda" if torch.cuda.is_available() and use_cuda else "cpu"
self.kf = kalman_filter.KalmanFilter(self.device)
self.tracks = []
self._next_id = 1
def _initiate_track(self, detection):
mean, covariance = self.kf.initiate(detection.to_xyah())
self.tracks.append(Track(
mean, covariance, self._next_id, self.n_init, self.max_age,
detection.feature, detection.payload))
self._next_id += 1
def _match(self, detections):
def gated_metric(tracks, dets, track_indices, detection_indices):
features = torch.stack([dets[i].feature for i in detection_indices], dim=0)
targets = [tracks[i].track_id for i in track_indices]
cost_matrix = self.metric.distance(features, targets)
cost_matrix = linear_assignment.gate_cost_matrix(
self.kf, cost_matrix, tracks, dets, track_indices,
detection_indices)
return cost_matrix
# Split track set into confirmed and unconfirmed tracks.
confirmed_tracks = [
i for i, t in enumerate(self.tracks) if t.is_confirmed()]
unconfirmed_tracks = [
i for i, t in enumerate(self.tracks) if not t.is_confirmed()]
# Associate confirmed tracks using appearance features.
matches_a, unmatched_tracks_a, unmatched_detections = \
linear_assignment.matching_cascade(
gated_metric, self.metric.matching_threshold, self.max_age,
self.tracks, detections, confirmed_tracks)
# Associate remaining tracks together with unconfirmed tracks using IOU.
iou_track_candidates = unconfirmed_tracks + [
k for k in unmatched_tracks_a if
self.tracks[k].time_since_update == 1]
unmatched_tracks_a = [
k for k in unmatched_tracks_a if
self.tracks[k].time_since_update != 1]
matches_b, unmatched_tracks_b, unmatched_detections = \
linear_assignment.min_cost_matching(
iou_matching.iou_cost, self.max_iou_distance, self.tracks,
detections, iou_track_candidates, unmatched_detections)
matches = matches_a + matches_b
unmatched_tracks = list(set(unmatched_tracks_a + unmatched_tracks_b))
return matches, unmatched_tracks, unmatched_detections
def predict(self):
"""Propagate track state distributions one time step forward.
This function should be called once every time step, before `update`.
"""
track_means = []
track_covs = []
for track in self.tracks:
track_means.append(track.mean)
track_covs.append(track.covariance)
if len(self.tracks) != 0:
track_means = torch.cat(track_means, dim=0)
track_covs = torch.cat(track_covs, dim=0)
updated_means, updated_covs = self.kf.predict(track_means, track_covs)
for i, track in enumerate(self.tracks):
track.predict(updated_means[i].unsqueeze(0),
updated_covs[i].unsqueeze(0))
def update(self, detections):
"""Perform measurement update and track management.
Parameters
----------
detections : List[deep_sort.detection.Detection]
A list of detections at the current time step.
"""
# Run matching cascade.
matches, unmatched_tracks, unmatched_detections = \
self._match(detections)
# Update track set.
if len(matches) != 0:
matched_track_means = []
matched_track_covs = []
matched_measures = []
for track_idx, detection_idx in matches:
track = self.tracks[track_idx]
detection = detections[detection_idx]
matched_track_means.append(track.mean)
matched_track_covs.append(track.covariance)
matched_measures.append(detection.tlwh)
matched_track_means = torch.cat(matched_track_means, dim=0)
matched_track_covs = torch.cat(matched_track_covs, dim=0)
matched_measures = torch.stack(matched_measures, dim=0)
matched_measures[:, :2] += matched_measures[:, 2:] / 2
matched_measures[:, 2] /= matched_measures[:, 3]
# Make the most of the GPU
updated_means, updated_covs = self.kf.update(matched_track_means, matched_track_covs, matched_measures)
for i, (track_idx, detection_idx) in enumerate(matches):
track = self.tracks[track_idx]
detection = detections[detection_idx]
track.update(updated_means[i].unsqueeze(0),
updated_covs[i].unsqueeze(0),
detection.feature,
detection.payload)
for track_idx in unmatched_tracks:
self.tracks[track_idx].mark_missed()
for detection_idx in unmatched_detections:
self._initiate_track(detections[detection_idx])
self.tracks = [t for t in self.tracks if not t.is_deleted()]
# Update distance metric.
active_targets = [t.track_id for t in self.tracks if t.is_confirmed()]
features, targets = [], []
for track in self.tracks:
if not track.is_confirmed():
continue
features += track.features
targets += [track.track_id for _ in track.features]
track.features = []
self.metric.partial_fit(
features,
targets,
active_targets)
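# Minimal per-frame driving loop (illustrative sketch, added; `metric` and the
# per-frame `detections` lists are assumed to come from deep_sort's nn_matching and
# detection modules):
#
#   tracker = Tracker(metric, max_iou_distance=0.7, max_age=70, n_init=3)
#   for detections in detection_stream:
#       tracker.predict()                # propagate states; must precede update()
#       tracker.update(detections)       # associate detections and manage tracks
#       confirmed = [t for t in tracker.tracks if t.is_confirmed()]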
| 39.853107
| 115
| 0.628438
|
ac1ae9319df5427254883fc0c7ed93b89cf1685b
| 5,262
|
py
|
Python
|
ironic/common/driver_factory.py
|
hongbin/ironic
|
857690560657bbfc5342000908bc84c918da1b11
|
[
"Apache-2.0"
] | null | null | null |
ironic/common/driver_factory.py
|
hongbin/ironic
|
857690560657bbfc5342000908bc84c918da1b11
|
[
"Apache-2.0"
] | null | null | null |
ironic/common/driver_factory.py
|
hongbin/ironic
|
857690560657bbfc5342000908bc84c918da1b11
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from ironic.common import exception
from ironic.common.i18n import _LI
from ironic.openstack.common import lockutils
from ironic.openstack.common import log
from stevedore import dispatch
LOG = log.getLogger(__name__)
driver_opts = [
cfg.ListOpt('enabled_drivers',
default=['pxe_ipmitool'],
help='Specify the list of drivers to load during service '
'initialization. Missing drivers, or drivers which '
'fail to initialize, will prevent the conductor '
'service from starting. The option default is a '
'recommended set of production-oriented drivers. A '
'complete list of drivers present on your system may '
'be found by enumerating the "ironic.drivers" '
'entrypoint. An example may be found in the '
'developer documentation online.'),
]
CONF = cfg.CONF
CONF.register_opts(driver_opts)
EM_SEMAPHORE = 'extension_manager'
def get_driver(driver_name):
"""Simple method to get a ref to an instance of a driver.
Driver loading is handled by the DriverFactory class. This method
conveniently wraps that class and returns the actual driver object.
:param driver_name: the name of the driver class to load
:returns: An instance of a class which implements
ironic.drivers.base.BaseDriver
:raises: DriverNotFound if the requested driver_name could not be
found in the "ironic.drivers" namespace.
"""
try:
factory = DriverFactory()
return factory[driver_name].obj
except KeyError:
raise exception.DriverNotFound(driver_name=driver_name)
class DriverFactory(object):
"""Discover, load and manage the drivers available."""
# NOTE(deva): loading the _extension_manager as a class member will break
# stevedore when it loads a driver, because the driver will
# import this file (and thus instantiate another factory).
# Instead, we instantiate a NameDispatchExtensionManager only
# once, the first time DriverFactory.__init__ is called.
_extension_manager = None
def __init__(self):
if not DriverFactory._extension_manager:
DriverFactory._init_extension_manager()
def __getitem__(self, name):
return self._extension_manager[name]
# NOTE(deva): Use lockutils to avoid a potential race in eventlet
# that might try to create two driver factories.
@classmethod
@lockutils.synchronized(EM_SEMAPHORE, 'ironic-')
def _init_extension_manager(cls):
# NOTE(deva): In case multiple greenthreads queue up on this lock
# before _extension_manager is initialized, prevent
# creation of multiple NameDispatchExtensionManagers.
if cls._extension_manager:
return
# NOTE(deva): Drivers raise "DriverLoadError" if they are unable to be
# loaded, eg. due to missing external dependencies.
# We capture that exception, and, only if it is for an
# enabled driver, raise it from here. If enabled driver
# raises other exception type, it is wrapped in
# "DriverLoadError", providing the name of the driver that
# caused it, and raised. If the exception is for a
# non-enabled driver, we suppress it.
def _catch_driver_not_found(mgr, ep, exc):
# NOTE(deva): stevedore loads plugins *before* evaluating
# _check_func, so we need to check here, too.
if ep.name in CONF.enabled_drivers:
if not isinstance(exc, exception.DriverLoadError):
raise exception.DriverLoadError(driver=ep.name, reason=exc)
raise exc
def _check_func(ext):
return ext.name in CONF.enabled_drivers
cls._extension_manager = \
dispatch.NameDispatchExtensionManager(
'ironic.drivers',
_check_func,
invoke_on_load=True,
on_load_failure_callback=_catch_driver_not_found)
LOG.info(_LI("Loaded the following drivers: %s"),
cls._extension_manager.names())
@property
def names(self):
"""The list of driver names available."""
return self._extension_manager.names()
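# Illustrative usage (added; mirrors the docstrings above):
#
#   driver = get_driver('pxe_ipmitool')   # instance implementing drivers.base.BaseDriver
#   factory = DriverFactory()
#   LOG.debug("drivers available: %s", factory.names)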
| 41.109375
| 79
| 0.63607
|
bc3fd7119b55d266fd1776e1ae9f5f9072f8d240
| 4,911
|
py
|
Python
|
layers/modules/multibox_loss.py
|
dakeli/ssd_pytorch
|
ec1247ff1f9400b3a57747028d555284b0283474
|
[
"MIT"
] | null | null | null |
layers/modules/multibox_loss.py
|
dakeli/ssd_pytorch
|
ec1247ff1f9400b3a57747028d555284b0283474
|
[
"MIT"
] | null | null | null |
layers/modules/multibox_loss.py
|
dakeli/ssd_pytorch
|
ec1247ff1f9400b3a57747028d555284b0283474
|
[
"MIT"
] | 1
|
2018-12-12T06:40:07.000Z
|
2018-12-12T06:40:07.000Z
|
# -*- coding: utf-8 -*-
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from data import coco as cfg
from ..box_utils import match, log_sum_exp
class MultiBoxLoss(nn.Module):
"""SSD Weighted Loss Function
Compute Targets:
1) Produce Confidence Target Indices by matching ground truth boxes
with (default) 'priorboxes' that have jaccard index > threshold parameter
(default threshold: 0.5).
2) Produce localization target by 'encoding' variance into offsets of ground
truth boxes and their matched 'priorboxes'.
3) Hard negative mining to filter the excessive number of negative examples
that comes with using a large number of default bounding boxes.
(default negative:positive ratio 3:1)
Objective Loss:
L(x,c,l,g) = (Lconf(x, c) + αLloc(x,l,g)) / N
Where, Lconf is the CrossEntropy Loss and Lloc is the SmoothL1 Loss
weighted by α which is set to 1 by cross val.
Args:
c: class confidences,
l: predicted boxes,
g: ground truth boxes
N: number of matched default boxes
See: https://arxiv.org/pdf/1512.02325.pdf for more details.
"""
def __init__(self, num_classes, overlap_thresh, prior_for_matching,
bkg_label, neg_mining, neg_pos, neg_overlap, encode_target,
use_gpu=True):
super(MultiBoxLoss, self).__init__()
self.use_gpu = use_gpu
self.num_classes = num_classes
self.threshold = overlap_thresh
self.background_label = bkg_label
self.encode_target = encode_target
self.use_prior_for_matching = prior_for_matching
self.do_neg_mining = neg_mining
self.negpos_ratio = neg_pos
self.neg_overlap = neg_overlap
self.variance = cfg['variance']
def forward(self, predictions, targets):
"""Multibox Loss
Args:
predictions (tuple): A tuple containing loc preds, conf preds,
and prior boxes from SSD net.
conf shape: torch.size(batch_size,num_priors,num_classes)
loc shape: torch.size(batch_size,num_priors,4)
priors shape: torch.size(num_priors,4)
targets (tensor): Ground truth boxes and labels for a batch,
shape: [batch_size,num_objs,5] (last idx is the label).
"""
loc_data, conf_data, priors = predictions
num = loc_data.size(0)
priors = priors[:loc_data.size(1), :]
num_priors = (priors.size(0))
num_classes = self.num_classes
# match priors (default boxes) and ground truth boxes
loc_t = torch.Tensor(num, num_priors, 4)
conf_t = torch.LongTensor(num, num_priors)
for idx in range(num):
truths = targets[idx][:, :-1].data
labels = targets[idx][:, -1].data
defaults = priors.data
match(self.threshold, truths, defaults, self.variance, labels,
loc_t, conf_t, idx)
if self.use_gpu:
loc_t = loc_t.cuda()
conf_t = conf_t.cuda()
# wrap targets
loc_t = Variable(loc_t, requires_grad=False)
conf_t = Variable(conf_t, requires_grad=False)
pos = conf_t > 0
num_pos = pos.sum(dim=1, keepdim=True)
# Localization Loss (Smooth L1)
# Shape: [batch,num_priors,4]
pos_idx = pos.unsqueeze(pos.dim()).expand_as(loc_data)
loc_p = loc_data[pos_idx].view(-1, 4)
loc_t = loc_t[pos_idx].view(-1, 4)
loss_l = F.smooth_l1_loss(loc_p, loc_t, reduction='sum')
# Compute max conf across batch for hard negative mining
batch_conf = conf_data.view(-1, self.num_classes)
loss_c = log_sum_exp(batch_conf) - batch_conf.gather(1, conf_t.view(-1, 1))
# Hard Negative Mining
# loss_c[pos] = 0
loss_c[pos.view(-1)] = 0 # filter out pos boxes for now
loss_c = loss_c.view(num, -1)
_, loss_idx = loss_c.sort(1, descending=True)
_, idx_rank = loss_idx.sort(1)
num_pos = pos.long().sum(1, keepdim=True)
num_neg = torch.clamp(self.negpos_ratio*num_pos, max=pos.size(1)-1)
neg = idx_rank < num_neg.expand_as(idx_rank)
# Confidence Loss Including Positive and Negative Examples
pos_idx = pos.unsqueeze(2).expand_as(conf_data)
neg_idx = neg.unsqueeze(2).expand_as(conf_data)
conf_p = conf_data[(pos_idx+neg_idx).gt(0)].view(-1, self.num_classes)
targets_weighted = conf_t[(pos+neg).gt(0)]
loss_c = F.cross_entropy(conf_p, targets_weighted, reduction='sum')
# Sum of losses: L(x,c,l,g) = (Lconf(x, c) + αLloc(x,l,g)) / N
N = num_pos.data.sum()
loss_l /= N.float()
loss_c /= N.float()
return loss_l, loss_c
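# Worked illustration of the 3:1 hard negative mining cap used above (added; not part
# of the original module): with negpos_ratio = 3 and e.g. 8732 priors, an image with
# 10 matched (positive) priors keeps at most 30 negatives:
#
#   num_pos = torch.tensor([[10]])
#   num_neg = torch.clamp(3 * num_pos, max=8732 - 1)   # -> tensor([[30]])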
| 41.268908
| 84
| 0.621462
|
a45028330df4b23d33b48cb0e79c2fb683919704
| 2,292
|
py
|
Python
|
lib/googlecloudsdk/core/util/tokenizer.py
|
bshaffer/google-cloud-sdk
|
f587382fd112f238c0d6d5ca3dab8f52d2b5c5f9
|
[
"Apache-2.0"
] | null | null | null |
lib/googlecloudsdk/core/util/tokenizer.py
|
bshaffer/google-cloud-sdk
|
f587382fd112f238c0d6d5ca3dab8f52d2b5c5f9
|
[
"Apache-2.0"
] | null | null | null |
lib/googlecloudsdk/core/util/tokenizer.py
|
bshaffer/google-cloud-sdk
|
f587382fd112f238c0d6d5ca3dab8f52d2b5c5f9
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*- #
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A utility for tokenizing strings."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import io
__all__ = ['Literal', 'Separator', 'Tokenize']
_ESCAPE_CHAR = '\\'
class Literal(str):
pass
class Separator(str):
pass
def Tokenize(string, separators):
"""Tokenizes the given string based on a list of separator strings.
This is similar to splitting the string based on separators, except
that this function retains the separators. The separators are
wrapped in Separator objects and everything else is wrapped in
Literal objects.
For example, Tokenize('a:b,c:d', [':', ',']) returns [Literal('a'),
Separator(':'), Literal('b'), Separator(','), Literal('c'),
Separator(':'), Literal('d')].
Args:
string: str, The string to partition.
separators: [str], A list of strings on which to partition.
Raises:
ValueError: If an unterminated escape sequence is at the
end of the input.
Returns:
[tuple], A list of strings which can be of types Literal or
Separator.
"""
tokens = []
curr = io.StringIO()
buf = io.StringIO(string)
while True:
c = buf.read(1)
if not c:
break
elif c == _ESCAPE_CHAR:
c = buf.read(1)
if c:
curr.write(c)
continue
else:
raise ValueError('illegal escape sequence at index {0}: {1}'.format(
buf.tell() - 1, string))
elif c in separators:
tokens.append(Literal(curr.getvalue()))
tokens.append(Separator(c))
curr = io.StringIO()
else:
curr.write(c)
tokens.append(Literal(curr.getvalue()))
return tokens
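# Small self-check (added for illustration; matches the Tokenize docstring above).
if __name__ == '__main__':
  tokens = Tokenize('a:b,c:d', [':', ','])
  assert tokens == [Literal('a'), Separator(':'), Literal('b'), Separator(','),
                    Literal('c'), Separator(':'), Literal('d')]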
| 26.344828
| 76
| 0.679319
|
072477e321f716ecdda98426e5a0cbf847e0a0a0
| 36,967
|
py
|
Python
|
tests/devices/test_default_mixed.py
|
emildi/pennylane
|
64901ef2f920f42385b65c8da538941ff36da7be
|
[
"Apache-2.0"
] | 1
|
2021-12-07T17:18:26.000Z
|
2021-12-07T17:18:26.000Z
|
tests/devices/test_default_mixed.py
|
emildi/pennylane
|
64901ef2f920f42385b65c8da538941ff36da7be
|
[
"Apache-2.0"
] | null | null | null |
tests/devices/test_default_mixed.py
|
emildi/pennylane
|
64901ef2f920f42385b65c8da538941ff36da7be
|
[
"Apache-2.0"
] | 1
|
2021-12-07T17:18:36.000Z
|
2021-12-07T17:18:36.000Z
|
# Copyright 2018-2020 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for the :mod:`pennylane.devices.DefaultMixed` device.
"""
import pytest
import pennylane as qml
from pennylane import QubitStateVector, BasisState, DeviceError
from pennylane.devices import DefaultMixed
from pennylane.ops import (
Identity,
PauliZ,
CZ,
PauliX,
Hadamard,
CNOT,
AmplitudeDamping,
DepolarizingChannel,
ResetError,
)
from pennylane.wires import Wires
import numpy as np
INV_SQRT2 = 1 / np.sqrt(2)
# functions for creating different states used in testing
def basis_state(index, nr_wires):
rho = np.zeros((2 ** nr_wires, 2 ** nr_wires), dtype=np.complex128)
rho[index, index] = 1
return rho
def hadamard_state(nr_wires):
"""Equal superposition state (Hadamard on all qubits)"""
return np.ones((2 ** nr_wires, 2 ** nr_wires), dtype=np.complex128) / (2 ** nr_wires)
def max_mixed_state(nr_wires):
return np.eye(2 ** nr_wires, dtype=np.complex128) / (2 ** nr_wires)
def root_state(nr_wires):
"""Pure state with equal amplitudes but phases equal to roots of unity"""
dim = 2 ** nr_wires
ket = [np.exp(1j * 2 * np.pi * n / dim) / np.sqrt(dim) for n in range(dim)]
return np.outer(ket, np.conj(ket))
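# Illustrative single-wire values for the helpers above (added): hadamard_state(1)
# equals 0.5 * [[1, 1], [1, 1]] and root_state(1) equals 0.5 * [[1, -1], [-1, 1]]
# (the phases are the square roots of unity, +1 and -1).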
@pytest.mark.parametrize("nr_wires", [1, 2, 3])
class TestCreateBasisState:
"""Unit tests for the method `_create_basis_state()`"""
def test_shape(self, nr_wires):
"""Tests that the basis state has the correct shape"""
dev = qml.device("default.mixed", wires=nr_wires)
assert [2] * (2 * nr_wires) == list(np.shape(dev._create_basis_state(0)))
@pytest.mark.parametrize("index", [0, 1])
def test_expected_state(self, nr_wires, index, tol):
"""Tests output basis state against the expected one"""
rho = np.zeros((2 ** nr_wires, 2 ** nr_wires))
rho[index, index] = 1
rho = np.reshape(rho, [2] * (2 * nr_wires))
dev = qml.device("default.mixed", wires=nr_wires)
assert np.allclose(rho, dev._create_basis_state(index), atol=tol, rtol=0)
@pytest.mark.parametrize("nr_wires", [2, 3])
class TestState:
"""Tests for the method `state()`, which retrieves the state of the system"""
def test_shape(self, nr_wires):
"""Tests that the state has the correct shape"""
dev = qml.device("default.mixed", wires=nr_wires)
assert (2 ** nr_wires, 2 ** nr_wires) == np.shape(dev.state)
def test_init_state(self, nr_wires, tol):
"""Tests that the state is |0...0><0...0| after initialization of the device"""
rho = np.zeros((2 ** nr_wires, 2 ** nr_wires))
rho[0, 0] = 1
dev = qml.device("default.mixed", wires=nr_wires)
assert np.allclose(rho, dev.state, atol=tol, rtol=0)
@pytest.mark.parametrize("op", [CNOT, CZ])
def test_state_after_twoqubit(self, nr_wires, op, tol):
"""Tests that state is correctly retrieved after applying two-qubit operations on the
first wires"""
dev = qml.device("default.mixed", wires=nr_wires)
dev.apply([op(wires=[0, 1])])
current_state = np.reshape(dev._state, (2 ** nr_wires, 2 ** nr_wires))
assert np.allclose(dev.state, current_state, atol=tol, rtol=0)
@pytest.mark.parametrize(
"op",
[
AmplitudeDamping(0.5, wires=0),
DepolarizingChannel(0.5, wires=0),
ResetError(0.1, 0.5, wires=0),
],
)
def test_state_after_channel(self, nr_wires, op, tol):
"""Tests that state is correctly retrieved after applying a channel on the first wires"""
dev = qml.device("default.mixed", wires=nr_wires)
dev.apply([op])
current_state = np.reshape(dev._state, (2 ** nr_wires, 2 ** nr_wires))
assert np.allclose(dev.state, current_state, atol=tol, rtol=0)
@pytest.mark.parametrize("op", [PauliX, PauliZ, Hadamard])
def test_state_after_gate(self, nr_wires, op, tol):
"""Tests that state is correctly retrieved after applying operations on the first wires"""
dev = qml.device("default.mixed", wires=nr_wires)
dev.apply([op(wires=0)])
current_state = np.reshape(dev._state, (2 ** nr_wires, 2 ** nr_wires))
assert np.allclose(dev.state, current_state, atol=tol, rtol=0)
@pytest.mark.parametrize("nr_wires", [2, 3])
class TestReset:
"""Unit tests for the method `reset()`"""
def test_reset_basis(self, nr_wires, tol):
dev = qml.device("default.mixed", wires=nr_wires)
dev._state = dev._create_basis_state(1)
dev.reset()
assert np.allclose(dev._state, dev._create_basis_state(0), atol=tol, rtol=0)
@pytest.mark.parametrize("op", [CNOT, CZ])
def test_reset_after_twoqubit(self, nr_wires, op, tol):
"""Tests that state is correctly reset after applying two-qubit operations on the first
wires"""
dev = qml.device("default.mixed", wires=nr_wires)
dev.apply([op(wires=[0, 1])])
dev.reset()
assert np.allclose(dev._state, dev._create_basis_state(0), atol=tol, rtol=0)
@pytest.mark.parametrize(
"op",
[
AmplitudeDamping(0.5, wires=[0]),
DepolarizingChannel(0.5, wires=[0]),
ResetError(0.1, 0.5, wires=[0]),
],
)
def test_reset_after_channel(self, nr_wires, op, tol):
"""Tests that state is correctly reset after applying a channel on the first
wires"""
dev = qml.device("default.mixed", wires=nr_wires)
dev.apply([op])
dev.reset()
assert np.allclose(dev._state, dev._create_basis_state(0), atol=tol, rtol=0)
@pytest.mark.parametrize("op", [PauliX, PauliZ, Hadamard])
    def test_reset_after_gate(self, nr_wires, op, tol):
        """Tests that state is correctly reset after applying gates on the first
        wire"""
dev = qml.device("default.mixed", wires=nr_wires)
dev.apply([op(wires=0)])
dev.reset()
assert np.allclose(dev._state, dev._create_basis_state(0), atol=tol, rtol=0)
@pytest.mark.parametrize("nr_wires", [1, 2, 3])
class TestAnalyticProb:
"""Unit tests for the method `analytic_probability()`"""
def test_prob_init_state(self, nr_wires, tol):
"""Tests that we obtain the correct probabilities for the state |0...0><0...0|"""
dev = qml.device("default.mixed", wires=nr_wires)
probs = np.zeros(2 ** nr_wires)
probs[0] = 1
assert np.allclose(probs, dev.analytic_probability(), atol=tol, rtol=0)
def test_prob_basis_state(self, nr_wires, tol):
"""Tests that we obtain correct probabilities for the basis state |1...1><1...1|"""
dev = qml.device("default.mixed", wires=nr_wires)
dev._state = dev._create_basis_state(2 ** nr_wires - 1)
probs = np.zeros(2 ** nr_wires)
probs[-1] = 1
assert np.allclose(probs, dev.analytic_probability(), atol=tol, rtol=0)
def test_prob_hadamard(self, nr_wires, tol):
"""Tests that we obtain correct probabilities for the equal superposition state"""
dev = qml.device("default.mixed", wires=nr_wires)
dev._state = hadamard_state(nr_wires)
probs = np.ones(2 ** nr_wires) / (2 ** nr_wires)
assert np.allclose(probs, dev.analytic_probability(), atol=tol, rtol=0)
def test_prob_mixed(self, nr_wires, tol):
"""Tests that we obtain correct probabilities for the maximally mixed state"""
dev = qml.device("default.mixed", wires=nr_wires)
dev._state = max_mixed_state(nr_wires)
probs = np.ones(2 ** nr_wires) / (2 ** nr_wires)
assert np.allclose(probs, dev.analytic_probability(), atol=tol, rtol=0)
def test_prob_root(self, nr_wires, tol):
"""Tests that we obtain correct probabilities for the root state"""
dev = qml.device("default.mixed", wires=nr_wires)
dev._state = root_state(nr_wires)
probs = np.ones(2 ** nr_wires) / (2 ** nr_wires)
assert np.allclose(probs, dev.analytic_probability(), atol=tol, rtol=0)
def test_none_state(self, nr_wires):
"""Tests that return is `None` when the state is `None`"""
dev = qml.device("default.mixed", wires=nr_wires)
dev._state = None
assert dev.analytic_probability() is None
def test_probability_not_negative(self, nr_wires):
"""Test that probabilities are always real"""
dev = qml.device("default.mixed", wires=nr_wires)
dev._state = np.zeros([2 ** nr_wires, 2 ** nr_wires])
dev._state[0, 0] = 1
dev._state[1, 1] = -5e-17
assert np.all(dev.analytic_probability() >= 0)
class TestKrausOps:
"""Unit tests for the method `_get_kraus_ops()`"""
unitary_ops = [
(PauliX, np.array([[0, 1], [1, 0]])),
(Hadamard, np.array([[INV_SQRT2, INV_SQRT2], [INV_SQRT2, -INV_SQRT2]])),
(CNOT, np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]])),
]
@pytest.mark.parametrize("ops", unitary_ops)
def test_unitary_kraus(self, ops, tol):
"""Tests that matrices of non-diagonal unitary operations are retrieved correctly"""
dev = qml.device("default.mixed", wires=2)
assert np.allclose(dev._get_kraus(ops[0]), [ops[1]], atol=tol, rtol=0)
diagonal_ops = [
(PauliZ(wires=0), np.array([1, -1])),
(CZ(wires=[0, 1]), np.array([1, 1, 1, -1])),
]
@pytest.mark.parametrize("ops", diagonal_ops)
def test_diagonal_kraus(self, ops, tol):
"""Tests that matrices of non-diagonal unitary operations are retrieved correctly"""
dev = qml.device("default.mixed", wires=2)
assert np.allclose(dev._get_kraus(ops[0]), ops[1], atol=tol, rtol=0)
p = 0.5
p_0, p_1 = 0.1, 0.5
channel_ops = [
(
AmplitudeDamping(p, wires=0),
[np.diag([1, np.sqrt(1 - p)]), np.sqrt(p) * np.array([[0, 1], [0, 0]])],
),
(
DepolarizingChannel(p, wires=0),
[
np.sqrt(1 - p) * np.eye(2),
np.sqrt(p / 3) * np.array([[0, 1], [1, 0]]),
np.sqrt(p / 3) * np.array([[0, -1j], [1j, 0]]),
np.sqrt(p / 3) * np.array([[1, 0], [0, -1]]),
],
),
(
ResetError(p_0, p_1, wires=0),
[
np.sqrt(1 - p_0 - p_1) * np.eye(2),
np.sqrt(p_0) * np.array([[1, 0], [0, 0]]),
np.sqrt(p_0) * np.array([[0, 1], [0, 0]]),
np.sqrt(p_1) * np.array([[0, 0], [1, 0]]),
np.sqrt(p_1) * np.array([[0, 0], [0, 1]]),
],
),
]
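    # Sanity note (added for illustration): each Kraus set above resolves the
    # identity, sum_k K_k^dagger K_k = I. For AmplitudeDamping(p) this is
    # diag(1, 1 - p) + diag(0, p) = I; the depolarizing and reset channels
    # work out the same way with their respective probabilities.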
@pytest.mark.parametrize("ops", channel_ops)
def test_channel_kraus(self, ops, tol):
"""Tests that kraus matrices of non-unitary channels are retrieved correctly"""
dev = qml.device("default.mixed", wires=1)
assert np.allclose(dev._get_kraus(ops[0]), ops[1], atol=tol, rtol=0)
class TestApplyChannel:
"""Unit tests for the method `_apply_channel()`"""
x_apply_channel_init = [
[1, PauliX, basis_state(1, 1)],
[1, Hadamard, np.array([[0.5 + 0.0j, 0.5 + 0.0j], [0.5 + 0.0j, 0.5 + 0.0j]])],
[2, CNOT, basis_state(0, 2)],
[1, AmplitudeDamping(0.5, wires=0), basis_state(0, 1)],
[
1,
DepolarizingChannel(0.5, wires=0),
np.array([[2 / 3 + 0.0j, 0.0 + 0.0j], [0.0 + 0.0j, 1 / 3 + 0.0j]]),
],
[
1,
ResetError(0.1, 0.5, wires=0),
np.array([[0.5 + 0.0j, 0.0 + 0.0j], [0.0 + 0.0j, 0.5 + 0.0j]]),
],
]
@pytest.mark.parametrize("x", x_apply_channel_init)
def test_channel_init(self, x, tol):
"""Tests that channels are correctly applied to the default initial state"""
nr_wires = x[0]
op = x[1]
target_state = np.reshape(x[2], [2] * 2 * nr_wires)
dev = qml.device("default.mixed", wires=nr_wires)
kraus = dev._get_kraus(op)
if op == CNOT:
dev._apply_channel(kraus, wires=Wires([0, 1]))
else:
dev._apply_channel(kraus, wires=Wires(0))
assert np.allclose(dev._state, target_state, atol=tol, rtol=0)
x_apply_channel_mixed = [
[1, PauliX, max_mixed_state(1)],
[2, Hadamard, max_mixed_state(2)],
[2, CNOT, max_mixed_state(2)],
[
1,
AmplitudeDamping(0.5, wires=0),
np.array([[0.75 + 0.0j, 0.0 + 0.0j], [0.0 + 0.0j, 0.25 + 0.0j]]),
],
[
1,
DepolarizingChannel(0.5, wires=0),
np.array([[0.5 + 0.0j, 0.0 + 0.0j], [0.0 + 0.0j, 0.5 + 0.0j]]),
],
[
1,
ResetError(0.1, 0.5, wires=0),
np.array([[0.3 + 0.0j, 0.0 + 0.0j], [0.0 + 0.0j, 0.7 + 0.0j]]),
],
]
@pytest.mark.parametrize("x", x_apply_channel_mixed)
def test_channel_mixed(self, x, tol):
"""Tests that channels are correctly applied to the maximally mixed state"""
nr_wires = x[0]
op = x[1]
target_state = np.reshape(x[2], [2] * 2 * nr_wires)
dev = qml.device("default.mixed", wires=nr_wires)
max_mixed = np.reshape(max_mixed_state(nr_wires), [2] * 2 * nr_wires)
dev._state = max_mixed
kraus = dev._get_kraus(op)
if op == CNOT:
dev._apply_channel(kraus, wires=Wires([0, 1]))
else:
dev._apply_channel(kraus, wires=Wires(0))
assert np.allclose(dev._state, target_state, atol=tol, rtol=0)
x_apply_channel_root = [
[1, PauliX, np.array([[0.5 + 0.0j, -0.5 + 0.0j], [-0.5 - 0.0j, 0.5 + 0.0j]])],
[1, Hadamard, np.array([[0.0 + 0.0j, 0.0 + 0.0j], [0.0 + 0.0j, 1.0 + 0.0j]])],
[
2,
CNOT,
np.array(
[
[0.25 + 0.0j, 0.0 - 0.25j, 0.0 + 0.25j, -0.25],
[0.0 + 0.25j, 0.25 + 0.0j, -0.25 + 0.0j, 0.0 - 0.25j],
[0.0 - 0.25j, -0.25 + 0.0j, 0.25 + 0.0j, 0.0 + 0.25j],
[-0.25 + 0.0j, 0.0 + 0.25j, 0.0 - 0.25j, 0.25 + 0.0j],
]
),
],
[
1,
AmplitudeDamping(0.5, wires=0),
np.array([[0.75 + 0.0j, -0.35355339 - 0.0j], [-0.35355339 + 0.0j, 0.25 + 0.0j]]),
],
[
1,
DepolarizingChannel(0.5, wires=0),
np.array([[0.5 + 0.0j, -1 / 6 + 0.0j], [-1 / 6 + 0.0j, 0.5 + 0.0j]]),
],
[
1,
ResetError(0.1, 0.5, wires=0),
np.array([[0.3 + 0.0j, -0.2 + 0.0j], [-0.2 + 0.0j, 0.7 + 0.0j]]),
],
]
@pytest.mark.parametrize("x", x_apply_channel_root)
def test_channel_root(self, x, tol):
"""Tests that channels are correctly applied to root state"""
nr_wires = x[0]
op = x[1]
target_state = np.reshape(x[2], [2] * 2 * nr_wires)
dev = qml.device("default.mixed", wires=nr_wires)
root = np.reshape(root_state(nr_wires), [2] * 2 * nr_wires)
dev._state = root
kraus = dev._get_kraus(op)
if op == CNOT:
dev._apply_channel(kraus, wires=Wires([0, 1]))
else:
dev._apply_channel(kraus, wires=Wires(0))
assert np.allclose(dev._state, target_state, atol=tol, rtol=0)
class TestApplyDiagonal:
"""Unit tests for the method `_apply_diagonal_unitary()`"""
x_apply_diag_init = [[1, PauliZ, basis_state(0, 1)], [2, CZ, basis_state(0, 2)]]
@pytest.mark.parametrize("x", x_apply_diag_init)
def test_diag_init(self, x, tol):
"""Tests that diagonal gates are correctly applied to the default initial state"""
nr_wires = x[0]
op = x[1]
target_state = np.reshape(x[2], [2] * 2 * nr_wires)
dev = qml.device("default.mixed", wires=nr_wires)
kraus = dev._get_kraus(op)
if op == CZ:
dev._apply_diagonal_unitary(kraus, wires=Wires([0, 1]))
else:
dev._apply_diagonal_unitary(kraus, wires=Wires(0))
assert np.allclose(dev._state, target_state, atol=tol, rtol=0)
x_apply_diag_mixed = [[1, PauliZ, max_mixed_state(1)], [2, CZ, max_mixed_state(2)]]
@pytest.mark.parametrize("x", x_apply_diag_mixed)
def test_diag_mixed(self, x, tol):
"""Tests that diagonal gates are correctly applied to the maximally mixed state"""
nr_wires = x[0]
op = x[1]
target_state = np.reshape(x[2], [2] * 2 * nr_wires)
dev = qml.device("default.mixed", wires=nr_wires)
max_mixed = np.reshape(max_mixed_state(nr_wires), [2] * 2 * nr_wires)
dev._state = max_mixed
kraus = dev._get_kraus(op)
if op == CZ:
dev._apply_diagonal_unitary(kraus, wires=Wires([0, 1]))
else:
dev._apply_diagonal_unitary(kraus, wires=Wires(0))
assert np.allclose(dev._state, target_state, atol=tol, rtol=0)
x_apply_diag_root = [
[1, PauliZ, np.array([[0.5, 0.5], [0.5, 0.5]])],
[
2,
CZ,
np.array(
[
[0.25, -0.25j, -0.25, -0.25j],
[0.25j, 0.25, -0.25j, 0.25],
[-0.25, 0.25j, 0.25, 0.25j],
[0.25j, 0.25, -0.25j, 0.25],
]
),
],
]
@pytest.mark.parametrize("x", x_apply_diag_root)
def test_diag_root(self, x, tol):
"""Tests that diagonal gates are correctly applied to root state"""
nr_wires = x[0]
op = x[1]
target_state = np.reshape(x[2], [2] * 2 * nr_wires)
dev = qml.device("default.mixed", wires=nr_wires)
root = np.reshape(root_state(nr_wires), [2] * 2 * nr_wires)
dev._state = root
kraus = dev._get_kraus(op)
if op == CZ:
dev._apply_diagonal_unitary(kraus, wires=Wires([0, 1]))
else:
dev._apply_diagonal_unitary(kraus, wires=Wires(0))
assert np.allclose(dev._state, target_state, atol=tol, rtol=0)
class TestApplyBasisState:
"""Unit tests for the method `_apply_basis_state"""
@pytest.mark.parametrize("nr_wires", [1, 2, 3])
def test_all_ones(self, nr_wires, tol):
"""Tests that the state |11...1> is applied correctly"""
dev = qml.device("default.mixed", wires=nr_wires)
state = np.ones(nr_wires)
dev._apply_basis_state(state, wires=Wires(range(nr_wires)))
b_state = basis_state(2 ** nr_wires - 1, nr_wires)
target_state = np.reshape(b_state, [2] * 2 * nr_wires)
assert np.allclose(dev._state, target_state, atol=tol, rtol=0)
fixed_states = [[3, np.array([0, 1, 1])], [5, np.array([1, 0, 1])], [6, np.array([1, 1, 0])]]
@pytest.mark.parametrize("state", fixed_states)
def test_fixed_states(self, state, tol):
"""Tests that different basis states are applied correctly"""
nr_wires = 3
dev = qml.device("default.mixed", wires=nr_wires)
dev._apply_basis_state(state[1], wires=Wires(range(nr_wires)))
b_state = basis_state(state[0], nr_wires)
target_state = np.reshape(b_state, [2] * 2 * nr_wires)
assert np.allclose(dev._state, target_state, atol=tol, rtol=0)
wire_subset = [(6, [0, 1]), (5, [0, 2]), (3, [1, 2])]
@pytest.mark.parametrize("wires", wire_subset)
def test_subset_wires(self, wires, tol):
"""Tests that different basis states are applied correctly when applied to a subset of
wires"""
nr_wires = 3
dev = qml.device("default.mixed", wires=nr_wires)
state = np.ones(2)
dev._apply_basis_state(state, wires=Wires(wires[1]))
b_state = basis_state(wires[0], nr_wires)
target_state = np.reshape(b_state, [2] * 2 * nr_wires)
assert np.allclose(dev._state, target_state, atol=tol, rtol=0)
def test_wrong_dim(self):
"""Checks that an error is raised if state has the wrong dimension"""
dev = qml.device("default.mixed", wires=3)
state = np.ones(2)
with pytest.raises(ValueError, match="BasisState parameter and wires"):
dev._apply_basis_state(state, wires=Wires(range(3)))
def test_not_01(self):
"""Checks that an error is raised if state doesn't have entries in {0,1}"""
dev = qml.device("default.mixed", wires=2)
state = np.array([INV_SQRT2, INV_SQRT2])
with pytest.raises(ValueError, match="BasisState parameter must"):
dev._apply_basis_state(state, wires=Wires(range(2)))
class TestApplyStateVector:
"""Unit tests for the method `_apply_state_vector()`"""
@pytest.mark.parametrize("nr_wires", [1, 2, 3])
def test_apply_equal(self, nr_wires, tol):
"""Checks that an equal superposition state is correctly applied"""
dev = qml.device("default.mixed", wires=nr_wires)
state = np.ones(2 ** nr_wires) / np.sqrt(2 ** nr_wires)
dev._apply_state_vector(state, Wires(range(nr_wires)))
eq_state = hadamard_state(nr_wires)
target_state = np.reshape(eq_state, [2] * 2 * nr_wires)
assert np.allclose(dev._state, target_state, atol=tol, rtol=0)
@pytest.mark.parametrize("nr_wires", [1, 2, 3])
def test_apply_root(self, nr_wires, tol):
"""Checks that a root state is correctly applied"""
dev = qml.device("default.mixed", wires=nr_wires)
dim = 2 ** nr_wires
state = np.array([np.exp(1j * 2 * np.pi * n / dim) / np.sqrt(dim) for n in range(dim)])
dev._apply_state_vector(state, Wires(range(nr_wires)))
r_state = root_state(nr_wires)
target_state = np.reshape(r_state, [2] * 2 * nr_wires)
assert np.allclose(dev._state, target_state, atol=tol, rtol=0)
subset_wires = [(4, 0), (2, 1), (1, 2)]
@pytest.mark.parametrize("wires", subset_wires)
def test_subset_wires(self, wires, tol):
"""Tests that applying state |1> on each individual single wire prepares the correct basis
state"""
nr_wires = 3
dev = qml.device("default.mixed", wires=nr_wires)
state = np.array([0, 1])
dev._apply_state_vector(state, Wires(wires[1]))
b_state = basis_state(wires[0], nr_wires)
target_state = np.reshape(b_state, [2] * 2 * nr_wires)
assert np.allclose(dev._state, target_state, atol=tol, rtol=0)
def test_wrong_dim(self):
"""Checks that an error is raised if state has the wrong dimension"""
dev = qml.device("default.mixed", wires=3)
state = np.ones(7) / np.sqrt(7)
with pytest.raises(ValueError, match="State vector must be"):
dev._apply_state_vector(state, Wires(range(3)))
def test_not_normalized(self):
"""Checks that an error is raised if state is not normalized"""
dev = qml.device("default.mixed", wires=3)
state = np.ones(8) / np.sqrt(7)
with pytest.raises(ValueError, match="Sum of amplitudes"):
dev._apply_state_vector(state, Wires(range(3)))
def test_wires_as_list(self, tol):
"""Checks that state is correctly prepared when device wires are given as a list,
not a number. This test helps with coverage"""
nr_wires = 2
dev = qml.device("default.mixed", wires=[0, 1])
state = np.ones(2 ** nr_wires) / np.sqrt(2 ** nr_wires)
dev._apply_state_vector(state, Wires(range(nr_wires)))
eq_state = hadamard_state(nr_wires)
target_state = np.reshape(eq_state, [2] * 2 * nr_wires)
assert np.allclose(dev._state, target_state, atol=tol, rtol=0)
class TestApplyDensityMatrix:
"""Unit tests for the method `_apply_density_matrix()`"""
def test_instantiate_density_mat(self, tol):
"""Checks that the specific density matrix is initialized"""
dev = qml.device("default.mixed", wires=2)
initialize_state = basis_state(1, 2)
@qml.qnode(dev)
def circuit():
qml.QubitDensityMatrix(initialize_state, wires=[0, 1])
return qml.state()
final_state = circuit()
assert np.allclose(final_state, initialize_state, atol=tol, rtol=0)
@pytest.mark.parametrize("nr_wires", [1, 2, 3])
def test_apply_equal(self, nr_wires, tol):
"""Checks that an equal superposition state is correctly applied"""
dev = qml.device("default.mixed", wires=nr_wires)
state = np.ones(2 ** nr_wires) / np.sqrt(2 ** nr_wires)
rho = np.outer(state, state.conj())
dev._apply_density_matrix(rho, Wires(range(nr_wires)))
eq_state = hadamard_state(nr_wires)
target_state = np.reshape(eq_state, [2] * 2 * nr_wires)
assert np.allclose(dev._state, target_state, atol=tol, rtol=0)
@pytest.mark.parametrize("nr_wires", [1, 2, 3])
def test_apply_root(self, nr_wires, tol):
"""Checks that a root state is correctly applied"""
dev = qml.device("default.mixed", wires=nr_wires)
dim = 2 ** nr_wires
state = np.array([np.exp(1j * 2 * np.pi * n / dim) / np.sqrt(dim) for n in range(dim)])
rho = np.outer(state, state.conj())
dev._apply_density_matrix(rho, Wires(range(nr_wires)))
r_state = root_state(nr_wires)
target_state = np.reshape(r_state, [2] * 2 * nr_wires)
assert np.allclose(dev._state, target_state, atol=tol, rtol=0)
subset_wires = [(4, 0), (2, 1), (1, 2)]
@pytest.mark.parametrize("wires", subset_wires)
def test_subset_wires_with_filling_remaining(self, wires, tol):
"""Tests that applying state |1><1| on a subset of wires prepares the correct state
|1><1| ⊗ |0><0|"""
nr_wires = 3
dev = qml.device("default.mixed", wires=nr_wires)
state = np.array([0, 1])
rho = np.outer(state, state.conj())
dev._apply_density_matrix(rho, Wires(wires[1]))
b_state = basis_state(wires[0], nr_wires)
target_state = np.reshape(b_state, [2] * 2 * nr_wires)
assert np.allclose(dev._state, target_state, atol=tol, rtol=0)
subset_wires = [(7, (0, 1, 2), ()), (5, (0, 2), (1,)), (6, (0, 1), (2,))]
@pytest.mark.parametrize("wires", subset_wires)
def test_subset_wires_without_filling_remaining(self, wires, tol):
"""Tests that does nothing |1><1| on a subset of wires prepares the correct state
|1><1| ⊗ ρ if `fill_remaining=False`"""
nr_wires = 3
dev = qml.device("default.mixed", wires=nr_wires)
state0 = np.array([1, 0])
rho0 = np.outer(state0, state0.conj())
state1 = np.array([0, 1])
rho1 = np.outer(state1, state1.conj())
for wire in wires[1]:
dev._apply_density_matrix(rho1, Wires(wire))
for wire in wires[2]:
dev._apply_density_matrix(rho0, Wires(wire))
b_state = basis_state(wires[0], nr_wires)
target_state = np.reshape(b_state, [2] * 2 * nr_wires)
assert np.allclose(dev._state, target_state, atol=tol, rtol=0)
def test_wrong_dim(self):
"""Checks that an error is raised if state has the wrong dimension"""
dev = qml.device("default.mixed", wires=3)
state = np.ones(7) / np.sqrt(7)
rho = np.outer(state, state.conj())
with pytest.raises(ValueError, match="Density matrix must be"):
dev._apply_density_matrix(rho, Wires(range(3)))
def test_not_normalized(self):
"""Checks that an error is raised if state is not normalized"""
dev = qml.device("default.mixed", wires=3)
state = np.ones(8) / np.sqrt(7)
rho = np.outer(state, state.conj())
with pytest.raises(ValueError, match="Trace of density matrix"):
dev._apply_density_matrix(rho, Wires(range(3)))
def test_wires_as_list(self, tol):
"""Checks that state is correctly prepared when device wires are given as a list,
not a number. This test helps with coverage"""
nr_wires = 2
dev = qml.device("default.mixed", wires=[0, 1])
state = np.ones(2 ** nr_wires) / np.sqrt(2 ** nr_wires)
rho = np.outer(state, state.conj())
dev._apply_density_matrix(rho, Wires(range(nr_wires)))
eq_state = hadamard_state(nr_wires)
target_state = np.reshape(eq_state, [2] * 2 * nr_wires)
assert np.allclose(dev._state, target_state, atol=tol, rtol=0)
class TestApplyOperation:
"""Unit tests for the method `_apply_operation()`. Since this just calls `_apply_channel()`
and `_apply_diagonal_unitary()`, we just check that the correct method is called"""
def test_diag_apply_op(self, mocker):
"""Tests that when applying a diagonal gate, only `_apply_diagonal_unitary` is called,
exactly once"""
spy_channel = mocker.spy(DefaultMixed, "_apply_channel")
spy_diag = mocker.spy(DefaultMixed, "_apply_diagonal_unitary")
dev = qml.device("default.mixed", wires=1)
dev._apply_operation(PauliZ(0))
spy_channel.assert_not_called()
spy_diag.assert_called_once()
def test_channel_apply_op(self, mocker):
"""Tests that when applying a non-diagonal gate, only `_apply_channel` is called,
exactly once"""
spy_channel = mocker.spy(DefaultMixed, "_apply_channel")
spy_diag = mocker.spy(DefaultMixed, "_apply_diagonal_unitary")
dev = qml.device("default.mixed", wires=1)
dev._apply_operation(PauliX(0))
spy_diag.assert_not_called()
spy_channel.assert_called_once()
class TestApply:
"""Unit tests for the main method `apply()`. We check that lists of operations are applied
correctly, rather than single operations"""
ops_and_true_state = [(None, basis_state(0, 2)), (Hadamard, hadamard_state(2))]
@pytest.mark.parametrize("op, true_state", ops_and_true_state)
def test_identity(self, op, true_state, tol):
"""Tests that applying the identity operator doesn't change the state"""
num_wires = 2
dev = qml.device("default.mixed", wires=num_wires) # prepare basis state
if op is not None:
ops = [op(i) for i in range(num_wires)]
dev.apply(ops)
# Apply Identity:
dev.apply([Identity(i) for i in range(num_wires)])
assert np.allclose(dev.state, true_state, atol=tol, rtol=0)
def test_bell_state(self, tol):
"""Tests that we correctly prepare a Bell state by applying a Hadamard then a CNOT"""
dev = qml.device("default.mixed", wires=2)
ops = [Hadamard(0), CNOT(wires=[0, 1])]
dev.apply(ops)
bell = np.zeros((4, 4))
bell[0, 0] = bell[0, 3] = bell[3, 0] = bell[3, 3] = 1 / 2
assert np.allclose(bell, dev.state, atol=tol, rtol=0)
@pytest.mark.parametrize("nr_wires", [1, 2, 3])
def test_hadamard_state(self, nr_wires, tol):
"""Tests that applying Hadamard gates on all qubits produces an equal superposition over
all basis states"""
dev = qml.device("default.mixed", wires=nr_wires)
ops = [Hadamard(i) for i in range(nr_wires)]
dev.apply(ops)
assert np.allclose(dev.state, hadamard_state(nr_wires), atol=tol, rtol=0)
@pytest.mark.parametrize("nr_wires", [1, 2, 3])
def test_max_mixed_state(self, nr_wires, tol):
"""Tests that applying damping channel on all qubits to the state |11...1> produces a
maximally mixed state"""
dev = qml.device("default.mixed", wires=nr_wires)
flips = [PauliX(i) for i in range(nr_wires)]
damps = [AmplitudeDamping(0.5, wires=i) for i in range(nr_wires)]
ops = flips + damps
dev.apply(ops)
assert np.allclose(dev.state, max_mixed_state(nr_wires), atol=tol, rtol=0)
@pytest.mark.parametrize("nr_wires", [1, 2, 3])
def test_undo_rotations(self, nr_wires, tol):
"""Tests that rotations are correctly applied by adding their inverse as initial
operations"""
dev = qml.device("default.mixed", wires=nr_wires)
ops = [Hadamard(i) for i in range(nr_wires)]
rots = ops
dev.apply(ops, rots)
basis = np.reshape(basis_state(0, nr_wires), [2] * (2 * nr_wires))
# dev.state = pre-rotated state, dev._state = state after rotations
assert np.allclose(dev.state, hadamard_state(nr_wires), atol=tol, rtol=0)
assert np.allclose(dev._state, basis, atol=tol, rtol=0)
@pytest.mark.parametrize("nr_wires", [1, 2, 3])
def test_apply_basis_state(self, nr_wires, tol):
"""Tests that we correctly apply a `BasisState` operation for the |11...1> state"""
dev = qml.device("default.mixed", wires=nr_wires)
state = np.ones(nr_wires)
dev.apply([BasisState(state, wires=range(nr_wires))])
assert np.allclose(dev.state, basis_state(2 ** nr_wires - 1, nr_wires), atol=tol, rtol=0)
@pytest.mark.parametrize("nr_wires", [1, 2, 3])
def test_apply_state_vector(self, nr_wires, tol):
"""Tests that we correctly apply a `QubitStateVector` operation for the root state"""
dev = qml.device("default.mixed", wires=nr_wires)
dim = 2 ** nr_wires
state = np.array([np.exp(1j * 2 * np.pi * n / dim) / np.sqrt(dim) for n in range(dim)])
dev.apply([QubitStateVector(state, wires=range(nr_wires))])
assert np.allclose(dev.state, root_state(nr_wires), atol=tol, rtol=0)
def test_apply_state_vector_wires(self, tol):
"""Tests that we correctly apply a `QubitStateVector` operation for the root state when
wires are passed as an ordered list"""
nr_wires = 3
dev = qml.device("default.mixed", wires=[0, 1, 2])
dim = 2 ** nr_wires
state = np.array([np.exp(1j * 2 * np.pi * n / dim) / np.sqrt(dim) for n in range(dim)])
dev.apply([QubitStateVector(state, wires=[0, 1, 2])])
assert np.allclose(dev.state, root_state(nr_wires), atol=tol, rtol=0)
def test_raise_order_error_basis_state(self):
"""Tests that an error is raised if a state is prepared after BasisState has been
applied"""
dev = qml.device("default.mixed", wires=1)
state = np.array([0])
ops = [PauliX(0), BasisState(state, wires=0)]
with pytest.raises(DeviceError, match="Operation"):
dev.apply(ops)
def test_raise_order_error_qubit_state(self):
"""Tests that an error is raised if a state is prepared after QubitStateVector has been
applied"""
dev = qml.device("default.mixed", wires=1)
state = np.array([1, 0])
ops = [PauliX(0), QubitStateVector(state, wires=0)]
with pytest.raises(DeviceError, match="Operation"):
dev.apply(ops)
def test_apply_toffoli(self, tol):
"""Tests that Toffoli gate is correctly applied on state |111> to give state |110>"""
nr_wires = 3
dev = qml.device("default.mixed", wires=nr_wires)
dev.apply([PauliX(0), PauliX(1), PauliX(2), qml.Toffoli(wires=[0, 1, 2])])
assert np.allclose(dev.state, basis_state(6, 3), atol=tol, rtol=0)
def test_apply_qubitunitary(self, tol):
"""Tests that custom qubit unitary is correctly applied"""
nr_wires = 1
theta = 0.42
U = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]])
dev = qml.device("default.mixed", wires=nr_wires)
dev.apply([qml.QubitUnitary(U, wires=[0])])
ket = np.array([np.cos(theta) + 0j, np.sin(theta) + 0j])
target_rho = np.outer(ket, np.conj(ket))
assert np.allclose(dev.state, target_rho, atol=tol, rtol=0)
class TestInit:
"""Tests related to device initializtion"""
def test_nr_wires(self):
"""Tests that an error is raised if the device is initialized with more than 23 wires"""
with pytest.raises(ValueError, match="This device does not currently"):
qml.device("default.mixed", wires=24)
def test_analytic_deprecation(self):
"""Tests if the kwarg `analytic` is used and displays error message."""
msg = "The analytic argument has been replaced by shots=None. "
msg += "Please use shots=None instead of analytic=True."
with pytest.raises(
DeviceError,
match=msg,
):
qml.device("default.mixed", wires=1, shots=1, analytic=True)
| 40.050921
| 98
| 0.605026
|
0d5bc254e5d558a962c96f7a2dae1a5f2956414c
| 79
|
py
|
Python
|
core/__init__.py
|
corenel/pytorch-starter-kit
|
f09e3193d4e03e0d018f66158e3d426cb8766e22
|
[
"MIT"
] | 2
|
2019-06-07T05:22:49.000Z
|
2020-07-22T10:11:26.000Z
|
core/__init__.py
|
corenel/pytorch-starter-kit
|
f09e3193d4e03e0d018f66158e3d426cb8766e22
|
[
"MIT"
] | null | null | null |
core/__init__.py
|
corenel/pytorch-starter-kit
|
f09e3193d4e03e0d018f66158e3d426cb8766e22
|
[
"MIT"
] | null | null | null |
from .demo import demo
from .evaluate import evaluate
from .train import train
| 19.75
| 30
| 0.810127
|
a68e4d6dad7169da07517402fa872099e63dbfa4
| 6,752
|
py
|
Python
|
frappe/core/doctype/event/event.py
|
pawaranand/phr_frappe
|
d997ae7d6fbade4b2c4a2491603d988876dfd67e
|
[
"MIT"
] | null | null | null |
frappe/core/doctype/event/event.py
|
pawaranand/phr_frappe
|
d997ae7d6fbade4b2c4a2491603d988876dfd67e
|
[
"MIT"
] | 1
|
2015-07-11T20:52:38.000Z
|
2019-12-06T15:00:58.000Z
|
frappe/core/doctype/event/event.py
|
pawaranand/phr_frappe
|
d997ae7d6fbade4b2c4a2491603d988876dfd67e
|
[
"MIT"
] | 2
|
2015-09-05T05:30:23.000Z
|
2018-03-21T19:45:10.000Z
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import getdate, cint, add_months, date_diff, add_days, nowdate
from frappe.core.doctype.user.user import STANDARD_USERS
from frappe.model.document import Document
weekdays = ["monday", "tuesday", "wednesday", "thursday", "friday", "saturday", "sunday"]
class Event(Document):
def validate(self):
if self.starts_on and self.ends_on and self.starts_on > self.ends_on:
frappe.msgprint(frappe._("Event end must be after start"), raise_exception=True)
if self.starts_on == self.ends_on:
# this scenario doesn't make sense i.e. it starts and ends at the same second!
self.ends_on = None
if self.starts_on and self.ends_on and int(date_diff(self.ends_on.split(" ")[0], self.starts_on.split(" ")[0])) > 0 \
and self.repeat_on == "Every Day":
frappe.msgprint(frappe._("Every day events should finish on the same day."), raise_exception=True)
def get_permission_query_conditions(user):
if not user: user = frappe.session.user
return """(tabEvent.event_type='Public' or tabEvent.owner='%(user)s'
or exists(select * from `tabEvent User` where
`tabEvent User`.parent=tabEvent.name and `tabEvent User`.person='%(user)s')
or exists(select * from `tabEvent Role` where
`tabEvent Role`.parent=tabEvent.name
and `tabEvent Role`.role in ('%(roles)s')))
""" % {
"user": frappe.db.escape(user),
"roles": "', '".join([frappe.db.escape(r) for r in frappe.get_roles(user)])
}
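# Illustrative output of the condition above (hypothetical user and roles, added
# for clarity): for user "test@example.com" with roles "All" and "Blogger" the
# returned fragment is roughly:
# (tabEvent.event_type='Public' or tabEvent.owner='test@example.com'
# or exists(select * from `tabEvent User` where ... person='test@example.com')
# or exists(select * from `tabEvent Role` where ... role in ('All', 'Blogger')))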
def has_permission(doc, user):
if doc.event_type=="Public" or doc.owner==user:
return True
if doc.get("event_individuals", {"person": user}):
return True
if doc.get("event_roles", {"role":("in", frappe.get_roles(user))}):
return True
return False
def send_event_digest():
today = nowdate()
for user in frappe.db.sql("""select name, email, language
from tabUser where ifnull(enabled,0)=1
and user_type='System User' and name not in ({})""".format(", ".join(["%s"]*len(STANDARD_USERS))),
STANDARD_USERS, as_dict=1):
events = get_events(today, today, user.name, for_reminder=True)
if events:
text = ""
frappe.set_user_lang(user.name, user.language)
text = "<h3>" + frappe._("Events In Today's Calendar") + "</h3>"
for e in events:
if e.all_day:
e.starts_on = "All Day"
text += "<h4>%(starts_on)s: %(subject)s</h4><p>%(description)s</p>" % e
text += '<p style="color: #888; font-size: 80%; margin-top: 20px; padding-top: 10px; border-top: 1px solid #eee;">'\
+ frappe._("Daily Event Digest is sent for Calendar Events where reminders are set.")+'</p>'
from frappe.utils.email_lib import sendmail
sendmail(recipients=user.email, subject=frappe._("Upcoming Events for Today"),
msg = text)
@frappe.whitelist()
def get_events(start, end, user=None, for_reminder=False):
if not user:
user = frappe.session.user
roles = frappe.get_roles(user)
events = frappe.db.sql("""select name, subject, description,
starts_on, ends_on, owner, all_day, event_type, repeat_this_event, repeat_on,repeat_till,
monday, tuesday, wednesday, thursday, friday, saturday, sunday
from tabEvent where ((
(date(starts_on) between date('%(start)s') and date('%(end)s'))
or (date(ends_on) between date('%(start)s') and date('%(end)s'))
or (date(starts_on) <= date('%(start)s') and date(ends_on) >= date('%(end)s'))
) or (
date(starts_on) <= date('%(start)s') and ifnull(repeat_this_event,0)=1 and
ifnull(repeat_till, "3000-01-01") > date('%(start)s')
))
%(reminder_condition)s
and (event_type='Public' or owner='%(user)s'
or exists(select * from `tabEvent User` where
`tabEvent User`.parent=tabEvent.name and person='%(user)s')
or exists(select * from `tabEvent Role` where
`tabEvent Role`.parent=tabEvent.name
and `tabEvent Role`.role in ('%(roles)s')))
order by starts_on""" % {
"start": start,
"end": end,
"reminder_condition": "and ifnull(send_reminder,0)=1" if for_reminder else "",
"user": user,
"roles": "', '".join(roles)
}, as_dict=1)
# process recurring events
start = start.split(" ")[0]
end = end.split(" ")[0]
add_events = []
remove_events = []
def add_event(e, date):
new_event = e.copy()
enddate = add_days(date,int(date_diff(e.ends_on.split(" ")[0], e.starts_on.split(" ")[0]))) \
if (e.starts_on and e.ends_on) else date
new_event.starts_on = date + " " + e.starts_on.split(" ")[1]
if e.ends_on:
new_event.ends_on = enddate + " " + e.ends_on.split(" ")[1]
add_events.append(new_event)
for e in events:
if e.repeat_this_event:
event_start, time_str = e.starts_on.split(" ")
            if not e.repeat_till:
repeat = "3000-01-01"
else:
repeat = e.repeat_till
if e.repeat_on=="Every Year":
start_year = cint(start.split("-")[0])
end_year = cint(end.split("-")[0])
event_start = "-".join(event_start.split("-")[1:])
# repeat for all years in period
for year in range(start_year, end_year+1):
date = str(year) + "-" + event_start
if date >= start and date <= end and date <= repeat:
add_event(e, date)
remove_events.append(e)
if e.repeat_on=="Every Month":
date = start.split("-")[0] + "-" + start.split("-")[1] + "-" + event_start.split("-")[2]
# last day of month issue, start from prev month!
try:
getdate(date)
except ValueError:
date = date.split("-")
date = date[0] + "-" + str(cint(date[1]) - 1) + "-" + date[2]
start_from = date
for i in xrange(int(date_diff(end, start) / 30) + 3):
if date >= start and date <= end and date <= repeat and date >= event_start:
add_event(e, date)
date = add_months(start_from, i+1)
remove_events.append(e)
if e.repeat_on=="Every Week":
weekday = getdate(event_start).weekday()
# monday is 0
start_weekday = getdate(start).weekday()
            # start from nearest weekday after last monday
date = add_days(start, weekday - start_weekday)
for cnt in xrange(int(date_diff(end, start) / 7) + 3):
if date >= start and date <= end and date <= repeat and date >= event_start:
add_event(e, date)
date = add_days(date, 7)
remove_events.append(e)
if e.repeat_on=="Every Day":
for cnt in xrange(date_diff(end, start) + 1):
date = add_days(start, cnt)
if date >= event_start and date <= end and date <= repeat \
and e[weekdays[getdate(date).weekday()]]:
add_event(e, date)
remove_events.append(e)
for e in remove_events:
events.remove(e)
events = events + add_events
for e in events:
# remove weekday properties (to reduce message size)
for w in weekdays:
del e[w]
return events
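# Recurrence expansion example (added for illustration, hypothetical dates):
# a yearly event with starts_on "2014-05-20 10:00:00" queried with
# start="2015-01-01" and end="2015-12-31" keeps only the month-day part
# ("05-20"), so add_event() emits a copy with starts_on "2015-05-20 10:00:00"
# and the original recurring row is dropped via remove_events.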
| 34.274112
| 119
| 0.662767
|
ed0d5b80508ca11e43f580da5b495d4ba7bb3483
| 4,566
|
py
|
Python
|
Analyses/analysesMaster.py
|
alexgonzl/TreeMazeAnalyses
|
a834dc6b59beffe6bce59cdd9749b761fab3fe08
|
[
"MIT"
] | null | null | null |
Analyses/analysesMaster.py
|
alexgonzl/TreeMazeAnalyses
|
a834dc6b59beffe6bce59cdd9749b761fab3fe08
|
[
"MIT"
] | null | null | null |
Analyses/analysesMaster.py
|
alexgonzl/TreeMazeAnalyses
|
a834dc6b59beffe6bce59cdd9749b761fab3fe08
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
from scipy import signal, ndimage, interpolate, stats
from itertools import combinations
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from matplotlib.ticker import FormatStrFormatter
from matplotlib.offsetbox import AnchoredText
import statsmodels.api as sm
from sklearn.model_selection import train_test_split
import statsmodels.formula.api as smf
from joblib import Parallel, delayed
import seaborn as sns
font = {'family' : 'sans-serif',
'size' : 20}
plt.rc('font', **font)
plt.rc('text',usetex=False)
from pathlib import Path
import os,sys, time
import h5py, json
import pickle as pkl
sys.path.append('../PreProcessing/')
sys.path.append('../TrackingAnalyses/')
sys.path.append('../Lib/')
sys.path.append('../Analyses/')
import TreeMazeFunctions as TMF
import spike_functions as SF
import zone_analyses_session as ZA
if not sys.warnoptions:
import warnings
warnings.simplefilter("ignore")
import nept
from filters_ag import *
from importlib import reload # Python 3.4+ only.
import pre_process_neuralynx as PPN
import spatial_tuning as ST
import stats_functions as StatsF
import plot_functions as PF
import TrialAnalyses as TA
def getSessionPaths(rootPath, session,step=0.02,SR=32000):
tmp = session.split('_')
animal = tmp[0]
task = tmp[1]
date = tmp[2]
Paths = {}
Paths['session'] = session
Paths['animal']=animal
Paths['task'] = task
Paths['date'] = date
Paths['step'] = step
Paths['SR'] = SR
Paths['Clusters'] = rootPath['Clustered'] / animal /(session+'_KSClusters')
Paths['Raw'] = rootPath['Raw'] / animal / session
Paths['PreProcessed'] = rootPath['PreProcessed'] / animal / (session + '_Results')
Paths['ClusterTable'] = rootPath['Clustered'] / animal / (animal+'_ClusteringSummary.json')
Paths['Analyses'] = rootPath['Analyses'] / animal/ (session + '_Analyses')
if not Paths['Clusters'].exists():
print('Error, no Cluster Folder found.')
if not Paths['PreProcessed'].exists():
print('Error, no processed binaries found.')
if not Paths['ClusterTable'].exists():
print('Error, no clustering table found.')
Paths['Analyses'].mkdir(parents=True, exist_ok=True)
Paths['BehavTrackDat'] = Paths['Analyses'] / ('BehTrackVariables_{}ms.h5'.format(int(step*1000)))
Paths['Cell_Spikes'] = Paths['Analyses'] / 'Cell_Spikes.json'
Paths['Cell_Bin_Spikes'] = Paths['Analyses'] / ('Cell_Bin_Spikes_{}ms.npy'.format(int(step*1000)))
Paths['Cell_FR'] = Paths['Analyses'] / ('Cell_FR_{}ms.npy'.format(int(step*1000)))
Paths['Mua_Spikes'] = Paths['Analyses'] / 'Mua_Spikes.json'
Paths['Mua_Bin_Spikes'] = Paths['Analyses'] / ('Mua_Bin_Spikes_{}ms.npy'.format(int(step*1000)))
Paths['Mua_FR'] = Paths['Analyses'] / ('Mua_FR_{}ms.npy'.format(int(step*1000)))
Paths['Spike_IDs'] = Paths['Analyses'] / 'Spike_IDs.json'
Paths['ZoneAnalyses'] = Paths['Analyses'] / 'ZoneAnalyses.pkl'
Paths['TrialInfo'] = Paths['Analyses'] / 'TrInfo.pkl'
Paths['TrialCondMat'] = Paths['Analyses'] / 'TrialCondMat.csv'
Paths['TrLongPosMat'] = Paths['Analyses'] / 'TrLongPosMat.csv'
Paths['TrLongPosFRDat'] = Paths['Analyses'] / 'TrLongPosFRDat.csv'
Paths['TrModelFits'] = Paths['Analyses'] / 'TrModelFits.csv'
# plots directories
Paths['Plots'] = Paths['Analyses'] / 'Plots'
Paths['Plots'].mkdir(parents=True, exist_ok=True)
Paths['SampCountsPlots'] = Paths['Plots'] / 'SampCountsPlots'
Paths['SampCountsPlots'].mkdir(parents=True, exist_ok=True)
Paths['ZoneFRPlots'] = Paths['Plots'] / 'ZoneFRPlots'
Paths['ZoneFRPlots'].mkdir(parents=True, exist_ok=True)
Paths['ZoneCorrPlots'] = Paths['Plots'] / 'ZoneCorrPlots'
Paths['ZoneCorrPlots'].mkdir(parents=True, exist_ok=True)
Paths['SIPlots'] = Paths['Plots'] / 'SIPlots'
Paths['SIPlots'].mkdir(parents=True, exist_ok=True)
Paths['TrialPlots'] = Paths['Plots'] / 'TrialPlots'
Paths['TrialPlots'].mkdir(parents=True, exist_ok=True)
return Paths
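# Illustrative usage (hypothetical session name and root paths, added for clarity):
# rootPath = {'Clustered': Path('/data/Clustered'), 'Raw': Path('/data/Raw'),
#             'PreProcessed': Path('/data/PreProcessed'), 'Analyses': Path('/data/Analyses')}
# paths = getSessionPaths(rootPath, 'NZ1_T3g_052919', step=0.02)
# paths['Cell_FR'] then points at .../NZ1_T3g_052919_Analyses/Cell_FR_20ms.npy,
# because int(step * 1000) == 20.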
| 36.238095
| 103
| 0.681778
|
20043559821bfdf33e476efd417b1db781e599c4
| 2,037
|
py
|
Python
|
sympy/combinatorics/group_constructs.py
|
ethankward/sympy
|
44664d9f625a1c68bc492006cfe1012cb0b49ee4
|
[
"BSD-3-Clause"
] | 2
|
2021-02-16T14:20:37.000Z
|
2021-02-16T16:37:47.000Z
|
sympy/combinatorics/group_constructs.py
|
otoosakyidavid/sympy
|
636221ff35c78b980f828a285d0c552fac77aaba
|
[
"BSD-3-Clause"
] | null | null | null |
sympy/combinatorics/group_constructs.py
|
otoosakyidavid/sympy
|
636221ff35c78b980f828a285d0c552fac77aaba
|
[
"BSD-3-Clause"
] | 1
|
2021-02-28T20:26:24.000Z
|
2021-02-28T20:26:24.000Z
|
from __future__ import print_function, division
from sympy.combinatorics.perm_groups import PermutationGroup
from sympy.combinatorics.permutations import Permutation
from sympy.utilities.iterables import uniq
_af_new = Permutation._af_new
def DirectProduct(*groups):
"""
Returns the direct product of several groups as a permutation group.
This is implemented much like the __mul__ procedure for taking the direct
product of two permutation groups, but the idea of shifting the
generators is realized in the case of an arbitrary number of groups.
A call to DirectProduct(G1, G2, ..., Gn) is generally expected to be faster
than a call to G1*G2*...*Gn (and thus the need for this algorithm).
Examples
========
>>> from sympy.combinatorics.group_constructs import DirectProduct
>>> from sympy.combinatorics.named_groups import CyclicGroup
>>> C = CyclicGroup(4)
>>> G = DirectProduct(C, C, C)
>>> G.order()
64
See Also
========
sympy.combinatorics.perm_groups.PermutationGroup.__mul__
"""
degrees = []
gens_count = []
total_degree = 0
total_gens = 0
for group in groups:
current_deg = group.degree
current_num_gens = len(group.generators)
degrees.append(current_deg)
total_degree += current_deg
gens_count.append(current_num_gens)
total_gens += current_num_gens
array_gens = []
for i in range(total_gens):
array_gens.append(list(range(total_degree)))
current_gen = 0
current_deg = 0
for i in range(len(gens_count)):
for j in range(current_gen, current_gen + gens_count[i]):
gen = ((groups[i].generators)[j - current_gen]).array_form
array_gens[j][current_deg:current_deg + degrees[i]] = \
[x + current_deg for x in gen]
current_gen += gens_count[i]
current_deg += degrees[i]
perm_gens = list(uniq([_af_new(list(a)) for a in array_gens]))
return PermutationGroup(perm_gens, dups=False)
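# Illustration of the generator shifting (added; not part of the original module):
# for C2 = CyclicGroup(2), DirectProduct(C2, C2) acts on 4 points and its
# generators in array form are [1, 0, 2, 3] and [0, 1, 3, 2] -- the second
# group's generator is shifted by the first group's degree.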
| 33.393443
| 79
| 0.678449
|
152c684cd25354739e8ca8026d4fe365444d805b
| 11,543
|
py
|
Python
|
scripts/clean/assign_read_pass.py
|
rheinbaylab/cDNA-detector
|
72c9415cea8568c11edf696542b8baa967e392ec
|
[
"BSD-3-Clause"
] | 1
|
2021-07-30T06:42:23.000Z
|
2021-07-30T06:42:23.000Z
|
scripts/clean/assign_read_pass.py
|
rheinbaylab/cDNA-detector
|
72c9415cea8568c11edf696542b8baa967e392ec
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/clean/assign_read_pass.py
|
rheinbaylab/cDNA-detector
|
72c9415cea8568c11edf696542b8baa967e392ec
|
[
"BSD-3-Clause"
] | 1
|
2021-08-23T20:33:07.000Z
|
2021-08-23T20:33:07.000Z
|
import pysam
import pandas as pd
import numpy as np
import re
import sys
import random
import os
import logging
from itertools import compress
from datetime import datetime
try:
from .remove_function import *
except:
from remove_function import *
def read_cDNA(exon_readlist_raw, exon_readlist_pos_type, tmp_chr, tmp_start, tmp_end):
# logging.logger.info('define if reads belong to cDNA')
# 1. start, start_S
# 2. end, end_S;
# 3. start, start_consensus_seq
# 4. end, end_consequence
# begin
readlist_cDNA = list()
# logging.logger.info('define if reads belong to cDNA')
# 1. start, start_S
len_list= len(exon_readlist_raw);
list_nuc_start = list();
for i in range(len_list):
tmp_read = exon_readlist_raw[i]
tmp_cigar = tmp_read.cigar[0][0]
tmp_cigar_len = tmp_read.cigar[0][1]
tmp_seq = tmp_read.to_dict()['seq']
# if re.search("^[0-9]*S",tmp_cigar) and exon_readlist_raw[i].pos == tmp_start:
if tmp_cigar == 4 and tmp_read.pos == tmp_start:
readlist_cDNA.append(tmp_read)
list_nuc_start.append(tmp_seq[0:int(tmp_cigar_len)][::-1])
elif tmp_cigar == 5 and tmp_read.pos == tmp_start:
readlist_cDNA.append(tmp_read)
# 2. start, start_consensus_seq
consensus_seq_start = str();
list_nuc_start.sort(key = len)
if len(list_nuc_start)==0:
pass;
else:
for i in range(0,len(list_nuc_start[-1])):
tmp = [x[0] for x in list_nuc_start if x]
tmp_count = [ tmp.count(n) for n in list('ACGT') ]
tmp_sum = sum(tmp_count)+ 0.01
tmp_freq = [float(x)/tmp_sum for x in tmp_count]
list_nuc_start = [x[1:] for x in list_nuc_start if x]
if tmp_count[tmp_freq.index(max(tmp_freq))]>=2 and max(tmp_freq)>=0.8:
                consensus_seq_start = consensus_seq_start + 'ACGT'[tmp_freq.index(max(tmp_freq))]
else:
break
for read in exon_readlist_raw:
if read.pos < tmp_start:
tmp_seq = read.to_dict()['seq']
tmp_seq_hang = tmp_seq[0:(len(tmp_seq) - len([tmp_pos for tmp_pos in read.positions if tmp_pos>=tmp_start]))][::-1]
if any([re.match(tmp_seq_hang,consensus_seq_start),re.match(consensus_seq_start,tmp_seq_hang)]) and len(tmp_seq_hang)>0:
readlist_cDNA.append(read)
# 2. end, end_S;
list_nuc_end = list();
for i in range(len_list):
tmp_read = exon_readlist_raw[i]
tmp_cigar = tmp_read.cigar[-1][0]
tmp_cigar_len = tmp_read.cigar[-1][1]
tmp_seq = tmp_read.to_dict()['seq']
if tmp_cigar == 4 and tmp_read.aend == tmp_end:
readlist_cDNA.append(tmp_read)
list_nuc_end.append(tmp_seq[-int(tmp_cigar_len):])
elif tmp_cigar == 5 and tmp_read.aend == tmp_end:
readlist_cDNA.append(tmp_read)
# 4. end, end_consequence
consensus_seq_end = str();
list_nuc_end.sort(key = len)
if len(list_nuc_end)==0:
pass
else:
for i in range(0,len(list_nuc_end[-1])):
tmp = [x[0] for x in list_nuc_end if x]
tmp_count = [ tmp.count(n) for n in list('ACGT') ]
tmp_sum = sum(tmp_count)+ 0.01
tmp_freq = [float(x)/tmp_sum for x in tmp_count]
list_nuc_end = [x[1:] for x in list_nuc_end if x]
if tmp_count[tmp_freq.index(max(tmp_freq))]>=2 and max(tmp_freq)>=0.8:
                consensus_seq_end = consensus_seq_end + 'ACGT'[tmp_freq.index(max(tmp_freq))]
else:
break
for read in exon_readlist_raw:
if read.aend>tmp_end:
tmp_seq = read.to_dict()['seq']
tmp_seq_hang = tmp_seq[::-1][0:int(len(tmp_seq) - len([tmp_pos for tmp_pos in read.positions if tmp_pos<tmp_end]))][::-1]
if any([re.match(tmp_seq_hang,consensus_seq_end),re.match(consensus_seq_end,tmp_seq_hang)]) and len(tmp_seq_hang)>0:
readlist_cDNA.append(read)
readlist_cDNA = list(set(readlist_cDNA))
return(readlist_cDNA)
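# Consensus sketch (added for illustration, hypothetical clipped fragments):
# if the reversed soft-clipped prefixes are ["AC", "ACG"], columns 0 and 1 each
# have two matching bases (count >= 2, frequency >= 0.8), giving consensus "AC",
# while column 2 has a single "G" and the loop breaks; reads starting before
# tmp_start are then kept only if their reversed overhang matches that consensus.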
def read_gDNA(exon_readlist_raw, exon_readlist_pos_type, tmp_chr, tmp_start, tmp_end,exon_readlist_cDNA):
# logging.logger.info('read_gDNA')
# 1. start, span the exon regions;
# 2. end, span the exon regions;
readlist_gDNA = list();
len_list= len(exon_readlist_raw);
for i in range(len_list):
# logging.logger.info(i)
tmp_read = exon_readlist_raw[i]
tmp_read_pos_start = tmp_read.pos
tmp_read_pos_end = tmp_read.aend
if exon_readlist_pos_type[i] == 'edge':
if (int(tmp_read_pos_start) - int(tmp_start))*(tmp_read_pos_end- int(tmp_start)) < 0:
readlist_gDNA.append(tmp_read)
elif (int(tmp_read_pos_start) - int(tmp_end))*(tmp_read_pos_end - int(tmp_end))< 0:
readlist_gDNA.append(tmp_read)
readlist_gDNA = list(set(readlist_gDNA) - set(exon_readlist_cDNA))
return readlist_gDNA
def read_unclass(readlist_raw, readlist_cDNA, readlist_gDNA):
# logging.logger.info('read_unclass')
readlist_uDNA = list(set(readlist_raw) - set(readlist_cDNA) - set(readlist_gDNA))
return readlist_uDNA
def cdna_exon_read_assign(tmp_region, exon_df_region, file_bam):
tmp_chr = exon_df_region.iloc[0]['seqname']
tmp_start = exon_df_region.iloc[0]['start']
tmp_end = exon_df_region.iloc[0]['end']
exon_readlist_raw, exon_readlist_pos_type, exon_readlist_region = read_allreads(exon_df_region, file_bam)
## class the raw read list from exon region
exon_readlist_cDNA = read_cDNA(exon_readlist_raw, exon_readlist_pos_type, tmp_chr, tmp_start, tmp_end)
exon_readlist_gDNA = read_gDNA(exon_readlist_raw, exon_readlist_pos_type, tmp_chr, tmp_start, tmp_end,exon_readlist_cDNA)
exon_readlist_uDNA = read_unclass(exon_readlist_raw, exon_readlist_cDNA, exon_readlist_gDNA)
return exon_readlist_raw, exon_readlist_pos_type, exon_readlist_region, exon_readlist_cDNA, exon_readlist_gDNA, exon_readlist_uDNA
def cdna_gene_read_assign_single_end(gene_df_region,file_bam):
gene_readlist_raw = list()
gene_readlist_pos_type = list()
gene_readlist_region = list()
gene_readlist_cDNA = list()
gene_readlist_gDNA = list()
gene_readlist_uDNA = list()
gene_readlist_raw_merge = list()
gene_readlist_pos_type_merge = list()
gene_readlist_region_merge = list()
gene_readlist_cDNA_merge = list()
gene_readlist_gDNA_merge = list()
gene_readlist_uDNA_merge = list()
for tmp_region, exon_df_region in gene_df_region.groupby('tmp_region'):
exon_readlist_raw, exon_readlist_pos_type, exon_readlist_region, exon_readlist_cDNA, exon_readlist_gDNA, exon_readlist_uDNA = cdna_exon_read_assign(tmp_region, exon_df_region, file_bam)
gene_readlist_raw.append(exon_readlist_raw)
gene_readlist_pos_type.append(exon_readlist_pos_type)
gene_readlist_region.append(exon_readlist_region)
gene_readlist_cDNA.append(exon_readlist_cDNA)
gene_readlist_gDNA.append(exon_readlist_gDNA)
gene_readlist_uDNA.append(exon_readlist_uDNA)
return(gene_readlist_raw, gene_readlist_pos_type, gene_readlist_cDNA, gene_readlist_gDNA, gene_readlist_uDNA)
def cdna_gene_read_assign_paired_end(gene_df_region, file_bam):
gene_readlist_raw = list()
gene_readlist_pos_type = list()
gene_readlist_region = list()
gene_readlist_cDNA = list()
gene_readlist_gDNA = list()
gene_readlist_uDNA = list()
gene_readlist_raw_merge = list()
gene_readlist_pos_type_merge = list()
gene_readlist_region_merge = list()
gene_readlist_cDNA_merge = list()
gene_readlist_gDNA_merge = list()
gene_readlist_uDNA_merge = list()
for tmp_region, exon_df_region in gene_df_region.groupby('tmp_region'):
exon_readlist_raw, exon_readlist_pos_type, exon_readlist_region, exon_readlist_cDNA, exon_readlist_gDNA, exon_readlist_uDNA = cdna_exon_read_assign(tmp_region, exon_df_region, file_bam)
gene_readlist_raw.append(exon_readlist_raw)
gene_readlist_pos_type.append(exon_readlist_pos_type)
gene_readlist_region.append(exon_readlist_region)
gene_readlist_cDNA.append(exon_readlist_cDNA)
gene_readlist_gDNA.append(exon_readlist_gDNA)
gene_readlist_uDNA.append(exon_readlist_uDNA)
# merge all readlist information
gene_readlist_raw_merge.extend(exon_readlist_raw)
gene_readlist_pos_type_merge.extend(exon_readlist_pos_type)
gene_readlist_region_merge.extend(exon_readlist_region)
gene_readlist_cDNA_merge.extend(exon_readlist_cDNA)
gene_readlist_gDNA_merge.extend(exon_readlist_gDNA)
gene_readlist_uDNA_merge.extend(exon_readlist_uDNA)
gene_readlist_cDNA_merge_tmp_twoexon = list()
for i in range(len(gene_readlist_raw)):
tmp_gene_readlist_uDNA_list1 = gene_readlist_uDNA[i]
tmp_gene_readlist_uDNA_list2 = list(set(exon_readlist_uDNA) - set(tmp_gene_readlist_uDNA_list1))
tmp_gene_readlist1_cDNA, tmp_gene_readlist2_cDNA = readlist_pair(tmp_gene_readlist_uDNA_list1, tmp_gene_readlist_uDNA_list2)
tmp_gene_readlist1_cDNA.extend(tmp_gene_readlist2_cDNA)
gene_readlist_cDNA_merge_tmp_twoexon.extend(list(set(tmp_gene_readlist1_cDNA) - set(gene_readlist_gDNA_merge)))
gene_readlist_cDNA_merge_tmp_twoexon = list(set(gene_readlist_cDNA_merge_tmp_twoexon))
gene_readlist_uDNA_merge = diff_list(gene_readlist_uDNA_merge, gene_readlist_cDNA_merge_tmp_twoexon)
    # 2. one of the paired reads already belongs to the cDNA reads
gene_readlist_cDNA_merge_tmp_1cdna = list()
tmp_gene_readlist_uDNA_list1 = gene_readlist_uDNA_merge
tmp_gene_readlist_uDNA_list2 = list(set(gene_readlist_cDNA_merge_tmp_twoexon + gene_readlist_cDNA_merge ))
tmp_gene_readlist1_cDNA, tmp_gene_readlist2_cDNA = readlist_pair(tmp_gene_readlist_uDNA_list1, tmp_gene_readlist_uDNA_list2)
gene_readlist_cDNA_merge_tmp_1cdna = list(set(tmp_gene_readlist1_cDNA))
gene_readlist_cDNA_merge = list(set(gene_readlist_cDNA_merge + gene_readlist_cDNA_merge_tmp_1cdna + gene_readlist_cDNA_merge_tmp_twoexon))
gene_readlist_uDNA_merge = diff_list(gene_readlist_uDNA_merge, gene_readlist_cDNA_merge_tmp_1cdna)
# 2. gDNA
    # one of the paired reads is located outside the exon regions
tmp_gene_readlist_uDNA_list1 = list(compress(gene_readlist_uDNA_merge,[read.is_proper_pair for read in gene_readlist_uDNA_merge]))
tmp_gene_readlist_uDNA_list2 = gene_readlist_raw_merge
tmp_gene_readlist1_uDNA, tmp_gene_readlist2_uDNA = readlist_pair(tmp_gene_readlist_uDNA_list1, tmp_gene_readlist_uDNA_list2)
gene_readlist_gDNA_merge_tmp_1noexon = diff_list(tmp_gene_readlist_uDNA_list1, tmp_gene_readlist1_uDNA)
    # one of the paired reads already belongs to gDNA
tmp_gene_readlist_uDNA_list1 = list(compress(gene_readlist_uDNA_merge,[read.is_proper_pair for read in gene_readlist_uDNA_merge]))
tmp_gene_readlist_uDNA_list2 = gene_readlist_gDNA_merge
tmp_gene_readlist1_cDNA, tmp_gene_readlist2_cDNA = readlist_pair(tmp_gene_readlist_uDNA_list1, tmp_gene_readlist_uDNA_list2)
gene_readlist_gDNA_merge_tmp_1gDNA = tmp_gene_readlist1_cDNA
gene_readlist_cDNA_merge_new = list(set(gene_readlist_cDNA_merge + gene_readlist_cDNA_merge_tmp_1cdna + gene_readlist_cDNA_merge_tmp_twoexon ))
gene_readlist_gDNA_merge_new = list(set(gene_readlist_gDNA_merge + gene_readlist_gDNA_merge_tmp_1gDNA + gene_readlist_gDNA_merge_tmp_1noexon))
gene_readlist_uDNA_merge_new = diff_list(gene_readlist_uDNA_merge, gene_readlist_cDNA_merge_new + gene_readlist_gDNA_merge_new)
# return gene_readlist_cDNA_merge_new, gene_readlist_gDNA_merge_new, gene_readlist_uDNA_merge_new
# reassign genelist to results
    gene_readlist_cDNA = [[] for _ in gene_readlist_cDNA]
    gene_readlist_gDNA = [[] for _ in gene_readlist_gDNA]
    gene_readlist_uDNA = [[] for _ in gene_readlist_uDNA]
for i in range(len(gene_readlist_raw)):
tmp_gene_readlist_raw = set(gene_readlist_raw[i])
gene_readlist_cDNA[i] = list(set(gene_readlist_cDNA_merge_new).intersection(tmp_gene_readlist_raw))
gene_readlist_gDNA[i] = list(set(gene_readlist_gDNA_merge_new).intersection(tmp_gene_readlist_raw))
gene_readlist_uDNA[i] = list(set(gene_readlist_uDNA_merge_new).intersection(tmp_gene_readlist_raw))
return(gene_readlist_raw,gene_readlist_pos_type, gene_readlist_cDNA, gene_readlist_gDNA, gene_readlist_uDNA)
| 48.297071
| 187
| 0.803517
|
97de09e884c1a3587a6e108cadb7aa92b5242246
| 9,553
|
py
|
Python
|
stairs.py
|
pzy/LEDstairs
|
ccb16caf3f8dba20dddc153518529b265b1fc9b7
|
[
"MIT"
] | null | null | null |
stairs.py
|
pzy/LEDstairs
|
ccb16caf3f8dba20dddc153518529b265b1fc9b7
|
[
"MIT"
] | null | null | null |
stairs.py
|
pzy/LEDstairs
|
ccb16caf3f8dba20dddc153518529b265b1fc9b7
|
[
"MIT"
] | 1
|
2020-07-29T08:35:38.000Z
|
2020-07-29T08:35:38.000Z
|
#!/usr/bin/env python3
# rpi_ws281x library strandtest example
# Author: Tony DiCola (tony@tonydicola.com)
#
# Direct port of the Arduino NeoPixel library strandtest example. Showcases
# various animations on a strip of NeoPixels.
import time
import argparse
import math
import random
import sys
import logging
from random import seed
import RPi.GPIO as GPIO
from threading import Thread, Lock
from rpi_ws281x import *
from neopixel import Adafruit_NeoPixel,Color
# LED strip configuration:
LED_PIN = 18 # GPIO pin connected to the pixels (18 uses PWM!).
LIGHT_PIN = 2 # GPIO pin the light sensor is connected to; since we do not use an analog-to-digital converter we can only detect lights on or lights off
LIGHT_BRIGHTNESS=170 # brightness on strip when light is detected
DARK_BRIGHTNESS=40 # brightness on strip when lights are off
MOTION_PIN = 17 # pin the first motion sensor is connected to (depending on which sensor is hit, the strip starts the animation from the top or from the bottom)
MOTION2_PIN = 27 # pin the second motion sensor is connected to
LED_FREQ_HZ = 800000 # LED signal frequency in hertz (usually 800khz)
LED_DMA = 10 # DMA channel to use for generating signal (try 10)
LED_BRIGHTNESS = DARK_BRIGHTNESS # Set to 0 for darkest and 255 for brightest, default brightness, will be overwritten by light or dark brightness
LED_INVERT = False # True to invert the signal (when using NPN transistor level shift)
LED_CHANNEL = 0 # set to '1' for GPIOs 13, 19, 41, 45 or 53
#CUSTOM_STEP_LENGTH=[60, 64, 84, 65, 58 , 58, 58, 58, 58, 58, 58, 58, 58, 58, 55, 55, 55];
CUSTOM_STEP_LENGTH=[63, 51, 50, 56, 56 , 56, 56, 57, 57, 56, 56, 56, 57, 62, 82, 62, 58 ];
#CUSTOM_STEP_LENGTH=[2, 4, 3, 5 , 1, 15]
CUSTOM_STEP_STARTS=[]
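# Note (assumption, not shown in this excerpt): CUSTOM_STEP_STARTS is expected to
# hold the first LED index of each step, presumably the cumulative offsets of
# CUSTOM_STEP_LENGTH (e.g. [0, 63, 114, 164, 220, ...]), since setStep() lights
# the range CUSTOM_STEP_STARTS[stepNo-1] .. + CUSTOM_STEP_LENGTH[stepNo-1].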
ANIMATION_MILLIES=0.005
ON=False
ANIMATION_RUN=False
ANIMATION_THREAD=None
TIMEOUT_THREAD=None
TIMEOUT_RUN=False
TIMEOUT_TIME=50
DARK=True
COLOR=Color(0,0,255)
WORKING=Lock()
fdelay=0.001
idelay=0.001
EFFECT="Rainbow"
def rainbowColor(pos):
colors= [ Color(0, 255, 0), Color(127, 255, 0), Color(255, 255, 0), Color(255, 0, 0), Color(0,0,255), Color(43,46,95), Color(0, 139,255) ]
pos=pos-1
return colors[pos%len(colors)]
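# Example (added for clarity): steps 1..7 cycle through the seven colors above in
# order and step 8 wraps back to the first one, because rainbowColor(8) indexes
# colors[(8 - 1) % 7] == colors[0].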
def setColor(strip, color, step=None, reverse=False, rangeColor=0, show=True, showPixel=True, pdelay=0):
if step==None:
if reverse:
for i in range(len(CUSTOM_STEP_LENGTH),0,-1):
setStep(strip, i, color, show, showPixel, ANIMATION_MILLIES, reverse, rangeColor);
time.sleep(0.5)
else:
for i in range(1, len(CUSTOM_STEP_LENGTH)+1):
setStep(strip, i, color, show, showPixel, ANIMATION_MILLIES, reverse, rangeColor);
time.sleep(0.5)
#setStep(strip, i, color, True, True, ANIMATION_MILLIES, reverse);
else:
setStep(strip, step, color, show, showPixel, ANIMATION_MILLIES, reverse, rangeColor);
time.sleep(0.5)
#setStep(strip, step, color, True, True, ANIMATION_MILLIES, reverse, rangeColor);
def setStep(strip, stepNo, color, show=False, showPixel=False, delay=1.0, reverse=False, rangeColor=0):
start=0
stop=0
step=1
o_r=((color >> 16) & 0xFF)
o_g=((color >> 8) & 0xFF)
o_b=(color & 0xFF)
r=o_r
g=o_g
b=o_b
rr=0
rg=0
rb=0
if rangeColor>0:
rr=(rangeColor >> 16) & 0xFF
if rr>o_r:
rr=o_r/2
rg=(rangeColor >> 8) & 0xFF
if rg>o_g:
rg=o_g/2
rb=(rangeColor) & 0xFF
if rb>o_b:
rb=o_b/2
if reverse:
start=CUSTOM_STEP_STARTS[stepNo-1]+CUSTOM_STEP_LENGTH[stepNo-1]-1
stop=CUSTOM_STEP_STARTS[stepNo-1]-1
step=-1
else:
start=CUSTOM_STEP_STARTS[stepNo-1]
stop=start+CUSTOM_STEP_LENGTH[stepNo-1]
#logging.info("Setting step:" + str(stepNo) + " LED: " + str(start) + " - " + str(stop))
for i in range(start,stop, step):
if rangeColor>0:
#logging.info str(rr) +","+str(rg)+","+str(rb)
if rr>0:
r=random.randrange(o_r-rr, o_r+rr)
if rg>0:
g=random.randrange(o_g-rg, o_g+rg)
if rb>0:
b=random.randrange(o_b-rb, o_b+rb)
strip.setPixelColor(i, Color(r,g,b, 255))
#if showPixel:
# strip.show()
#time.sleep(delay/10)
if show:
strip.show()
def iceSteps(strip, reverse=False):
global idelay
if not reverse:
for i in range(1, len(CUSTOM_STEP_LENGTH)+1):
setColor(strip, Color(0, 0, 200), i, reverse, Color(0,0,40), True, False, idelay)
else:
for i in range(len(CUSTOM_STEP_LENGTH),0,-1):
setColor(strip, Color(0, 0, 200), i, reverse, Color(0,0,40), True, False, idelay)
def fireSteps(strip, reverse=False):
global fdelay
if not reverse:
for i in range(1, len(CUSTOM_STEP_LENGTH)+1):
setColor(strip, Color(80, 200, 0), i, reverse, Color(0,50,0), True, False, fdelay)
else:
for i in range(len(CUSTOM_STEP_LENGTH),0,-1):
setColor(strip, Color(80, 200, 0), i, reverse, Color(0,50,0), True, True, fdelay)
def animation(strip, reverse):
global ANIMATION_RUN, EFFECT,COLOR, ON
e=4
if EFFECT=="Switching":
logging.info("random effect")
e=random.randrange(1, 5)
elif EFFECT=="Rainbow":
e=2
elif EFFECT=="Fire":
e=1
elif EFFECT=="Ice":
e=3
if e==1:
logging.info("fire")
while ANIMATION_RUN:
fireSteps(strip, reverse)
elif e==2:
logging.info("rainbow")
rainbowSteps(strip, reverse)
elif e==3:
logging.info("ice")
while ANIMATION_RUN:
iceSteps(strip, reverse)
else:
logging.info("color")
setColor(strip, COLOR, None, reverse, Color(0,0,0), True, False)
def rainbowSteps(strip, reverse=False):
if not reverse:
for i in range(1, len(CUSTOM_STEP_LENGTH)+1):
setColor(strip, rainbowColor(i), i, reverse, Color(0,0,0), True, False, 0.01)
else:
for i in range(len(CUSTOM_STEP_LENGTH),0,-1):
setColor(strip, rainbowColor(len(CUSTOM_STEP_LENGTH)-i), i, reverse, Color(0,0,0), True, False, 0.01)
def timeout(reverse):
global ON, WORKING, TIMEOUT_RUN, TIMEOUT_TIME
tt=TIMEOUT_TIME
logging.info("timeout thread started, will trigger after: "+str(TIMEOUT_TIME))
while TIMEOUT_RUN and tt>0:
time.sleep(1)
tt=tt-1
if not WORKING.acquire(False):
return
if tt==0:
logging.info("timeout after :"+str(TIMEOUT_TIME))
clean(reverse)
WORKING.release()
def clean(reverse):
global ON, WORKING,ANIMATION_RUN,ANIMATION_THREAD,TIMEOUT_RUN,TIMEOUT_THREAD
ANIMATION_RUN=False
if ANIMATION_THREAD != None:
ANIMATION_THREAD.join()
setColor(strip, Color(0,0,0), None, reverse, Color(0,0,0), True, False, 0.1)
ON=False
def movement(strip, reverse):
global ON, WORKING,ANIMATION_RUN,ANIMATION_THREAD,TIMEOUT_THREAD, TIMEOUT_RUN,DARK
if not WORKING.acquire(False):
return;
lightsoff=GPIO.input(LIGHT_PIN)
#if lightsoff==1:
# strip.setBrightness(DARK_BRIGHTNESS)
#else:
# strip.setBrightness(LIGHT_BRIGHTNESS)
logging.info("movement: reverse: " + str(reverse)+" - on: "+str(ON)+" lightsoff: "+ str(lightsoff))
if ON:
clean(not reverse)
TIMEOUT_RUN=False
if TIMEOUT_THREAD != None:
TIMEOUT_THREAD.join()
else:
ANIMATION_RUN=True
ANIMATION_THREAD=Thread(target=animation, args=(strip, reverse,))
ANIMATION_THREAD.start()
TIMEOUT_RUN=True
TIMEOUT_THREAD=Thread(target=timeout, args=(reverse,))
TIMEOUT_THREAD.start()
ON=True
WORKING.release()
# Main program logic follows:
if __name__ == '__main__':
# Process arguments
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--clear', action='store_true', help='clear the display on exit')
args = parser.parse_args()
seed(1)
# Create NeoPixel object with appropriate configuration.
strip = Adafruit_NeoPixel(sum(CUSTOM_STEP_LENGTH), LED_PIN, LED_FREQ_HZ, LED_DMA, LED_INVERT, LED_BRIGHTNESS, LED_CHANNEL)
# Intialize the library (must be called once before other functions).
strip.begin()
logging.basicConfig(filename='/var/log/stairs.log', filemode='w', level=logging.INFO, format='%(asctime)s: %(message)s')
try:
GPIO.setmode(GPIO.BCM)
GPIO.setup(MOTION_PIN, GPIO.IN)
GPIO.setup(MOTION2_PIN, GPIO.IN)
GPIO.setup(LIGHT_PIN, GPIO.IN)
CUSTOM_STEP_STARTS.append(0)
for i in CUSTOM_STEP_LENGTH:
CUSTOM_STEP_STARTS.append(CUSTOM_STEP_STARTS[-1]+i)
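# Illustration (added for clarity, not part of the original logic): with
# CUSTOM_STEP_LENGTH = [3, 5, 4] the loop above yields
# CUSTOM_STEP_STARTS = [0, 3, 8, 12], so setStep(n) drives the LEDs from
# CUSTOM_STEP_STARTS[n-1] up to CUSTOM_STEP_STARTS[n-1]+CUSTOM_STEP_LENGTH[n-1]-1.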
if len(sys.argv)>2:
setColor(strip, COLOR, int(sys.argv[2]), False, Color(0,0,0))
time.sleep(int(sys.argv[1]))
clean(False)
else:
GPIO.add_event_detect(MOTION_PIN , GPIO.RISING, callback=lambda x : movement(strip, False), bouncetime=5000)
GPIO.add_event_detect(MOTION2_PIN , GPIO.RISING, callback=lambda x : movement(strip, True), bouncetime=5000)
logging.info("startup")
while True:
time.sleep(100)
except:
GPIO.cleanup()
clean(False)
TIMEOUT_RUN=False
if TIMEOUT_THREAD != None:
TIMEOUT_THREAD.join()
logging.info(sys.exc_info()[0])
| 36.049057
| 159
| 0.635612
|
3f7dd5daa755e8a4d8764354d03f1ad1318dc27c
| 1,596
|
py
|
Python
|
tests/benchmarks/benchmark_selectinquery.py
|
vdmit11/py-mongosql
|
8b66a3386344cf5b38021dccf32c7790a07617e5
|
[
"BSD-2-Clause"
] | null | null | null |
tests/benchmarks/benchmark_selectinquery.py
|
vdmit11/py-mongosql
|
8b66a3386344cf5b38021dccf32c7790a07617e5
|
[
"BSD-2-Clause"
] | null | null | null |
tests/benchmarks/benchmark_selectinquery.py
|
vdmit11/py-mongosql
|
8b66a3386344cf5b38021dccf32c7790a07617e5
|
[
"BSD-2-Clause"
] | null | null | null |
"""
This benchmark compares the performance of:
* selectinload()
* selectinquery() with query caching
* selectinquery() with no query caching
"""
from tests.benchmarks.benchmark_utils import benchmark_parallel_funcs
from sqlalchemy.orm import selectinload
from mongosql import selectinquery
from tests.models import get_working_db_for_tests, User, Article
# Run me: PyCharm Profiler
# Run me: python -m cProfile -o profile.out tests/benchmark_selectinquery.py
# Init DB
engine, Session = get_working_db_for_tests()
# Prepare
N_REPEATS = 1000
ssn = Session()
# Tests
def test_selectinload(n):
""" Test SqlAlchemy's selectinload(): using it as a baseline """
for i in range(n):
q = ssn.query(User).options(
selectinload(User.articles).selectinload(Article.comments)
)
list(q.all())
def test_selectinquery__cache(n):
""" Test our custom selectinquery(), with query caching """
for i in range(n):
q = ssn.query(User).options(
selectinquery(User.articles, lambda q: q, 'a').selectinquery(Article.comments, lambda q: q, 'b')
)
list(q.all())
def test_selectinquery__no_cache(n):
""" Test our custom selectinquery(), without query caching """
for i in range(n):
q = ssn.query(User).options(
selectinquery(User.articles, lambda q: q).selectinquery(Article.comments, lambda q: q)
)
list(q.all())
# Run
res = benchmark_parallel_funcs(
N_REPEATS, 10,
test_selectinload,
test_selectinquery__cache,
test_selectinquery__no_cache,
)
# Done
print(res)
| 25.741935
| 108
| 0.690476
|
f8ab131e7d1cc6d24408413f877fcf0cef8808ea
| 6,281
|
py
|
Python
|
keras_tuner/tuners/randomsearch.py
|
Jet132/keras-tuner
|
be682573c6f6be1e3f3e6dcac786a34ccac19d3b
|
[
"Apache-2.0"
] | 1
|
2022-03-01T06:13:17.000Z
|
2022-03-01T06:13:17.000Z
|
keras_tuner/tuners/randomsearch.py
|
maxpark/keras-tuner
|
be682573c6f6be1e3f3e6dcac786a34ccac19d3b
|
[
"Apache-2.0"
] | 1
|
2022-03-28T09:14:42.000Z
|
2022-03-28T09:14:42.000Z
|
keras_tuner/tuners/randomsearch.py
|
maxpark/keras-tuner
|
be682573c6f6be1e3f3e6dcac786a34ccac19d3b
|
[
"Apache-2.0"
] | 1
|
2022-02-28T20:10:53.000Z
|
2022-02-28T20:10:53.000Z
|
# Copyright 2019 The KerasTuner Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"Basic random search tuner."
from keras_tuner.engine import oracle as oracle_module
from keras_tuner.engine import trial as trial_module
from keras_tuner.engine import tuner as tuner_module
class RandomSearchOracle(oracle_module.Oracle):
"""Random search oracle.
Args:
objective: A string, `keras_tuner.Objective` instance, or a list of
`keras_tuner.Objective`s and strings. If a string, the direction of
the optimization (min or max) will be inferred. If a list of
`keras_tuner.Objective`, we will minimize the sum of all the
objectives to minimize, minus the sum of all the objectives to
maximize. The `objective` argument is optional when
`Tuner.run_trial()` or `HyperModel.fit()` returns a single float as
the objective to minimize.
max_trials: Integer, the total number of trials (model configurations)
to test at most. Note that the oracle may interrupt the search
before `max_trials` models have been tested if the search space has
been exhausted. Defaults to 10.
seed: Optional integer, the random seed.
hyperparameters: Optional `HyperParameters` instance. Can be used to
override (or register in advance) hyperparameters in the search
space.
tune_new_entries: Boolean, whether hyperparameter entries that are
requested by the hypermodel but that were not specified in
`hyperparameters` should be added to the search space, or not. If
not, then the default value for these parameters will be used.
Defaults to True.
allow_new_entries: Boolean, whether the hypermodel is allowed to
request hyperparameter entries not listed in `hyperparameters`.
Defaults to True.
"""
def __init__(
self,
objective=None,
max_trials=10,
seed=None,
hyperparameters=None,
allow_new_entries=True,
tune_new_entries=True,
):
super(RandomSearchOracle, self).__init__(
objective=objective,
max_trials=max_trials,
hyperparameters=hyperparameters,
tune_new_entries=tune_new_entries,
allow_new_entries=allow_new_entries,
seed=seed,
)
def populate_space(self, trial_id):
"""Fill the hyperparameter space with values.
Args:
trial_id: A string, the ID for this Trial.
Returns:
A dictionary with keys "values" and "status", where "values" is
a mapping of parameter names to suggested values, and "status"
is the TrialStatus that should be returned for this trial (one
of "RUNNING", "IDLE", or "STOPPED").
"""
values = self._random_values()
if values is None:
return {"status": trial_module.TrialStatus.STOPPED, "values": None}
return {"status": trial_module.TrialStatus.RUNNING, "values": values}
class RandomSearch(tuner_module.Tuner):
"""Random search tuner.
Args:
hypermodel: Instance of `HyperModel` class (or callable that takes
hyperparameters and returns a Model instance). It is optional when
`Tuner.run_trial()` is overriden and does not use
`self.hypermodel`.
objective: A string, `keras_tuner.Objective` instance, or a list of
`keras_tuner.Objective`s and strings. If a string, the direction of
the optimization (min or max) will be inferred. If a list of
`keras_tuner.Objective`, we will minimize the sum of all the
objectives to minimize, minus the sum of all the objectives to
maximize. The `objective` argument is optional when
`Tuner.run_trial()` or `HyperModel.fit()` returns a single float as
the objective to minimize.
max_trials: Integer, the total number of trials (model configurations)
to test at most. Note that the oracle may interrupt the search
before `max_trials` models have been tested if the search space has
been exhausted. Defaults to 10.
seed: Optional integer, the random seed.
hyperparameters: Optional `HyperParameters` instance. Can be used to
override (or register in advance) hyperparameters in the search
space.
tune_new_entries: Boolean, whether hyperparameter entries that are
requested by the hypermodel but that were not specified in
`hyperparameters` should be added to the search space, or not. If
not, then the default value for these parameters will be used.
Defaults to True.
allow_new_entries: Boolean, whether the hypermodel is allowed to
request hyperparameter entries not listed in `hyperparameters`.
Defaults to True.
**kwargs: Keyword arguments relevant to all `Tuner` subclasses.
Please see the docstring for `Tuner`.
"""
def __init__(
self,
hypermodel=None,
objective=None,
max_trials=10,
seed=None,
hyperparameters=None,
tune_new_entries=True,
allow_new_entries=True,
**kwargs
):
self.seed = seed
oracle = RandomSearchOracle(
objective=objective,
max_trials=max_trials,
seed=seed,
hyperparameters=hyperparameters,
tune_new_entries=tune_new_entries,
allow_new_entries=allow_new_entries,
)
super(RandomSearch, self).__init__(oracle, hypermodel, **kwargs)
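# Illustrative usage sketch (not part of the original module). `build_model`
# and the training arrays are hypothetical user-supplied objects; the calls
# to `search()` and `get_best_models()` follow the public `Tuner` API.
def _example_random_search(build_model, x_train, y_train, x_val, y_val):
    """Run a small random search and return the best model found (sketch)."""
    tuner = RandomSearch(
        hypermodel=build_model,
        objective="val_loss",
        max_trials=10,
        seed=42,
    )
    tuner.search(x_train, y_train, validation_data=(x_val, y_val))
    return tuner.get_best_models(num_models=1)[0]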
| 43.020548
| 79
| 0.658653
|
9f6ee521422cf59d388324e360f82b508a73f5cc
| 5,811
|
py
|
Python
|
demos/ach/containers/RDFI processor/rdfi_process.py
|
kwkoo/datapipelines
|
cc57125b96ad6e45520a6c07bbb84664b6d40173
|
[
"MIT"
] | 30
|
2020-04-15T20:22:42.000Z
|
2021-11-30T01:55:01.000Z
|
demos/ach/containers/RDFI processor/rdfi_process.py
|
kwkoo/datapipelines
|
cc57125b96ad6e45520a6c07bbb84664b6d40173
|
[
"MIT"
] | 1
|
2020-12-27T20:09:50.000Z
|
2020-12-29T16:02:12.000Z
|
demos/ach/containers/RDFI processor/rdfi_process.py
|
kwkoo/datapipelines
|
cc57125b96ad6e45520a6c07bbb84664b6d40173
|
[
"MIT"
] | 11
|
2020-12-17T21:25:21.000Z
|
2021-12-28T05:59:55.000Z
|
import http.server
import io
import json
import logging
import os
import random
import socketserver
import sys
from io import BytesIO
import boto3
import mysql.connector
from cloudevents.sdk import marshaller
from cloudevents.sdk.event import v02
access_key = os.environ['AWS_ACCESS_KEY_ID']
secret_key = os.environ['AWS_SECRET_ACCESS_KEY']
service_point = os.environ['service_point']
db_user = os.environ['database-user']
db_password = os.environ['database-password']
db_host = os.environ['database-host']
db_db = os.environ['database-db']
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
s3client = boto3.client('s3', 'us-east-1', endpoint_url=service_point,
aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
use_ssl=True if 'https' in service_point else False)
m = marshaller.NewDefaultHTTPMarshaller()
class ForkedHTTPServer(socketserver.ForkingMixIn, http.server.HTTPServer):
"""Handle requests with fork."""
class CloudeventsServer(object):
"""Listen for incoming HTTP cloudevents requests.
A cloudevents request is simply an HTTP POST request following a
well-defined convention for how to pass the event data.
"""
def __init__(self, port=8080):
self.port = port
def start_receiver(self, func):
"""Start listening to HTTP requests
:param func: the callback to call upon a cloudevents request
:type func: cloudevent -> none
"""
class BaseHttp(http.server.BaseHTTPRequestHandler):
def do_POST(self):
logging.info('POST received')
content_type = self.headers.get('Content-Type')
content_len = int(self.headers.get('Content-Length'))
headers = dict(self.headers)
data = self.rfile.read(content_len)
data = data.decode('utf-8')
logging.info(content_type)
logging.info(data)
#if content_type != 'application/json':
# logging.info('Not JSON')
# data = io.StringIO(data)
#try:
# event = v02.Event()
# event = m.FromRequest(event, headers, data, json.loads)
#except Exception as e:
# logging.error(f"Event error: {e}")
# raise
event = eval(data)['Records'][0]
logging.info(event)
func(event)
self.send_response(204)
self.end_headers()
return
socketserver.TCPServer.allow_reuse_address = True
with ForkedHTTPServer(("", self.port), BaseHttp) as httpd:
try:
logging.info("serving at port {}".format(self.port))
httpd.serve_forever()
except:
httpd.server_close()
raise
def extract_data(msg):
logging.info('extract_data')
bucket_eventName = msg['eventName']
bucket_name = msg['s3']['bucket']['name']
object_key = msg['s3']['object']['key']
data = {'bucket_eventName': bucket_eventName,
'bucket_name': bucket_name, 'object_key': object_key}
return data
def load_file(bucket_name, object_key):
logging.info('load_file')
obj = s3client.get_object(Bucket=bucket_name, Key=object_key)
content = obj['Body'].read().decode('utf-8')
return content
def delete_file(bucket_name, object_key):
logging.info('delete_file')
s3client.delete_object(Bucket=bucket_name,Key=object_key)
def compute_amount(content):
lines = content.splitlines()
total = 0
for i in range(2, len(lines)): # Read lines
if lines[i][0] == '6': # If match for transaction
total += float(lines[i][29:39])/100
return total
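# Note added for clarity (assumes NACHA-style ACH files, which is consistent
# with the parsing above): an entry detail record starts with record type
# code '6' and stores the amount in cents in columns 30-39, i.e. the 0-based
# slice [29:39]. A record carrying $123.45 therefore has '0000012345' in that
# field and contributes 123.45 to the total returned by compute_amount().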
def update_balance(transactions_amount):
try:
cnx = mysql.connector.connect(user=db_user, password=db_password,
host=db_host,
database=db_db)
cursor = cnx.cursor()
query = 'INSERT INTO bank_balance(time,balance) SELECT CURRENT_TIMESTAMP(), MAX(balance) + ' + str(
transactions_amount) + ' FROM bank_balance;'
cursor.execute(query)
cnx.commit()
cursor.close()
cnx.close()
except Exception as e:
logging.error(f"Unexpected error: {e}")
raise
def update_rdfi_process():
try:
cnx = mysql.connector.connect(user=db_user, password=db_password,
host=db_host,
database=db_db)
cursor = cnx.cursor()
query = 'INSERT INTO rdfi_process(time,entry) SELECT CURRENT_TIMESTAMP(), 1;'
cursor.execute(query)
cnx.commit()
cursor.close()
cnx.close()
except Exception as e:
logging.error(f"Unexpected error: {e}")
raise
def run_event(event):
try:
extracted_data = extract_data(event)
bucket_eventName = extracted_data['bucket_eventName']
bucket_name = extracted_data['bucket_name']
object_key = extracted_data['object_key']
logging.info(bucket_eventName + ' ' + bucket_name + ' ' + object_key)
if bucket_eventName == 's3:ObjectCreated:Put':
# Load file and treat it
content = load_file(bucket_name, object_key)
transactions_amount = compute_amount(content)
update_balance(transactions_amount)
update_rdfi_process()
delete_file(bucket_name, object_key)
except Exception as e:
logging.error(f"Unexpected error: {e}")
raise
client = CloudeventsServer()
client.start_receiver(run_event)
| 32.283333
| 107
| 0.60678
|
9452bddd44aacb6ddb0f38ecf39a8a0f1ea9847a
| 2,369
|
py
|
Python
|
Combatant/tests/test_combatant.py
|
vakker/plark_ai_public
|
ea17b0f475b3c1927a7bb249eef8db54c2be093f
|
[
"Apache-2.0"
] | 2
|
2021-02-28T16:20:18.000Z
|
2021-03-02T11:58:05.000Z
|
Combatant/tests/test_combatant.py
|
vakker/plark_ai_public
|
ea17b0f475b3c1927a7bb249eef8db54c2be093f
|
[
"Apache-2.0"
] | 4
|
2021-02-26T16:06:27.000Z
|
2021-03-03T09:51:59.000Z
|
Combatant/tests/test_combatant.py
|
vakker/plark_ai_public
|
ea17b0f475b3c1927a7bb249eef8db54c2be093f
|
[
"Apache-2.0"
] | 6
|
2021-02-19T18:41:57.000Z
|
2021-03-05T06:02:17.000Z
|
#!/usr/bin/env python
import os
import sys
import json
from combatant import load_combatant
from schema import deserialize_state
import numpy as np
from combatant import AGENTS_PATH
ALLOWED_ACTIONS = {
"PANTHER": [
"1", # Up
"2", # Up right
"3", # Down right
"4", # Down
"5", # Down left
"6", # Up left
"end",
],
"PELICAN": [
"1", # Up
"2", # Up right
"3", # Down right
"4", # Down
"5", # Down left
"6", # Up left
"drop_buoy",
"drop_torpedo",
"end",
],
}
#AGENT_NAME = ""
BASIC_AGENTS_PATH = os.path.join(
os.path.abspath(
os.path.join(
os.path.abspath(
os.path.join(os.path.dirname(os.path.abspath(__file__)), os.pardir)
),
os.pardir,
)
),
"Components",
"plark-game",
"plark_game",
"agents",
"basic"
)
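# Note added for clarity: the nested os.path calls above walk two directory
# levels up from this test file, so (assuming the repository layout implied
# by the imports) BASIC_AGENTS_PATH resolves to
# <repo-root>/Components/plark-game/plark_game/agents/basic.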
def main():
agent_type = sys.argv[1].upper()
AGENT_NAME = "comb_%s" % (agent_type)
if agent_type not in ["PELICAN", "PANTHER"]:
raise Exception("Agent type must PELICAN or PANTHER: %s" % (agent_type))
if not os.path.exists(AGENTS_PATH):
raise Exception("Given agent path doesn't exist: %s" % (AGENTS_PATH))
if agent_type == "PELICAN":
agent_path = os.path.join(AGENTS_PATH, "pelican")
test_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "pelican")
else:
agent_path = os.path.join(AGENTS_PATH, "panther")
test_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "panther")
state = deserialize_state(json.load(open(os.path.join(test_path, "10x10_state.json"))))
obs = np.loadtxt(os.path.join(test_path, "10x10_obs.txt"))
obs_norm = np.loadtxt(os.path.join(test_path, "10x10_obs_norm.txt"))
d_params = np.loadtxt(os.path.join(test_path, "10x10_domain_params.txt"))
d_params_norm = np.loadtxt(os.path.join(test_path, "10x10_domain_params_norm.txt"))
agent = load_combatant(agent_path, AGENT_NAME, BASIC_AGENTS_PATH)
action = agent.getTournamentAction(obs, obs_norm, d_params, d_params_norm, state)
if action not in ALLOWED_ACTIONS[agent_type]:
raise RuntimeError("NO!")
else:
print("Test successful")
if __name__ == "__main__":
main()
| 26.032967
| 91
| 0.599409
|
50d462e671250b6aaec256c4a181390b521f7e89
| 5,051
|
py
|
Python
|
parentopticon/restrictions.py
|
EliRibble/parentopticon
|
8593d7f72fac9706f1bd8e8326ac932f5af95a32
|
[
"MIT"
] | null | null | null |
parentopticon/restrictions.py
|
EliRibble/parentopticon
|
8593d7f72fac9706f1bd8e8326ac932f5af95a32
|
[
"MIT"
] | null | null | null |
parentopticon/restrictions.py
|
EliRibble/parentopticon
|
8593d7f72fac9706f1bd8e8326ac932f5af95a32
|
[
"MIT"
] | null | null | null |
"""This module contains functions for reading in configured restrictoins."""
import datetime
import enum
import typing
class Window:
"""A window of time between unlock and lock."""
def __init__(self, start: datetime.time, end: datetime.time):
self.end = end
self.start = start
def is_locked(self, when: datetime.time):
return when < self.start or self.end <= when
class WindowSet:
"""A group of window sets to apply together."""
def __init__(self, windows: typing.Iterable[Window]):
self.windows = sorted(windows, key=lambda w: w.start)
def is_locked(self, when: datetime.time):
for window in self.windows:
if not window.is_locked(when):
return False
return True
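# Illustrative example (not part of the original module): a 07:00-21:00
# window is unlocked only inside that range, and a WindowSet is locked only
# when every one of its windows is locked.
#
#     w = Window(datetime.time(7, 0), datetime.time(21, 0))
#     w.is_locked(datetime.time(6, 30))               # True
#     w.is_locked(datetime.time(12, 0))               # False
#     WindowSet([w]).is_locked(datetime.time(22, 0))  # True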
class WindowWeek:
"""The windows applied every week."""
def __init__(self,
name: str,
monday: WindowSet,
tuesday: WindowSet,
wednesday: WindowSet,
thursday: WindowSet,
friday: WindowSet,
saturday: WindowSet,
sunday: WindowSet):
"""A grouping of windows for a restriction."""
self.name = name
self.monday = monday
self.tuesday = tuesday
self.wednesday = wednesday
self.thursday = thursday
self.friday = friday
self.saturday = saturday
self.sunday = sunday
class LimitSet:
"""A group of limits that form a pool."""
def __init__(self,
name: str,
daily: typing.Optional[int],
hourly: typing.Optional[int],
monthly: typing.Optional[int],
weekly: typing.Optional[int]):
self.name = name
self.daily = daily
self.hourly = hourly
self.monthly = monthly
self.weekly = weekly
class Program:
"""Information about a single program we are tracking."""
def __init__(self, name: str, processes: typing.List[str]):
self.name = name
self.processes = processes
class Group:
"""A group of programs with the same restrictions."""
def __init__(self,
name: str,
limits: typing.Optional[LimitSet] = None,
window: typing.Optional[WindowWeek] = None):
self.name = name
self.limits = limits
self.window = window
class Config:
"""A full configuration with various restrictions."""
def __init__(self,
groups: typing.List[Group],
programs: typing.List[Program],
windows: typing.List[WindowWeek]):
self.groups = {group.name: group for group in groups}
self.programs = {program.name: program for program in programs}
self.windows = {window.name: window for window in windows}
def _parse_entire_config(content: typing.Dict) -> Config:
"""Take a config which is just a JSON body and turn it into the proper instances."""
limits = [_parse_limitset(l) for l in content["limits"]]
windows = [_parse_window_week(w) for w in content["windows"]]
groups = [_parse_group(g, limits, windows) for g in content["groups"]]
programs = [_parse_program(p) for p in content["programs"]]
return Config(groups, programs, windows)
def _parse_group(
content: typing.Dict[str, typing.Any],
limits: typing.List[LimitSet],
windows: typing.List[WindowWeek]) -> Group:
"""Create a group from the provided config data structure."""
limit_name = content.get("limits")
limit = {l.name: l for l in limits}[limit_name] if limit_name else None
window_name = content.get("window")
window = {w.name: w for w in windows}[window_name] if window_name else None
return Group(
name = content["name"],
limits = limit if limit else None,
window = window if window else None,
)
def _parse_limitset(content: typing.Dict[str, typing.Any]) -> LimitSet:
"""Take a dict of data and turn it into a list of Limits."""
daily = content.get("daily")
hourly = content.get("hourly")
monthly = content.get("monthly")
weekly = content.get("weekly")
return LimitSet(
name = content["name"],
daily = daily,
hourly = hourly,
monthly = monthly,
weekly = weekly,
)
def _parse_program(content: typing.Dict[str, typing.Any]) -> Program:
"""Parse a single program from a config file."""
return Program(
name = content["name"],
processes = content.get("processes", []),
)
def _parse_time(t: str) -> datetime.time:
"Parse a string like 0735 into a time."
if len(t) <= 2:
return datetime.time(hour=int(t))
elif len(t) == 4:
return datetime.time(hour=int(t[:2]), minute=int(t[2:]))
else:
raise ValueError("Can't parse '{}' as a time".format(t))
def _parse_window_week(content: typing.Dict[str, typing.Any]) -> WindowWeek:
return WindowWeek(
name = content["name"],
monday = _parse_windowset(content["monday"]),
tuesday = _parse_windowset(content["tuesday"]),
wednesday = _parse_windowset(content["wednesday"]),
thursday = _parse_windowset(content["thursday"]),
friday = _parse_windowset(content["friday"]),
saturday = _parse_windowset(content["saturday"]),
sunday = _parse_windowset(content["sunday"]),
)
def _parse_window(content: str) -> Window:
"Create a window from a string."
start, _, end = content.partition("-")
return Window(
start = _parse_time(start),
end = _parse_time(end),
)
def _parse_windowset(content: typing.Dict[str, typing.List[str]]) -> WindowSet:
windows = [_parse_window(w) for w in content]
return WindowSet(windows)
| 29.028736
| 85
| 0.706593
|
c3b0a603abd464145d0b862626594daa648e61e8
| 874
|
py
|
Python
|
traw/__init__.py
|
levi-rs/testrail-api-wrapper
|
5cafbb4ef8c97a0c1e0dc6dac64534fa72e56f11
|
[
"MIT"
] | 2
|
2019-01-19T13:18:26.000Z
|
2019-02-23T19:39:50.000Z
|
traw/__init__.py
|
levi-rs/testrail-api-wrapper
|
5cafbb4ef8c97a0c1e0dc6dac64534fa72e56f11
|
[
"MIT"
] | 3
|
2019-01-20T19:21:34.000Z
|
2019-02-13T17:54:30.000Z
|
traw/__init__.py
|
leviable/testrail-api-wrapper
|
5cafbb4ef8c97a0c1e0dc6dac64534fa72e56f11
|
[
"MIT"
] | null | null | null |
""" TRAW: TestRail API Wrapper
TRAW is an API wrapper for Gurrock's TestRail test management suite
The intended way to begin is to instantiate the TRAW Client:
.. code-block:: python
import traw
testrail = traw.Client(username='username',
user_api_key='api_key',
url='url')
See the Client help documentation (`help(traw.Client)`) for more information
"""
import logging
from os.path import dirname, join, realpath
from .client import Client # NOQA
try:
FileNotFoundError
except NameError:
FileNotFoundError = IOError
try:
with open(join(dirname(realpath(__file__)), 'VERSION'), 'r') as r:
version = r.read()
except FileNotFoundError:
version = '0.0.0'
__version__ = version
__all__ = ('__version__', 'Client')
logging.getLogger(__package__).addHandler(logging.NullHandler())
| 23.621622
| 76
| 0.681922
|
e594c42d3d9265dee7f3c7cb8d66f6f088fc89d4
| 4,699
|
py
|
Python
|
tracker.py
|
rashiddaha/ivy
|
59106ba29ee21305c2b3222d08d1b2869d6e27f3
|
[
"MIT"
] | 206
|
2018-10-08T17:04:33.000Z
|
2019-12-22T02:51:08.000Z
|
tracker.py
|
rashiddaha/ivy
|
59106ba29ee21305c2b3222d08d1b2869d6e27f3
|
[
"MIT"
] | 28
|
2019-03-07T11:58:21.000Z
|
2019-12-20T18:05:19.000Z
|
tracker.py
|
rashiddaha/ivy
|
59106ba29ee21305c2b3222d08d1b2869d6e27f3
|
[
"MIT"
] | 85
|
2019-12-23T10:33:09.000Z
|
2022-01-06T18:16:04.000Z
|
'''
Functions for keeping track of detected objects in a video.
'''
import sys
import cv2
import settings
from util.blob import Blob
from util.bounding_box import get_overlap, get_box_image
from util.image import get_base64_image
from util.object_info import generate_object_id
from util.logger import get_logger
logger = get_logger()
def _csrt_create(bounding_box, frame):
'''
Create an OpenCV CSRT Tracker object.
'''
tracker = cv2.TrackerCSRT_create()
tracker.init(frame, tuple(bounding_box))
return tracker
def _kcf_create(bounding_box, frame):
'''
Create an OpenCV KCF Tracker object.
'''
tracker = cv2.TrackerKCF_create()
tracker.init(frame, tuple(bounding_box))
return tracker
def get_tracker(algorithm, bounding_box, frame):
'''
Fetch a tracker object based on the algorithm specified.
'''
if algorithm == 'csrt':
return _csrt_create(bounding_box, frame)
if algorithm == 'kcf':
return _kcf_create(bounding_box, frame)
logger.error('Invalid tracking algorithm specified (options: csrt, kcf)', extra={
'meta': {'label': 'INVALID_TRACKING_ALGORITHM'},
})
sys.exit()
def _remove_stray_blobs(blobs, matched_blob_ids, mcdf):
'''
Remove blobs that "hang" after a tracked object has left the frame.
'''
for blob_id, blob in list(blobs.items()):
if blob_id not in matched_blob_ids:
blob.num_consecutive_detection_failures += 1
if blob.num_consecutive_detection_failures > mcdf:
del blobs[blob_id]
return blobs
def add_new_blobs(boxes, classes, confidences, blobs, frame, tracker, mcdf):
'''
Add new blobs or updates existing ones.
'''
matched_blob_ids = []
for i, box in enumerate(boxes):
_type = classes[i] if classes is not None else None
_confidence = confidences[i] if confidences is not None else None
_tracker = get_tracker(tracker, box, frame)
match_found = False
for _id, blob in blobs.items():
if get_overlap(box, blob.bounding_box) >= 0.6:
match_found = True
if _id not in matched_blob_ids:
blob.num_consecutive_detection_failures = 0
matched_blob_ids.append(_id)
blob.update(box, _type, _confidence, _tracker)
blob_update_log_meta = {
'label': 'BLOB_UPDATE',
'object_id': _id,
'bounding_box': blob.bounding_box,
'type': blob.type,
'type_confidence': blob.type_confidence,
}
if settings.LOG_IMAGES:
blob_update_log_meta['image'] = get_base64_image(get_box_image(frame, blob.bounding_box))
logger.debug('Blob updated.', extra={'meta': blob_update_log_meta})
break
if not match_found:
_blob = Blob(box, _type, _confidence, _tracker)
blob_id = generate_object_id()
blobs[blob_id] = _blob
blog_create_log_meta = {
'label': 'BLOB_CREATE',
'object_id': blob_id,
'bounding_box': _blob.bounding_box,
'type': _blob.type,
'type_confidence': _blob.type_confidence,
}
if settings.LOG_IMAGES:
blog_create_log_meta['image'] = get_base64_image(get_box_image(frame, _blob.bounding_box))
logger.debug('Blob created.', extra={'meta': blog_create_log_meta})
blobs = _remove_stray_blobs(blobs, matched_blob_ids, mcdf)
return blobs
def remove_duplicates(blobs):
'''
Remove duplicate blobs i.e blobs that point to an already detected and tracked object.
'''
for blob_id, blob_a in list(blobs.items()):
for _, blob_b in list(blobs.items()):
if blob_a == blob_b:
break
if get_overlap(blob_a.bounding_box, blob_b.bounding_box) >= 0.6 and blob_id in blobs:
del blobs[blob_id]
return blobs
def update_blob_tracker(blob, blob_id, frame):
'''
Update a blob's tracker object.
'''
success, box = blob.tracker.update(frame)
if success:
blob.num_consecutive_tracking_failures = 0
blob.update(box)
logger.debug('Object tracker updated.', extra={
'meta': {
'label': 'TRACKER_UPDATE',
'object_id': blob_id,
'bounding_box': blob.bounding_box,
'centroid': blob.centroid,
},
})
else:
blob.num_consecutive_tracking_failures += 1
return (blob_id, blob)
| 33.326241
| 109
| 0.61396
|
196ebec2019f556e8f411dfe8b28c82af8e81199
| 8,243
|
py
|
Python
|
openstack/utils.py
|
gthiemonge/openstacksdk
|
e1c6f233eb6b07f488fe4acded72312d8d265b33
|
[
"Apache-2.0"
] | null | null | null |
openstack/utils.py
|
gthiemonge/openstacksdk
|
e1c6f233eb6b07f488fe4acded72312d8d265b33
|
[
"Apache-2.0"
] | null | null | null |
openstack/utils.py
|
gthiemonge/openstacksdk
|
e1c6f233eb6b07f488fe4acded72312d8d265b33
|
[
"Apache-2.0"
] | 1
|
2021-03-12T14:28:28.000Z
|
2021-03-12T14:28:28.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import string
import time
import deprecation
import keystoneauth1
from keystoneauth1 import discover
from openstack import _log
from openstack import exceptions
from openstack import version
def deprecated(deprecated_in=None, removed_in=None,
details=""):
"""Mark a method as deprecated
:param deprecated_in: The version string where this method is deprecated.
Generally this is the next version to be released.
:param removed_in: The version where this method will be removed
from the code base. Generally this is the next
major version. This argument is helpful for the
tests when using ``deprecation.fail_if_not_removed``.
:param str details: Helpful details to callers and the documentation.
This will usually be a recommendation for alternate
code to use.
"""
# As all deprecations within this library have the same current_version,
# return a partial function with the library version always set.
partial = functools.partial(deprecation.deprecated,
current_version=version.__version__)
# TODO(shade) shade's tags break these - so hard override them for now.
# We'll want a patch fixing this before we cut any releases.
removed_in = '2.0.0'
return partial(deprecated_in=deprecated_in, removed_in=removed_in,
details=details)
@deprecated(deprecated_in="0.10.0", removed_in="1.0",
details="Use openstack.enable_logging instead")
def enable_logging(*args, **kwargs):
"""Backwards compatibility wrapper function.
openstacksdk has had enable_logging in utils. It's in _log now and
exposed directly at openstack.enable_logging.
"""
return _log.enable_logging(*args, **kwargs)
def urljoin(*args):
"""A custom version of urljoin that simply joins strings into a path.
The real urljoin takes web semantics into account: when joining a url like
/path onto a base url, the result is anchored at the host (http://host/path)
regardless of the base path. We generally won't care about that in the client.
"""
return '/'.join(str(a or '').strip('/') for a in args)
def iterate_timeout(timeout, message, wait=2):
"""Iterate and raise an exception on timeout.
This is a generator that will continually yield and sleep for
wait seconds, and if the timeout is reached, will raise an exception
with <message>.
"""
log = _log.setup_logging('openstack.iterate_timeout')
try:
# None as a wait winds up flowing well in the per-resource cache
# flow. We could spread this logic around to all of the calling
# points, but just having this treat None as "I don't have a value"
# seems friendlier
if wait is None:
wait = 2
elif wait == 0:
# wait should be < timeout, unless timeout is None
wait = 0.1 if timeout is None else min(0.1, timeout)
wait = float(wait)
except ValueError:
raise exceptions.SDKException(
"Wait value must be an int or float value. {wait} given"
" instead".format(wait=wait))
start = time.time()
count = 0
while (timeout is None) or (time.time() < start + timeout):
count += 1
yield count
log.debug('Waiting %s seconds', wait)
time.sleep(wait)
raise exceptions.ResourceTimeout(message)
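# Illustrative sketch (not part of the original module) showing the intended
# usage pattern of iterate_timeout(); `fetch_resource` is a hypothetical
# callable returning an object with a `status` attribute.
def _example_wait_for_active(fetch_resource, timeout=60):
    for count in iterate_timeout(timeout, "resource never became ACTIVE",
                                 wait=5):
        resource = fetch_resource()
        if resource.status == 'ACTIVE':
            return resource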
def get_string_format_keys(fmt_string, old_style=True):
"""Gets a list of required keys from a format string
Required mostly for parsing base_path urls for required keys, which
use the old style string formatting.
"""
if old_style:
class AccessSaver(object):
def __init__(self):
self.keys = []
def __getitem__(self, key):
self.keys.append(key)
a = AccessSaver()
fmt_string % a
return a.keys
else:
keys = []
for t in string.Formatter().parse(fmt_string):
if t[1] is not None:
keys.append(t[1])
return keys
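# For example (illustrative only):
#     get_string_format_keys("/servers/%(server_id)s/ips")
#         -> ['server_id']
#     get_string_format_keys("/servers/{server_id}/ips", old_style=False)
#         -> ['server_id']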
def supports_microversion(adapter, microversion):
"""Determine if the given adapter supports the given microversion.
Checks the min and max microversion asserted by the service and checks
to make sure that ``min <= microversion <= max``.
:param adapter:
:class:`~keystoneauth1.adapter.Adapter` instance.
:param str microversion:
String containing the desired microversion.
:returns: True if the service supports the microversion.
:rtype: bool
"""
endpoint_data = adapter.get_endpoint_data()
if (endpoint_data.min_microversion
and endpoint_data.max_microversion
and discover.version_between(
endpoint_data.min_microversion,
endpoint_data.max_microversion,
microversion)):
return True
return False
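# For example (illustrative values): if the endpoint advertises microversions
# 2.1 through 2.60, supports_microversion(adapter, '2.53') returns True and
# supports_microversion(adapter, '2.61') returns False.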
def pick_microversion(session, required):
"""Get a new microversion if it is higher than session's default.
:param session: The session to use for making this request.
:type session: :class:`~keystoneauth1.adapter.Adapter`
:param required: Version that is required for an action.
:type required: String or tuple or None.
:return: ``required`` as a string if the ``session``'s default is too low,
the ``session``'s default otherwise. Returns ``None`` of both
are ``None``.
:raises: TypeError if ``required`` is invalid.
"""
if required is not None:
required = discover.normalize_version_number(required)
if session.default_microversion is not None:
default = discover.normalize_version_number(
session.default_microversion)
if required is None:
required = default
else:
required = (default if discover.version_match(required, default)
else required)
if required is not None:
return discover.version_to_string(required)
def maximum_supported_microversion(adapter, client_maximum):
"""Determinte the maximum microversion supported by both client and server.
:param adapter: :class:`~keystoneauth1.adapter.Adapter` instance.
:param client_maximum: Maximum microversion supported by the client.
If ``None``, ``None`` is returned.
:returns: the maximum supported microversion as string or ``None``.
"""
if client_maximum is None:
return None
# NOTE(dtantsur): if we cannot determine supported microversions, fall back
# to the default one.
try:
endpoint_data = adapter.get_endpoint_data()
except keystoneauth1.exceptions.discovery.DiscoveryFailure:
endpoint_data = None
if endpoint_data is None:
log = _log.setup_logging('openstack')
log.warning('Cannot determine endpoint data for service %s',
adapter.service_type or adapter.service_name)
return None
if not endpoint_data.max_microversion:
return None
client_max = discover.normalize_version_number(client_maximum)
server_max = discover.normalize_version_number(
endpoint_data.max_microversion)
if endpoint_data.min_microversion:
server_min = discover.normalize_version_number(
endpoint_data.min_microversion)
if client_max < server_min:
# NOTE(dtantsur): we may want to raise in this case, but this keeps
# the current behavior intact.
return None
result = min(client_max, server_max)
return discover.version_to_string(result)
| 35.683983
| 79
| 0.667961
|
97242a689a08d95819d2189f6652acfc7189c6c9
| 3,170
|
py
|
Python
|
textparser.py
|
apatel762/anki-parser
|
06357eb23b0dc930c848a2fcc36ebe5f433c0ee2
|
[
"MIT"
] | null | null | null |
textparser.py
|
apatel762/anki-parser
|
06357eb23b0dc930c848a2fcc36ebe5f433c0ee2
|
[
"MIT"
] | null | null | null |
textparser.py
|
apatel762/anki-parser
|
06357eb23b0dc930c848a2fcc36ebe5f433c0ee2
|
[
"MIT"
] | null | null | null |
class TextParser(object):
"""
A class containing methods used for parsing lists of data. Keeps the main
script clean.
"""
@staticmethod
def kanji_deck_only_main_vocab_word(lst):
"""
All of the text in each card of the Kanji deck is separated by
'\x1f' characters. This method cleanly separates the unneeded
information from those flashcards, returning only the first
vocabulary word learned from the card and nothing else.
"""
tmp = [note_data[0].split("\x1f")[1] for note_data in lst]
return TextParser.__get_usable_vocab(tmp)
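# Illustrative example (field values are made up): for a note whose raw
# field string is 'sort-field\x1fone, the thing\x1f...', split("\x1f")[1]
# gives 'one, the thing', which __get_usable_vocab() then trims to 'one'.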
@staticmethod
def __get_usable_vocab(lst):
"""
At this point, each piece of vocab in the list could contain
multiple words separated by commas e.g. one, the thing, it etc.
This function returns a list of vocab where only the first
word is kept (since this is the important one for searching
the sentence deck)
"""
return [TextParser.__get_usable_vocab_trim(v) for v in lst]
@staticmethod
def __get_usable_vocab_trim(v):
if ", " in v:
return v.split(", ")[0]
elif "<div>" in v:
return v[5:]
else:
return v
@staticmethod
def for_each_trim_to_first(query_results):
"""
The query which gets the sort field (IDs) (of all the sentence
cards that you haven't learned) returns them in a strange format
where every element in the results list is a tuple with one element.
This gets the first element from each tuple and puts it in a list
and returns that.
"""
return [e[0] for e in query_results]
@staticmethod
def for_each_get_doable_ids(query_results, learned_words):
"""
The query which gets all of the sentence card vocabulary literally
just returns all of the content on the card. This function will parse
that raw data and collect the IDs of the cards that haven't been
studied yet but are doable given my current knowledge.
It then returns a list of ids that can have their respective cards
updated to match this.
"""
tmp = []
learned_words_set = set(learned_words)
query_results = TextParser.for_each_trim_to_first(query_results)
for note in query_results:
if note.split("\t")[0].split("\x1f")[4].split(", ") == ['']:
continue
current_vocabs = set(note.split("\t")[0].split("\x1f")[4].split(", "))
if current_vocabs.issubset(learned_words_set):
tmp.append(note.split("\t")[0].split("\x1f")[1])
return TextParser.all_elements_string_to_int(tmp)
@staticmethod
def all_elements_string_to_int(lst):
"""
Convert every element in a list into an integer from a string
"""
return [int(e) for e in lst]
@staticmethod
def all_elements_int_to_string(lst):
"""
Convert every element in a list into a string from an integer
"""
return [str(e) for e in lst]
| 37.738095
| 82
| 0.626183
|
31f06772d6412e18e49b7afa61ca10562c63fee2
| 3,064
|
py
|
Python
|
modules/calendar/calendar.py
|
wvanlit/smart-mirror
|
707da82af3e241d91618a79dfaa423704ab0bddb
|
[
"MIT"
] | null | null | null |
modules/calendar/calendar.py
|
wvanlit/smart-mirror
|
707da82af3e241d91618a79dfaa423704ab0bddb
|
[
"MIT"
] | null | null | null |
modules/calendar/calendar.py
|
wvanlit/smart-mirror
|
707da82af3e241d91618a79dfaa423704ab0bddb
|
[
"MIT"
] | null | null | null |
from kivy.uix.widget import Widget
from kivy.properties import StringProperty
from kivy.clock import Clock
import datetime
from dateutil import tz
# Google Calendar Stuff
import pickle
import os.path
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
# If modifying these scopes, delete the file token.pickle.
SCOPES = ['https://www.googleapis.com/auth/calendar.readonly']
filepath = 'modules/calendar/'
class CalendarWidget(Widget):
eventText= StringProperty()
calendarService = None
def setUpCalendar(self):
"""Shows basic usage of the Google Calendar API.
Prints the start and name of the next 10 events on the user's calendar.
"""
creds = None
# The file token.pickle stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
if os.path.exists(filepath+'token.pickle'):
with open(filepath+'token.pickle', 'rb') as token:
creds = pickle.load(token)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
filepath+'credentials.json', SCOPES)
creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open(filepath+'token.pickle', 'wb') as token:
pickle.dump(creds, token)
self.calendarService = build('calendar', 'v3', credentials=creds)
def updateCalendar(self, dt):
n_events = 5
if self.calendarService is None:
eventText = "[color=#ffffff]Not Connected![/color]"
return
# Call the Calendar API
now = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time
events_result = self.calendarService.events().list(calendarId='primary', timeMin=now,
maxResults=n_events, singleEvents=True,
orderBy='startTime').execute()
events = events_result.get('items', [])
self.eventText = f'[color=#ffffff]Upcoming {n_events} Calendar Events:\n'
if not events:
self.eventText = '[color=#ffffff]No upcoming events found.[/color]'
for event in events:
start = event['start'].get('dateTime', event['start'].get('date'))
summary = event['summary']
self.eventText += f'{convertTimeToLocal(start)} - {summary}\n'
self.eventText += '[/color]'
from_zone = tz.tzutc()
to_zone = tz.tzlocal()
def convertTimeToLocal(inputText):
split_input = inputText.split('T')[0]
utc = datetime.datetime.strptime(split_input, '%Y-%m-%d')
utc = utc.replace(tzinfo=from_zone)
localTime = utc.astimezone(to_zone)
return localTime.strftime("%d %b")
def BuildWidget():
# Build Calendar
widget = CalendarWidget()
widget.setUpCalendar()
widget.updateCalendar(0)
Clock.schedule_interval(widget.updateCalendar, 600)
return widget
| 31.587629
| 87
| 0.699739
|
956597fce3b1684784fd3ce76cef7bc84cddbce0
| 2,674
|
py
|
Python
|
examples/background_mask_sphere.py
|
RI-imaging/qpimage
|
e572f0ece5b0a33b031e44ce390c2d40a1975851
|
[
"MIT"
] | 1
|
2022-02-13T15:14:21.000Z
|
2022-02-13T15:14:21.000Z
|
examples/background_mask_sphere.py
|
RI-imaging/qpimage
|
e572f0ece5b0a33b031e44ce390c2d40a1975851
|
[
"MIT"
] | 13
|
2017-10-10T11:23:56.000Z
|
2021-12-06T00:02:46.000Z
|
examples/background_mask_sphere.py
|
RI-imaging/qpimage
|
e572f0ece5b0a33b031e44ce390c2d40a1975851
|
[
"MIT"
] | 2
|
2018-04-05T04:21:34.000Z
|
2020-07-21T11:57:04.000Z
|
"""Object-mask background image correction
In some cases, using :ref:`only the border of the phase image
<example_background_poly2o>` for background correction
might not be enough. To increase the area of the background image,
it is possible to mask only the cell area. The :ref:`qpsphere package
<qpsphere:index>` provides the convenience method
:func:`qpsphere.cnvnc.bg_phase_mask_for_qpi` which computes
the background phase mask based on the position and radius of an
automatically detected spherical phase object. The size of the
mask can be tuned with the `radial_clearance` parameter.
Note that the various methods used in the examples for determining
such a phase mask can be combined. Also note that before
applying the method discussed here, an initial background correction
might be necessary.
"""
import matplotlib.pylab as plt
import numpy as np
# The data are stored in a .jpg file (lossy compression).
# If `PIL` is not found, try installing the `pillow` package.
from PIL import Image
import qpimage
import qpsphere
edata = np.array(Image.open("./data/hologram_cell_curved_bg.jpg"))
# create QPImage instance
qpi = qpimage.QPImage(data=edata,
which_data="hologram",
meta_data={"medium index": 1.335,
"wavelength": 550e-9,
"pixel size": 0.107e-6})
pha0 = qpi.pha
# determine the position of the cell (takes a while)
mask = qpsphere.cnvnc.bg_phase_mask_for_qpi(qpi=qpi,
r0=7e-6,
method="edge",
model="projection",
radial_clearance=1.15)
# background correction using polynomial and mask
qpi.compute_bg(which_data=["phase"],
fit_offset="fit",
fit_profile="poly2o",
from_mask=mask,
)
pha_corr = qpi.pha
# plot phase data
fig = plt.figure(figsize=(8, 3.3))
phakw = {"cmap": "viridis",
"interpolation": "bicubic",
"vmin": pha_corr.min(),
"vmax": pha_corr.max()}
ax1 = plt.subplot(131, title="input phase")
map1 = ax1.imshow(pha0, **phakw)
plt.colorbar(map1, ax=ax1, fraction=.067, pad=0.04)
ax2 = plt.subplot(132, title="background phase mask")
map2 = ax2.imshow(1.*mask, cmap="gray_r")
plt.colorbar(map2, ax=ax2, fraction=.067, pad=0.04)
ax3 = plt.subplot(133, title="polynomial correction")
map3 = ax3.imshow(pha_corr, **phakw)
plt.colorbar(map3, ax=ax3, fraction=.067, pad=0.04)
# disable axes
[ax.axis("off") for ax in [ax1, ax2, ax3]]
plt.tight_layout(w_pad=0)
plt.show()
| 35.184211
| 69
| 0.648467
|
750f449a7e4e18500729621fde95f5f7e2e7adb7
| 570
|
py
|
Python
|
config/celery_app.py
|
flashboyka/initpy
|
d6000c6141e54b35619ebed843039bd8f9667353
|
[
"MIT"
] | null | null | null |
config/celery_app.py
|
flashboyka/initpy
|
d6000c6141e54b35619ebed843039bd8f9667353
|
[
"MIT"
] | 3
|
2019-07-29T05:33:39.000Z
|
2022-01-13T01:24:41.000Z
|
config/celery_app.py
|
flashboyka/initpy
|
d6000c6141e54b35619ebed843039bd8f9667353
|
[
"MIT"
] | 2
|
2019-07-12T04:12:20.000Z
|
2019-07-29T04:34:04.000Z
|
import os
from celery import Celery
# set the default Django settings module for the 'celery' program.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.local")
app = Celery("initpy")
# Using a string here means the worker doesn't have to serialize
# the configuration object to child processes.
# - namespace='CELERY' means all celery-related configuration keys
# should have a `CELERY_` prefix.
app.config_from_object("django.conf:settings", namespace="CELERY")
# Load task modules from all registered Django app configs.
app.autodiscover_tasks()
| 33.529412
| 72
| 0.782456
|
c82dbd866c8cb5768cd2a037ea09371a0f1bbcb0
| 633
|
py
|
Python
|
dream_project/manage.py
|
saikotprof/dream_project
|
938ba854d5e2277effe19b6945c4d9ab5d6eda78
|
[
"MIT"
] | null | null | null |
dream_project/manage.py
|
saikotprof/dream_project
|
938ba854d5e2277effe19b6945c4d9ab5d6eda78
|
[
"MIT"
] | null | null | null |
dream_project/manage.py
|
saikotprof/dream_project
|
938ba854d5e2277effe19b6945c4d9ab5d6eda78
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'dream_project.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 28.772727
| 77
| 0.685624
|
4d4b5194e37fad08add68f2709d4bcd188328849
| 8,629
|
py
|
Python
|
tests/safety/test_subaru.py
|
martinl/panda
|
c2491558770f90f2fb645a9828a9044c8f1f826e
|
[
"MIT"
] | null | null | null |
tests/safety/test_subaru.py
|
martinl/panda
|
c2491558770f90f2fb645a9828a9044c8f1f826e
|
[
"MIT"
] | 1
|
2021-05-06T18:39:54.000Z
|
2021-05-26T13:38:12.000Z
|
tests/safety/test_subaru.py
|
martinl/panda
|
c2491558770f90f2fb645a9828a9044c8f1f826e
|
[
"MIT"
] | 10
|
2020-05-13T19:37:38.000Z
|
2021-05-19T20:03:58.000Z
|
#!/usr/bin/env python3
import unittest
import numpy as np
from panda import Panda
from panda.tests.safety import libpandasafety_py
import panda.tests.safety.common as common
from panda.tests.safety.common import CANPackerPanda
MAX_RATE_UP = 50
MAX_RATE_DOWN = 70
MAX_RT_DELTA = 940
RT_INTERVAL = 250000
DRIVER_TORQUE_ALLOWANCE = 60
DRIVER_TORQUE_FACTOR = 10
class TestSubaruSafety(common.PandaSafetyTest):
cnt_gas = 0
cnt_torque_driver = 0
cnt_cruise = 0
cnt_speed = 0
cnt_brake = 0
TX_MSGS = [[0x122, 0], [0x221, 0], [0x322, 0]]
STANDSTILL_THRESHOLD = 20 # 1kph (see dbc file)
RELAY_MALFUNCTION_ADDR = 0x122
RELAY_MALFUNCTION_BUS = 0
FWD_BLACKLISTED_ADDRS = {2: [0x122, 0x221, 0x322]}
FWD_BUS_LOOKUP = {0: 2, 2: 0}
MAX_STEER = 2047
def setUp(self):
self.packer = CANPackerPanda("subaru_global_2017_generated")
self.safety = libpandasafety_py.libpandasafety
self.safety.set_safety_hooks(Panda.SAFETY_SUBARU, 0)
self.safety.init_tests()
def _set_prev_torque(self, t):
self.safety.set_desired_torque_last(t)
self.safety.set_rt_torque_last(t)
def _torque_driver_msg(self, torque):
values = {"Steer_Torque_Sensor": torque, "Counter": self.cnt_torque_driver % 4}
self.__class__.cnt_torque_driver += 1
return self.packer.make_can_msg_panda("Steering_Torque", 0, values)
def _speed_msg(self, speed):
# subaru safety doesn't use the scaled value, so undo the scaling
values = {s: speed * 0.057 for s in ["FR", "FL", "RR", "RL"]}
values["Counter"] = self.cnt_speed % 4
self.__class__.cnt_speed += 1
return self.packer.make_can_msg_panda("Wheel_Speeds", 0, values)
def _brake_msg(self, brake):
values = {"Brake": brake, "Counter": self.cnt_brake % 4}
self.__class__.cnt_brake += 1
return self.packer.make_can_msg_panda("Brake_Status", 0, values)
def _torque_msg(self, torque):
values = {"LKAS_Output": torque}
return self.packer.make_can_msg_panda("ES_LKAS", 0, values)
def _gas_msg(self, gas):
values = {"Throttle_Pedal": gas, "Counter": self.cnt_gas % 4}
self.__class__.cnt_gas += 1
return self.packer.make_can_msg_panda("Throttle", 0, values)
def _pcm_status_msg(self, enable):
values = {"Cruise_Activated": enable, "Counter": self.cnt_cruise % 4}
self.__class__.cnt_cruise += 1
return self.packer.make_can_msg_panda("CruiseControl", 0, values)
def _set_torque_driver(self, min_t, max_t):
for _ in range(0, 5):
self._rx(self._torque_driver_msg(min_t))
self._rx(self._torque_driver_msg(max_t))
def test_steer_safety_check(self):
for enabled in [0, 1]:
for t in range(-3000, 3000):
self.safety.set_controls_allowed(enabled)
self._set_prev_torque(t)
block = abs(t) > self.MAX_STEER or (not enabled and abs(t) > 0)
self.assertEqual(not block, self._tx(self._torque_msg(t)))
def test_non_realtime_limit_up(self):
self._set_torque_driver(0, 0)
self.safety.set_controls_allowed(True)
self._set_prev_torque(0)
self.assertTrue(self._tx(self._torque_msg(MAX_RATE_UP)))
self._set_prev_torque(0)
self.assertTrue(self._tx(self._torque_msg(-MAX_RATE_UP)))
self._set_prev_torque(0)
self.assertFalse(self._tx(self._torque_msg(MAX_RATE_UP + 1)))
self.safety.set_controls_allowed(True)
self._set_prev_torque(0)
self.assertFalse(self._tx(self._torque_msg(-MAX_RATE_UP - 1)))
def test_non_realtime_limit_down(self):
self._set_torque_driver(0, 0)
self.safety.set_controls_allowed(True)
def test_against_torque_driver(self):
self.safety.set_controls_allowed(True)
for sign in [-1, 1]:
for t in np.arange(0, DRIVER_TORQUE_ALLOWANCE + 1, 1):
t *= -sign
self._set_torque_driver(t, t)
self._set_prev_torque(self.MAX_STEER * sign)
self.assertTrue(self._tx(self._torque_msg(self.MAX_STEER * sign)))
self._set_torque_driver(DRIVER_TORQUE_ALLOWANCE + 1, DRIVER_TORQUE_ALLOWANCE + 1)
self.assertFalse(self._tx(self._torque_msg(-self.MAX_STEER)))
# arbitrary high driver torque to ensure max steer torque is allowed
max_driver_torque = int(self.MAX_STEER / DRIVER_TORQUE_FACTOR + DRIVER_TORQUE_ALLOWANCE + 1)
# spot check some individual cases
for sign in [-1, 1]:
driver_torque = (DRIVER_TORQUE_ALLOWANCE + 10) * sign
torque_desired = (self.MAX_STEER - 10 * DRIVER_TORQUE_FACTOR) * sign
delta = 1 * sign
self._set_prev_torque(torque_desired)
self._set_torque_driver(-driver_torque, -driver_torque)
self.assertTrue(self._tx(self._torque_msg(torque_desired)))
self._set_prev_torque(torque_desired + delta)
self._set_torque_driver(-driver_torque, -driver_torque)
self.assertFalse(self._tx(self._torque_msg(torque_desired + delta)))
self._set_prev_torque(self.MAX_STEER * sign)
self._set_torque_driver(-max_driver_torque * sign, -max_driver_torque * sign)
self.assertTrue(self._tx(self._torque_msg((self.MAX_STEER - MAX_RATE_DOWN) * sign)))
self._set_prev_torque(self.MAX_STEER * sign)
self._set_torque_driver(-max_driver_torque * sign, -max_driver_torque * sign)
self.assertTrue(self._tx(self._torque_msg(0)))
self._set_prev_torque(self.MAX_STEER * sign)
self._set_torque_driver(-max_driver_torque * sign, -max_driver_torque * sign)
self.assertFalse(self._tx(self._torque_msg((self.MAX_STEER - MAX_RATE_DOWN + 1) * sign)))
def test_realtime_limits(self):
self.safety.set_controls_allowed(True)
for sign in [-1, 1]:
self.safety.init_tests()
self._set_prev_torque(0)
self._set_torque_driver(0, 0)
for t in np.arange(0, MAX_RT_DELTA, 1):
t *= sign
self.assertTrue(self._tx(self._torque_msg(t)))
self.assertFalse(self._tx(self._torque_msg(sign * (MAX_RT_DELTA + 1))))
self._set_prev_torque(0)
for t in np.arange(0, MAX_RT_DELTA, 1):
t *= sign
self.assertTrue(self._tx(self._torque_msg(t)))
# Increase timer to update rt_torque_last
self.safety.set_timer(RT_INTERVAL + 1)
self.assertTrue(self._tx(self._torque_msg(sign * (MAX_RT_DELTA - 1))))
self.assertTrue(self._tx(self._torque_msg(sign * (MAX_RT_DELTA + 1))))
class TestSubaru2020Safety(TestSubaruSafety):
MAX_STEER = 1439
def setUp(self):
self.packer = CANPackerPanda("subaru_global_2017_generated")
self.safety = libpandasafety_py.libpandasafety
self.safety.set_safety_hooks(Panda.SAFETY_SUBARU, 1)
self.safety.init_tests()
class TestSubaruGen2Safety(TestSubaruSafety):
TX_MSGS = [[0x122, 0], [0x322, 0], [0x139, 2]]
FWD_BLACKLISTED_ADDRS = {0: [0x139], 2: [0x122, 0x322]}
def setUp(self):
self.packer = CANPackerPanda("subaru_global_2017_generated")
self.safety = libpandasafety_py.libpandasafety
self.safety.set_safety_hooks(Panda.SAFETY_SUBARU_GEN2, 0)
self.safety.init_tests()
def _speed_msg(self, speed):
# subaru safety doesn't use the scaled value, so undo the scaling
values = {s: speed * 0.057 for s in ["FR", "FL", "RR", "RL"]}
values["Counter"] = self.cnt_speed % 4
self.__class__.cnt_speed += 1
return self.packer.make_can_msg_panda("Wheel_Speeds", 1, values)
def _brake_msg(self, brake):
values = {"Brake": brake, "Counter": self.cnt_brake % 4}
self.__class__.cnt_brake += 1
return self.packer.make_can_msg_panda("Brake_Status", 1, values)
def _pcm_status_msg(self, enable):
values = {"Cruise_Activated": enable, "Counter": self.cnt_cruise % 4}
self.__class__.cnt_cruise += 1
return self.packer.make_can_msg_panda("CruiseControl", 1, values)
class TestSubaruHybridSafety(TestSubaruSafety):
TX_MSGS = [[0x122, 0], [0x322, 0], [0x139, 2]]
FWD_BLACKLISTED_ADDRS = {0: [0x139], 2: [0x122, 0x322]}
def setUp(self):
self.packer = CANPackerPanda("subaru_global_2020_hybrid_generated")
self.safety = libpandasafety_py.libpandasafety
self.safety.set_safety_hooks(Panda.SAFETY_SUBARU_HYBRID, 0)
self.safety.init_tests()
def _brake_msg(self, brake):
values = {"Brake": brake}
return self.packer.make_can_msg_panda("Brake_Hybrid", 1, values)
def _gas_msg(self, gas):
values = {"Throttle_Pedal": gas, "Counter": self.cnt_gas % 4}
self.__class__.cnt_gas += 1
return self.packer.make_can_msg_panda("Throttle_Hybrid", 1, values)
def _pcm_status_msg(self, enable):
values = {"Cruise_Activated": enable, "Counter": self.cnt_cruise % 4}
self.__class__.cnt_cruise += 1
return self.packer.make_can_msg_panda("ES_DashStatus", 2, values)
if __name__ == "__main__":
unittest.main()
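A minimal, illustrative sketch of the driver-torque allowance rule that test_against_torque_driver exercises above: once the measured driver torque exceeds DRIVER_TORQUE_ALLOWANCE, the permitted command torque shrinks by DRIVER_TORQUE_FACTOR per unit of excess. This is an editor's sketch for clarity, not the panda firmware implementation; the helper name is hypothetical.

def allowed_cmd_torque(driver_torque, max_steer, allowance, factor):
    # Largest command-torque magnitude permitted against the given driver torque.
    excess = max(abs(driver_torque) - allowance, 0)
    return max(max_steer - excess * factor, 0)

# Consistent with the spot check in test_against_torque_driver:
# allowed_cmd_torque(DRIVER_TORQUE_ALLOWANCE + 10, MAX_STEER,
#                    DRIVER_TORQUE_ALLOWANCE, DRIVER_TORQUE_FACTOR)
# == MAX_STEER - 10 * DRIVER_TORQUE_FACTOR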
| 37.354978 | 96 | 0.713988 |
243736df9a98f09105de38d78215ccb5d2792b19 | 1,728 | py | Python | roppy/averator.py | nilsmkMET/roppy | c68d698fa4970174af2c7f7137bd4a3e5983b644 | ["MIT"] | null | null | null | roppy/averator.py | nilsmkMET/roppy | c68d698fa4970174af2c7f7137bd4a3e5983b644 | ["MIT"] | null | null | null | roppy/averator.py | nilsmkMET/roppy | c68d698fa4970174af2c7f7137bd4a3e5983b644 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
"""Generator for moving averages from ROMS file(s)"""
import numpy as np
def roms_averator(ncid, var_name, L, grd):
"""Generator for moving averages from ROMS file(s)
var_name : text string, name of NetCDF variable
ncid : an open NetCDF Dataset or MFDataset
grd : a roppy.SGrid instance
L : integer, length of averaging period (only even presently)
"""
n_rec = len(ncid.dimensions['ocean_time'])  # Number of time records
# TODO: Make grd optional
# Only use of grd is to work on a subdomain,
# alternatively: use subgrid specification
# make attribute grd.subgrid
N = L // 2
assert 2*N == L, "Only even averaging periods allowed (presently)"
# Dimension and staggering
if var_name == 'u': # 3D u-point
I, J = grd.Iu, grd.Ju
s = (slice(None), grd.Ju, grd.Iu)
elif var_name == 'v': # 3D v-point
I, J = grd.Iv, grd.Jv
s = (slice(None), grd.Jv, grd.Iv)
elif var_name == "ocean_time": # scalar
s = ()
else: # default = 3D rho-point
I, J = grd.I, grd.J
s = (slice(None), grd.J, grd.I)
# First average
MF = ncid.variables[var_name][(0,) + s]/(4*N)
for t in range(1, 2*N):
MF += ncid.variables[var_name][(t,) + s] / (2*N)
MF += ncid.variables[var_name][(2*N,) + s]/(4*N)
yield MF
# Update the average
for t in range(N+1, n_rec - N):
MF += ncid.variables[var_name][(t+N,) + s]/(4*N)
MF += ncid.variables[var_name][(t+N-1,) + s]/(4*N)
MF -= ncid.variables[var_name][(t-N,) + s]/(4*N)
MF -= ncid.variables[var_name][(t-N-1,) + s]/(4*N)
yield MF
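A brief usage sketch of the generator above, under assumptions: the file name, variable name and averaging length are hypothetical, and roppy.SGrid is assumed to accept an open Dataset and to provide the I/J/Iu/Ju/Iv/Jv index attributes the generator expects. Illustrative only, not part of the original module.

import netCDF4
import roppy

# Hypothetical ROMS output file; replace with a real averages/history file.
with netCDF4.Dataset("ocean_avg_0001.nc") as nc:
    grd = roppy.SGrid(nc)
    L = 24  # averaging window in records (must be even)
    for k, temp_avg in enumerate(roms_averator(nc, "temp", L, grd)):
        print(k, float(temp_avg.mean()))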
| 29.793103 | 73 | 0.555556 |
441594322682bbf84319f05f238ca6ce65298592 | 1,871 | py | Python | xdplayer/vdlauncher.py | anjakefala/xdplayer | 3508e77b4f9fe45b8a8a61bdf3ce1d895b371f91 | ["MIT"] | 9 | 2021-03-11T00:55:37.000Z | 2022-02-15T03:16:33.000Z | xdplayer/vdlauncher.py | anjakefala/xdplayer | 3508e77b4f9fe45b8a8a61bdf3ce1d895b371f91 | ["MIT"] | 3 | 2022-02-06T06:01:01.000Z | 2022-03-12T21:27:01.000Z | xdplayer/vdlauncher.py | anjakefala/xdplayer | 3508e77b4f9fe45b8a8a61bdf3ce1d895b371f91 | ["MIT"] | 2 | 2022-01-05T21:24:49.000Z | 2022-01-29T20:20:01.000Z |
#!/usr/bin/env python3
import os
import stat
from visidata import SqliteQuerySheet, Path, Column, date
class vdLauncher(SqliteQuerySheet):
'Load puzzles started, but not submitted by teamid.'
@classmethod
def stat_guesses(cls, fn):
'Return stat of Path($TEAMDIR/{xdid}.xd-guesses.jsonl).'
xdid = Path(fn).stem
p = Path(os.getenv('TEAMDIR', '.'))/(xdid+'.xd-guesses.jsonl')
return p.stat()
@classmethod
def is_submitted(cls, fn):
'Return True if exists and is readonly.'
g = cls.stat_guesses(fn)
if not g:
return False
return not (cls.stat_guesses(fn).st_mode & stat.S_IWUSR)
@classmethod
def modtime(cls, fn):
g = cls.stat_guesses(fn)
if not g:
return None
return g.st_mtime
@classmethod
def solve_hours(cls, fn):
g = cls.stat_guesses(fn)
if not g:
return None
return (g.st_ctime - g.st_mtime)/3600
query = '''SELECT
solvings.teamid,
solvings.correct*100/solvings.nonblocks AS completed,
date_published,
size,
title,
author,
editor,
copyright,
xdmeta.xdid,
path
FROM xdmeta
LEFT OUTER JOIN solvings ON xdmeta.xdid = solvings.xdid
WHERE (solvings.submitted = 0 AND solvings.teamid = ?)
'''
columns = [
Column('modtime', width=0, type=date, getter=lambda c,r: vdLauncher.modtime(r[-1])),
Column('submitted', width=0, getter=lambda c,r: vdLauncher.is_submitted(r[-1])),
Column('solve_h', type=float, getter=lambda c,r: vdLauncher.solve_hours(r[-1])),
]
_ordering = [('modtime', True)] # sort by reverse modtime initially
parms = [os.getenv('TEAMID', '')]
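An illustrative, standalone sketch of the convention is_submitted relies on above: a guesses file counts as submitted once its owner write bit is cleared. The path below is hypothetical; in the sheet it is $TEAMDIR/<xdid>.xd-guesses.jsonl.

import os
import stat

guesses = "/tmp/abc123.xd-guesses.jsonl"   # hypothetical guesses file
open(guesses, "w").close()

def looks_submitted(path):
    # Mirrors vdLauncher.is_submitted: submitted == owner write bit cleared.
    return not (os.stat(path).st_mode & stat.S_IWUSR)

print(looks_submitted(guesses))   # False while still writable
os.chmod(guesses, stat.S_IRUSR)   # make read-only, i.e. "submitted"
print(looks_submitted(guesses))   # True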
| 29.234375 | 96 | 0.571887 |
598d1e0b714fde1d36ab82e6e6908b5444b4d4e8 | 117,253 | py | Python | kubernetes_asyncio/client/api/storage_v1beta1_api.py | tomplus/kubernetes-asyncio | 11c3eb4d50ae822545572aa7b8c15f7153f65a1c | ["Apache-2.0"] | null | null | null | kubernetes_asyncio/client/api/storage_v1beta1_api.py | tomplus/kubernetes-asyncio | 11c3eb4d50ae822545572aa7b8c15f7153f65a1c | ["Apache-2.0"] | null | null | null | kubernetes_asyncio/client/api/storage_v1beta1_api.py | tomplus/kubernetes-asyncio | 11c3eb4d50ae822545572aa7b8c15f7153f65a1c | ["Apache-2.0"] | null | null | null |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1.23.6
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from kubernetes_asyncio.client.api_client import ApiClient
from kubernetes_asyncio.client.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
class StorageV1beta1Api(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_namespaced_csi_storage_capacity(self, namespace, body, **kwargs): # noqa: E501
"""create_namespaced_csi_storage_capacity # noqa: E501
create a CSIStorageCapacity # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_csi_storage_capacity(namespace, body, async_req=True)
>>> result = thread.get()
:param namespace: object name and auth scope, such as for teams and projects (required)
:type namespace: str
:param body: (required)
:type body: V1beta1CSIStorageCapacity
:param pretty: If 'true', then the output is pretty printed.
:type pretty: str
:param dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:type dry_run: str
:param field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:type field_manager: str
:param field_validation: fieldValidation determines how the server should respond to unknown/duplicate fields in the object in the request. Introduced as alpha in 1.23, older servers or servers with the `ServerSideFieldValidation` feature disabled will discard valid values specified in this param and not perform any server side field validation. Valid values are: - Ignore: ignores unknown/duplicate fields. - Warn: responds with a warning for each unknown/duplicate field, but successfully serves the request. - Strict: fails the request on unknown/duplicate fields.
:type field_validation: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: V1beta1CSIStorageCapacity
"""
kwargs['_return_http_data_only'] = True
return self.create_namespaced_csi_storage_capacity_with_http_info(namespace, body, **kwargs) # noqa: E501
def create_namespaced_csi_storage_capacity_with_http_info(self, namespace, body, **kwargs): # noqa: E501
"""create_namespaced_csi_storage_capacity # noqa: E501
create a CSIStorageCapacity # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_csi_storage_capacity_with_http_info(namespace, body, async_req=True)
>>> result = thread.get()
:param namespace: object name and auth scope, such as for teams and projects (required)
:type namespace: str
:param body: (required)
:type body: V1beta1CSIStorageCapacity
:param pretty: If 'true', then the output is pretty printed.
:type pretty: str
:param dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:type dry_run: str
:param field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:type field_manager: str
:param field_validation: fieldValidation determines how the server should respond to unknown/duplicate fields in the object in the request. Introduced as alpha in 1.23, older servers or servers with the `ServerSideFieldValidation` feature disabled will discard valid values specified in this param and not perform any server side field validation. Valid values are: - Ignore: ignores unknown/duplicate fields. - Warn: responds with a warning for each unknown/duplicate field, but successfully serves the request. - Strict: fails the request on unknown/duplicate fields.
:type field_validation: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without head status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:type _content_type: string, optional: force content-type for the request
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(V1beta1CSIStorageCapacity, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'namespace',
'body',
'pretty',
'dry_run',
'field_manager',
'field_validation'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth',
'_content_type',
'_headers'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method create_namespaced_csi_storage_capacity" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `create_namespaced_csi_storage_capacity`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `create_namespaced_csi_storage_capacity`") # noqa: E501
collection_formats = {}
path_params = {}
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
header_params = dict(local_var_params.get('_headers', {}))
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
response_types_map = {
200: "V1beta1CSIStorageCapacity",
201: "V1beta1CSIStorageCapacity",
202: "V1beta1CSIStorageCapacity",
401: None,
}
return self.api_client.call_api(
'/apis/storage.k8s.io/v1beta1/namespaces/{namespace}/csistoragecapacities', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_types_map=response_types_map,
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
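    # Editor's note (illustrative, kept as comments so the generated class is unchanged):
    # a minimal call of create_namespaced_csi_storage_capacity, following its own
    # docstring above; the namespace and body values are hypothetical.
    #
    #   from kubernetes_asyncio import client
    #   api = client.StorageV1beta1Api()
    #   body = client.V1beta1CSIStorageCapacity(
    #       storage_class_name="standard",
    #       metadata=client.V1ObjectMeta(generate_name="example-"))
    #   created = api.create_namespaced_csi_storage_capacity("default", body)
    #   # or, per the docstring, pass async_req=True and call .get() on the result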
def delete_collection_namespaced_csi_storage_capacity(self, namespace, **kwargs): # noqa: E501
"""delete_collection_namespaced_csi_storage_capacity # noqa: E501
delete collection of CSIStorageCapacity # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_csi_storage_capacity(namespace, async_req=True)
>>> result = thread.get()
:param namespace: object name and auth scope, such as for teams and projects (required)
:type namespace: str
:param pretty: If 'true', then the output is pretty printed.
:type pretty: str
:param _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:type _continue: str
:param dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:type dry_run: str
:param field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:type field_selector: str
:param grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:type grace_period_seconds: int
:param label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:type label_selector: str
:param limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:type limit: int
:param orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:type orphan_dependents: bool
:param propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:type propagation_policy: str
:param resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:type resource_version: str
:param resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:type resource_version_match: str
:param timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:type timeout_seconds: int
:param body:
:type body: V1DeleteOptions
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: V1Status
"""
kwargs['_return_http_data_only'] = True
return self.delete_collection_namespaced_csi_storage_capacity_with_http_info(namespace, **kwargs) # noqa: E501
def delete_collection_namespaced_csi_storage_capacity_with_http_info(self, namespace, **kwargs): # noqa: E501
"""delete_collection_namespaced_csi_storage_capacity # noqa: E501
delete collection of CSIStorageCapacity # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_csi_storage_capacity_with_http_info(namespace, async_req=True)
>>> result = thread.get()
:param namespace: object name and auth scope, such as for teams and projects (required)
:type namespace: str
:param pretty: If 'true', then the output is pretty printed.
:type pretty: str
:param _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:type _continue: str
:param dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:type dry_run: str
:param field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:type field_selector: str
:param grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:type grace_period_seconds: int
:param label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:type label_selector: str
:param limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:type limit: int
:param orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:type orphan_dependents: bool
:param propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:type propagation_policy: str
:param resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:type resource_version: str
:param resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:type resource_version_match: str
:param timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:type timeout_seconds: int
:param body:
:type body: V1DeleteOptions
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without head status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:type _content_type: string, optional: force content-type for the request
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'namespace',
'pretty',
'_continue',
'dry_run',
'field_selector',
'grace_period_seconds',
'label_selector',
'limit',
'orphan_dependents',
'propagation_policy',
'resource_version',
'resource_version_match',
'timeout_seconds',
'body'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth',
'_content_type',
'_headers'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_collection_namespaced_csi_storage_capacity" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_csi_storage_capacity`") # noqa: E501
collection_formats = {}
path_params = {}
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
header_params = dict(local_var_params.get('_headers', {}))
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
response_types_map = {
200: "V1Status",
401: None,
}
return self.api_client.call_api(
'/apis/storage.k8s.io/v1beta1/namespaces/{namespace}/csistoragecapacities', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_types_map=response_types_map,
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
def delete_namespaced_csi_storage_capacity(self, name, namespace, **kwargs): # noqa: E501
"""delete_namespaced_csi_storage_capacity # noqa: E501
delete a CSIStorageCapacity # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_csi_storage_capacity(name, namespace, async_req=True)
>>> result = thread.get()
:param name: name of the CSIStorageCapacity (required)
:type name: str
:param namespace: object name and auth scope, such as for teams and projects (required)
:type namespace: str
:param pretty: If 'true', then the output is pretty printed.
:type pretty: str
:param dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:type dry_run: str
:param grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:type grace_period_seconds: int
:param orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:type orphan_dependents: bool
:param propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:type propagation_policy: str
:param body:
:type body: V1DeleteOptions
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: V1Status
"""
kwargs['_return_http_data_only'] = True
return self.delete_namespaced_csi_storage_capacity_with_http_info(name, namespace, **kwargs) # noqa: E501
def delete_namespaced_csi_storage_capacity_with_http_info(self, name, namespace, **kwargs): # noqa: E501
"""delete_namespaced_csi_storage_capacity # noqa: E501
delete a CSIStorageCapacity # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_csi_storage_capacity_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param name: name of the CSIStorageCapacity (required)
:type name: str
:param namespace: object name and auth scope, such as for teams and projects (required)
:type namespace: str
:param pretty: If 'true', then the output is pretty printed.
:type pretty: str
:param dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:type dry_run: str
:param grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:type grace_period_seconds: int
:param orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:type orphan_dependents: bool
:param propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:type propagation_policy: str
:param body:
:type body: V1DeleteOptions
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without head status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:type _content_type: string, optional: force content-type for the request
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'name',
'namespace',
'pretty',
'dry_run',
'grace_period_seconds',
'orphan_dependents',
'propagation_policy',
'body'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth',
'_content_type',
'_headers'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_namespaced_csi_storage_capacity" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `delete_namespaced_csi_storage_capacity`") # noqa: E501
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `delete_namespaced_csi_storage_capacity`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
header_params = dict(local_var_params.get('_headers', {}))
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
response_types_map = {
200: "V1Status",
202: "V1Status",
401: None,
}
return self.api_client.call_api(
'/apis/storage.k8s.io/v1beta1/namespaces/{namespace}/csistoragecapacities/{name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_types_map=response_types_map,
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
def get_api_resources(self, **kwargs): # noqa: E501
"""get_api_resources # noqa: E501
get available resources # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_resources(async_req=True)
>>> result = thread.get()
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: V1APIResourceList
"""
kwargs['_return_http_data_only'] = True
return self.get_api_resources_with_http_info(**kwargs) # noqa: E501
def get_api_resources_with_http_info(self, **kwargs): # noqa: E501
"""get_api_resources # noqa: E501
get available resources # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_resources_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without head status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:type _content_type: string, optional: force content-type for the request
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(V1APIResourceList, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth',
'_content_type',
'_headers'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_api_resources" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = dict(local_var_params.get('_headers', {}))
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
response_types_map = {
200: "V1APIResourceList",
401: None,
}
return self.api_client.call_api(
'/apis/storage.k8s.io/v1beta1/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_types_map=response_types_map,
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
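    # Editor's note (illustrative, kept as comments): the limit/_continue parameters
    # documented in the list method just below support chunked listing; a hypothetical
    # pagination loop over CSIStorageCapacity objects, assuming an async context,
    # might look like this.
    #
    #   api = StorageV1beta1Api()
    #   token = None
    #   while True:
    #       page = await api.list_csi_storage_capacity_for_all_namespaces(
    #           limit=500, _continue=token)
    #       for item in page.items:
    #           print(item.metadata.namespace, item.metadata.name)
    #       token = page.metadata._continue
    #       if not token:
    #           break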
def list_csi_storage_capacity_for_all_namespaces(self, **kwargs): # noqa: E501
"""list_csi_storage_capacity_for_all_namespaces # noqa: E501
list or watch objects of kind CSIStorageCapacity # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_csi_storage_capacity_for_all_namespaces(async_req=True)
>>> result = thread.get()
:param allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
:type allow_watch_bookmarks: bool
:param _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:type _continue: str
:param field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:type field_selector: str
:param label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:type label_selector: str
:param limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:type limit: int
:param pretty: If 'true', then the output is pretty printed.
:type pretty: str
:param resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:type resource_version: str
:param resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:type resource_version_match: str
:param timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:type timeout_seconds: int
:param watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:type watch: bool
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: V1beta1CSIStorageCapacityList
"""
kwargs['_return_http_data_only'] = True
return self.list_csi_storage_capacity_for_all_namespaces_with_http_info(**kwargs) # noqa: E501
def list_csi_storage_capacity_for_all_namespaces_with_http_info(self, **kwargs): # noqa: E501
"""list_csi_storage_capacity_for_all_namespaces # noqa: E501
list or watch objects of kind CSIStorageCapacity # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_csi_storage_capacity_for_all_namespaces_with_http_info(async_req=True)
>>> result = thread.get()
:param allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
:type allow_watch_bookmarks: bool
:param _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:type _continue: str
:param field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:type field_selector: str
:param label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:type label_selector: str
:param limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:type limit: int
:param pretty: If 'true', then the output is pretty printed.
:type pretty: str
:param resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:type resource_version: str
:param resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:type resource_version_match: str
:param timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:type timeout_seconds: int
:param watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:type watch: bool
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: return the response data only, without the status code and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:param _content_type: force content-type for the request
:type _content_type: str, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(V1beta1CSIStorageCapacityList, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'allow_watch_bookmarks',
'_continue',
'field_selector',
'label_selector',
'limit',
'pretty',
'resource_version',
'resource_version_match',
'timeout_seconds',
'watch'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth',
'_content_type',
'_headers'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method list_csi_storage_capacity_for_all_namespaces" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
query_params.append(('watch', local_var_params['watch'])) # noqa: E501
header_params = dict(local_var_params.get('_headers', {}))
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
response_types_map = {
200: "V1beta1CSIStorageCapacityList",
401: None,
}
return self.api_client.call_api(
'/apis/storage.k8s.io/v1beta1/csistoragecapacities', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_types_map=response_types_map,
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
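# Hedged usage sketch for the cluster-wide list above (assumptions: this class is the
# one exposed as kubernetes.client.StorageV1beta1Api and a kubeconfig is reachable;
# the names below follow the upstream client, not something defined in this file):
#
#   from kubernetes import client, config
#   config.load_kube_config()
#   api = client.StorageV1beta1Api()
#   caps = api.list_csi_storage_capacity_for_all_namespaces(limit=100)
#   for cap in caps.items:
#       print(cap.metadata.namespace, cap.metadata.name, cap.capacity)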
def list_namespaced_csi_storage_capacity(self, namespace, **kwargs): # noqa: E501
"""list_namespaced_csi_storage_capacity # noqa: E501
list or watch objects of kind CSIStorageCapacity # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_csi_storage_capacity(namespace, async_req=True)
>>> result = thread.get()
:param namespace: object name and auth scope, such as for teams and projects (required)
:type namespace: str
:param pretty: If 'true', then the output is pretty printed.
:type pretty: str
:param allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
:type allow_watch_bookmarks: bool
:param _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:type _continue: str
:param field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:type field_selector: str
:param label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:type label_selector: str
:param limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:type limit: int
:param resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:type resource_version: str
:param resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:type resource_version_match: str
:param timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:type timeout_seconds: int
:param watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:type watch: bool
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: V1beta1CSIStorageCapacityList
"""
kwargs['_return_http_data_only'] = True
return self.list_namespaced_csi_storage_capacity_with_http_info(namespace, **kwargs) # noqa: E501
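# Hedged pagination sketch for the namespaced list (assumption: `api` is an instance of
# this class; `metadata._continue` is the generated attribute for the list `continue`
# token described in the docstring above):
#
#   batch = api.list_namespaced_csi_storage_capacity("default", limit=50)
#   items = list(batch.items)
#   while batch.metadata._continue:
#       batch = api.list_namespaced_csi_storage_capacity(
#           "default", limit=50, _continue=batch.metadata._continue)
#       items.extend(batch.items)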
def list_namespaced_csi_storage_capacity_with_http_info(self, namespace, **kwargs): # noqa: E501
"""list_namespaced_csi_storage_capacity # noqa: E501
list or watch objects of kind CSIStorageCapacity # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_csi_storage_capacity_with_http_info(namespace, async_req=True)
>>> result = thread.get()
:param namespace: object name and auth scope, such as for teams and projects (required)
:type namespace: str
:param pretty: If 'true', then the output is pretty printed.
:type pretty: str
:param allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
:type allow_watch_bookmarks: bool
:param _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:type _continue: str
:param field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:type field_selector: str
:param label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:type label_selector: str
:param limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:type limit: int
:param resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:type resource_version: str
:param resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:type resource_version_match: str
:param timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:type timeout_seconds: int
:param watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:type watch: bool
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: return the response data only, without the status code and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:param _content_type: force content-type for the request
:type _content_type: str, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(V1beta1CSIStorageCapacityList, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'namespace',
'pretty',
'allow_watch_bookmarks',
'_continue',
'field_selector',
'label_selector',
'limit',
'resource_version',
'resource_version_match',
'timeout_seconds',
'watch'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth',
'_content_type',
'_headers'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method list_namespaced_csi_storage_capacity" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `list_namespaced_csi_storage_capacity`") # noqa: E501
collection_formats = {}
path_params = {}
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
query_params.append(('watch', local_var_params['watch'])) # noqa: E501
header_params = dict(local_var_params.get('_headers', {}))
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
response_types_map = {
200: "V1beta1CSIStorageCapacityList",
401: None,
}
return self.api_client.call_api(
'/apis/storage.k8s.io/v1beta1/namespaces/{namespace}/csistoragecapacities', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_types_map=response_types_map,
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
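# Hedged watch sketch (assumption: kubernetes.watch.Watch from the upstream client is
# available; it drives the `watch`/`allow_watch_bookmarks` parameters documented above):
#
#   from kubernetes import watch
#   w = watch.Watch()
#   for event in w.stream(api.list_namespaced_csi_storage_capacity,
#                         namespace="default",
#                         allow_watch_bookmarks=True,
#                         timeout_seconds=30):
#       print(event["type"], event["object"].metadata.name)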
def patch_namespaced_csi_storage_capacity(self, name, namespace, body, **kwargs): # noqa: E501
"""patch_namespaced_csi_storage_capacity # noqa: E501
partially update the specified CSIStorageCapacity # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_csi_storage_capacity(name, namespace, body, async_req=True)
>>> result = thread.get()
:param name: name of the CSIStorageCapacity (required)
:type name: str
:param namespace: object name and auth scope, such as for teams and projects (required)
:type namespace: str
:param body: (required)
:type body: object
:param pretty: If 'true', then the output is pretty printed.
:type pretty: str
:param dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:type dry_run: str
:param field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be 128 characters or less, and may only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:type field_manager: str
:param field_validation: fieldValidation determines how the server should respond to unknown/duplicate fields in the object in the request. Introduced as alpha in 1.23, older servers or servers with the `ServerSideFieldValidation` feature disabled will discard valid values specified in this param and not perform any server side field validation. Valid values are: - Ignore: ignores unknown/duplicate fields. - Warn: responds with a warning for each unknown/duplicate field, but successfully serves the request. - Strict: fails the request on unknown/duplicate fields.
:type field_validation: str
:param force: Force is going to \"force\" Apply requests. It means the user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:type force: bool
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: V1beta1CSIStorageCapacity
"""
kwargs['_return_http_data_only'] = True
return self.patch_namespaced_csi_storage_capacity_with_http_info(name, namespace, body, **kwargs) # noqa: E501
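# Hedged patch sketch (assumption: a merge-style patch expressed as a plain dict; the
# label key is illustrative, and the client's default content-type selection applies):
#
#   patch_body = {"metadata": {"labels": {"example.com/tier": "fast"}}}
#   cap = api.patch_namespaced_csi_storage_capacity(
#       "example-capacity", "default", patch_body)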
def patch_namespaced_csi_storage_capacity_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
"""patch_namespaced_csi_storage_capacity # noqa: E501
partially update the specified CSIStorageCapacity # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_csi_storage_capacity_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param name: name of the CSIStorageCapacity (required)
:type name: str
:param namespace: object name and auth scope, such as for teams and projects (required)
:type namespace: str
:param body: (required)
:type body: object
:param pretty: If 'true', then the output is pretty printed.
:type pretty: str
:param dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:type dry_run: str
:param field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be 128 characters or less, and may only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:type field_manager: str
:param field_validation: fieldValidation determines how the server should respond to unknown/duplicate fields in the object in the request. Introduced as alpha in 1.23, older servers or servers with the `ServerSideFieldValidation` feature disabled will discard valid values specified in this param and not perform any server side field validation. Valid values are: - Ignore: ignores unknown/duplicate fields. - Warn: responds with a warning for each unknown/duplicate field, but successfully serves the request. - Strict: fails the request on unknown/duplicate fields.
:type field_validation: str
:param force: Force is going to \"force\" Apply requests. It means the user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:type force: bool
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: return the response data only, without the status code and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:param _content_type: force content-type for the request
:type _content_type: str, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(V1beta1CSIStorageCapacity, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'name',
'namespace',
'body',
'pretty',
'dry_run',
'field_manager',
'field_validation',
'force'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth',
'_content_type',
'_headers'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_namespaced_csi_storage_capacity" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_csi_storage_capacity`") # noqa: E501
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_csi_storage_capacity`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_csi_storage_capacity`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
query_params.append(('force', local_var_params['force'])) # noqa: E501
header_params = dict(local_var_params.get('_headers', {}))
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = local_var_params.get('_content_type',
self.api_client.select_header_content_type(
['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml'],
'PATCH', body_params)) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
response_types_map = {
200: "V1beta1CSIStorageCapacity",
201: "V1beta1CSIStorageCapacity",
401: None,
}
return self.api_client.call_api(
'/apis/storage.k8s.io/v1beta1/namespaces/{namespace}/csistoragecapacities/{name}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_types_map=response_types_map,
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
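# Hedged server-side apply sketch (assumption: forcing the content type via the
# `_content_type` parameter handled above; the manager name and object are illustrative):
#
#   apply_body = {
#       "apiVersion": "storage.k8s.io/v1beta1",
#       "kind": "CSIStorageCapacity",
#       "metadata": {"name": "example-capacity"},
#       "storageClassName": "fast",
#   }
#   cap = api.patch_namespaced_csi_storage_capacity(
#       "example-capacity", "default", apply_body,
#       field_manager="example-manager", force=True,
#       _content_type="application/apply-patch+yaml")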
def read_namespaced_csi_storage_capacity(self, name, namespace, **kwargs): # noqa: E501
"""read_namespaced_csi_storage_capacity # noqa: E501
read the specified CSIStorageCapacity # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_csi_storage_capacity(name, namespace, async_req=True)
>>> result = thread.get()
:param name: name of the CSIStorageCapacity (required)
:type name: str
:param namespace: object name and auth scope, such as for teams and projects (required)
:type namespace: str
:param pretty: If 'true', then the output is pretty printed.
:type pretty: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: V1beta1CSIStorageCapacity
"""
kwargs['_return_http_data_only'] = True
return self.read_namespaced_csi_storage_capacity_with_http_info(name, namespace, **kwargs) # noqa: E501
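# Hedged read sketch (object and namespace names are illustrative; attribute names are
# assumed to follow the generated V1beta1CSIStorageCapacity model):
#
#   cap = api.read_namespaced_csi_storage_capacity("example-capacity", "default")
#   print(cap.storage_class_name, cap.capacity)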
def read_namespaced_csi_storage_capacity_with_http_info(self, name, namespace, **kwargs): # noqa: E501
"""read_namespaced_csi_storage_capacity # noqa: E501
read the specified CSIStorageCapacity # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_csi_storage_capacity_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param name: name of the CSIStorageCapacity (required)
:type name: str
:param namespace: object name and auth scope, such as for teams and projects (required)
:type namespace: str
:param pretty: If 'true', then the output is pretty printed.
:type pretty: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: return the response data only, without the status code and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:param _content_type: force content-type for the request
:type _content_type: str, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(V1beta1CSIStorageCapacity, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'name',
'namespace',
'pretty'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth',
'_content_type',
'_headers'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method read_namespaced_csi_storage_capacity" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_csi_storage_capacity`") # noqa: E501
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_csi_storage_capacity`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
header_params = dict(local_var_params.get('_headers', {}))
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
response_types_map = {
200: "V1beta1CSIStorageCapacity",
401: None,
}
return self.api_client.call_api(
'/apis/storage.k8s.io/v1beta1/namespaces/{namespace}/csistoragecapacities/{name}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_types_map=response_types_map,
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
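# Hedged async sketch for the *_with_http_info variant above (the tuple unpacking
# mirrors the return type documented in its docstring):
#
#   thread = api.read_namespaced_csi_storage_capacity_with_http_info(
#       "example-capacity", "default", async_req=True)
#   data, status, headers = thread.get()
#   assert status == 200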
def replace_namespaced_csi_storage_capacity(self, name, namespace, body, **kwargs): # noqa: E501
"""replace_namespaced_csi_storage_capacity # noqa: E501
replace the specified CSIStorageCapacity # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_csi_storage_capacity(name, namespace, body, async_req=True)
>>> result = thread.get()
:param name: name of the CSIStorageCapacity (required)
:type name: str
:param namespace: object name and auth scope, such as for teams and projects (required)
:type namespace: str
:param body: (required)
:type body: V1beta1CSIStorageCapacity
:param pretty: If 'true', then the output is pretty printed.
:type pretty: str
:param dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:type dry_run: str
:param field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be 128 characters or less, and may only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:type field_manager: str
:param field_validation: fieldValidation determines how the server should respond to unknown/duplicate fields in the object in the request. Introduced as alpha in 1.23, older servers or servers with the `ServerSideFieldValidation` feature disabled will discard valid values specified in this param and not perform any server side field validation. Valid values are: - Ignore: ignores unknown/duplicate fields. - Warn: responds with a warning for each unknown/duplicate field, but successfully serves the request. - Strict: fails the request on unknown/duplicate fields.
:type field_validation: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: V1beta1CSIStorageCapacity
"""
kwargs['_return_http_data_only'] = True
return self.replace_namespaced_csi_storage_capacity_with_http_info(name, namespace, body, **kwargs) # noqa: E501
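# Hedged read-modify-replace sketch (assumption: the object's metadata.resource_version
# is carried through from the read, so the server can reject the PUT on a conflicting
# concurrent update):
#
#   current = api.read_namespaced_csi_storage_capacity("example-capacity", "default")
#   current.capacity = "100Gi"
#   updated = api.replace_namespaced_csi_storage_capacity(
#       "example-capacity", "default", current)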
def replace_namespaced_csi_storage_capacity_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
"""replace_namespaced_csi_storage_capacity # noqa: E501
replace the specified CSIStorageCapacity # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_csi_storage_capacity_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param name: name of the CSIStorageCapacity (required)
:type name: str
:param namespace: object name and auth scope, such as for teams and projects (required)
:type namespace: str
:param body: (required)
:type body: V1beta1CSIStorageCapacity
:param pretty: If 'true', then the output is pretty printed.
:type pretty: str
:param dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:type dry_run: str
:param field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be 128 characters or less, and may only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:type field_manager: str
:param field_validation: fieldValidation determines how the server should respond to unknown/duplicate fields in the object in the request. Introduced as alpha in 1.23, older servers or servers with the `ServerSideFieldValidation` feature disabled will discard valid values specified in this param and not perform any server side field validation. Valid values are: - Ignore: ignores unknown/duplicate fields. - Warn: responds with a warning for each unknown/duplicate field, but successfully serves the request. - Strict: fails the request on unknown/duplicate fields.
:type field_validation: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: return the response data only, without the status code and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:param _content_type: force content-type for the request
:type _content_type: str, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(V1beta1CSIStorageCapacity, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'name',
'namespace',
'body',
'pretty',
'dry_run',
'field_manager',
'field_validation'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth',
'_content_type',
'_headers'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_namespaced_csi_storage_capacity" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_csi_storage_capacity`") # noqa: E501
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_csi_storage_capacity`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_csi_storage_capacity`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
header_params = dict(local_var_params.get('_headers', {}))
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
response_types_map = {
200: "V1beta1CSIStorageCapacity",
201: "V1beta1CSIStorageCapacity",
401: None,
}
return self.api_client.call_api(
'/apis/storage.k8s.io/v1beta1/namespaces/{namespace}/csistoragecapacities/{name}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_types_map=response_types_map,
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
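# Hedged dry-run sketch (assumption: server-side dry run, so nothing is persisted while
# the normal validation and defaulting still run):
#
#   body = api.read_namespaced_csi_storage_capacity("example-capacity", "default")
#   preview = api.replace_namespaced_csi_storage_capacity(
#       "example-capacity", "default", body, dry_run="All")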
| 67.933372 | 1,386 | 0.674891 |
5d5a404676cba185d09014a8a355dca50e004c9c | 6,713 | py | Python | camkes/parser/tests/teststage2.py | aisamanra/camkes-tool | 4bcf3f22ef7e73f8755ca1b5e7165dd6a23e89f3 | ["BSD-2-Clause"] | null | null | null | camkes/parser/tests/teststage2.py | aisamanra/camkes-tool | 4bcf3f22ef7e73f8755ca1b5e7165dd6a23e89f3 | ["BSD-2-Clause"] | null | null | null | camkes/parser/tests/teststage2.py | aisamanra/camkes-tool | 4bcf3f22ef7e73f8755ca1b5e7165dd6a23e89f3 | ["BSD-2-Clause"] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2017, Data61
# Commonwealth Scientific and Industrial Research Organisation (CSIRO)
# ABN 41 687 119 230.
#
# This software may be distributed and modified according to the terms of
# the BSD 2-Clause license. Note that NO WARRANTY is provided.
# See "LICENSE_BSD2.txt" for details.
#
# @TAG(DATA61_BSD)
#
from __future__ import absolute_import, division, print_function, \
unicode_literals
import os, sys, unittest
ME = os.path.abspath(__file__)
# Make CAmkES importable
sys.path.append(os.path.join(os.path.dirname(ME), '../../..'))
from camkes.internal.tests.utils import CAmkESTest, cpp_available
from camkes.parser.stage0 import CPP, Reader
from camkes.parser.stage1 import Parse1
from camkes.parser.stage2 import Parse2
class TestStage2(CAmkESTest):
def setUp(self):
super(TestStage2, self).setUp()
r = Reader()
s1 = Parse1(r)
self.parser = Parse2(s1)
r = CPP()
s1 = Parse1(r)
self.cpp_parser = Parse2(s1)
def test_empty_string(self):
content, read = self.parser.parse_string('')
self.assertEqual(content, [])
self.assertLen(read, 0)
def test_basic_entity(self):
content, read = self.parser.parse_string('component foo {}')
self.assertLen(content, 1)
self.assertEqual(content[0][0], 'component foo {}')
self.assertIsNone(content[0][1])
comp = content[0][2]
self.assertEqual(comp.head, 'component_decl')
self.assertLen(comp.tail, 2)
self.assertEqual(comp.tail[0].head, 'id')
self.assertLen(comp.tail[0].tail, 1)
self.assertEqual(comp.tail[0].tail[0], 'foo')
self.assertEqual(comp.tail[1].head, 'component_defn')
self.assertLen(comp.tail[1].tail, 0)
self.assertLen(read, 0)
def test_malformed(self):
with self.assertRaises(Exception):
self.parser.parse_string('hello world')
def test_unicode(self):
content, read = self.parser.parse_string('component foó {}')
self.assertLen(content, 1)
self.assertEqual(content[0][0], 'component foó {}')
self.assertIsNone(content[0][1])
comp = content[0][2]
self.assertEqual(comp.head, 'component_decl')
self.assertLen(comp.tail, 2)
self.assertEqual(comp.tail[0].head, 'id')
self.assertLen(comp.tail[0].tail, 1)
self.assertEqual(comp.tail[0].tail[0], 'foó')
self.assertEqual(comp.tail[1].head, 'component_defn')
self.assertLen(comp.tail[1].tail, 0)
self.assertLen(read, 0)
def test_from_file(self):
tmp = self.mkstemp()
with open(tmp, 'wt') as f:
f.write('component foo {}')
content, read = self.parser.parse_file(tmp)
self.assertLen(content, 1)
self.assertEqual(content[0][0], 'component foo {}')
self.assertEqual(content[0][1], tmp)
comp = content[0][2]
self.assertEqual(comp.head, 'component_decl')
self.assertLen(comp.tail, 2)
self.assertEqual(comp.tail[0].head, 'id')
self.assertLen(comp.tail[0].tail, 1)
self.assertEqual(comp.tail[0].tail[0], 'foo')
self.assertEqual(comp.tail[1].head, 'component_defn')
self.assertLen(comp.tail[1].tail, 0)
self.assertEqual(read, set([tmp]))
@unittest.skipIf(not cpp_available(), 'CPP not found')
def test_with_cpp(self):
parent = self.mkstemp()
child = self.mkstemp()
with open(parent, 'wt') as f:
f.write('component foo\n#include "%s"' % child)
with open(child, 'wt') as f:
f.write('{}')
content, read = self.cpp_parser.parse_file(parent)
self.assertLen(content, 1)
self.assertEqual(content[0][1], parent)
comp = content[0][2]
self.assertEqual(comp.head, 'component_decl')
self.assertLen(comp.tail, 2)
self.assertEqual(comp.tail[0].head, 'id')
self.assertLen(comp.tail[0].tail, 1)
self.assertEqual(comp.tail[0].tail[0], 'foo')
self.assertEqual(comp.tail[1].head, 'component_defn')
self.assertLen(comp.tail[1].tail, 0)
self.assertIn(parent, read)
self.assertIn(child, read)
def test_simple_spec_complete(self):
content, _ = self.parser.parse_string('''
procedure Hello {
void hello(void);
}
component Foo {
provides Hello h;
}
component Bar {
control;
uses Hello w;
}
assembly {
composition {
component Foo f;
component Bar b;
connection Conn conn(from Foo.h, to Bar.w);
}
}
''')
def test_self_import(self):
'''
The stage 2 parser should notice cycles in the import graph and
automatically terminate. This case validates a trivial cycle.
'''
input = self.mkstemp()
with open(input, 'wt') as f:
f.write('''
component Foo {}
import "%s";
''' % input)
content, read = self.parser.parse_file(input)
self.assertLen(content, 1)
Foo = content[0][2]
self.assertEqual(Foo.head, 'component_decl')
self.assertEqual(read, set([input]))
content, read = self.cpp_parser.parse_file(input)
self.assertLen(content, 1)
Foo = content[0][2]
self.assertEqual(Foo.head, 'component_decl')
self.assertIn(input, read)
def test_cycle_import(self):
'''
Similar to the previous test, but a cycle involving multiple files.
'''
a = self.mkstemp()
b = self.mkstemp()
c = self.mkstemp()
with open(a, 'wt') as f:
f.write('''
component Foo {}
import "%s";
''' % b)
with open(b, 'wt') as f:
f.write('import "%s";' % c)
with open(c, 'wt') as f:
f.write('import "%s";' % a)
content, read = self.parser.parse_file(a)
self.assertLen(content, 1)
Foo = content[0][2]
self.assertEqual(Foo.head, 'component_decl')
self.assertEqual(read, set([a, b, c]))
content, read = self.cpp_parser.parse_file(a)
self.assertLen(content, 1)
Foo = content[0][2]
self.assertEqual(Foo.head, 'component_decl')
self.assertIn(a, read)
self.assertIn(b, read)
self.assertIn(c, read)
if __name__ == '__main__':
unittest.main()
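# Hedged standalone usage sketch (mirrors setUp above and assumes the same package
# layout, with CAmkES importable):
#
#   from camkes.parser.stage0 import Reader
#   from camkes.parser.stage1 import Parse1
#   from camkes.parser.stage2 import Parse2
#   parser = Parse2(Parse1(Reader()))
#   content, read = parser.parse_string('component foo {}')
#   assert content[0][2].head == 'component_decl'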
| 29.70354 | 75 | 0.577536 |
2dd2fe7cf221a4a4a707eb2821a3f20c3c46c736 | 20,920 | py | Python | fpga/mqnic/VCU1525/fpga_100g/tb/fpga_core/test_fpga_core.py | andrewray/corundum | 7c8abe261b2ec3e653da7bc881f769668a231bde | ["BSD-2-Clause-FreeBSD"] | null | null | null | fpga/mqnic/VCU1525/fpga_100g/tb/fpga_core/test_fpga_core.py | andrewray/corundum | 7c8abe261b2ec3e653da7bc881f769668a231bde | ["BSD-2-Clause-FreeBSD"] | null | null | null | fpga/mqnic/VCU1525/fpga_100g/tb/fpga_core/test_fpga_core.py | andrewray/corundum | 7c8abe261b2ec3e653da7bc881f769668a231bde | ["BSD-2-Clause-FreeBSD"] | null | null | null |
"""
Copyright 2020, The Regents of the University of California.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE REGENTS OF THE UNIVERSITY OF CALIFORNIA ''AS
IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OF THE UNIVERSITY OF CALIFORNIA OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of The Regents of the University of California.
"""
import logging
import os
import sys
import scapy.utils
from scapy.layers.l2 import Ether
from scapy.layers.inet import IP, UDP
import cocotb_test.simulator
import cocotb
from cocotb.log import SimLog
from cocotb.clock import Clock
from cocotb.triggers import RisingEdge, FallingEdge, Timer
from cocotbext.pcie.core import RootComplex
from cocotbext.pcie.xilinx.us import UltraScalePlusPcieDevice
from cocotbext.axi import AxiStreamSource, AxiStreamSink
try:
import mqnic
except ImportError:
# attempt import from current directory
sys.path.insert(0, os.path.join(os.path.dirname(__file__)))
try:
import mqnic
finally:
del sys.path[0]
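# Note on the fallback above: the test directory is pushed onto the front of sys.path
# only for this lookup, and the `finally` block removes it again whether or not the
# import succeeds, so the temporary search-path entry does not leak into later imports.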
class TB(object):
def __init__(self, dut):
self.dut = dut
self.BAR0_APERTURE = int(os.getenv("PARAM_BAR0_APERTURE"))
self.log = SimLog("cocotb.tb")
self.log.setLevel(logging.DEBUG)
# PCIe
self.rc = RootComplex()
self.rc.max_payload_size = 0x1 # 256 bytes
self.rc.max_read_request_size = 0x2 # 512 bytes
self.dev = UltraScalePlusPcieDevice(
# configuration options
pcie_generation=3,
pcie_link_width=16,
user_clk_frequency=250e6,
alignment="dword",
cq_cc_straddle=False,
rq_rc_straddle=False,
rc_4tlp_straddle=False,
enable_pf1=False,
enable_client_tag=True,
enable_extended_tag=True,
enable_parity=False,
enable_rx_msg_interface=False,
enable_sriov=False,
enable_extended_configuration=False,
enable_pf0_msi=True,
enable_pf1_msi=False,
# signals
# Clock and Reset Interface
user_clk=dut.clk_250mhz,
user_reset=dut.rst_250mhz,
# user_lnk_up
# sys_clk
# sys_clk_gt
# sys_reset
# phy_rdy_out
# Requester reQuest Interface
rq_entity=dut,
rq_name="m_axis_rq",
pcie_rq_seq_num0=dut.s_axis_rq_seq_num_0,
pcie_rq_seq_num_vld0=dut.s_axis_rq_seq_num_valid_0,
pcie_rq_seq_num1=dut.s_axis_rq_seq_num_1,
pcie_rq_seq_num_vld1=dut.s_axis_rq_seq_num_valid_1,
# pcie_rq_tag0
# pcie_rq_tag1
# pcie_rq_tag_av
# pcie_rq_tag_vld0
# pcie_rq_tag_vld1
# Requester Completion Interface
rc_entity=dut,
rc_name="s_axis_rc",
# Completer reQuest Interface
cq_entity=dut,
cq_name="s_axis_cq",
# pcie_cq_np_req
# pcie_cq_np_req_count
# Completer Completion Interface
cc_entity=dut,
cc_name="m_axis_cc",
# Transmit Flow Control Interface
# pcie_tfc_nph_av=dut.pcie_tfc_nph_av,
# pcie_tfc_npd_av=dut.pcie_tfc_npd_av,
# Configuration Management Interface
cfg_mgmt_addr=dut.cfg_mgmt_addr,
cfg_mgmt_function_number=dut.cfg_mgmt_function_number,
cfg_mgmt_write=dut.cfg_mgmt_write,
cfg_mgmt_write_data=dut.cfg_mgmt_write_data,
cfg_mgmt_byte_enable=dut.cfg_mgmt_byte_enable,
cfg_mgmt_read=dut.cfg_mgmt_read,
cfg_mgmt_read_data=dut.cfg_mgmt_read_data,
cfg_mgmt_read_write_done=dut.cfg_mgmt_read_write_done,
# cfg_mgmt_debug_access
# Configuration Status Interface
# cfg_phy_link_down
# cfg_phy_link_status
# cfg_negotiated_width
# cfg_current_speed
cfg_max_payload=dut.cfg_max_payload,
cfg_max_read_req=dut.cfg_max_read_req,
# cfg_function_status
# cfg_vf_status
# cfg_function_power_state
# cfg_vf_power_state
# cfg_link_power_state
# cfg_err_cor_out
# cfg_err_nonfatal_out
# cfg_err_fatal_out
# cfg_local_error_out
# cfg_local_error_valid
# cfg_rx_pm_state
# cfg_tx_pm_state
# cfg_ltssm_state
# cfg_rcb_status
# cfg_obff_enable
# cfg_pl_status_change
# cfg_tph_requester_enable
# cfg_tph_st_mode
# cfg_vf_tph_requester_enable
# cfg_vf_tph_st_mode
# Configuration Received Message Interface
# cfg_msg_received
# cfg_msg_received_data
# cfg_msg_received_type
# Configuration Transmit Message Interface
# cfg_msg_transmit
# cfg_msg_transmit_type
# cfg_msg_transmit_data
# cfg_msg_transmit_done
# Configuration Flow Control Interface
cfg_fc_ph=dut.cfg_fc_ph,
cfg_fc_pd=dut.cfg_fc_pd,
cfg_fc_nph=dut.cfg_fc_nph,
cfg_fc_npd=dut.cfg_fc_npd,
cfg_fc_cplh=dut.cfg_fc_cplh,
cfg_fc_cpld=dut.cfg_fc_cpld,
cfg_fc_sel=dut.cfg_fc_sel,
# Configuration Control Interface
# cfg_hot_reset_in
# cfg_hot_reset_out
# cfg_config_space_enable
# cfg_dsn
# cfg_bus_number
# cfg_ds_port_number
# cfg_ds_bus_number
# cfg_ds_device_number
# cfg_ds_function_number
# cfg_power_state_change_ack
# cfg_power_state_change_interrupt
cfg_err_cor_in=dut.status_error_cor,
cfg_err_uncor_in=dut.status_error_uncor,
# cfg_flr_in_process
# cfg_flr_done
# cfg_vf_flr_in_process
# cfg_vf_flr_func_num
# cfg_vf_flr_done
# cfg_pm_aspm_l1_entry_reject
# cfg_pm_aspm_tx_l0s_entry_disable
# cfg_req_pm_transition_l23_ready
# cfg_link_training_enable
# Configuration Interrupt Controller Interface
# cfg_interrupt_int
# cfg_interrupt_sent
# cfg_interrupt_pending
cfg_interrupt_msi_enable=dut.cfg_interrupt_msi_enable,
cfg_interrupt_msi_mmenable=dut.cfg_interrupt_msi_mmenable,
cfg_interrupt_msi_mask_update=dut.cfg_interrupt_msi_mask_update,
cfg_interrupt_msi_data=dut.cfg_interrupt_msi_data,
# cfg_interrupt_msi_select=dut.cfg_interrupt_msi_select,
cfg_interrupt_msi_int=dut.cfg_interrupt_msi_int,
cfg_interrupt_msi_pending_status=dut.cfg_interrupt_msi_pending_status,
cfg_interrupt_msi_pending_status_data_enable=dut.cfg_interrupt_msi_pending_status_data_enable,
# cfg_interrupt_msi_pending_status_function_num=dut.cfg_interrupt_msi_pending_status_function_num,
cfg_interrupt_msi_sent=dut.cfg_interrupt_msi_sent,
cfg_interrupt_msi_fail=dut.cfg_interrupt_msi_fail,
# cfg_interrupt_msix_enable
# cfg_interrupt_msix_mask
# cfg_interrupt_msix_vf_enable
# cfg_interrupt_msix_vf_mask
# cfg_interrupt_msix_address
# cfg_interrupt_msix_data
# cfg_interrupt_msix_int
# cfg_interrupt_msix_vec_pending
# cfg_interrupt_msix_vec_pending_status
cfg_interrupt_msi_attr=dut.cfg_interrupt_msi_attr,
cfg_interrupt_msi_tph_present=dut.cfg_interrupt_msi_tph_present,
cfg_interrupt_msi_tph_type=dut.cfg_interrupt_msi_tph_type,
# cfg_interrupt_msi_tph_st_tag=dut.cfg_interrupt_msi_tph_st_tag,
# cfg_interrupt_msi_function_number=dut.cfg_interrupt_msi_function_number,
# Configuration Extend Interface
# cfg_ext_read_received
# cfg_ext_write_received
# cfg_ext_register_number
# cfg_ext_function_number
# cfg_ext_write_data
# cfg_ext_write_byte_enable
# cfg_ext_read_data
# cfg_ext_read_data_valid
)
# self.dev.log.setLevel(logging.DEBUG)
self.rc.make_port().connect(self.dev)
self.driver = mqnic.Driver(self.rc)
self.dev.functions[0].msi_multiple_message_capable = 5
self.dev.functions[0].configure_bar(0, 2**self.BAR0_APERTURE, ext=True, prefetch=True)
# Ethernet
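        # 3.102 ns period is roughly 322 MHz, the MAC-side clock assumed for the
        # 512-bit 100G AXI-stream interfaces (CMAC user clock); the exact value is
        # an assumption of this testbench, not taken from the RTL.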
cocotb.fork(Clock(dut.qsfp0_rx_clk, 3.102, units="ns").start())
self.qsfp0_source = AxiStreamSource(dut, "qsfp0_rx_axis", dut.qsfp0_rx_clk, dut.qsfp0_rx_rst)
cocotb.fork(Clock(dut.qsfp0_tx_clk, 3.102, units="ns").start())
self.qsfp0_sink = AxiStreamSink(dut, "qsfp0_tx_axis", dut.qsfp0_tx_clk, dut.qsfp0_tx_rst)
cocotb.fork(Clock(dut.qsfp1_rx_clk, 3.102, units="ns").start())
self.qsfp1_source = AxiStreamSource(dut, "qsfp1_rx_axis", dut.qsfp1_rx_clk, dut.qsfp1_rx_rst)
cocotb.fork(Clock(dut.qsfp1_tx_clk, 3.102, units="ns").start())
self.qsfp1_sink = AxiStreamSink(dut, "qsfp1_tx_axis", dut.qsfp1_tx_clk, dut.qsfp1_tx_rst)
dut.sw.setimmediatevalue(0)
dut.i2c_scl_i.setimmediatevalue(1)
dut.i2c_sda_i.setimmediatevalue(1)
dut.qsfp0_modprsl.setimmediatevalue(0)
dut.qsfp0_intl.setimmediatevalue(1)
dut.qsfp1_modprsl.setimmediatevalue(0)
dut.qsfp1_intl.setimmediatevalue(1)
dut.qspi_dq_i.setimmediatevalue(0)
self.loopback_enable = False
cocotb.fork(self._run_loopback())
async def init(self):
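        # Hold the QSFP-side resets asserted until the PCIe user reset
        # (rst_250mhz) deasserts, then release them and enumerate the bus with
        # bus mastering and MSI enabled.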
self.dut.qsfp0_rx_rst.setimmediatevalue(0)
self.dut.qsfp0_tx_rst.setimmediatevalue(0)
self.dut.qsfp1_rx_rst.setimmediatevalue(0)
self.dut.qsfp1_tx_rst.setimmediatevalue(0)
await RisingEdge(self.dut.clk_250mhz)
await RisingEdge(self.dut.clk_250mhz)
self.dut.qsfp0_rx_rst.setimmediatevalue(1)
self.dut.qsfp0_tx_rst.setimmediatevalue(1)
self.dut.qsfp1_rx_rst.setimmediatevalue(1)
self.dut.qsfp1_tx_rst.setimmediatevalue(1)
await FallingEdge(self.dut.rst_250mhz)
await Timer(100, 'ns')
await RisingEdge(self.dut.clk_250mhz)
await RisingEdge(self.dut.clk_250mhz)
self.dut.qsfp0_rx_rst.setimmediatevalue(0)
self.dut.qsfp0_tx_rst.setimmediatevalue(0)
self.dut.qsfp1_rx_rst.setimmediatevalue(0)
self.dut.qsfp1_tx_rst.setimmediatevalue(0)
await self.rc.enumerate(enable_bus_mastering=True, configure_msi=True)
async def _run_loopback(self):
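        # Background task: while loopback_enable is set, echo frames from each
        # QSFP TX AXI-stream sink back into the matching RX source so
        # host-to-host loopback tests can run without external stimulus.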
while True:
await RisingEdge(self.dut.clk_250mhz)
if self.loopback_enable:
if not self.qsfp0_sink.empty():
await self.qsfp0_source.send(await self.qsfp0_sink.recv())
if not self.qsfp1_sink.empty():
await self.qsfp1_source.send(await self.qsfp1_sink.recv())
@cocotb.test()
async def run_test_nic(dut):
tb = TB(dut)
await tb.init()
tb.log.info("Init driver")
await tb.driver.init_dev(tb.dev.functions[0].pcie_id)
await tb.driver.interfaces[0].open()
# await driver.interfaces[1].open()
# enable queues
tb.log.info("Enable queues")
await tb.rc.mem_write_dword(tb.driver.interfaces[0].ports[0].hw_addr+mqnic.MQNIC_PORT_REG_SCHED_ENABLE, 0x00000001)
for k in range(tb.driver.interfaces[0].tx_queue_count):
await tb.rc.mem_write_dword(tb.driver.interfaces[0].ports[0].schedulers[0].hw_addr+4*k, 0x00000003)
# wait for all writes to complete
await tb.rc.mem_read(tb.driver.hw_addr, 4)
tb.log.info("Init complete")
tb.log.info("Send and receive single packet")
data = bytearray([x % 256 for x in range(1024)])
await tb.driver.interfaces[0].start_xmit(data, 0)
pkt = await tb.qsfp0_sink.recv()
tb.log.info("Packet: %s", pkt)
await tb.qsfp0_source.send(pkt)
pkt = await tb.driver.interfaces[0].recv()
tb.log.info("Packet: %s", pkt)
assert pkt.rx_checksum == ~scapy.utils.checksum(bytes(pkt.data[14:])) & 0xffff
# await tb.driver.interfaces[1].start_xmit(data, 0)
# pkt = await tb.qsfp1_0_sink.recv()
# tb.log.info("Packet: %s", pkt)
# await tb.qsfp1_0_source.send(pkt)
# pkt = await tb.driver.interfaces[1].recv()
# tb.log.info("Packet: %s", pkt)
# assert pkt.rx_checksum == ~scapy.utils.checksum(bytes(pkt.data[14:])) & 0xffff
tb.log.info("RX and TX checksum tests")
payload = bytes([x % 256 for x in range(256)])
eth = Ether(src='5A:51:52:53:54:55', dst='DA:D1:D2:D3:D4:D5')
ip = IP(src='192.168.1.100', dst='192.168.1.101')
udp = UDP(sport=1, dport=2)
test_pkt = eth / ip / udp / payload
test_pkt2 = test_pkt.copy()
test_pkt2[UDP].chksum = scapy.utils.checksum(bytes(test_pkt2[UDP]))
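    # TX checksum offload arguments (assumed mqnic driver semantics): start
    # summing at byte 34 (after the 14-byte Ethernet + 20-byte IPv4 headers) and
    # insert the result at offset 6, the UDP checksum field.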
await tb.driver.interfaces[0].start_xmit(test_pkt2.build(), 0, 34, 6)
pkt = await tb.qsfp0_sink.recv()
tb.log.info("Packet: %s", pkt)
await tb.qsfp0_source.send(pkt)
pkt = await tb.driver.interfaces[0].recv()
tb.log.info("Packet: %s", pkt)
assert pkt.rx_checksum == ~scapy.utils.checksum(bytes(pkt.data[14:])) & 0xffff
assert Ether(pkt.data).build() == test_pkt.build()
tb.log.info("Multiple small packets")
count = 64
pkts = [bytearray([(x+k) % 256 for x in range(60)]) for k in range(count)]
tb.loopback_enable = True
for p in pkts:
await tb.driver.interfaces[0].start_xmit(p, 0)
for k in range(count):
pkt = await tb.driver.interfaces[0].recv()
tb.log.info("Packet: %s", pkt)
assert pkt.data == pkts[k]
assert pkt.rx_checksum == ~scapy.utils.checksum(bytes(pkt.data[14:])) & 0xffff
tb.loopback_enable = False
tb.log.info("Multiple large packets")
count = 64
pkts = [bytearray([(x+k) % 256 for x in range(1514)]) for k in range(count)]
tb.loopback_enable = True
for p in pkts:
await tb.driver.interfaces[0].start_xmit(p, 0)
for k in range(count):
pkt = await tb.driver.interfaces[0].recv()
tb.log.info("Packet: %s", pkt)
assert pkt.data == pkts[k]
assert pkt.rx_checksum == ~scapy.utils.checksum(bytes(pkt.data[14:])) & 0xffff
tb.loopback_enable = False
tb.log.info("Jumbo frames")
count = 64
pkts = [bytearray([(x+k) % 256 for x in range(9014)]) for k in range(count)]
tb.loopback_enable = True
for p in pkts:
await tb.driver.interfaces[0].start_xmit(p, 0)
for k in range(count):
pkt = await tb.driver.interfaces[0].recv()
tb.log.info("Packet: %s", pkt)
assert pkt.data == pkts[k]
assert pkt.rx_checksum == ~scapy.utils.checksum(bytes(pkt.data[14:])) & 0xffff
tb.loopback_enable = False
await RisingEdge(dut.clk_250mhz)
await RisingEdge(dut.clk_250mhz)
# cocotb-test
tests_dir = os.path.dirname(__file__)
rtl_dir = os.path.abspath(os.path.join(tests_dir, '..', '..', 'rtl'))
lib_dir = os.path.abspath(os.path.join(rtl_dir, '..', 'lib'))
axi_rtl_dir = os.path.abspath(os.path.join(lib_dir, 'axi', 'rtl'))
axis_rtl_dir = os.path.abspath(os.path.join(lib_dir, 'axis', 'rtl'))
eth_rtl_dir = os.path.abspath(os.path.join(lib_dir, 'eth', 'rtl'))
pcie_rtl_dir = os.path.abspath(os.path.join(lib_dir, 'pcie', 'rtl'))
def test_fpga_core(request):
dut = "fpga_core"
module = os.path.splitext(os.path.basename(__file__))[0]
toplevel = dut
verilog_sources = [
os.path.join(rtl_dir, f"{dut}.v"),
os.path.join(rtl_dir, "common", "mqnic_interface.v"),
os.path.join(rtl_dir, "common", "mqnic_port.v"),
os.path.join(rtl_dir, "common", "cpl_write.v"),
os.path.join(rtl_dir, "common", "cpl_op_mux.v"),
os.path.join(rtl_dir, "common", "desc_fetch.v"),
os.path.join(rtl_dir, "common", "desc_op_mux.v"),
os.path.join(rtl_dir, "common", "queue_manager.v"),
os.path.join(rtl_dir, "common", "cpl_queue_manager.v"),
os.path.join(rtl_dir, "common", "tx_engine.v"),
os.path.join(rtl_dir, "common", "rx_engine.v"),
os.path.join(rtl_dir, "common", "tx_checksum.v"),
os.path.join(rtl_dir, "common", "rx_hash.v"),
os.path.join(rtl_dir, "common", "rx_checksum.v"),
os.path.join(rtl_dir, "common", "tx_scheduler_rr.v"),
os.path.join(rtl_dir, "common", "event_mux.v"),
os.path.join(rtl_dir, "common", "tdma_scheduler.v"),
os.path.join(rtl_dir, "common", "tdma_ber.v"),
os.path.join(rtl_dir, "common", "tdma_ber_ch.v"),
os.path.join(eth_rtl_dir, "ptp_clock.v"),
os.path.join(eth_rtl_dir, "ptp_clock_cdc.v"),
os.path.join(eth_rtl_dir, "ptp_perout.v"),
os.path.join(eth_rtl_dir, "ptp_ts_extract.v"),
os.path.join(axi_rtl_dir, "axil_interconnect.v"),
os.path.join(axi_rtl_dir, "arbiter.v"),
os.path.join(axi_rtl_dir, "priority_encoder.v"),
os.path.join(axis_rtl_dir, "axis_adapter.v"),
os.path.join(axis_rtl_dir, "axis_arb_mux.v"),
os.path.join(axis_rtl_dir, "axis_async_fifo.v"),
os.path.join(axis_rtl_dir, "axis_async_fifo_adapter.v"),
os.path.join(axis_rtl_dir, "axis_fifo.v"),
os.path.join(axis_rtl_dir, "axis_register.v"),
os.path.join(axis_rtl_dir, "axis_pipeline_register.v"),
os.path.join(pcie_rtl_dir, "pcie_us_axil_master.v"),
os.path.join(pcie_rtl_dir, "dma_if_pcie_us.v"),
os.path.join(pcie_rtl_dir, "dma_if_pcie_us_rd.v"),
os.path.join(pcie_rtl_dir, "dma_if_pcie_us_wr.v"),
os.path.join(pcie_rtl_dir, "dma_if_mux.v"),
os.path.join(pcie_rtl_dir, "dma_if_mux_rd.v"),
os.path.join(pcie_rtl_dir, "dma_if_mux_wr.v"),
os.path.join(pcie_rtl_dir, "dma_psdpram.v"),
os.path.join(pcie_rtl_dir, "dma_client_axis_sink.v"),
os.path.join(pcie_rtl_dir, "dma_client_axis_source.v"),
os.path.join(pcie_rtl_dir, "pcie_us_cfg.v"),
os.path.join(pcie_rtl_dir, "pcie_us_msi.v"),
os.path.join(pcie_rtl_dir, "pcie_tag_manager.v"),
os.path.join(pcie_rtl_dir, "pulse_merge.v"),
]
parameters = {}
parameters['AXIS_PCIE_DATA_WIDTH'] = 512
parameters['AXIS_PCIE_KEEP_WIDTH'] = parameters['AXIS_PCIE_DATA_WIDTH'] // 32
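    # UltraScale+ PCIe sideband (tuser) widths: the straddle-capable 512-bit
    # interface uses the wider RQ/RC/CQ/CC user widths (values assumed from the
    # Xilinx integrated block documentation).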
parameters['AXIS_PCIE_RQ_USER_WIDTH'] = 62 if parameters['AXIS_PCIE_DATA_WIDTH'] < 512 else 137
parameters['AXIS_PCIE_RC_USER_WIDTH'] = 75 if parameters['AXIS_PCIE_DATA_WIDTH'] < 512 else 161
parameters['AXIS_PCIE_CQ_USER_WIDTH'] = 88 if parameters['AXIS_PCIE_DATA_WIDTH'] < 512 else 183
parameters['AXIS_PCIE_CC_USER_WIDTH'] = 33 if parameters['AXIS_PCIE_DATA_WIDTH'] < 512 else 81
parameters['RQ_SEQ_NUM_WIDTH'] = 6
parameters['BAR0_APERTURE'] = 24
parameters['AXIS_ETH_DATA_WIDTH'] = 512
parameters['AXIS_ETH_KEEP_WIDTH'] = parameters['AXIS_ETH_DATA_WIDTH'] // 8
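    # Expose the HDL parameters to the cocotb testbench as PARAM_* environment
    # variables (TB.__init__ reads PARAM_BAR0_APERTURE).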
extra_env = {f'PARAM_{k}': str(v) for k, v in parameters.items()}
sim_build = os.path.join(tests_dir, "sim_build",
request.node.name.replace('[', '-').replace(']', ''))
cocotb_test.simulator.run(
python_search=[tests_dir],
verilog_sources=verilog_sources,
toplevel=toplevel,
module=module,
parameters=parameters,
sim_build=sim_build,
extra_env=extra_env,
)
| 36.573427
| 119
| 0.658748
|
a1b700f63466be0a937d11bac0094c3ab791cfa9
| 1,489
|
py
|
Python
|
test/mac/gyptest-identical-name.py
|
kaadam/GYP
|
86ab71caad4640dcc3ec572c2118a1ce3f24150f
|
[
"BSD-3-Clause"
] | 11
|
2019-07-30T14:15:13.000Z
|
2020-08-17T10:03:46.000Z
|
test/mac/gyptest-identical-name.py
|
kaadam/GYP
|
86ab71caad4640dcc3ec572c2118a1ce3f24150f
|
[
"BSD-3-Clause"
] | 25
|
2018-10-18T15:39:07.000Z
|
2019-07-03T00:40:01.000Z
|
test/mac/gyptest-identical-name.py
|
kaadam/GYP
|
86ab71caad4640dcc3ec572c2118a1ce3f24150f
|
[
"BSD-3-Clause"
] | 15
|
2018-10-18T14:40:18.000Z
|
2019-07-02T16:25:55.000Z
|
#!/usr/bin/env python
# Copyright (c) 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies libraries (in identical-names) are properly handled by Xcode.
The names for all libraries participating in this build are:
libtestlib.a - identical-name/testlib
libtestlib.a - identical-name/proxy/testlib
libproxy.a - identical-name/proxy
The first two libs produce a hash collision in Xcode when Gyp is executed,
because they have the same name and would be copied to the same directory with
Xcode default settings.
For this scenario to work, one needs to change the Xcode variables SYMROOT and
CONFIGURATION_BUILD_DIR. Setting these to per-lib-unique directories avoids
copying the libs into the same directory.
The test consists of two steps. The first one verifies that by setting both
vars, there is no hash collision anymore during Gyp execution and that the libs
can actually be built. The second one verifies that there is still a hash
collision if the vars are not set and thus the current behavior is preserved.
"""
import TestGyp
import sys
if sys.platform == 'darwin':
test = TestGyp.TestGyp(formats=['xcode'])
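  # Step one: test.gyp overrides SYMROOT and CONFIGURATION_BUILD_DIR per library,
  # so gyp runs without a hash collision and both testlib.a variants build.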
test.run_gyp('test.gyp', chdir='identical-name')
test.build('test.gyp', test.ALL, chdir='identical-name')
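  # Step two: test-should-fail.gyp leaves the variables at their defaults, so the
  # identically named libraries still collide and the project must not be generated.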
test.run_gyp('test-should-fail.gyp', chdir='identical-name')
test.built_file_must_not_exist('test-should-fail.xcodeproj')
test.pass_test()
| 34.627907
| 79
| 0.773674
|
3326d026692b5105d90e372569239a058dca4b01
| 7,514
|
py
|
Python
|
src/snovault/calculated.py
|
Lattice-Data/snovault
|
384f4a3bf3afcc4b825f98c80d18a5852d46ef5c
|
[
"MIT"
] | null | null | null |
src/snovault/calculated.py
|
Lattice-Data/snovault
|
384f4a3bf3afcc4b825f98c80d18a5852d46ef5c
|
[
"MIT"
] | null | null | null |
src/snovault/calculated.py
|
Lattice-Data/snovault
|
384f4a3bf3afcc4b825f98c80d18a5852d46ef5c
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
import venusian
from pyramid.decorator import reify
from pyramid.traversal import find_root
from types import MethodType
from .interfaces import (
CALCULATED_PROPERTIES,
CONNECTION,
)
def includeme(config):
config.registry[CALCULATED_PROPERTIES] = CalculatedProperties()
config.add_directive('add_calculated_property', add_calculated_property)
class ItemNamespace(object):
def __init__(self, context, request, defined=None, ns=None):
self.context = context
self.request = request
self._defined = defined or {}
if ns:
self.__dict__.update(ns)
self._results = {}
@reify
def _properties(self):
return self.context.__json__(self.request)
@reify
def root(self):
return find_root(self.context)
@reify
def registry(self):
return self.request.registry
def __getattr__(self, name):
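        # Attribute lookup order: 'define'd calculated properties first, then
        # stored item properties (resolving linked UUIDs to resource paths), then
        # reverse links; each resolved value is cached on the instance via setattr.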
context = self.context
request = self.request
conn = self.registry[CONNECTION]
if name in self._defined:
value = self._defined[name](self)
setattr(self, name, value)
return value
if name in self._properties:
value = self._properties[name]
if name in context.type_info.schema_links:
if isinstance(value, list):
value = [
request.resource_path(conn.get_by_uuid(v))
for v in value
]
else:
value = request.resource_path(conn.get_by_uuid(value))
setattr(self, name, value)
return value
if name in context.rev:
value = context.get_rev_links(name)
value = [
request.resource_path(conn.get_by_uuid(v))
for v in value
]
setattr(self, name, value)
return value
raise AttributeError(name)
def __call__(self, fn):
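        # Calling the namespace with a property function memoizes the result and
        # supplies arguments by matching the function's parameter names against
        # namespace attributes.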
try:
return self._results[fn]
except KeyError:
pass
if isinstance(fn, str):
result = self._results[fn] = getattr(self, fn, None)
return result
start = 1 if isinstance(fn, MethodType) else 0
# Not using inspect.getargspec as it is slow
args = fn.__code__.co_varnames[start:fn.__code__.co_argcount]
kw = {}
for name in args:
try:
kw[name] = getattr(self, name)
except AttributeError:
pass
result = self._results[fn] = fn(**kw)
return result
class CalculatedProperties(object):
def __init__(self):
self.category_cls_props = {}
def register_prop(self, fn, name, context, condition=None, schema=None,
attr=None, define=False, category='object'):
prop = CalculatedProperty(fn, name, attr, condition, schema, define)
cls_props = self.category_cls_props.setdefault(category, {})
cls_props.setdefault(context, {})[name] = prop
def props_for(self, context, category='object'):
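        # Walk the MRO from base classes to the most-derived class so subclasses
        # override inherited calculated properties.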
if isinstance(context, type):
cls = context
else:
cls = type(context)
props = {}
cls_props = self.category_cls_props.get(category, {})
for base in reversed(cls.mro()):
props.update(cls_props.get(base, {}))
return props
class CalculatedProperty(object):
condition_args = None
def __init__(self, fn, name, attr=None, condition=None, schema=None, define=False):
self.fn = fn
self.attr = attr
self.name = name
self.condition = condition
self.define = define
if schema is not None:
if 'default' in schema:
raise ValueError('schema may not specify default for calculated property')
if 'linkFrom' not in schema.get('items', {}):
schema = schema.copy()
schema['notSubmittable'] = True
self.schema = schema
def __call__(self, namespace):
if self.condition is not None:
if not namespace(self.condition):
return None
if self.attr:
fn = getattr(namespace.context, self.attr)
else:
fn = self.fn
return namespace(fn)
# Imperative configuration
def add_calculated_property(config, fn, name, context, condition=None, schema=None,
attr=None, define=False, category='object'):
calculated_properties = config.registry[CALCULATED_PROPERTIES]
config.action(
('calculated_property', context, category, name),
calculated_properties.register_prop,
(fn, name, context, condition, schema, attr, define, category),
)
# Declarative configuration
def calculated_property(**settings):
""" Register a calculated property
"""
def decorate(wrapped):
def callback(scanner, factory_name, factory):
if settings.get('context') is None:
settings['context'] = factory
if settings.get('name') is None:
settings['name'] = factory_name
scanner.config.add_calculated_property(wrapped, **settings)
info = venusian.attach(wrapped, callback, category='object')
if info.scope == 'class':
# if the decorator was attached to a method in a class, or
# otherwise executed at class scope, we need to set an
# 'attr' into the settings if one isn't already in there
if settings.get('attr') is None:
settings['attr'] = wrapped.__name__
if settings.get('name') is None:
settings['name'] = wrapped.__name__
elif settings.get('context') is None:
raise TypeError('must supply context type for function')
return wrapped
return decorate
def calculate_properties(context, request, ns=None, category='object'):
calculated_properties = request.registry[CALCULATED_PROPERTIES]
props = calculated_properties.props_for(context, category)
defined = {name: prop for name, prop in props.items() if prop.define}
if isinstance(context, type):
context = None
namespace = ItemNamespace(context, request, defined, ns)
return {
name: value
for name, value in (
(name, prop(namespace))
for name, prop in props.items()
) if value is not None
}
def _init_property_calculation(context, request, ns=None, category='object'):
calculated_properties = request.registry[CALCULATED_PROPERTIES]
props = calculated_properties.props_for(context, category)
defined = {name: prop for name, prop in props.items() if prop.define}
if isinstance(context, type):
context = None
namespace = ItemNamespace(context, request, defined, ns)
return namespace, props
def calculate_select_properties(context, request, ns=None, category='object', select_properties=None):
select_properties = select_properties or []
namespace, props = _init_property_calculation(context, request, ns=ns, category=category)
filtered_properties = (
(name, prop(namespace))
for name, prop in props.items()
if name in select_properties
)
select_calculated_properties = {
name: value
for name, value in filtered_properties
if value is not None
}
return select_calculated_properties
| 33.247788
| 102
| 0.614586
|
067b91965208ffe6821f27c2caea3814c35e8725
| 858
|
py
|
Python
|
problems/p001.py
|
davisschenk/project-euler-python
|
1375412e6c8199ab02250bd67223c758d4df1725
|
[
"MIT"
] | null | null | null |
problems/p001.py
|
davisschenk/project-euler-python
|
1375412e6c8199ab02250bd67223c758d4df1725
|
[
"MIT"
] | null | null | null |
problems/p001.py
|
davisschenk/project-euler-python
|
1375412e6c8199ab02250bd67223c758d4df1725
|
[
"MIT"
] | 2
|
2020-10-08T23:35:03.000Z
|
2020-10-09T00:28:36.000Z
|
from itertools import count, takewhile
from problem import Problem
class MultiplesOr(Problem, name="Multiples of 3 and 5", expected=233168):
@Problem.solution()
def summed_sums(self):
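        # Inclusion-exclusion: multiples of 15 are counted by both the 3- and
        # 5-sums, so subtract them once.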
return self.sum_divisible_by(3, 999) + self.sum_divisible_by(5, 999) - self.sum_divisible_by(15, 999)
@Problem.solution()
def brute_force(self):
return sum(self.multiple_generator(3, 5, max_n=1000))
@staticmethod
def multiple_of_any(n, multiples):
return any(n % i == 0 for i in multiples)
@classmethod
def multiple_generator(cls, *args, max_n=None):
for i in takewhile(lambda n: max_n and n < max_n, count()):
if cls.multiple_of_any(i, tuple(args)):
yield i
@staticmethod
def sum_divisible_by(n, target):
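        # Closed-form arithmetic series: n + 2n + ... + p*n = n * p * (p + 1) / 2,
        # where p = target // n is the number of multiples of n up to target.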
p = target // n
return n * (p * (p + 1)) / 2
| 29.586207
| 109
| 0.641026
|