hexsha
stringlengths 40
40
| size
int64 5
2.06M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
248
| max_stars_repo_name
stringlengths 5
125
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
248
| max_issues_repo_name
stringlengths 5
125
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
248
| max_forks_repo_name
stringlengths 5
125
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 5
2.06M
| avg_line_length
float64 1
1.02M
| max_line_length
int64 3
1.03M
| alphanum_fraction
float64 0
1
| count_classes
int64 0
1.6M
| score_classes
float64 0
1
| count_generators
int64 0
651k
| score_generators
float64 0
1
| count_decorators
int64 0
990k
| score_decorators
float64 0
1
| count_async_functions
int64 0
235k
| score_async_functions
float64 0
1
| count_documentation
int64 0
1.04M
| score_documentation
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4959b0c95664f6b5e44804e1d10f0d164fcb9038
| 2,335
|
py
|
Python
|
setup.py
|
sankethvedula/flowtorch
|
44a0f0eff842dd33ca17b01f4e02d8cdda005aa8
|
[
"MIT"
] | 29
|
2020-12-19T00:29:42.000Z
|
2021-08-12T19:11:47.000Z
|
setup.py
|
sankethvedula/flowtorch
|
44a0f0eff842dd33ca17b01f4e02d8cdda005aa8
|
[
"MIT"
] | 30
|
2020-12-29T04:42:38.000Z
|
2021-02-19T22:29:38.000Z
|
setup.py
|
sankethvedula/flowtorch
|
44a0f0eff842dd33ca17b01f4e02d8cdda005aa8
|
[
"MIT"
] | 1
|
2021-05-06T21:25:45.000Z
|
2021-05-06T21:25:45.000Z
|
# Copyright (c) Meta Platforms, Inc
import os
import sys
from setuptools import find_packages, setup
REQUIRED_MAJOR = 3
REQUIRED_MINOR = 7
TEST_REQUIRES = ["numpy", "pytest", "pytest-cov", "scipy"]
DEV_REQUIRES = TEST_REQUIRES + [
"black",
"flake8",
"flake8-bugbear",
"mypy",
"toml",
"usort",
]
# Check for python version
if sys.version_info < (REQUIRED_MAJOR, REQUIRED_MINOR):
error = (
"Your version of python ({major}.{minor}) is too old. You need "
"python >= {required_major}.{required_minor}."
).format(
major=sys.version_info.major,
minor=sys.version_info.minor,
required_minor=REQUIRED_MINOR,
required_major=REQUIRED_MAJOR,
)
sys.exit(error)
# read in README.md as the long description
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name="flowtorch",
description="Normalizing Flows for PyTorch",
author="FlowTorch Development Team",
author_email="info@stefanwebb.me",
license="MIT",
url="https://flowtorch.ai/users",
project_urls={
"Documentation": "https://flowtorch.ai/users",
"Source": "https://www.github.com/facebookincubator/flowtorch",
},
keywords=[
"Deep Learning",
"Bayesian Inference",
"Statistical Modeling",
"Variational Inference",
"PyTorch",
],
classifiers=[
"Development Status :: 3 - Alpha",
"Programming Language :: Python :: 3 :: Only",
"License :: OSI Approved :: MIT License",
"Topic :: Scientific/Engineering",
"Intended Audience :: Science/Research",
"Intended Audience :: Developers",
],
long_description=long_description,
long_description_content_type="text/markdown",
python_requires=">={}.{}".format(REQUIRED_MAJOR, REQUIRED_MINOR),
install_requires=[
"torch>=1.8.1",
],
setup_requires=["setuptools_scm"],
use_scm_version={
"root": ".",
"relative_to": __file__,
"write_to": os.path.join("flowtorch", "version.py"),
},
packages=find_packages(
include=["flowtorch", "flowtorch.*"],
exclude=["debug", "tests", "website", "examples", "scripts"],
),
extras_require={
"dev": DEV_REQUIRES,
"test": TEST_REQUIRES,
},
)
| 26.534091
| 72
| 0.616702
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,036
| 0.443683
|
495c1efa0fbbd4eda9cf54a8e5e4784dac2375f0
| 12,923
|
py
|
Python
|
nn/train.py
|
julian-carpenter/airynet
|
7ad87bbc717107f72d69547e4243373c05dadf70
|
[
"MIT"
] | 8
|
2019-03-17T10:45:19.000Z
|
2022-01-13T17:36:54.000Z
|
nn/train.py
|
julian-carpenter/airynet
|
7ad87bbc717107f72d69547e4243373c05dadf70
|
[
"MIT"
] | null | null | null |
nn/train.py
|
julian-carpenter/airynet
|
7ad87bbc717107f72d69547e4243373c05dadf70
|
[
"MIT"
] | 1
|
2019-07-24T05:59:38.000Z
|
2019-07-24T05:59:38.000Z
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import nn
import tensorflow as tf
from tensorflow.python.client import device_lib
from numpy import min, max
def model_fn(features, labels, mode, params):
"""Model function for airynet network"""
def select_architecture(arc="resnet"):
if arc == "resnet":
model_fn = nn.airynet_resnet_variant(cfg.resnet_size,
cfg.num_classes,
cfg.data_format,
cfg.relu_leakiness)
elif arc == "vgg":
model_fn = nn.airynet_vgg_variant(cfg.vgg_size, cfg.num_classes,
cfg.data_format)
return model_fn
if mode == tf.estimator.ModeKeys.PREDICT:
labels = features["lbl"]
features = features["imgs"]
labels = tf.identity(labels, name="bids")
features = tf.identity(features, name="images")
feat_converted = tf.map_fn(
lambda x: tf.image.convert_image_dtype(x, tf.float32), features)
tf.summary.image("images", feat_converted, max_outputs=3)
cfg = params["config"]
if mode == tf.estimator.ModeKeys.TRAIN:
global_step = tf.train.get_or_create_global_step()
# Multiply the learning rate by 0.1 at 30, 60, 120 and 150 epochs.
batches_per_epoch = cfg.dataset_size / cfg.batch_size
boundaries = [
int(batches_per_epoch * epoch) for epoch in [30, 60, 120, 150]
]
# Scale the learning rate linearly with the batch size. When the
# batch size is 128, the learning rate should be 0.1.
lr = cfg.lr * cfg.batch_size / 128
values = [lr * decay for decay in [1, 0.1, 0.01, 1e-3, 1e-4]]
learning_rate = tf.train.piecewise_constant(
tf.cast(global_step, tf.int32), boundaries, values)
# Create a tensor named learning_rate for logging purposes
tf.identity(learning_rate, name="learning_rate")
tf.summary.scalar("learning_rate", learning_rate)
optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate,
momentum=cfg.gamma,
use_nesterov=True)
avail_gpus = get_available_gpus()
tower_grads = []
reuse = False
with tf.variable_scope(tf.get_variable_scope()):
print(cfg.resnet_size, cfg.num_classes, cfg.data_format,
cfg.relu_leakiness)
network = select_architecture(cfg.airynet_type)
for dev in avail_gpus:
print("Building inference on: {}".format(dev))
if int(dev[-1]) != 0:
# set scope to reuse if more than one gpu are available
tf.get_variable_scope().reuse_variables()
reuse = True
with tf.device(dev), tf.name_scope(
dev.replace(":", "_").replace("/", "")):
logits = network(features, mode == tf.estimator.ModeKeys.TRAIN,
reuse)
if mode == tf.estimator.ModeKeys.TRAIN:
if cfg.dataset == "cifar10":
cross_entropy = tf.losses.softmax_cross_entropy(
logits=logits, onehot_labels=labels)
else:
cross_entropy = tf.losses.sigmoid_cross_entropy(
logits=logits, multi_class_labels=labels)
# get l1_regularizer loss
reg_penalty = tf.reduce_mean(
tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
# get trainable variables
trainable_variables = tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES)
# get weight decay_loss
if cfg.use_weight_decay:
loss_weight_decay = tf.reduce_sum(
tf.stack([
tf.nn.l2_loss(i) for i in trainable_variables
])) * cfg.weight_decay
else:
loss_weight_decay = 0.
# define loss, consider to add the weight_decay
loss = cross_entropy + reg_penalty + loss_weight_decay
comp_grad_op = optimizer.compute_gradients(
loss, trainable_variables)
tower_grads.append(comp_grad_op)
if mode == tf.estimator.ModeKeys.TRAIN:
grads = average_gradients(tower_grads, tf.get_default_graph())
if mode == tf.estimator.ModeKeys.TRAIN:
# Batch norm requires update ops to be
# added as a dependency to the train_op
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train_op = optimizer.apply_gradients(grads, global_step)
else:
train_op = None
loss = tf.constant(0.)
if cfg.dataset == "cifar10":
fc_out_activation_fun = tf.nn.softmax
fc_out_activation_name = "softmax_output"
else:
fc_out_activation_fun = tf.nn.sigmoid
fc_out_activation_name = "sigmoid_output"
predictions = {
"classes":
tf.round(
fc_out_activation_fun(logits,
name=fc_out_activation_name + "_classes")),
"probabilities":
fc_out_activation_fun(logits, name=fc_out_activation_name),
"bunchID":
labels
}
print(logits.get_shape())
if mode == tf.estimator.ModeKeys.PREDICT:
# We calculate the gradients between the output and tha last
# convolutional layer for the GradCam
graph = tf.get_default_graph()
conv_ = graph.get_operation_by_name(
"device_GPU_0/fourth_block/last_block_before_fc").outputs[0]
out_ = graph.get_operation_by_name(
"device_GPU_0/nn_out/final_dense").outputs[0]
out_ = tf.nn.sigmoid(out_)
out_ *= tf.round(out_)
heat_loss_ = []
grads = []
norm = []
normed_grads = []
for class_idx in range(out_.get_shape()[-1]):
print("Building GradCam for class: {}".format(class_idx))
heat_loss_.append(
tf.reduce_mean(out_[:, class_idx],
name="class_loss_{}".format(class_idx)))
curr_grad = tf.gradients(
heat_loss_, conv_, name="class_grads_{}".format(class_idx))[0]
grads.append(curr_grad)
norm.append(
tf.sqrt(tf.reduce_mean(tf.square(curr_grad)),
name="class_norm_{}".format(class_idx)))
normed_grads.append(
tf.divide(tf.convert_to_tensor(grads[class_idx]),
tf.convert_to_tensor(norm[class_idx]) +
tf.constant(1e-5),
name="normalized_grads_{}".format(class_idx)))
tf.identity(tf.convert_to_tensor(normed_grads),
name="normalized_grads")
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
# Create some metrics for logging purposes
lbl = tf.to_float(labels)
prediction = predictions["classes"]
if mode == tf.estimator.ModeKeys.TRAIN:
# Create a tensor named cross_entropy for logging purposes.
tf.identity(cross_entropy, name="cross_entropy")
tf.summary.scalar("metrics/cross_entropy", cross_entropy)
tf.summary.scalar("metrics/reg_penalty", reg_penalty)
# tf.summary.scalar("metrics/weight_decay_loss", weight_decay_loss)
# Calculate the confusion matrix
confusion_matr = tf.to_float(
tf.confusion_matrix(tf.reshape(lbl, [-1]),
tf.reshape(prediction, [-1]),
num_classes=2))
tf.identity(confusion_matr, name="confusion_matr")
# Matthews Correlation Coefficient
TP = confusion_matr[1][1]
TN = confusion_matr[0][0]
FP = confusion_matr[0][1]
FN = confusion_matr[1][0]
MCC = (TP * TN - FP * FN) / (tf.sqrt(
(TP + FP) * (TP + FN) * (TN + FP) * (TN + FN)))
tf.identity(MCC, name="mcc")
tf.summary.scalar("metrics/mcc", MCC)
# Stack lbl and predictions as image for the summary
lbl_vs_prediction = tf.multiply(tf.ones_like(lbl), 255)
lbl_vs_prediction = tf.expand_dims(tf.stack([
tf.multiply(lbl_vs_prediction, lbl),
tf.multiply(lbl_vs_prediction, prediction),
tf.zeros_like(lbl)
],
axis=-1),
axis=0)
tf.identity(lbl_vs_prediction, name="lbl_vs_prediction")
tf.summary.image("metrics/lbl_vs_prediction", lbl_vs_prediction)
lbl_image = tf.expand_dims(tf.expand_dims(lbl, axis=-1), axis=0)
tf.identity(lbl_image, name="lbl_image")
tf.summary.image("metrics/lbl_image", lbl_image)
prediction_image = tf.expand_dims(tf.expand_dims(prediction, axis=-1),
axis=0)
tf.identity(prediction_image, name="prediction_image")
tf.summary.image("metrics/prediction_image", prediction_image)
accuracy = tf.metrics.accuracy(lbl, prediction)
tf.identity(accuracy[1], name="train_accuracy")
tf.summary.scalar("metrics/train_accuracy", accuracy[1])
eval_tp = tf.metrics.true_positives(lbl, prediction)
eval_fp = tf.metrics.false_positives(lbl, prediction)
eval_fn = tf.metrics.false_negatives(lbl, prediction)
eval_precision = tf.metrics.precision(lbl, prediction)
eval_mean_per_class = tf.metrics.mean_per_class_accuracy(
lbl, prediction, cfg.num_classes)
metrics = {
"accuracy": accuracy,
"mean_per_class_accuracy": eval_mean_per_class,
"precision": eval_precision,
"true_positives": eval_tp,
"false_positives": eval_fp,
"false_negatives": eval_fn
}
return tf.estimator.EstimatorSpec(mode=mode,
predictions=predictions,
loss=loss,
train_op=train_op,
eval_metric_ops=metrics)
def get_available_gpus():
"""
Get a list of available GPU"s
"""
local_device_protos = device_lib.list_local_devices()
return [x.name for x in local_device_protos if x.device_type == "GPU"]
def average_gradients(tower_grads, graph):
"""
Calculate the average gradient for each shared variable across all towers.
Note that this function provides a synchronization point across all towers.
Args:
tower_grads: List of lists of (gradient, variable) tuples. The outer
list is over individual gradients. The inner list is over the gradient
calculation for each tower.
Returns:
List of pairs of (gradient, variable) where the gradient has been
averaged across all towers.
"""
with graph.name_scope("averaging_gradients"):
average_grads = []
for grad_and_vars in zip(*tower_grads):
# Note that each grad_and_vars looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
grads = []
for g_idx, _ in grad_and_vars:
# Add 0 dimension to the gradients to represent the tower.
expanded_g = tf.expand_dims(g_idx, 0)
# Append on a "tower" dimension which we will
# average over below.
grads.append(expanded_g)
# Average over the "tower" dimension.
grad = tf.concat(grads, 0)
grad = tf.reduce_mean(grad, 0)
# Keep in mind that the Variables are redundant because they are
# shared across towers. So .. we will just return the first
# tower"s pointer to the Variable.
vals = grad_and_vars[0][1]
grad_and_var = (grad, vals)
average_grads.append(grad_and_var)
return average_grads
def rescale(vals, new_min, new_max):
old_min = min(vals)
old_max = max(vals)
# range check
if old_min == old_max:
print("Warning: Zero input range")
return vals
if new_min == new_max:
print("Warning: Zero output range")
return vals
portion = (vals - old_min) * (new_max - new_min) / (old_max - old_min)
portion = (old_max - vals) * (new_max - new_min) / (old_max - old_min)
result = portion + new_min
result = new_max - portion
return result
| 40.384375
| 79
| 0.583998
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,765
| 0.21396
|
495c925b5444da2267f89a8db3c5a875669b6a75
| 310
|
py
|
Python
|
retrobiocat_web/app/db_analysis/__init__.py
|
ihayhurst/RetroBioCat
|
d674897459c0ab65faad5ed3017c55cf51bcc020
|
[
"MIT"
] | 9
|
2020-12-01T16:33:02.000Z
|
2022-01-19T20:02:42.000Z
|
retrobiocat_web/app/db_analysis/__init__.py
|
ihayhurst/RetroBioCat
|
d674897459c0ab65faad5ed3017c55cf51bcc020
|
[
"MIT"
] | 4
|
2020-10-02T14:38:32.000Z
|
2021-08-02T09:23:58.000Z
|
retrobiocat_web/app/db_analysis/__init__.py
|
ihayhurst/RetroBioCat
|
d674897459c0ab65faad5ed3017c55cf51bcc020
|
[
"MIT"
] | 6
|
2021-01-14T07:48:36.000Z
|
2022-03-20T17:34:27.000Z
|
from flask import Blueprint
bp = Blueprint('db_analysis',
__name__,
template_folder='templates',
static_folder='static',
static_url_path='/db_analysis/static'
)
from retrobiocat_web.app.db_analysis.routes import bioinformatics, ssn
| 25.833333
| 70
| 0.616129
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 53
| 0.170968
|
495d8b60f3e72589eb60ba4876bee12c223e7de1
| 10,249
|
py
|
Python
|
tasks.py
|
pycopia/devtest
|
9ec93045ba4bab5b20ce99dc61cebd5b5a234d01
|
[
"Apache-2.0"
] | null | null | null |
tasks.py
|
pycopia/devtest
|
9ec93045ba4bab5b20ce99dc61cebd5b5a234d01
|
[
"Apache-2.0"
] | null | null | null |
tasks.py
|
pycopia/devtest
|
9ec93045ba4bab5b20ce99dc61cebd5b5a234d01
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3.9
"""Tasks file used by the *invoke* command.
This simplifies some common development tasks.
Run these tasks with the `invoke` tool.
"""
from __future__ import annotations
import sys
import os
import shutil
import getpass
from glob import glob
from pathlib import Path
import keyring
import semver
from setuptools_scm import get_version
from invoke import task, run, Exit
SIGNERS = ["keith"]
PYTHONBIN = os.environ.get("PYTHONBIN", sys.executable)
# Put the path in quotes in case there is a space in it.
PYTHONBIN = f'"{PYTHONBIN}"'
GPG = "gpg2"
CURRENT_USER = getpass.getuser()
# Putting pypi info here eliminates the need for user-private ~/.pypirc file.
PYPI_HOST = "upload.pypi.org"
PYPI_URL = f"https://{PYPI_HOST}/legacy/"
PYPI_USER = "__token__"
PYPI_INDEX = f"{PYPI_URL}simple"
@task
def info(ctx):
"""Show information about the current Python and environment."""
version = get_version()
suffix = get_suffix()
print(f"Python being used: {PYTHONBIN}")
print(f"Python extension suffix: {suffix}")
print(f"Package version: {version}")
venv = get_virtualenv()
if venv:
print(f"Virtual environment:", venv)
@task
def flake8(ctx, pathname="devtest"):
"""Run flake8 linter on the package."""
ctx.run(f"{PYTHONBIN} -m flake8 {pathname}")
@task
def format(ctx, pathname="devtest", check=False):
"""Run yapf formatter on the specified file, or recurse into directory."""
option = "-d" if check else "-i"
recurse = "--recursive" if os.path.isdir(pathname) else ""
ctx.run(f"{PYTHONBIN} -m yapf --style setup.cfg {option} {recurse} {pathname}")
@task
def format_changed(ctx, check=False, untracked=False):
"""Run yapf formatter on currently modified python files.
If check option given then just show the diff.
"""
option = "-d" if check else "-i"
files = get_modified_files(untracked)
if files:
ctx.run(f'{PYTHONBIN} -m yapf --style setup.cfg {option} {" ".join(files)}')
else:
print("No changed python files.")
@task
def set_pypi_token(ctx):
"""Set the token in the local key ring.
"""
pw = getpass.getpass(f"Enter pypi token? ")
if pw:
keyring.set_password(PYPI_HOST, PYPI_USER, pw)
else:
raise Exit("No password entered.", 3)
@task
def build(ctx):
"""Build the intermediate package components."""
ctx.run(f"{PYTHONBIN} setup.py build")
@task
def dev_requirements(ctx):
"""Install development requirements."""
ctx.run(f"{PYTHONBIN} -m pip install --index-url {PYPI_INDEX} --trusted-host {PYPI_HOST} "
f"-r dev-requirements.txt --user")
@task(pre=[dev_requirements])
def develop(ctx, uninstall=False):
"""Start developing in developer mode."""
if uninstall:
ctx.run(f"{PYTHONBIN} setup.py develop --uninstall --user")
else:
ctx.run(f'{PYTHONBIN} setup.py develop --index-url "{PYPI_INDEX}" --user')
@task
def clean(ctx):
"""Clean out build and cache files. Remove extension modules."""
ctx.run(f"{PYTHONBIN} setup.py clean")
ctx.run(r"find . -depth -type d -name __pycache__ -exec rm -rf {} \;")
ctx.run('find devtest -name "*.so" -delete')
with ctx.cd("docs"):
ctx.run('rm -f modules/devtest.*.rst')
ctx.run(f"{PYTHONBIN} -m sphinx.cmd.build -M clean . _build")
@task
def cleandist(ctx):
"""Clean out dist subdirectory."""
if os.path.isdir("dist"):
shutil.rmtree("dist", ignore_errors=True)
os.mkdir("dist")
@task
def test(ctx, testfile=None, ls=False):
"""Run unit tests. Use ls option to only list them."""
if ls:
ctx.run(f"{PYTHONBIN} -m pytest --collect-only -qq tests")
elif testfile:
ctx.run(f"{PYTHONBIN} -m pytest -s {testfile}")
else:
ctx.run(f"{PYTHONBIN} -m pytest tests", hide=False, in_stream=False)
@task
def tag(ctx, tag=None, major=False, minor=False, patch=False):
"""Tag or bump release with a semver tag. Makes a signed tag if you're a signer."""
latest = None
if tag is None:
tags = get_tags()
if not tags:
latest = semver.VersionInfo(0, 0, 0)
else:
latest = tags[-1]
if patch:
nextver = latest.bump_patch()
elif minor:
nextver = latest.bump_minor()
elif major:
nextver = latest.bump_major()
else:
nextver = latest.bump_patch()
else:
if tag.startswith("v"):
tag = tag[1:]
try:
nextver = semver.parse_version_info(tag)
except ValueError:
raise Exit("Invalid semver tag.", 2)
print(latest, "->", nextver)
tagopt = "-s" if CURRENT_USER in SIGNERS else "-a"
ctx.run(f'git tag {tagopt} -m "Release v{nextver}" v{nextver}')
@task
def tag_delete(ctx, tag=None):
"""Delete a tag, both local and remote."""
if tag:
ctx.run(f"git tag -d {tag}")
ctx.run(f"git push origin :refs/tags/{tag}")
@task(cleandist)
def sdist(ctx):
"""Build source distribution."""
ctx.run(f"{PYTHONBIN} setup.py sdist")
@task
def build_ext(ctx):
"""Build compiled extension modules, in place."""
ctx.run(f"{PYTHONBIN} setup.py build_ext --inplace")
@task(sdist)
def bdist(ctx):
"""Build a standard wheel file, an installable format."""
ctx.run(f"{PYTHONBIN} setup.py bdist_wheel")
@task(bdist)
def sign(ctx):
"""Cryptographically sign dist with your default GPG key."""
if CURRENT_USER in SIGNERS:
ctx.run(f"{GPG} --detach-sign -a dist/devtest-*.whl")
ctx.run(f"{GPG} --detach-sign -a dist/devtest-*.tar.gz")
else:
print("Not signing.")
@task(pre=[sign])
def publish(ctx):
"""Publish built wheel file to package repo."""
token = get_pypi_token()
distfiles = glob("dist/*.whl")
distfiles.extend(glob("dist/*.tar.gz"))
if not distfiles:
raise Exit("Nothing in dist folder!")
distfiles = " ".join(distfiles)
ctx.run(f'{PYTHONBIN} -m twine upload --repository-url \"{PYPI_URL}\" '
f'--username {PYPI_USER} --password {token} {distfiles}')
@task
def docs(ctx):
"""Build the HTML documentation."""
ctx.run("rm docs/modules/devtest.*.rst", warn=True)
ctx.run(f"{PYTHONBIN} -m sphinx.ext.apidoc --force --separate --no-toc --output-dir "
f"docs/modules devtest")
with ctx.cd("docs"):
ctx.run(f"{PYTHONBIN} -m sphinx.cmd.build -M html . _build")
if os.environ.get("DISPLAY"):
ctx.run("xdg-open docs/_build/html/index.html")
@task
def branch(ctx, name=None):
"""start a new branch, both local and remote tracking."""
if name:
ctx.run(f"git checkout -b {name}")
ctx.run(f"git push -u origin {name}")
else:
ctx.run("git --no-pager branch")
@task
def branch_delete(ctx, name=None):
"""Delete local, remote and tracking branch by name."""
if name:
ctx.run(f"git branch -d {name}", warn=True) # delete local branch
ctx.run(f"git branch -d -r {name}", warn=True) # delete local tracking info
ctx.run(f"git push origin --delete {name}", warn=True) # delete remote (origin) branch.
else:
print("Supply a branch name: --name <name>")
@task(pre=[sdist])
def docker_build(ctx):
"""Build docker image."""
version = get_version()
if not version:
raise Exit("Need to tag a version first.", 2)
environ = {
"PYVER": "{}.{}".format(sys.version_info.major, sys.version_info.minor),
"VERSION": version,
"PYPI_REPO": PYPI_INDEX,
"PYPI_HOST": PYPI_HOST,
}
ctx.run(
f"docker build "
f"--build-arg PYVER --build-arg VERSION "
f"--build-arg PYPI_REPO --build-arg PYPI_HOST -t devtest:{version} .",
env=environ)
print(f"Done. To run it:\n docker run -it devtest:{version}")
@task
def logfile(ctx, name="devtester"):
"""Dump the system log file with optional name filter."""
if WINDOWS:
ctx.run(f'wevtutil.exe qe Application /query:"*[System[Provider[@Name={name!r}]]]" /f:text')
elif LINUX:
ctx.run(f'journalctl --identifier={name!r} --no-pager --priority=debug')
elif DARWIN: # May need a tweak
ctx.run(f'log stream --predicate \'senderImagePath contains "Python"\' --level debug')
# Helper functions follow.
def get_virtualenv():
venv = os.environ.get("VIRTUAL_ENV")
if venv and os.path.isdir(venv):
return venv
return None
def get_tags():
rv = run('git tag -l "v*"', hide="out")
vilist = []
for line in rv.stdout.split():
try:
vi = semver.parse_version_info(line[1:])
except ValueError:
pass
else:
vilist.append(vi)
vilist.sort()
return vilist
def get_pypi_token():
cred = keyring.get_credential(PYPI_HOST, PYPI_USER)
if not cred:
raise Exit("You must set the pypi token with the set-pypi-token target.", 1)
return cred.password
def get_suffix():
return run(
f'{PYTHONBIN} -c \'import sysconfig; print(sysconfig.get_config_vars()["EXT_SUFFIX"])\'',
hide=True,
).stdout.strip() # noqa
def resolve_path(base, p):
p = Path(p)
return str(base / p)
def find_git_base():
"""Find the base directory of this git repo.
The git status output is always relative to this directory.
"""
start = Path.cwd().resolve()
while start:
if (start / ".git").exists():
return start
start = start.parent
raise Exit("Not able to find git repo base.")
def get_modified_files(untracked):
"""Find the list of modified and, optionally, untracked Python files.
If `untracked` is True, also include untracked Python files.
"""
filelist = []
gitbase = find_git_base()
gitout = run('git status --porcelain=1 -z', hide=True)
for line in gitout.stdout.split("\0"):
if line:
if not line.endswith(".py"):
continue
if line[0:2] == " M":
filelist.append(resolve_path(gitbase, line[3:]))
if untracked and line[0:2] == "??":
filelist.append(resolve_path(gitbase, line[3:]))
return filelist
| 28.469444
| 100
| 0.623085
| 0
| 0
| 0
| 0
| 7,445
| 0.726412
| 0
| 0
| 4,757
| 0.464143
|
495dad09c3d51139d0567841ccbcb4d16adb5840
| 3,846
|
py
|
Python
|
sandbox/lib/jumpscale/Jumpscale/servers/gedis/tests/3_threebot_redis_registration.py
|
threefoldtech/threebot_prebuilt
|
1f0e1c65c14cef079cd80f73927d7c8318755c48
|
[
"Apache-2.0"
] | null | null | null |
sandbox/lib/jumpscale/Jumpscale/servers/gedis/tests/3_threebot_redis_registration.py
|
threefoldtech/threebot_prebuilt
|
1f0e1c65c14cef079cd80f73927d7c8318755c48
|
[
"Apache-2.0"
] | null | null | null |
sandbox/lib/jumpscale/Jumpscale/servers/gedis/tests/3_threebot_redis_registration.py
|
threefoldtech/threebot_prebuilt
|
1f0e1c65c14cef079cd80f73927d7c8318755c48
|
[
"Apache-2.0"
] | null | null | null |
from Jumpscale import j
from io import BytesIO
import binascii
def main(self):
"""
kosmos -p 'j.servers.gedis.test("threebot_redis_registration")'
"""
####THREEBOT REGISTRATION
phonebook = j.threebot.package.phonebook.client_get()
if j.sal.nettools.tcpPortConnectionTest("www.google.com", 443):
phonebook.actors.phonebook.wallet_create("test")
j.data.nacl.configure(name="client_test", generate=True, interactive=False)
client_nacl = j.data.nacl.get(name="client_test")
cl = j.clients.redis.get(port=8901)
def register_threebot_redis():
cl.execute_command("config_format", "json")
# get a nacl config (to act as a virtual person)
myname = "test.ibiza"
data_return_json = cl.execute_command(
"default.phonebook.name_register",
j.data.serializers.json.dumps({"name": myname, "pubkey": client_nacl.verify_key_hex}),
)
data_return = j.data.serializers.json.loads(data_return_json)
assert data_return["pubkey"] == client_nacl.verify_key_hex
assert data_return["name"] == myname
data = {
"tid": data_return["id"],
"name": data_return["name"],
"email": "something@threefold.com",
"ipaddr": "212.3.247.26",
"description": "",
"pubkey": client_nacl.verify_key_hex,
}
def sign(nacl, *args):
buffer = BytesIO()
for item in args:
if isinstance(item, str):
item = item.encode()
elif isinstance(item, int):
item = str(item).encode()
elif isinstance(item, bytes):
pass
else:
raise RuntimeError()
buffer.write(item)
payload = buffer.getvalue()
print(payload)
signature = nacl.sign(payload)
return binascii.hexlify(signature).decode()
# we sign the different records to come up with the right 'sender_signature_hex'
sender_signature_hex = sign(
client_nacl, data["tid"], data["name"], data["email"], data["ipaddr"], data["description"], data["pubkey"]
)
data["sender_signature_hex"] = sender_signature_hex
data2 = j.data.serializers.json.dumps(data)
data_return_json = cl.execute_command("default.phonebook.record_register", data2)
data_return = j.data.serializers.json.loads(data_return_json)
print(data)
return data_return
def query_threebot_redis(tid):
cl.execute_command("config_format", "json")
myname = "test.ibiza"
data2 = j.data.serializers.json.dumps({"name": myname})
res_json = cl.execute_command("default.phonebook.get", data2)
threebot_info3 = j.data.serializers.json.loads(res_json)
data2 = j.data.serializers.json.dumps({"tid": tid})
res_json = cl.execute_command("default.phonebook.get", data2)
threebot_info4 = j.data.serializers.json.loads(res_json)
assert threebot_info3 == threebot_info4
# verify the data (is same logic as above in register threebot, to see if record is valid)
rc = j.data.nacl.payload_verify(
threebot_info4["id"],
threebot_info4["name"],
threebot_info4["email"],
threebot_info4["ipaddr"],
threebot_info4["description"],
threebot_info4["pubkey"],
verifykey=threebot_info4["pubkey"],
signature=threebot_info4["signature"],
die=True,
)
return threebot_info4
threebot_info = register_threebot_redis()
threebot_info2 = query_threebot_redis(threebot_info["id"])
assert threebot_info == threebot_info2
print("**DONE**")
| 32.871795
| 118
| 0.607904
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 835
| 0.217109
|
495f237d0412eaba51c51ff1ebcf25e4b6ae6465
| 2,803
|
py
|
Python
|
font/gen.py
|
smaji-org/cjkv_info_sample
|
12440b938a58b2384a3c9d11c0897dd4101d6fe6
|
[
"MIT"
] | null | null | null |
font/gen.py
|
smaji-org/cjkv_info_sample
|
12440b938a58b2384a3c9d11c0897dd4101d6fe6
|
[
"MIT"
] | null | null | null |
font/gen.py
|
smaji-org/cjkv_info_sample
|
12440b938a58b2384a3c9d11c0897dd4101d6fe6
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python2
# -*- coding:utf-8 -*-
import os
import argparse
import glob
from functools import partial
import fontforge
import psMat
import source
opt_parser= argparse.ArgumentParser()
opt_parser.add_argument("--cjkv_info", type= str,
help= u"the path of cjkv_info")
opt_parser.add_argument("--region", type= str,
help= u"the region from where to select characters")
opt_parser.add_argument("--start", type= partial(int, base=16),
help= u"the start point of unicode")
opt_parser.add_argument("--end", type= partial(int, base=16),
help= u"the end point of unicode")
opt_parser.add_argument("--name", type= str,
help= u"the name of the new font")
opt_parser.add_argument("--adjust", type= int,
help= u"adjust the position of the outline")
opt_parser.add_argument("--output", type= str,
help= u"the path and filename of the new font")
class Opts:
def __init__(self):
self.cjkv_info= "."
self.region= "China"
self.start= 0
self.end= 0
self.name= "my font"
self.adjust= 0
self.output= (".", "out.ttf")
def setup_opt():
opts= Opts()
args= opt_parser.parse_args()
opts.cjkv_info= args.cjkv_info or opts.cjkv_info
opts.region= args.region or opts.region
opts.start= args.start or opts.start
opts.end= args.end or opts.end
opts.name= args.name or opts.name
opts.adjust= args.adjust or opts.adjust
if args.output:
d= os.path.dirname(args.output) or opts.output[0]
f= os.path.basename(args.output) or opts.output[1]
opts.output= (d, f)
try:
os.mkdir(opts.output[0])
except OSError:
pass
return opts
def get_code(path):
basename= os.path.basename(path)
(root, ext)= os.path.splitext(basename)
code_tag= root.split("_")
code= int(code_tag[0], 16)
return code
def read_src(path):
with open(path, "r") as f:
src= f.readline()
return src
def get_region(src):
return source.rev.get(src.split("-")[0])
def is_region(region):
return region == opts.region
def filter_src(path):
code= get_code(path)
if opts.start <= code and code <= opts.end:
src= read_src(path)
region= get_region(src)
return is_region(region)
else:
return False
opts= setup_opt()
src_files= glob.glob(os.path.join(opts.cjkv_info, "data", "*", "*.src"))
src_files= filter(filter_src, src_files)
newfont= fontforge.font()
newfont.em= 1024
newfont.fontname= opts.name
for src_file in src_files:
code= get_code(src_file);
glyph= newfont.createChar(code)
(root, ext)= os.path.splitext(src_file)
glyph.importOutlines(root + ".svg")
glyph.transform(psMat.translate(0, opts.adjust))
newfont.generate(os.path.join(opts.output[0], opts.output[1]))
| 26.951923
| 72
| 0.662861
| 224
| 0.079914
| 0
| 0
| 0
| 0
| 0
| 0
| 402
| 0.143418
|
496001f2e20c60b98e9d4d0701aee95ac8df87b1
| 3,692
|
py
|
Python
|
alarm_control_panel.py
|
rs1932/homeassistant-ring_alarm_component
|
b65b8ee1bc7e7408c3bc1adb6fd4e3f4ebf330d6
|
[
"Apache-2.0"
] | 4
|
2019-09-07T23:15:54.000Z
|
2020-04-20T22:47:37.000Z
|
alarm_control_panel.py
|
rs1932/homeassistant-ring_alarm_component
|
b65b8ee1bc7e7408c3bc1adb6fd4e3f4ebf330d6
|
[
"Apache-2.0"
] | 3
|
2019-09-10T00:03:24.000Z
|
2020-10-02T13:26:08.000Z
|
alarm_control_panel.py
|
rs1932/homeassistant-ring_alarm_component
|
b65b8ee1bc7e7408c3bc1adb6fd4e3f4ebf330d6
|
[
"Apache-2.0"
] | 3
|
2019-11-19T11:03:01.000Z
|
2021-05-12T20:11:16.000Z
|
import logging
import pandas as pd
from homeassistant.components.alarm_control_panel import (
AlarmControlPanel
)
from homeassistant.core import callback
from homeassistant.util import convert
from .ringalarmdevice import RingAlarmDevice
from .constants import *
from homeassistant.const import (
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_DISARMED
)
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_devices, device):
# for index, device in devices.iterrows():
add_devices([RingAlarmControlPanel(device)], True)
class RingAlarmControlPanel(RingAlarmDevice, AlarmControlPanel):
def __init__(self, ringalarm_device):
super().__init__(ringalarm_device)
try:
if ringalarm_device[DEVICE_ALARM_MODE] == "none":
self._state = STATE_ALARM_DISARMED
except:
pass
try:
if ringalarm_device[DEVICE_ALARM_MODE] == "some":
self._state = STATE_ALARM_ARMED_HOME
except:
pass
try:
if ringalarm_device[DEVICE_ALARM_MODE] == "all":
self._state = STATE_ALARM_ARMED_AWAY
except:
pass
try:
self._tamper_status = ringalarm_device[DEVICE_TAMPER_STATUS]
except:
pass
def update(self):
pass
def alarm_disarm(self, code=None):
"""Send disarm command."""
try:
self.controller.ring_api.send_command_ring(self.ringalarm_device[DEVICE_ZID],
self.ringalarm_device[DEVICE_SOURCE],
'security-panel.switch-mode',
data={'mode': 'none', "bypass": None})
except:
pass
def alarm_arm_home(self, code=None):
"""Send arm home command."""
try:
self.controller.ring_api.send_command_ring(self.ringalarm_device[DEVICE_ZID],
self.ringalarm_device[DEVICE_SOURCE],
'security-panel.switch-mode',
data={'mode': 'some', "bypass": None})
except:
pass
def alarm_arm_away(self, code=None):
"""Send arm away command."""
try:
self.controller.ring_api.send_command_ring(self.ringalarm_device[DEVICE_ZID],
self.ringalarm_device[DEVICE_SOURCE],
'security-panel.switch-mode',
data={'mode': 'all', "bypass": None})
except:
pass
def update_callback(self, data):
try:
if data[DEVICE_ALARM_MODE] == "none":
self._state = STATE_ALARM_DISARMED
except:
pass
try:
if data[DEVICE_ALARM_MODE] == "some":
self._state = STATE_ALARM_ARMED_HOME
except:
pass
try:
if data[DEVICE_ALARM_MODE] == "all":
self._state = STATE_ALARM_ARMED_AWAY
except:
pass
self.schedule_update_ha_state(True)
@property
def changed_by(self):
"""Last change triggered by."""
return None
@property
def code_arm_required(self):
"""Whether the code is required for arm actions."""
return True
@property
def state(self):
"""Get the state of the device."""
return self._state
| 31.288136
| 93
| 0.54117
| 3,104
| 0.840737
| 0
| 0
| 317
| 0.085861
| 0
| 0
| 417
| 0.112947
|
4960a6159309ea3987628a9491068ccbc097d8ac
| 4,224
|
py
|
Python
|
bot/game.py
|
thequeenofspades/AlphaGOLADZero
|
f057f249bcda21ef570a5d8d8753544bf743aaec
|
[
"Apache-2.0"
] | 1
|
2018-11-01T01:56:26.000Z
|
2018-11-01T01:56:26.000Z
|
bot/game.py
|
thequeenofspades/AlphaGOLADZero
|
f057f249bcda21ef570a5d8d8753544bf743aaec
|
[
"Apache-2.0"
] | 7
|
2018-02-17T00:35:26.000Z
|
2018-06-06T23:55:22.000Z
|
bot/game.py
|
thequeenofspades/AlphaGOLADZero
|
f057f249bcda21ef570a5d8d8753544bf743aaec
|
[
"Apache-2.0"
] | null | null | null |
from sys import stdin, stdout, stderr
import traceback
import time
from player import Player
from field.field import Field
class Game:
def __init__(self):
self.time_per_move = -1
self.timebank = -1
self.last_update = None
self.max_rounds = -1
self.round = 0
self.player_names = []
self.players = {}
self.me = None
self.opponent = None
self.field = Field()
def update(self, data):
# start timer
self.last_update = time.time()
for line in data.split('\n'):
line = line.strip()
if len(line) <= 0:
continue
tokens = line.split()
if tokens[0] == "settings":
self.parse_settings(tokens[1], tokens[2])
elif tokens[0] == "update":
if tokens[1] == "game":
self.parse_game_updates(tokens[2], tokens[3])
else:
self.parse_player_updates(tokens[1], tokens[2], tokens[3])
elif tokens[0] == "action":
self.timebank = int(tokens[2])
# Launching bot logic happens after setup finishes
def parse_settings(self, key, value):
if key == "timebank":
self.timebank = int(value)
elif key == "time_per_move":
self.time_per_move = int(value)
elif key == "player_names":
self.player_names = value.split(',')
self.players = {name: Player(name) for name in self.player_names}
elif key == "your_bot":
self.me = self.players[value]
self.opponent = self.players[[name for name in self.player_names if name != value][0]]
elif key == "your_botid":
self.me.id = value
self.opponent.id = str(2 - (int(value) + 1))
elif key == "field_width":
self.field.width = int(value)
elif key == "field_height":
self.field.height = int(value)
elif key == "max_rounds":
self.max_rounds = int(value)
else:
stderr.write('Cannot parse settings input with key {}'.format(key))
def parse_game_updates(self, key, value):
if key == "round":
self.round = int(value)
elif key == "field":
self.field.parse(value)
else:
stderr.write('Cannot parse game update with key {}'.format(key))
def parse_player_updates(self, player_name, key, value):
player = self.players.get(player_name)
if player is None:
stderr.write('Cannot find player with name {}'.format(player_name))
return
if key == "living_cells":
player.living_cells = int(value)
elif key == "move":
player.previous_move = value
else:
stderr.write('Cannot parse {} update with key {}'.format(player_name, key))
def time_remaining(self):
return self.timebank - int(1000 * (time.clock() - self.last_update))
@staticmethod
def print_move(move):
"""issue an order"""
stdout.write('{}\n'.format(move))
stdout.flush()
def run(self, bot):
"""parse input, update game state and call the bot classes do_turn method"""
not_finished = True
data = ''
while not stdin.closed and not_finished:
try:
current_line = stdin.readline().rstrip('\r\n')
if len(current_line) <= 0:
time.sleep(1)
continue
data += current_line + "\n"
if current_line.lower().startswith("action"):
self.update(data)
move = bot.make_move(self)
self.print_move(move)
data = ''
elif current_line.lower().startswith("quit"):
not_finished = False
except EOFError:
break
except KeyboardInterrupt:
raise
except:
# don't raise error or return so that bot attempts to stay alive
traceback.print_exc(file=stderr)
stderr.flush()
| 33
| 98
| 0.529356
| 4,098
| 0.97017
| 0
| 0
| 133
| 0.031487
| 0
| 0
| 578
| 0.136837
|
4960c8757588886d5cee0b290cbf124fc76beb18
| 552
|
py
|
Python
|
config/ConfigSices.py
|
atosborges00/sereno_bot
|
06bedb02847eff050adeadb6bcc5440bcd2283c3
|
[
"FSFAP"
] | null | null | null |
config/ConfigSices.py
|
atosborges00/sereno_bot
|
06bedb02847eff050adeadb6bcc5440bcd2283c3
|
[
"FSFAP"
] | null | null | null |
config/ConfigSices.py
|
atosborges00/sereno_bot
|
06bedb02847eff050adeadb6bcc5440bcd2283c3
|
[
"FSFAP"
] | null | null | null |
from os import path
from config.ConfigPaths import ConfigPaths
class ConfigSices:
""" Sices platform useful URLs """
LOGIN_URL = 'https://monitoramento.sicessolar.com.br/login'
ANALYTICS_PAGE = 'https://monitoramento.sicessolar.com.br/analytics?und={code}'
""" Sices platform default directories """
RAW_DATA_PATH = path.join(ConfigPaths.RAW_DATA_PATH, "sices")
""" Sices platform options settings """
PREFERENCES = {
'download.default_directory': RAW_DATA_PATH,
'safebrowsing.enabled': 'false'}
| 26.285714
| 83
| 0.697464
| 486
| 0.880435
| 0
| 0
| 0
| 0
| 0
| 0
| 288
| 0.521739
|
4961b43029a0917649b5f3a648e7d599051b3b4f
| 757
|
py
|
Python
|
main.py
|
leonli/codename-gen
|
53a66124184e9691d22bfd7db6274f1d44fe0a75
|
[
"MIT"
] | null | null | null |
main.py
|
leonli/codename-gen
|
53a66124184e9691d22bfd7db6274f1d44fe0a75
|
[
"MIT"
] | null | null | null |
main.py
|
leonli/codename-gen
|
53a66124184e9691d22bfd7db6274f1d44fe0a75
|
[
"MIT"
] | null | null | null |
import click
import random
from pyfiglet import Figlet
from termcolor import colored, cprint
import imagenet
@click.command()
@click.option("--count", default=10, help="Yield number of codenames.")
def codename_gen(count):
"""Enjoy the codenames 🍺"""
imagenet_cls = imagenet.imagenet1000_labels()
f = Figlet(font='slant')
print(f.renderText('Codename Gen'))
c_tag = ['red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white']
for _ in range(count):
print(colored(random.choice(imagenet_cls), random.choice(c_tag)))
cprint('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>', 'green')
cprint('Done! Enjoy the codenames 🍺', 'grey', 'on_green')
if __name__ == '__main__':
codename_gen()
| 32.913043
| 95
| 0.611625
| 0
| 0
| 0
| 0
| 604
| 0.791612
| 0
| 0
| 275
| 0.360419
|
4961cb44515f6694bce4182b84680ae488d272d1
| 14,882
|
py
|
Python
|
binder/plugins/views/userview.py
|
asma-oueslati/django-binder
|
0a16a928664b4be2b2b8e3f5f65c29301f0096fe
|
[
"MIT"
] | null | null | null |
binder/plugins/views/userview.py
|
asma-oueslati/django-binder
|
0a16a928664b4be2b2b8e3f5f65c29301f0096fe
|
[
"MIT"
] | null | null | null |
binder/plugins/views/userview.py
|
asma-oueslati/django-binder
|
0a16a928664b4be2b2b8e3f5f65c29301f0096fe
|
[
"MIT"
] | null | null | null |
import logging
import json
from abc import ABCMeta, abstractmethod
from django.contrib import auth
from django.contrib.auth import update_session_auth_hash, password_validation
from django.contrib.auth.tokens import default_token_generator
from django.core.exceptions import ValidationError, PermissionDenied
from django.http import HttpResponse
from django.utils.decorators import method_decorator
from django.views.decorators.debug import sensitive_post_parameters
from django.views.decorators.cache import never_cache
from django.utils.translation import ugettext as _
from binder.permissions.views import no_scoping_required
from binder.exceptions import BinderForbidden, BinderReadOnlyFieldError, BinderMethodNotAllowed, BinderIsNotDeleted, \
BinderIsDeleted, BinderNotAuthenticated, BinderFieldTypeError, BinderRequestError, BinderValidationError, \
BinderNotFound
from binder.router import list_route, detail_route
from binder.json import JsonResponse
from binder.views import annotate
logger = logging.getLogger(__name__)
class UserBaseMixin:
__metaclass__ = ABCMeta
def respond_with_user(self, request, user_id):
return JsonResponse(
self._get_objs(
annotate(self.get_queryset(request).filter(pk=user_id), request),
request=request,
)[0]
)
class MasqueradeMixin(UserBaseMixin):
__metaclass__ = ABCMeta
@detail_route(name='masquerade')
@no_scoping_required()
def masquerade(self, request, pk=None):
from hijack.helpers import login_user
if request.method != 'POST':
raise BinderMethodNotAllowed()
try:
user = self.model._default_manager.get(pk=pk)
except self.model.DoesNotExist:
raise BinderNotFound()
self._require_model_perm('masquerade', request)
login_user(request, user) # Ignore returned redirect response object
return self.respond_with_user(request, user.id)
@list_route(name='endmasquerade')
@no_scoping_required()
def endmasquerade(self, request):
from hijack.helpers import release_hijack
if request.method != 'POST':
raise BinderMethodNotAllowed()
self._require_model_perm('unmasquerade', request)
release_hijack(request) # Ignore returned redirect response object
return self.respond_with_user(request, request.user.id)
def _logout(self, request):
from hijack.helpers import release_hijack
# Release masquerade on logout if masquerading
try:
release_hijack(request)
except PermissionDenied: # Means we are not hijacked
super()._logout(request)
class UserViewMixIn(UserBaseMixin):
__metaclass__ = ABCMeta
log_request_body = False
token_generator = default_token_generator
default_authentication_backend = None
def _require_model_perm(self, perm_type, request, pk=None):
"""
Overwrite the _require_model_perm, to make sure that you can not modify a superuser as non superuser
We need to be very careful about permission assumptions after this point
"""
# If the user is trying to change a superuser and is not a superuser, disallow
if pk and self.model.objects.get(pk=int(pk)).is_superuser and not request.user.is_superuser:
# Maybe BinderRequestError?
raise BinderForbidden('modify superuser', request.user)
# Everything normal
return super()._require_model_perm(perm_type, request, pk)
def _store__groups(self, obj, field, value, request, pk=None):
"""
Store the groups of the user.
If we get here, the user might not actually have admin permissions;
If the user does not have user change perms, disallow setting groups.
"""
try:
self._require_model_perm('changegroups', request)
return self._store_field(obj, field, value, request, pk=pk)
except BinderForbidden: # convert to read-only error, so the field is ignored
raise BinderReadOnlyFieldError(self.model.__name__, field)
def authenticate(self, request, **kwargs):
return auth.authenticate(request, **kwargs)
def auth_login(self, request, user, backend=None):
return auth.login(request, user, backend=(
backend or
getattr(user, 'backend', None) or
self.default_authentication_backend
))
@method_decorator(sensitive_post_parameters())
@list_route(name='login', unauthenticated=True)
@no_scoping_required()
def login(self, request):
"""
Login the user
Request:
POST user/login/
{
"username": "foo",
"password": "password"
}
Response:
returns the same parameters as GET user/{id}/
"""
if request.method != 'POST':
raise BinderMethodNotAllowed()
try:
decoded = request.body.decode()
body = json.loads(decoded)
username = body.get(self.model.USERNAME_FIELD, '')
password = body.get('password', '')
except Exception:
username = request.POST.get(self.model.USERNAME_FIELD, '')
password = request.POST.get('password', '')
user = self.authenticate(request, **{
self.model.USERNAME_FIELD: username.lower(),
'password': password,
})
self._require_model_perm('login', request)
if user is None:
logger.info('login failed for "{}"'.format(username))
raise BinderNotAuthenticated()
else:
self.auth_login(request, user)
logger.info('login for {}/{}'.format(user.id, user))
return self.respond_with_user(request, user.id)
def _logout(self, request):
auth.logout(request)
@list_route(name='logout')
@no_scoping_required()
def logout(self, request):
"""
Logout the user
Request:
POST /user/logout/
{}
Response:
204
{}
"""
if request.method != 'POST':
raise BinderMethodNotAllowed()
self._require_model_perm('logout', request)
logger.info('logout for {}/{}'.format(request.user.id, request.user))
self._logout(request)
return HttpResponse(status=204)
def get_users(self, request, username):
"""
Given a username, return matching user(s) who should receive a reset.
This allows subclasses to more easily customize the default policies
that prevent inactive users and users with unusable passwords from
resetting their password.
Copied from django.contrib.auth.forms.PasswordResetForm
"""
active_users = self.model._default_manager.filter(**{
self.model.USERNAME_FIELD + '__iexact': username,
'is_active': True,
})
return (u for u in active_users if u.has_usable_password())
def _store__username(self, user, field, value, request, pk=None):
"""
Makes sure the username is always stored as a lowercase
"""
if not isinstance(value, str):
raise BinderFieldTypeError(self.model.__name__, field)
return self._store_field(user, field, value.lower(), request, pk=pk)
def filter_deleted(self, queryset, pk, deleted, request=None):
"""
Can be used to filter deleted users, or unfilter them.
"""
if pk or deleted == 'true':
return queryset
if deleted is None:
return queryset.filter(is_active=True)
if deleted == 'only':
return queryset.filter(is_active=False)
raise BinderRequestError(_('Invalid value: deleted=%s.') % request.GET.get('deleted'))
def soft_delete(self, user, undelete=False, request=None):
"""
Allows the user to be soft deleted, and undeleted. What actually needs to be done on soft deletion
can be implemented in
_after_soft_delete
"""
try:
if not user.is_active and not undelete:
raise BinderIsDeleted()
if not not user.is_active and undelete:
raise BinderIsNotDeleted()
except AttributeError:
raise BinderMethodNotAllowed()
user.is_active = undelete
user.save()
self._after_soft_delete(request, user, undelete)
@list_route(name='reset_request', unauthenticated=True)
@no_scoping_required()
def reset_request(self, request):
"""
Adds an endpoint to do a reset request. Generates a token, and calls the _send_reset_mail callback if the reset
request is successful
Request:
POST user/reset_request/
{
'username': 'foo'
}
Response:
204
{
}
"""
if request.method != 'POST':
raise BinderMethodNotAllowed()
self._require_model_perm('reset_password', request)
decoded = request.body.decode()
try:
body = json.loads(decoded)
except ValueError:
raise BinderRequestError(_('Invalid request body: not a JSON document.'))
logger.info('password reset attempt for {}'.format(body.get(self.model.USERNAME_FIELD, '')))
for user in self.get_users(request, body.get(self.model.USERNAME_FIELD, '').lower()):
token = self.token_generator.make_token(user)
self._send_reset_mail(request, user, token)
return HttpResponse(status=204)
@never_cache
@list_route(name='send_activation_email', unauthenticated=True)
@no_scoping_required()
def send_activation_email(self, request):
"""
Endpoint that can be used to send an activation mail for an user.
Calls the _send_activation_email callback if the user is succesfully activated
Request:
POST
{
"email": "email"
}
Response:
{
"code": code
}
Possible codes:
sent Mail is send sucessfully
already active User is already active, no mail was send
blacklisted User was not activated
"""
if request.method != 'PUT':
raise BinderMethodNotAllowed()
# For lack of a better check
self._require_model_perm('reset_password', request)
decoded = request.body.decode()
try:
body = json.loads(decoded)
except ValueError:
raise BinderRequestError(_('Invalid request body: not a JSON document.'))
logger.info('activation email attempt for {}'.format(body.get('email', '')))
if body.get('email') is None:
raise BinderValidationError({'email': ['missing']})
try:
user = self.model._default_manager.get(email=body.get('email'))
except self.model.DoesNotExist:
raise BinderNotFound()
if user.is_active:
if user.last_login is None:
# TODO: Figure out a way to make this customisable without
# allowing injection of arbitrary URLs (phishing!)
self._send_activation_email(request, user)
response = JsonResponse({'code': 'sent'})
response.status_code = 201
else:
response = JsonResponse({'code': 'already active'})
else:
response = JsonResponse({'code': 'blacklisted'})
response.status_code = 400
return response
@method_decorator(sensitive_post_parameters())
@never_cache
@detail_route(name='activate', unauthenticated=True)
@no_scoping_required()
def activate(self, request, pk=None):
"""
Adds an endpoint to activate an user. Also logs in the user
Request:
PUT user/{id}/activate/
{
"activation_code": string
}
Response:
Same as GET user/{id}/
"""
if request.method != 'PUT':
raise BinderMethodNotAllowed()
self._require_model_perm('activate', request)
decoded = request.body.decode()
try:
body = json.loads(decoded)
except ValueError:
raise BinderRequestError(_('Invalid request body: not a JSON document.'))
errors = {}
for item in ['activation_code']:
if body.get(item) is None:
errors[item] = ['missing']
if len(errors) != 0:
raise BinderValidationError(errors)
try:
user = self.model._default_manager.get(pk=pk)
except (TypeError, ValueError, OverflowError, self.model.DoesNotExist):
user = None
if user is None or not self.token_generator.check_token(user, body.get('activation_code')):
raise BinderNotFound()
logger.info('login for {}/{} via successful activation'.format(user.id, user))
user.is_active = True
user.save()
self.auth_login(request, user)
return self.respond_with_user(request, user.id)
@method_decorator(sensitive_post_parameters())
@never_cache
@detail_route(name='reset_password', unauthenticated=True, methods=['PUT'])
@no_scoping_required()
def reset_password(self, request, pk=None):
"""
Resets the password from an reset code
Request:
POST user/reset_password/
{
"reset_code": str,
"password": str
}
Response:
Same as GET user/{id}/
"""
self._require_model_perm('reset_password', request)
decoded = request.body.decode()
try:
body = json.loads(decoded)
except ValueError:
raise BinderRequestError(_('Invalid request body: not a JSON document.'))
errors = {item: 'missing' for item in ['reset_code', 'password'] if item not in body}
if errors:
raise BinderValidationError(errors)
return self._reset_pass_for_user(request, int(pk), body['reset_code'], body['password'])
def _reset_pass_for_user(self, request, user_id, token, password):
"""
Helper function that actually resets the password for an user
"""
try:
user = self.model._default_manager.get(pk=user_id)
except (TypeError, ValueError, OverflowError, self.model.DoesNotExist):
user = None
if user is None or not self.token_generator.check_token(user, token):
raise BinderNotFound()
logger.info('login for {}/{} via successful password reset'.format(user.id, user))
try:
password_validation.validate_password(password, user)
except ValidationError as ve:
raise BinderValidationError({'password': ve.messages})
user.set_password(password)
user.save()
self.auth_login(request, user)
return self.respond_with_user(request, user.id)
@method_decorator(sensitive_post_parameters())
@never_cache
@list_route(name='change_password')
@no_scoping_required()
def change_password(self, request):
"""
Change the password from an old password
Request:
POST user/change_password/
{
"old_password": str,
"new_password": str
}
Response:
Same as GET user/{id}/
"""
if request.method != 'PUT':
raise BinderMethodNotAllowed()
self._require_model_perm('change_own_password', request)
decoded = request.body.decode()
try:
body = json.loads(decoded)
except ValueError:
raise BinderRequestError(_('Invalid request body: not a JSON document.'))
user = request.user
errors = {}
for item in ['old_password', 'new_password']:
if body.get(item) is None:
errors[item] = ['missing']
if not user.check_password(body.get('old_password')):
errors['old_password'] = ['incorrect']
if len(errors) != 0:
raise BinderValidationError(errors)
password = body.get('new_password')
try:
password_validation.validate_password(password, user)
except ValidationError as ve:
validation_errors = {'new_password': ve.messages}
raise BinderValidationError(validation_errors)
user.set_password(password)
user.save()
logger.info('password changed for {}/{}'.format(user.id, user))
if user == request.user:
"""
No need to change the password of an user that is not our own
"""
update_session_auth_hash(request, user)
return self.respond_with_user(request, user.id)
@abstractmethod
def _after_soft_delete(self, request, user, undelete):
"""
Callback called after an user is softdeleted or softundeleted
"""
pass
@abstractmethod
def _send_reset_mail(self, request, user, token):
"""
Callback to send the actual reset mail using the token.
"""
pass
@abstractmethod
def _send_activation_email(self, request, user):
"""
Callback to send a mail notifying that the user is activated.
"""
pass
| 27.009074
| 118
| 0.73216
| 13,837
| 0.929781
| 0
| 0
| 9,088
| 0.610671
| 0
| 0
| 4,468
| 0.300228
|
4961f5ae237c556abd210443e12861307b7068fa
| 362
|
py
|
Python
|
libra_client/crypto/x25519.py
|
violas-core/violas-client
|
e8798f7d081ac218b78b81fd7eb2f8da92631a16
|
[
"MIT"
] | null | null | null |
libra_client/crypto/x25519.py
|
violas-core/violas-client
|
e8798f7d081ac218b78b81fd7eb2f8da92631a16
|
[
"MIT"
] | null | null | null |
libra_client/crypto/x25519.py
|
violas-core/violas-client
|
e8798f7d081ac218b78b81fd7eb2f8da92631a16
|
[
"MIT"
] | 1
|
2022-01-05T06:49:42.000Z
|
2022-01-05T06:49:42.000Z
|
from libra_client.canoser import DelegateT, BytesT
# Size of a X25519 private key
PRIVATE_KEY_SIZE = 32
# Size of a X25519 public key
PUBLIC_KEY_SIZE = 32
# Size of a X25519 shared secret
SHARED_SECRET_SIZE = 32
class PrivateKey(DelegateT):
delegate_type = BytesT(PRIVATE_KEY_SIZE)
class PublicKey(DelegateT):
delegate_type = BytesT(PUBLIC_KEY_SIZE)
| 22.625
| 50
| 0.787293
| 144
| 0.39779
| 0
| 0
| 0
| 0
| 0
| 0
| 91
| 0.251381
|
49633e3a1fe78865b9e181ac00df94f57d6194b3
| 3,303
|
py
|
Python
|
plenum/test/plugin/demo_plugin/main.py
|
SchwiftyRick/indy-plenum
|
d23b99423eb805971e50446d7e89ada892aa6811
|
[
"Apache-2.0"
] | null | null | null |
plenum/test/plugin/demo_plugin/main.py
|
SchwiftyRick/indy-plenum
|
d23b99423eb805971e50446d7e89ada892aa6811
|
[
"Apache-2.0"
] | 1
|
2021-07-14T17:10:04.000Z
|
2021-07-14T17:10:04.000Z
|
plenum/test/plugin/demo_plugin/main.py
|
SchwiftyRick/indy-plenum
|
d23b99423eb805971e50446d7e89ada892aa6811
|
[
"Apache-2.0"
] | 2
|
2021-02-19T15:36:50.000Z
|
2021-07-20T11:37:54.000Z
|
from plenum.common.constants import DOMAIN_LEDGER_ID
from plenum.server.client_authn import CoreAuthNr
from plenum.test.plugin.demo_plugin import AUCTION_LEDGER_ID
from plenum.test.plugin.demo_plugin.batch_handlers.auction_batch_handler import AuctionBatchHandler
from plenum.test.plugin.demo_plugin.config import get_config
from plenum.test.plugin.demo_plugin.request_handlers.auction_end_handler import AuctionEndHandler
from plenum.test.plugin.demo_plugin.request_handlers.auction_start_handler import AuctionStartHandler
from plenum.test.plugin.demo_plugin.request_handlers.get_bal_handler import GetBalHandler
from plenum.test.plugin.demo_plugin.request_handlers.place_bid_handler import PlaceBidHandler
from plenum.test.plugin.demo_plugin.storage import get_auction_hash_store, \
get_auction_ledger, get_auction_state
def integrate_plugin_in_node(node):
node.config = get_config(node.config)
hash_store = get_auction_hash_store(node.dataLocation)
ledger = get_auction_ledger(node.dataLocation,
node.config.auctionTransactionsFile,
hash_store, node.config)
state = get_auction_state(node.dataLocation,
node.config.auctionStateDbName,
node.config)
if AUCTION_LEDGER_ID not in node.ledger_ids:
node.ledger_ids.append(AUCTION_LEDGER_ID)
node.ledgerManager.addLedger(AUCTION_LEDGER_ID,
ledger,
postTxnAddedToLedgerClbk=node.postTxnFromCatchupAddedToLedger)
node.on_new_ledger_added(AUCTION_LEDGER_ID)
node.register_state(AUCTION_LEDGER_ID, state)
auctions = {}
node.write_manager.register_req_handler(AuctionStartHandler(node.db_manager, auctions))
node.write_manager.register_req_handler(AuctionEndHandler(node.db_manager, auctions))
node.write_manager.register_req_handler(PlaceBidHandler(node.db_manager, auctions))
node.read_manager.register_req_handler(GetBalHandler(node.db_manager))
# FIXME: find a generic way of registering DBs
node.db_manager.register_new_database(lid=AUCTION_LEDGER_ID,
ledger=ledger,
state=state)
node.write_manager.register_batch_handler(AuctionBatchHandler(node.db_manager),
ledger_id=AUCTION_LEDGER_ID,
add_to_begin=True)
node.write_manager.register_batch_handler(node.write_manager.node_reg_handler,
ledger_id=AUCTION_LEDGER_ID)
node.write_manager.register_batch_handler(node.write_manager.primary_reg_handler,
ledger_id=AUCTION_LEDGER_ID)
node.write_manager.register_batch_handler(node.write_manager.audit_b_handler,
ledger_id=AUCTION_LEDGER_ID)
auction_authnr = CoreAuthNr(node.write_manager.txn_types,
node.read_manager.txn_types,
node.action_manager.txn_types,
node.states[DOMAIN_LEDGER_ID])
node.clientAuthNr.register_authenticator(auction_authnr)
return node
| 58.982143
| 101
| 0.693612
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 46
| 0.013927
|
496648c5898f258ebf19c8b06ad31502f0290680
| 5,213
|
py
|
Python
|
biobakery_workflows/document_templates/quality_control_paired_dna_rna.template.py
|
shbrief/biobakery_workflows
|
2037f45caa8e4af9a40b5c1d2886cde15bc00381
|
[
"MIT"
] | 1
|
2020-11-16T20:04:15.000Z
|
2020-11-16T20:04:15.000Z
|
biobakery_workflows/document_templates/quality_control_paired_dna_rna.template.py
|
mlwright97/biobakery_workflows
|
b3e74f25253d7354bebd02936ac25986281e85d6
|
[
"MIT"
] | null | null | null |
biobakery_workflows/document_templates/quality_control_paired_dna_rna.template.py
|
mlwright97/biobakery_workflows
|
b3e74f25253d7354bebd02936ac25986281e85d6
|
[
"MIT"
] | null | null | null |
#+ echo=False
import numpy
from biobakery_workflows import utilities, visualizations, files
from anadama2 import PweaveDocument
document=PweaveDocument()
# get the variables for this document generation task
vars = document.get_vars()
# determine the document format
pdf_format = True if vars["format"] == "pdf" else False
# read in the DNA samples
(dna_paired_columns, dna_orphan_columns), dna_samples, (dna_paired_data, dna_orphan_data) = visualizations.qc_read_counts(document, vars["dna_read_counts"])
# read in the RNA samples
(rna_paired_columns, rna_orphan_columns), rna_samples, (rna_paired_data, rna_orphan_data) = visualizations.qc_read_counts(document, vars["rna_read_counts"])
#' # Quality Control
#' <% visualizations.ShotGun.print_qc_intro_caption("{} DNA and {} RNA ".format(len(dna_samples),len(rna_samples)), rna_paired_columns[2:], paired=True) %>
#+ echo=False
#' ## DNA Samples Quality Control
#' ### DNA Samples Tables of Filtered Reads
#+ echo=False
document.write_table(["# Sample"]+dna_paired_columns, dna_samples, dna_paired_data,
files.ShotGunVis.path("qc_counts_paired",document.data_folder))
table_message=visualizations.show_table_max_rows(document, dna_paired_data, dna_samples,
dna_paired_columns, "DNA Paired end reads", files.ShotGunVis.path("qc_counts_paired"),
format_data_comma=True)
#' <%= table_message %>
#+ echo=False
document.write_table(["# Sample"]+dna_orphan_columns, dna_samples, dna_orphan_data,
files.ShotGunVis.path("qc_counts_orphan",document.data_folder))
table_message=visualizations.show_table_max_rows(document, dna_orphan_data, dna_samples,
dna_orphan_columns, "DNA Orphan reads", files.ShotGunVis.path("qc_counts_orphan"),
format_data_comma=True)
#' <%= table_message %>
#' <% if pdf_format: print("\clearpage") %>
#+ echo=False
# plot the microbial reads ratios
dna_microbial_reads, dna_microbial_labels = utilities.microbial_read_proportion_multiple_databases(
dna_paired_data, dna_paired_columns, dna_orphan_data)
document.write_table(["# Sample"]+dna_microbial_labels, dna_samples,
dna_microbial_reads, files.ShotGunVis.path("microbial_counts",document.data_folder))
table_message=visualizations.show_table_max_rows(document, dna_microbial_reads, dna_samples,
dna_microbial_labels, "DNA microbial read proportion",
files.ShotGunVis.path("microbial_counts"))
#' <%= visualizations.ShotGun.captions["microbial_ratios"] %>
#' <%= table_message %>
#' ### DNA Samples Plots of Filtered Reads
#+ echo=False
document.plot_grouped_barchart(numpy.transpose(dna_paired_data), row_labels=dna_paired_columns,
column_labels=dna_samples, title="DNA Paired end reads", ylabel="Read count (in millions)",
legend_title="Filter", yaxis_in_millions=True)
#+ echo=False
document.plot_grouped_barchart(numpy.transpose(dna_orphan_data), row_labels=dna_orphan_columns,
column_labels=dna_samples, title="DNA Orphan reads", ylabel="Read count (in millions)",
legend_title="Filter", yaxis_in_millions=True)
#' ## RNA Samples Quality Control
#' ### RNA Samples Tables of Filtered Reads
#+ echo=False
document.write_table(["# Sample"]+rna_paired_columns, rna_samples, rna_paired_data,
files.ShotGunVis.path("rna_qc_counts_paired",document.data_folder))
table_message=visualizations.show_table_max_rows(document, rna_paired_data, rna_samples,
rna_paired_columns, "RNA Paired end reads", files.ShotGunVis.path("rna_qc_counts_paired"),
format_data_comma=True)
#' <%= table_message %>
#+ echo=False
document.write_table(["# Sample"]+rna_orphan_columns, rna_samples, rna_orphan_data,
files.ShotGunVis.path("rna_qc_counts_orphan",document.data_folder))
table_message=visualizations.show_table_max_rows(document, rna_orphan_data, rna_samples,
rna_orphan_columns, "RNA Orphan reads", files.ShotGunVis.path("rna_qc_counts_orphan"),
format_data_comma=True)
#' <%= table_message %>
#' <% if pdf_format: print("\clearpage") %>
#+ echo=False
# write and plot the microbial reads ratios
rna_microbial_reads, rna_microbial_labels = utilities.microbial_read_proportion_multiple_databases(
rna_paired_data, rna_paired_columns, rna_orphan_data)
document.write_table(["# Sample"]+rna_microbial_labels, rna_samples,
rna_microbial_reads, files.ShotGunVis.path("rna_microbial_counts",document.data_folder))
table_message=visualizations.show_table_max_rows(document, rna_microbial_reads, rna_samples,
rna_microbial_labels, "RNA microbial read proportion",
files.ShotGunVis.path("rna_microbial_counts"))
#' <%= visualizations.ShotGun.captions["microbial_ratios"] %>
#' <%= table_message %>
#' ### RNA Samples Plots of Filtered Reads
#+ echo=False
document.plot_grouped_barchart(numpy.transpose(rna_paired_data), row_labels=rna_paired_columns,
column_labels=rna_samples, title="RNA Paired end reads", ylabel="Read count (in millions)",
legend_title="Filter", yaxis_in_millions=True)
#+ echo=False
document.plot_grouped_barchart(numpy.transpose(rna_orphan_data), row_labels=rna_orphan_columns,
column_labels=rna_samples, title="RNA Orphan reads", ylabel="Read count (in millions)",
legend_title="Filter", yaxis_in_millions=True)
| 38.330882
| 156
| 0.782851
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,834
| 0.351813
|
49665a0b0e4dd98e4c598bd1960650361ca30dc7
| 1,604
|
py
|
Python
|
Other_notebooks/Shapefile_Demo.py
|
gamedaygeorge/datacube-applications-library
|
1b6314ee3465f9f17930391a4c241e981a9e200e
|
[
"Apache-2.0"
] | null | null | null |
Other_notebooks/Shapefile_Demo.py
|
gamedaygeorge/datacube-applications-library
|
1b6314ee3465f9f17930391a4c241e981a9e200e
|
[
"Apache-2.0"
] | null | null | null |
Other_notebooks/Shapefile_Demo.py
|
gamedaygeorge/datacube-applications-library
|
1b6314ee3465f9f17930391a4c241e981a9e200e
|
[
"Apache-2.0"
] | 1
|
2021-02-25T14:19:05.000Z
|
2021-02-25T14:19:05.000Z
|
# Code behind module for Shapefile_Demo.ipynb
################################
##
## Import Statments
##
################################
# Import standard Python modules
import sys
import datacube
import numpy as np
import fiona
import xarray as xr
from rasterio.features import geometry_mask
import shapely
from shapely.ops import transform
from shapely.geometry import shape
from functools import partial
import pyproj
################################
##
## Function Definitions
##
################################
def shapefile_mask(dataset: xr.Dataset, shapefile) -> np.array:
"""Extracts a mask from a shapefile using dataset latitude and longitude extents.
Args:
dataset (xarray.Dataset): The dataset with the latitude and longitude extents.
shapefile (string): The shapefile to be used for extraction.
Returns:
A boolean mask array.
"""
with fiona.open(shapefile, 'r') as source:
collection = list(source)
geometries = []
for feature in collection:
geom = shape(feature['geometry'])
project = partial(
pyproj.transform,
pyproj.Proj(init=source.crs['init']), # source crs
pyproj.Proj(init='epsg:4326')) # destination crs
geom = transform(project, geom) # apply projection
geometries.append(geom)
geobox = dataset.geobox
mask = geometry_mask(
geometries,
out_shape=geobox.shape,
transform=geobox.affine,
all_touched=True,
invert=True)
return mask
| 28.642857
| 86
| 0.596633
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 632
| 0.394015
|
496664e291d159374bf05460e543caa52023fc6f
| 272
|
py
|
Python
|
Day 02/Day 02.1.py
|
Mraedis/AoC2021
|
4d198f8b227ce8f8f2f3fd2fed9396d7898e9d2a
|
[
"Apache-2.0"
] | 1
|
2021-11-30T22:41:09.000Z
|
2021-11-30T22:41:09.000Z
|
Day 02/Day 02.1.py
|
Mraedis/AoC2021
|
4d198f8b227ce8f8f2f3fd2fed9396d7898e9d2a
|
[
"Apache-2.0"
] | null | null | null |
Day 02/Day 02.1.py
|
Mraedis/AoC2021
|
4d198f8b227ce8f8f2f3fd2fed9396d7898e9d2a
|
[
"Apache-2.0"
] | null | null | null |
linelist = [line for line in open('Day 02.input').readlines()]
hor = 0
dep = 0
for line in linelist:
mov, amount = line.split(' ')
if mov == 'forward':
hor += int(amount)
else:
dep += int(amount) * (-1 if mov == 'up' else 1)
print(hor * dep)
| 20.923077
| 62
| 0.555147
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 30
| 0.110294
|
49670dd8b225f1420aba3e75006477f4bfe63dc1
| 1,879
|
py
|
Python
|
examples/simple1d/driver.py
|
michael-a-hansen/jalapeno
|
2f0d47467a78395e42854e11abebcf1b7721e0be
|
[
"MIT"
] | 1
|
2019-11-09T15:13:38.000Z
|
2019-11-09T15:13:38.000Z
|
examples/simple1d/driver.py
|
michael-a-hansen/jalapeno
|
2f0d47467a78395e42854e11abebcf1b7721e0be
|
[
"MIT"
] | 3
|
2016-10-05T22:57:46.000Z
|
2016-10-06T06:26:22.000Z
|
examples/simple1d/driver.py
|
michael-a-hansen/jalapeno
|
2f0d47467a78395e42854e11abebcf1b7721e0be
|
[
"MIT"
] | null | null | null |
'''
This example provides three examples of a simple plot of 1-D data.
1. a publication-ready single column figure, which is printed to png (600 dpi), pdf, and svg
2. a presentation-ready figure on a black background
Four steps are involved in each figure:
- load/generate the data
- construct a 1d plot (figure, axis, line series) for the spectrum
- size the figure and font
- print the figure to a pdf
'''
import jalapeno.colors.svgcolors as jc
import jalapeno.plots.plots as jpp
import jalapeno.plots.colorscheme as jpc
import numpy as np
# generate the data
x = np.linspace(0, 2*np.pi, 600)
y = np.abs(np.cos(2*x))
# make a 1d plot
fig, ax, line = jpp.make_1d_plot(linecolor=jc.darkorange,
maxx=max(x/np.pi),
maxy=1.01,
xname='x/pi',
yname='cos(2x)')
# plot the data on our 1d plot
line.set_data(x/np.pi,y)
# size the figure and print it to pdf
jpp.SquareFigure().set_size(fig)
jpp.print_fig(fig, 'xy-for-publication', ['pdf', 'png', 'svg'], dpi=600)
# make another 1d plot
fig, ax, line = jpp.make_1d_plot(colorscheme=jpc.FigColors.scheme('black'),
linecolor=jc.coral,
linewidth=4,
showgrid='off',
maxx=max(x/np.pi),
maxy=1.01,
xname='x/pi',
yname='cos(2x)')
# plot the data on our 1d plot
line.set_data(x/np.pi, y)
# size the figure and print it to pdf
jpp.SquareFigure(width=4, fontsize=12).set_size(fig)
jpp.print_fig(fig, 'xy-for-presentation', exts=['pdf']) # way 1, use print_fig and provide exts=['pdf']
jpp.print_fig_to_pdf(fig, 'xy-for-presentation') # way 2, use print_fig_to_pdf
| 33.553571
| 104
| 0.581692
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 800
| 0.425758
|
4968c48330c18edd322258fb79f85333f213b40b
| 2,306
|
py
|
Python
|
process/1_embed_keep_ge.py
|
omarmaddouri/GCNCC_1
|
ec858bbe8246e4af15f7b870ca0ccafdea93d627
|
[
"MIT"
] | 4
|
2020-12-03T11:57:15.000Z
|
2021-12-09T05:20:44.000Z
|
process/1_embed_keep_ge.py
|
alkaidone/GCNCC
|
3270b4c2d48e0090a18a0ab1df3b9fd81627029d
|
[
"MIT"
] | 5
|
2020-01-28T23:14:40.000Z
|
2021-08-25T15:55:23.000Z
|
process/1_embed_keep_ge.py
|
alkaidone/GCNCC
|
3270b4c2d48e0090a18a0ab1df3b9fd81627029d
|
[
"MIT"
] | 3
|
2021-11-23T05:13:27.000Z
|
2021-12-30T08:12:48.000Z
|
from __future__ import division
from __future__ import print_function
from pathlib import Path
import sys
project_path = Path(__file__).resolve().parents[1]
sys.path.append(str(project_path))
from keras.layers import Dense, Activation, Dropout
from keras.models import Model, Sequential
from keras.regularizers import l2
from keras.optimizers import Adam
import keras.backend as K
import numpy as np
import time
import tensorflow as tf
import os
from core.utils import *
from core.layers.graph_cnn_layer import GraphCNN
from sklearn.preprocessing import normalize
# Set random seed
seed = 123
np.random.seed(seed)
tf.random.set_seed(seed)
# Settings
flags = tf.compat.v1.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('dataset', 'brc_microarray_usa', 'Dataset string.')
flags.DEFINE_string('embedding_method', 'ge', 'Name of the embedding method.')
#Check dataset availability
if not os.path.isdir("{}/data/parsed_input/{}".format(project_path, FLAGS.dataset)):
sys.exit("{} dataset is not available under data/parsed_input/".format(FLAGS.dataset))
if not os.path.isdir("{}/data/output/{}/embedding/{}".format(project_path, FLAGS.dataset, FLAGS.embedding_method)):
os.makedirs("{}/data/output/{}/embedding/{}".format(project_path, FLAGS.dataset, FLAGS.embedding_method))
print("--------------------------------------------")
print("--------------------------------------------")
print("Hyper-parameters:")
print("Dataset: {}".format(FLAGS.dataset))
print("Embedding method: {}".format(FLAGS.embedding_method))
print("--------------------------------------------")
print("--------------------------------------------")
# Prepare Data
X, A, Y = load_training_data(dataset=FLAGS.dataset)
Y_train, Y_val, Y_test, train_idx, val_idx, test_idx, train_mask = get_splits_for_learning(Y, dataset=FLAGS.dataset)
# Normalize gene expression
X = normalize(X, norm='l1') #for positive non-zero entries, it's equivalent to: X /= X.sum(1).reshape(-1, 1)
#Save the node emmbeddings
np.savetxt("{}/data/output/{}/embedding/{}/embeddings.txt".format(project_path, FLAGS.dataset, FLAGS.embedding_method), X, delimiter="\t")
print("Embeddings saved in /data/output/{}/embedding/{}/embeddings.txt".format(FLAGS.dataset, FLAGS.embedding_method))
| 38.433333
| 139
| 0.685603
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 808
| 0.35039
|
496956f2c24db98208ca44b218ea029a5dcff3f8
| 832
|
py
|
Python
|
docs/modelserving/detect/aif/germancredit/simulate_predicts.py
|
chinhuang007/website
|
c5324e9ee3e7f202c226836de0aca9ebd33b61b2
|
[
"Apache-2.0"
] | 1,146
|
2019-03-27T21:14:34.000Z
|
2021-09-22T08:36:46.000Z
|
docs/modelserving/detect/aif/germancredit/simulate_predicts.py
|
chinhuang007/website
|
c5324e9ee3e7f202c226836de0aca9ebd33b61b2
|
[
"Apache-2.0"
] | 1,803
|
2019-03-27T22:16:02.000Z
|
2021-09-22T15:27:44.000Z
|
docs/modelserving/detect/aif/germancredit/simulate_predicts.py
|
chinhuang007/website
|
c5324e9ee3e7f202c226836de0aca9ebd33b61b2
|
[
"Apache-2.0"
] | 573
|
2019-03-27T21:14:58.000Z
|
2021-09-20T21:15:52.000Z
|
import sys
import json
import time
import requests
if len(sys.argv) < 3:
raise Exception("No endpoint specified. ")
endpoint = sys.argv[1]
headers = {
'Host': sys.argv[2]
}
with open('input.json') as file:
sample_file = json.load(file)
inputs = sample_file["instances"]
# Split inputs into chunks of size 15 and send them to the predict server
print("Sending prediction requests...")
time_before = time.time()
res = requests.post(endpoint, json={"instances": inputs}, headers=headers)
for x in range(0, len(inputs), 15):
query_inputs = inputs[x: x+20]
payload = {"instances": query_inputs}
res = requests.post(endpoint, json=payload, headers=headers)
print(res)
if not res.ok:
res.raise_for_status()
print("TIME TAKEN: ", time.time() - time_before)
print("Last response: ", res.json())
| 25.212121
| 74
| 0.689904
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 212
| 0.254808
|
496a50df8aab51c7339b1ff94cb179d8c9744ace
| 1,272
|
py
|
Python
|
sdc/ysdc_dataset_api/utils/serialization.py
|
sty61010/shifts
|
d3bb3086d8f2581f74644585701f4b1db4338483
|
[
"Apache-2.0"
] | 156
|
2021-07-16T08:54:39.000Z
|
2022-03-24T11:49:36.000Z
|
sdc/ysdc_dataset_api/utils/serialization.py
|
sty61010/shifts
|
d3bb3086d8f2581f74644585701f4b1db4338483
|
[
"Apache-2.0"
] | 18
|
2021-07-21T14:02:46.000Z
|
2022-02-26T04:07:12.000Z
|
sdc/ysdc_dataset_api/utils/serialization.py
|
sty61010/shifts
|
d3bb3086d8f2581f74644585701f4b1db4338483
|
[
"Apache-2.0"
] | 41
|
2021-07-21T05:38:07.000Z
|
2022-01-13T15:25:51.000Z
|
import io
import zlib
import numpy as np
def maybe_compress(str, compress):
return zlib.compress(str) if compress else str
def maybe_decompress(str, decompress):
return zlib.decompress(str) if decompress else str
def serialize_numpy(arr: np.ndarray, compress: bool = False) -> str:
"""Serializes numpy array to string with optional zlib compression.
Args:
arr (np.ndarray): Numpy array to serialize.
compress (bool, optional): Whether to compress resulting string with zlib or not.
Defaults to False.
Returns:
str: serialized string
"""
buf = io.BytesIO()
assert isinstance(arr, np.ndarray)
np.save(buf, arr)
result = buf.getvalue()
return maybe_compress(result, compress)
def deserialize_numpy(serialized_string: str, decompress: bool = False) -> np.ndarray:
"""Deserializes numpy array from compressed string.
Args:
serialized_string (str): Serialized numpy array
decompress (bool, optional): Whether to decompress string with zlib before laoding.
Defaults to False.
Returns:
np.ndarray: deserialized numpy array
"""
str = maybe_decompress(serialized_string, decompress)
buf = io.BytesIO(str)
return np.load(buf)
| 27.06383
| 91
| 0.687893
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 612
| 0.481132
|
496b0cdd9c9c0a2581d8be6db775211985c0614c
| 278
|
py
|
Python
|
hubspot/discovery/crm/extensions/videoconferencing/discovery.py
|
fakepop/hubspot-api-python
|
f04103a09f93f5c26c99991b25fa76801074f3d3
|
[
"Apache-2.0"
] | 117
|
2020-04-06T08:22:53.000Z
|
2022-03-18T03:41:29.000Z
|
hubspot/discovery/crm/extensions/videoconferencing/discovery.py
|
fakepop/hubspot-api-python
|
f04103a09f93f5c26c99991b25fa76801074f3d3
|
[
"Apache-2.0"
] | 62
|
2020-04-06T16:21:06.000Z
|
2022-03-17T16:50:44.000Z
|
hubspot/discovery/crm/extensions/videoconferencing/discovery.py
|
fakepop/hubspot-api-python
|
f04103a09f93f5c26c99991b25fa76801074f3d3
|
[
"Apache-2.0"
] | 45
|
2020-04-06T16:13:52.000Z
|
2022-03-30T21:33:17.000Z
|
import hubspot.crm.extensions.videoconferencing as api_client
from ....discovery_base import DiscoveryBase
class Discovery(DiscoveryBase):
@property
def settings_api(self) -> api_client.SettingsApi:
return self._configure_api_client(api_client, "SettingsApi")
| 30.888889
| 68
| 0.78777
| 168
| 0.604317
| 0
| 0
| 132
| 0.47482
| 0
| 0
| 13
| 0.046763
|
496b6450d30926a47dd2ad486b0b0f71fa6e56dd
| 10,910
|
py
|
Python
|
train_margin.py
|
youmingdeng/DMLPlayground
|
37070c10278597a4413303061d60d69ce2c4f87e
|
[
"Apache-2.0"
] | 1
|
2021-11-11T16:05:56.000Z
|
2021-11-11T16:05:56.000Z
|
train_margin.py
|
youmingdeng/DMLPlayground
|
37070c10278597a4413303061d60d69ce2c4f87e
|
[
"Apache-2.0"
] | null | null | null |
train_margin.py
|
youmingdeng/DMLPlayground
|
37070c10278597a4413303061d60d69ce2c4f87e
|
[
"Apache-2.0"
] | 1
|
2020-04-01T04:50:36.000Z
|
2020-04-01T04:50:36.000Z
|
from __future__ import division
import logging
import mxnet as mx
import numpy as np
from mxnet import autograd as ag, nd
from mxnet import gluon
from tqdm import tqdm
from common.evaluate import evaluate
from common.parser import TrainingParser
from common.utils import average_results, format_results, get_context, parse_steps, get_lr, append_postfix
from dataset import get_dataset_iterator, get_dataset
from dataset.dataloader import DatasetIterator
from models import get_feature_model
from models.marginmodels import MarginNet, MarginLoss
def parse_args():
parser = TrainingParser(description='Distance metric learning with marginloss and distance-weighted sampling.',
default_logfile='train_margin.log', default_model_prefix='margin_loss_model')
parser.add_argument('--batch-size', type=int, default=125,
help='Number of samples in a batch per compute unit. Default is 125.'
'Must be divisible with batch-k.')
parser.add_argument('--batch-k', type=int, default=5,
help='number of images per class in a batch. default is 5.')
parser.add_argument('--epochs', type=int, default=20,
help='number of training epochs. default is 20.')
parser.add_argument('--lr', type=float, default=0.0001,
help='learning rate. default is 0.0001.')
parser.add_argument('--lr-beta', type=float, default=0.1,
help='learning rate for the beta in margin based loss. default is 0.1.')
parser.add_argument('--margin', type=float, default=0.2,
help='margin for the margin based loss. default is 0.2.')
parser.add_argument('--beta', type=float, default=1.2,
help='initial value for beta. default is 1.2.')
parser.add_argument('--nu', type=float, default=0.0,
help='regularization parameter for beta. default is 0.0.')
parser.add_argument('--factor', type=float, default=0.5,
help='learning rate schedule factor. default is 0.5.')
parser.add_argument('--steps', type=str, default='12,14,16,18',
help='epochs to update learning rate. default is 12,14,16,18.')
parser.add_argument('--wd', type=float, default=0.00001,
help='weight decay rate. default is 0.00001.')
parser.add_argument('--iteration-per-epoch', type=int, default=200,
help='Number of iteration per epoch. default=200.')
opt = parser.parse_args()
if opt.logfile.lower() != 'none':
logging.basicConfig(filename=append_postfix(opt.logfile, opt.log_postfix), level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler())
return opt
def validate(net, val_data, ctx, use_threads=True):
"""Test a model."""
outputs = []
labels = []
ctx_cpu = mx.cpu()
for batch in tqdm(val_data, desc='Computing test embeddings'):
data = mx.gluon.utils.split_and_load(batch[0], ctx_list=ctx, batch_axis=0, even_split=False)
label = mx.gluon.utils.split_and_load(batch[1], ctx_list=ctx, batch_axis=0, even_split=False)
for x in data:
outputs.append(net(x).as_in_context(ctx_cpu))
labels += [l.as_in_context(ctx_cpu) for l in label]
outputs = mx.nd.concatenate(outputs, axis=0)
labels = mx.nd.concatenate(labels, axis=0)
return evaluate(outputs, labels, val_data._dataset.num_classes(), use_threads=use_threads)
def train(net, beta, opt, train_dataloader, val_dataloader, batch_size, context, run_id):
"""Training function."""
if not opt.skip_pretrain_validation:
validation_results = validate(net, val_dataloader, context, use_threads=opt.num_workers > 0)
for name, val_acc in validation_results:
logging.info('Pre-train validation: %s=%f' % (name, val_acc))
steps = parse_steps(opt.steps, opt.epochs, logging)
opt_options = {'learning_rate': opt.lr, 'wd': opt.wd}
if opt.optimizer == 'sgd':
opt_options['momentum'] = 0.9
if opt.optimizer == 'adam':
opt_options['epsilon'] = 1e-7
trainer = gluon.Trainer(net.collect_params(), opt.optimizer, opt_options, kvstore=opt.kvstore)
train_beta = not isinstance(beta, float)
if train_beta:
# Jointly train class-specific beta
beta.initialize(mx.init.Constant(opt.beta), ctx=context)
trainer_beta = gluon.Trainer(beta.collect_params(), 'sgd',
{'learning_rate': opt.lr_beta, 'momentum': 0.9}, kvstore=opt.kvstore)
loss = MarginLoss(batch_size, opt.batch_k, beta, margin=opt.margin, nu=opt.nu, train_beta=train_beta)
if not opt.disable_hybridize:
loss.hybridize()
best_results = [] # R@1, NMI
for epoch in range(1, opt.epochs + 1):
prev_loss, cumulative_loss = 0.0, 0.0
# Learning rate schedule.
trainer.set_learning_rate(get_lr(opt.lr, epoch, steps, opt.factor))
logging.info('Epoch %d learning rate=%f', epoch, trainer.learning_rate)
if train_beta:
trainer_beta.set_learning_rate(get_lr(opt.lr_beta, epoch, steps, opt.factor))
logging.info('Epoch %d beta learning rate=%f', epoch, trainer_beta.learning_rate)
p_bar = tqdm(train_dataloader, desc='[Run %d/%d] Epoch %d' % (run_id, opt.number_of_runs, epoch),
total=opt.iteration_per_epoch)
for batch in p_bar:
data = gluon.utils.split_and_load(batch[0][0], ctx_list=context, batch_axis=0)
label = gluon.utils.split_and_load(batch[1][0].astype('float32'), ctx_list=context, batch_axis=0)
Ls = []
with ag.record():
for x, y in zip(data, label):
embedings = net(x)
L = loss(embedings, y)
Ls.append(L)
cumulative_loss += nd.mean(L).asscalar()
for L in Ls:
L.backward()
trainer.step(batch[0].shape[1])
if opt.lr_beta > 0.0:
trainer_beta.step(batch[0].shape[1])
p_bar.set_postfix({'loss': cumulative_loss - prev_loss})
prev_loss = cumulative_loss
logging.info('[Epoch %d] training loss=%f' % (epoch, cumulative_loss))
validation_results = validate(net, val_dataloader, context, use_threads=opt.num_workers > 0)
for name, val_acc in validation_results:
logging.info('[Epoch %d] validation: %s=%f' % (epoch, name, val_acc))
if (len(best_results) == 0) or (validation_results[0][1] > best_results[0][1]):
best_results = validation_results
if opt.save_model_prefix.lower() != 'none':
filename = '%s.params' % opt.save_model_prefix
logging.info('Saving %s.' % filename)
net.save_parameters(filename)
logging.info('New best validation: R@1: %f NMI: %f' % (best_results[0][1], best_results[-1][1]))
return best_results
def train_margin(opt):
logging.info(opt)
# Set random seed
mx.random.seed(opt.seed)
np.random.seed(opt.seed)
# Setup computation context
context = get_context(opt.gpus, logging)
# Adjust batch size to each compute context
batch_size = opt.batch_size * len(context)
run_results = []
# Get model
if opt.model == 'inception-bn':
feature_net, feature_params = get_feature_model(opt.model, ctx=context)
feature_net.collect_params().load(feature_params, ctx=context, ignore_extra=True)
data_shape = 224
scale_image_data = False
elif opt.model == 'resnet50_v2':
feature_params = None
feature_net = mx.gluon.model_zoo.vision.resnet50_v2(pretrained=True, ctx=context).features
data_shape = 224
scale_image_data = True
else:
raise RuntimeError('Unsupported model: %s' % opt.model)
net = MarginNet(feature_net, opt.embed_dim)
if opt.model == 'resnet50_v2':
# Use a smaller learning rate for pre-trained convolutional layers.
for v in net.base_net.collect_params().values():
if 'conv' in v.name:
setattr(v, 'lr_mult', 0.01)
# Get data iterators
train_dataset, val_dataset = get_dataset(opt.dataset, opt.data_path, data_shape=data_shape, use_crops=opt.use_crops,
use_aug=True, scale_image_data=scale_image_data)
train_dataiter, _ = get_dataset_iterator(opt.dataset, opt.data_path, batch_k=opt.batch_k, batch_size=batch_size,
data_shape=data_shape, use_crops=opt.use_crops,
scale_image_data=scale_image_data, batchify=False)
train_dataloader = mx.gluon.data.DataLoader(DatasetIterator(train_dataiter, opt.iteration_per_epoch, 'next'),
batch_size=1, shuffle=False, num_workers=opt.num_workers,
last_batch='keep')
val_dataloader = mx.gluon.data.DataLoader(val_dataset, batch_size=opt.batch_size, shuffle=False,
num_workers=opt.num_workers, last_batch='keep')
logging.info('Training with %d classes, validating with %d classes' % (
train_dataset.num_classes(), val_dataset.num_classes()))
# main run loop for multiple training runs
for run in range(1, opt.number_of_runs + 1):
logging.info('Starting run %d/%d' % (run, opt.number_of_runs))
# Re-init embedding layers and reload pretrained layers
if opt.model == 'inception-bn':
net.init(mx.init.Xavier(magnitude=0.2), ctx=context, init_basenet=False)
net.base_net.collect_params().load(feature_params, ctx=context, ignore_extra=True)
elif opt.model == 'resnet50_v2':
net.init(mx.init.Xavier(magnitude=2), ctx=context, init_basenet=False)
net.base_net = mx.gluon.model_zoo.vision.resnet50_v2(pretrained=True, ctx=context).features
else:
raise RuntimeError('Unknown model type: %s' % opt.model)
if not opt.disable_hybridize:
net.hybridize()
if opt.lr_beta > 0.0:
logging.info('Learning beta margin')
beta = mx.gluon.nn.Embedding(train_dataset.num_classes(), 1)
else:
beta = opt.beta
run_result = train(net, beta, opt, train_dataloader, val_dataloader, batch_size, context, run)
run_results.append(run_result)
logging.info('Run %d finished with %f' % (run, run_result[0][1]))
logging.info(
'Average validation of %d runs:\n%s' % (opt.number_of_runs, format_results(average_results(run_results))))
if __name__ == '__main__':
train_margin(parse_args())
| 44.530612
| 120
| 0.634372
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,949
| 0.178643
|
496d5dcb74bbf5f2fa198d1e5b24c0ea5fec7ece
| 6,187
|
py
|
Python
|
doc/tools/doc_merge.py
|
N0hbdy/godot
|
d4a222cd9d849a63f0535f70cbf78700bc5c815b
|
[
"CC-BY-3.0",
"Apache-2.0",
"MIT"
] | 39
|
2018-12-17T07:11:37.000Z
|
2021-09-28T10:02:45.000Z
|
doc/tools/doc_merge.py
|
N0hbdy/godot
|
d4a222cd9d849a63f0535f70cbf78700bc5c815b
|
[
"CC-BY-3.0",
"Apache-2.0",
"MIT"
] | 38
|
2021-07-29T01:15:35.000Z
|
2022-03-20T01:01:28.000Z
|
doc/tools/doc_merge.py
|
N0hbdy/godot
|
d4a222cd9d849a63f0535f70cbf78700bc5c815b
|
[
"CC-BY-3.0",
"Apache-2.0",
"MIT"
] | 3
|
2021-09-06T18:28:23.000Z
|
2021-09-11T11:59:54.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import xml.etree.ElementTree as ET
tree = ET.parse(sys.argv[1])
old_doc = tree.getroot()
tree = ET.parse(sys.argv[2])
new_doc = tree.getroot()
f = file(sys.argv[3], "wb")
tab = 0
old_classes = {}
def write_string(_f, text, newline=True):
for t in range(tab):
_f.write("\t")
_f.write(text)
if (newline):
_f.write("\n")
def escape(ret):
ret = ret.replace("&", "&")
ret = ret.replace("<", ">")
ret = ret.replace(">", "<")
ret = ret.replace("'", "'")
ret = ret.replace("\"", """)
return ret
def inc_tab():
global tab
tab += 1
def dec_tab():
global tab
tab -= 1
write_string(f, '<?xml version="1.0" encoding="UTF-8" ?>')
write_string(f, '<doc version="' + new_doc.attrib["version"] + '">')
def get_tag(node, name):
tag = ""
if (name in node.attrib):
tag = ' ' + name + '="' + escape(node.attrib[name]) + '" '
return tag
def find_method_descr(old_class, name):
methods = old_class.find("methods")
if(methods != None and len(list(methods)) > 0):
for m in list(methods):
if (m.attrib["name"] == name):
description = m.find("description")
if (description != None and description.text.strip() != ""):
return description.text
return None
def find_signal_descr(old_class, name):
signals = old_class.find("signals")
if(signals != None and len(list(signals)) > 0):
for m in list(signals):
if (m.attrib["name"] == name):
description = m.find("description")
if (description != None and description.text.strip() != ""):
return description.text
return None
def find_constant_descr(old_class, name):
if (old_class is None):
return None
constants = old_class.find("constants")
if(constants != None and len(list(constants)) > 0):
for m in list(constants):
if (m.attrib["name"] == name):
if (m.text.strip() != ""):
return m.text
return None
def write_class(c):
class_name = c.attrib["name"]
print("Parsing Class: " + class_name)
if (class_name in old_classes):
old_class = old_classes[class_name]
else:
old_class = None
category = get_tag(c, "category")
inherits = get_tag(c, "inherits")
write_string(f, '<class name="' + class_name + '" ' + category + inherits + '>')
inc_tab()
write_string(f, "<brief_description>")
if (old_class != None):
old_brief_descr = old_class.find("brief_description")
if (old_brief_descr != None):
write_string(f, escape(old_brief_descr.text.strip()))
write_string(f, "</brief_description>")
write_string(f, "<description>")
if (old_class != None):
old_descr = old_class.find("description")
if (old_descr != None):
write_string(f, escape(old_descr.text.strip()))
write_string(f, "</description>")
methods = c.find("methods")
if(methods != None and len(list(methods)) > 0):
write_string(f, "<methods>")
inc_tab()
for m in list(methods):
qualifiers = get_tag(m, "qualifiers")
write_string(f, '<method name="' + escape(m.attrib["name"]) + '" ' + qualifiers + '>')
inc_tab()
for a in list(m):
if (a.tag == "return"):
typ = get_tag(a, "type")
write_string(f, '<return' + typ + '>')
write_string(f, '</return>')
elif (a.tag == "argument"):
default = get_tag(a, "default")
write_string(f, '<argument index="' + a.attrib["index"] + '" name="' + escape(a.attrib["name"]) + '" type="' + a.attrib["type"] + '"' + default + '>')
write_string(f, '</argument>')
write_string(f, '<description>')
if (old_class != None):
old_method_descr = find_method_descr(old_class, m.attrib["name"])
if (old_method_descr):
write_string(f, escape(escape(old_method_descr.strip())))
write_string(f, '</description>')
dec_tab()
write_string(f, "</method>")
dec_tab()
write_string(f, "</methods>")
signals = c.find("signals")
if(signals != None and len(list(signals)) > 0):
write_string(f, "<signals>")
inc_tab()
for m in list(signals):
write_string(f, '<signal name="' + escape(m.attrib["name"]) + '">')
inc_tab()
for a in list(m):
if (a.tag == "argument"):
write_string(f, '<argument index="' + a.attrib["index"] + '" name="' + escape(a.attrib["name"]) + '" type="' + a.attrib["type"] + '">')
write_string(f, '</argument>')
write_string(f, '<description>')
if (old_class != None):
old_signal_descr = find_signal_descr(old_class, m.attrib["name"])
if (old_signal_descr):
write_string(f, escape(old_signal_descr.strip()))
write_string(f, '</description>')
dec_tab()
write_string(f, "</signal>")
dec_tab()
write_string(f, "</signals>")
constants = c.find("constants")
if(constants != None and len(list(constants)) > 0):
write_string(f, "<constants>")
inc_tab()
for m in list(constants):
write_string(f, '<constant name="' + escape(m.attrib["name"]) + '" value="' + m.attrib["value"] + '">')
old_constant_descr = find_constant_descr(old_class, m.attrib["name"])
if (old_constant_descr):
write_string(f, escape(old_constant_descr.strip()))
write_string(f, "</constant>")
dec_tab()
write_string(f, "</constants>")
dec_tab()
write_string(f, "</class>")
for c in list(old_doc):
old_classes[c.attrib["name"]] = c
for c in list(new_doc):
write_class(c)
write_string(f, '</doc>\n')
| 28.643519
| 170
| 0.537902
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,014
| 0.163892
|
496d668dab143daad188848fbd26c751e580633a
| 357
|
py
|
Python
|
contentcuration/contentcuration/migrations/0059_merge.py
|
Tlazypanda/studio
|
cd1c2f169c705027cdd808cbbcae907d0a9b21d2
|
[
"MIT"
] | 1
|
2019-03-30T18:14:25.000Z
|
2019-03-30T18:14:25.000Z
|
contentcuration/contentcuration/migrations/0059_merge.py
|
Tlazypanda/studio
|
cd1c2f169c705027cdd808cbbcae907d0a9b21d2
|
[
"MIT"
] | 4
|
2016-05-06T17:19:30.000Z
|
2019-03-15T01:51:24.000Z
|
contentcuration/contentcuration/migrations/0059_merge.py
|
Tlazypanda/studio
|
cd1c2f169c705027cdd808cbbcae907d0a9b21d2
|
[
"MIT"
] | 4
|
2016-10-18T22:49:08.000Z
|
2019-09-17T11:20:51.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2017-03-29 19:12
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('contentcuration', '0058_auto_20170223_1636'),
('contentcuration', '0057_assessmentitem_deleted'),
]
operations = [
]
| 21
| 59
| 0.680672
| 208
| 0.582633
| 0
| 0
| 0
| 0
| 0
| 0
| 158
| 0.442577
|
496df5ac2b816d0d93ed95d0c8119c0af62b55d9
| 91
|
py
|
Python
|
controller/ORCA_CLEAN/execute.py
|
nestorcalvo/Backend-AudioClean
|
7edb373c518193bc5643e9524d78d9ba32163b3f
|
[
"MIT"
] | null | null | null |
controller/ORCA_CLEAN/execute.py
|
nestorcalvo/Backend-AudioClean
|
7edb373c518193bc5643e9524d78d9ba32163b3f
|
[
"MIT"
] | null | null | null |
controller/ORCA_CLEAN/execute.py
|
nestorcalvo/Backend-AudioClean
|
7edb373c518193bc5643e9524d78d9ba32163b3f
|
[
"MIT"
] | null | null | null |
from predict import predict
if __name__ == "__main__":
# predict()
print("A ")
| 15.166667
| 27
| 0.604396
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 28
| 0.307692
|
496f6fa945313ae8eb812d0256476b19fbb908f6
| 174
|
py
|
Python
|
fperms_iscore/main.py
|
druids/django-fperms-iscore
|
8e919cdc70ed57e0eb6407469de9ef2441ae06ad
|
[
"MIT"
] | 1
|
2019-10-07T12:40:38.000Z
|
2019-10-07T12:40:38.000Z
|
fperms_iscore/main.py
|
druids/django-fperms-iscore
|
8e919cdc70ed57e0eb6407469de9ef2441ae06ad
|
[
"MIT"
] | 3
|
2019-08-09T14:10:21.000Z
|
2022-02-01T13:48:01.000Z
|
fperms_iscore/main.py
|
druids/django-fperms-iscore
|
8e919cdc70ed57e0eb6407469de9ef2441ae06ad
|
[
"MIT"
] | null | null | null |
from is_core.main import DjangoUiRestCore
from fperms_iscore.mixins import PermCoreMixin
class PermDjangoUiRestCore(PermCoreMixin, DjangoUiRestCore):
abstract = True
| 19.333333
| 60
| 0.833333
| 81
| 0.465517
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
496f9fb09ed8ca073a1b323b69ca4902f734d230
| 1,269
|
py
|
Python
|
distanceCalc.py
|
jmoehler/CityDistance
|
0a7eb898db8ea0dbada43239652ae4aad935dda3
|
[
"MIT"
] | null | null | null |
distanceCalc.py
|
jmoehler/CityDistance
|
0a7eb898db8ea0dbada43239652ae4aad935dda3
|
[
"MIT"
] | null | null | null |
distanceCalc.py
|
jmoehler/CityDistance
|
0a7eb898db8ea0dbada43239652ae4aad935dda3
|
[
"MIT"
] | null | null | null |
from math import cos, acos, pi, sqrt, sin
class City:
def __init__(self, name, lat, lon, temp):
self.name = name
self.lat = lat
self.lon = lon
self.temp = temp
def describe(self):
print("Die Koordinaten von %s sind %f Lat und %f Lon. Die Temperatur beträgt %.1f Grad Celcius." %(self.name, self.lat, self.lon, self.temp))
def info(self):
print("Die Koordinaten von %s sind %.1f Lat und %.1f Lon." %(self.name, self.lat, self.lon))
def __str__(self):
return "Die Koordinaten von %s sind %.1f Lat und %.1f Lon." %(self.name, self.lat, self.lon)
def __repr__(self):
return self.__str__()
def diffGamma(alpha, beta):
# differenz von winkel alpha zu Winkel beta
dGamma = alpha - beta
dGammaRad = pi / 180 * dGamma
# Erdradius in km
r = 6378
# länge auf lat berechnen
return r*sqrt(2*(1-cos(dGammaRad)))
def distanceBase(dilat, dilon):
# insgesammte länge berechnen
return sqrt(dilon**2 + dilat**2)
def distance(city1, city2):
dilat = diffGamma(city1.lat, city2.lat)
dilon = diffGamma(city1.lon, city2.lon)
return distanceBase(dilat, dilon)
def tempDiff(city1, city2):
return abs(city1.temp - city2.temp)
| 34.297297
| 149
| 0.624901
| 627
| 0.492925
| 0
| 0
| 0
| 0
| 0
| 0
| 326
| 0.256289
|
496fe4328017b0a5588279aa7e57db6731bb4964
| 95
|
py
|
Python
|
zoo/auditing/apps.py
|
uliana291/the-zoo
|
a15a4162c39553abe91224f4feff5d3b66f9413e
|
[
"MIT"
] | 90
|
2018-11-20T10:58:24.000Z
|
2022-02-19T16:12:46.000Z
|
zoo/auditing/apps.py
|
uliana291/the-zoo
|
a15a4162c39553abe91224f4feff5d3b66f9413e
|
[
"MIT"
] | 348
|
2018-11-21T09:22:31.000Z
|
2021-11-03T13:45:08.000Z
|
zoo/auditing/apps.py
|
aexvir/the-zoo
|
7816afb9a0a26c6058b030b4a987c73e952d92bd
|
[
"MIT"
] | 11
|
2018-12-08T18:42:07.000Z
|
2021-02-21T06:27:58.000Z
|
from django.apps import AppConfig
class AuditingConfig(AppConfig):
name = "zoo.auditing"
| 15.833333
| 33
| 0.757895
| 58
| 0.610526
| 0
| 0
| 0
| 0
| 0
| 0
| 14
| 0.147368
|
49706257061fd5cb42e071e2e21ada1c26eefe8c
| 593
|
py
|
Python
|
graviteeio_cli/commands/apim/apis/definition.py
|
Shaker5191/graviteeio-cli
|
318748bb8e631743ea58afaee24333249ca3d227
|
[
"Apache-2.0"
] | null | null | null |
graviteeio_cli/commands/apim/apis/definition.py
|
Shaker5191/graviteeio-cli
|
318748bb8e631743ea58afaee24333249ca3d227
|
[
"Apache-2.0"
] | null | null | null |
graviteeio_cli/commands/apim/apis/definition.py
|
Shaker5191/graviteeio-cli
|
318748bb8e631743ea58afaee24333249ca3d227
|
[
"Apache-2.0"
] | null | null | null |
import click
from .definition_group.apply import apply
from .definition_group.diff import diff
from .definition_group.generate import generate
from .definition_group.create import create
# from .definition_group.lint import lint
@click.group(short_help="Manage API definition configuration")
@click.pass_context
def definition(ctx):
"""This group allow handling API definition commands from templating and value files"""
pass
definition.add_command(apply)
definition.add_command(diff)
definition.add_command(create)
definition.add_command(generate)
# definition.add_command(lint)
| 26.954545
| 91
| 0.819562
| 0
| 0
| 0
| 0
| 204
| 0.344013
| 0
| 0
| 195
| 0.328836
|
497231bff7e8e9d345553a23f55adb1bd3c5a759
| 1,761
|
py
|
Python
|
graphx.py
|
clever-username/baseball-card-inventory
|
9940ba746072892961b7ade586e63f7deb26d2e6
|
[
"MIT"
] | 1
|
2021-05-18T21:32:43.000Z
|
2021-05-18T21:32:43.000Z
|
graphx.py
|
clever-username/baseball-card-inventory
|
9940ba746072892961b7ade586e63f7deb26d2e6
|
[
"MIT"
] | null | null | null |
graphx.py
|
clever-username/baseball-card-inventory
|
9940ba746072892961b7ade586e63f7deb26d2e6
|
[
"MIT"
] | 2
|
2015-05-18T14:52:01.000Z
|
2015-05-19T18:21:51.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Simple color picker program."""
BANNER = """ .::::::::::::::::::::::::::::::::::::::::::::::::.
.. .... ..
.. ...... ... ..
|S .F.Cards.. F.G ia
nt sS.F.Gi||BASE BAL LB
AS EBALLBASEBA||S .F. Gi
an tsS.F.Giants S. F.
Gi ||BASEBALLBA SE BA
LL BASEBA||N.Y.Yankees.F .Gia nt
sS .F.Gi||BASEBALLBASEBALLBASEB A|
|S .F.MetsS.F.GiantsS.F.Gi||BASE BA
LL BA SEBALLBASEBA||S.T.L.Cards.Reds S.
F. Gi||B ASEBALLBASEBALLBASEBA||S.F.GiantsS.F .G
ia nt sS.F.Gi||BASEBALLBASEBALLBASEBA||S.F .G
ia ntsT.B.Rayss.F.Gi||BASEBALL BA
S EBALLBASEBA|'`''''''''''' S
:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
____ ____ ____ _ ____ ____ ____
| __ )| __ ) / ___| / \ | _ \| _ \/ ___|
| _ \| _ \ _____| | / _ \ | |_) | | | \___ \
| |_) | |_) |_____| |___ / ___ \| _ <| |_| |___) |
|____/|____/ \____/_/ \_|_| \_|____/|____/
"""
| 60.724138
| 76
| 0.244747
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,747
| 0.99205
|
4972f556700ff0374ba0d495d120ef3679c33357
| 1,176
|
py
|
Python
|
src/examples/colors.py
|
schneiderfelipe/kay
|
a7bf69e3bbd1b845286667b20eb1fba88faf9ea4
|
[
"MIT"
] | 14
|
2021-11-18T14:56:48.000Z
|
2022-03-26T08:02:13.000Z
|
src/examples/colors.py
|
getcuia/cuia
|
685d258b3cb366d40100e6a563661b307aef5ae3
|
[
"MIT"
] | 8
|
2021-11-25T13:47:12.000Z
|
2022-03-25T12:01:09.000Z
|
src/examples/colors.py
|
schneiderfelipe/kay
|
a7bf69e3bbd1b845286667b20eb1fba88faf9ea4
|
[
"MIT"
] | null | null | null |
"""An example of using colors module."""
import asyncio
from typing import Text
import cuia
class ColorfulExample(cuia.Store):
"""A store class that shows how to use colors."""
def __str__(self) -> Text:
"""Show me some colors."""
res = "Colors (backgrounds):\n\n"
res += "\033[40mBlack!\n"
res += "\033[41mRed!\n"
res += "\033[42mGreen!\n"
res += "\033[43mYellow!\n"
res += "\033[44mBlue!\n"
res += "\033[45mMagenta!\n"
res += "\033[46mCyan!\n"
res += "\033[47mWhite!\n"
res += "\033[m\n"
res += "Colors (foregrounds):\n\n"
res += "\033[38;5;240mBlack!\n" # some medium shade of gray
res += "\033[91mRed!\n"
res += "\033[92mGreen!\n"
res += "\033[93mYellow!\n"
res += "\033[38;2;100;149;237mBlue!\n" # cornflower blue
res += "\033[95mMagenta!\n"
res += "\033[96mCyan!\n"
res += "\033[97mWhite!\n"
return res
async def main() -> None:
"""Run the application."""
program = cuia.Program(ColorfulExample())
await program.start()
if __name__ == "__main__":
asyncio.run(main())
| 26.727273
| 68
| 0.536565
| 895
| 0.761054
| 0
| 0
| 0
| 0
| 128
| 0.108844
| 565
| 0.480442
|
49733958b3756fb3220a69c0ceb6f8c4a2dd5ef8
| 2,571
|
py
|
Python
|
app/voxity/channel.py
|
voxity/vox-ui-api
|
9da442a2ae8e5fec92485cf7dc4adf1a560aa8f5
|
[
"MIT"
] | null | null | null |
app/voxity/channel.py
|
voxity/vox-ui-api
|
9da442a2ae8e5fec92485cf7dc4adf1a560aa8f5
|
[
"MIT"
] | null | null | null |
app/voxity/channel.py
|
voxity/vox-ui-api
|
9da442a2ae8e5fec92485cf7dc4adf1a560aa8f5
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, unicode_literals
from flask import current_app
from . import connectors, check_respons, pager_dict
from .objects.channel import Channel
def get_base_url():
return current_app.config['BASE_URL'] + '/channels/'
def create(exten):
con = connectors()
if con is not None:
return con.post(
get_base_url(),
data={'exten': exten}
).json()
return None
def get(ret_object=False, **kwargs):
"""
:retyp: list
:return: device list
"""
con = connectors()
if con:
resp = con.get(get_base_url(), params=kwargs)
if check_respons(resp):
ret = resp.json().get('result', [])
ret = Channel.litst_obj_from_list(ret, **kwargs)
if not ret_object:
r = []
for c in ret:
r.append(c.to_dict())
return r
else:
return ret
return None
def get_local_filter(ret_object=False, **kwargs):
ret = list()
if not kwargs:
return get(ret_object=ret_object)
else:
channels = get()
for c in channels:
conditions = list()
for k in kwargs.keys():
if (kwargs[k].count('*') == 2 and
kwargs[k][0] == '*' and
kwargs[k][-1] == '*' and
len(kwargs[k]) > 2):
conditions.append(str(c[k]).lower() in str(kwargs[k]).lower())
else:
conditions.append(str(c[k]).lower() == str(kwargs[k]).lower())
if any(conditions) and ret_object:
c = Channel(**c)
ret.append(c)
elif any(conditions) and not ret_object:
ret.append(c)
return ret
def get_id(d_id, ret_object=False):
"""
:param str d_ind: device id
:retype: dict|Channel
:return: one device
"""
con = connectors()
if con:
resp = con.get(get_base_url() + d_id)
if check_respons(resp):
ret = resp.json().get('data', [])
if not ret_object:
return ret
else:
return Channel(**ret)
return None
def get_log(**kwargs):
con = connectors()
if con is not None:
resp = connectors().get(
current_app.config['BASE_URL'] + '/calls/logs',
params=kwargs
)
data = {}
data['list'] = resp.json()['result']
data['pager'] = pager_dict(resp.headers)
return data
return None
| 25.205882
| 78
| 0.525088
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 265
| 0.103073
|
49736e792f64fdf62a5e05e4cdd1a7fca2758ba4
| 1,637
|
py
|
Python
|
chapter 2 - linked list/2.7.py
|
anuraagdjain/cracking_the_coding_interview
|
09083b4c464f41d5752c7ca3d27ab7c992793619
|
[
"MIT"
] | null | null | null |
chapter 2 - linked list/2.7.py
|
anuraagdjain/cracking_the_coding_interview
|
09083b4c464f41d5752c7ca3d27ab7c992793619
|
[
"MIT"
] | null | null | null |
chapter 2 - linked list/2.7.py
|
anuraagdjain/cracking_the_coding_interview
|
09083b4c464f41d5752c7ca3d27ab7c992793619
|
[
"MIT"
] | null | null | null |
from linkedlist import LinkedList
from node import Node
class IRes:
def __init__(self, result, node):
self.result = result
self.node = node
def print_nodes(n):
while(n != None):
print(n.data)
n = n.next
def tail_and_size(n):
ctr = 0
while n.next:
ctr += 1
n = n.next
return ctr, n
def remove_start(n, limit):
for i in range(limit):
n = n.next
return n
def intersection(a, b):
a_res, b_res = tail_and_size(a), tail_and_size(b)
# if tail are different, no need to compare further
if a_res[1] != b_res[1]:
return IRes(False, None)
list_diff = abs(a_res[0]-b_res[0])
# remove start nodes from longer list to ensure both are of same size
if a_res[0] > b_res[0]:
a = remove_start(a, list_diff)
else:
b = remove_start(b, list_diff)
while a != None and b != None:
if a == b:
return IRes(True, a)
a = a.next
b = b.next
return IRes(False, None)
if __name__ == "__main__":
a = Node(1)
b = Node(2)
c = Node(7)
d = Node(6)
e = Node(4)
f = Node(9)
g = Node(5)
h = Node(1)
i = Node(3)
x = Node(1)
y = Node(2)
z = Node(7)
z.next = y
y.next = x
i.next = h
h.next = g
g.next = f
f.next = c # with intersection
# f.next = z # without intersection
e.next = d
d.next = c
c.next = b
b.next = a
result = intersection(i, e)
if result.result:
print("Intersection found at node instance: " + str(result.node))
else:
print("No intersection")
| 18.602273
| 73
| 0.542456
| 103
| 0.06292
| 0
| 0
| 0
| 0
| 0
| 0
| 240
| 0.14661
|
4973e2d2ceab6b66fabf235caf79153e33be991a
| 2,307
|
py
|
Python
|
app_core/api/comments.py
|
Great-Li-Xin/LiCMS
|
9d7f78647766b49a325123f4b5ad59d6a1808eb7
|
[
"MIT"
] | 9
|
2020-02-18T01:50:17.000Z
|
2020-05-26T09:25:41.000Z
|
app_core/api/comments.py
|
realJustinLee/LiCMS
|
9d7f78647766b49a325123f4b5ad59d6a1808eb7
|
[
"MIT"
] | 1
|
2021-04-19T15:26:20.000Z
|
2021-04-19T15:26:20.000Z
|
app_core/api/comments.py
|
Great-Li-Xin/LiCMS
|
9d7f78647766b49a325123f4b5ad59d6a1808eb7
|
[
"MIT"
] | 5
|
2020-02-18T01:50:19.000Z
|
2020-05-26T09:25:45.000Z
|
from flask import jsonify, request, g, url_for, current_app
from app_core import db
from app_core.api import api
from app_core.api.decorators import permission_required
from app_core.models import Post, Permission, Comment
@api.route('/comments/')
def get_comments():
page = request.args.get('page', 1, type=int)
pagination = Comment.query.order_by(Comment.timestamp.desc()).paginate(
page, per_page=current_app.config['LICMS_COMMENTS_PER_PAGE'],
error_out=False)
comments = pagination.items
_prev = None
if pagination.has_prev:
_prev = url_for('api.get_comments', page=page - 1)
_next = None
if pagination.has_next:
_next = url_for('api.get_comments', page=page + 1)
return jsonify({
'comments': [comment.to_json() for comment in comments],
'prev': _prev,
'next': _next,
'count': pagination.total
})
@api.route('/comments/<int:comment_id>')
def get_comment(comment_id):
comment = Comment.query.get_or_404(comment_id)
return jsonify(comment.to_json())
@api.route('/posts/<int:post_id>/comments/')
def get_post_comments(post_id):
post = Post.query.get_or_404(post_id)
page = request.args.get('page', 1, type=int)
pagination = post.comments.order_by(Comment.timestamp.asc()).paginate(
page, per_page=current_app.config['LICMS_COMMENTS_PER_PAGE'],
error_out=False)
comments = pagination.items
_prev = None
if pagination.has_prev:
_prev = url_for('api.get_post_comments', post_id=post_id, page=page - 1)
_next = None
if pagination.has_next:
_next = url_for('api.get_post_comments', post_id=post_id, page=page + 1)
return jsonify({
'comments': [comment.to_json() for comment in comments],
'prev': _prev,
'next': _next,
'count': pagination.total
})
@api.route('/posts/<int:post_id>/comments/', methods=['POST'])
@permission_required(Permission.COMMENT)
def new_post_comment(post_id):
post = Post.query.get_or_404(post_id)
comment = Comment.from_json(request.json)
comment.author = g.current_user
comment.post = post
db.session.add(comment)
db.session.commit()
return jsonify(comment.to_json()), 201, {'Location': url_for('api.get_comment', comment_id=comment.id)}
| 33.926471
| 107
| 0.686173
| 0
| 0
| 0
| 0
| 2,071
| 0.897703
| 0
| 0
| 339
| 0.146944
|
4974d4d303e4a516e97419ba5b4f79eb5a463128
| 2,557
|
py
|
Python
|
ipyhop/state.py
|
YashBansod/IPyHOP
|
f3b75b420e470c693606a67cc70bdcb24eccda62
|
[
"BSD-3-Clause"
] | null | null | null |
ipyhop/state.py
|
YashBansod/IPyHOP
|
f3b75b420e470c693606a67cc70bdcb24eccda62
|
[
"BSD-3-Clause"
] | null | null | null |
ipyhop/state.py
|
YashBansod/IPyHOP
|
f3b75b420e470c693606a67cc70bdcb24eccda62
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
"""
File Description: File used for definition of State Class.
"""
# ****************************************** Libraries to be imported ****************************************** #
from copy import deepcopy
# ****************************************** Class Declaration Start ****************************************** #
class State(object):
"""
A state is just a collection of variable bindings.
* state = State('foo') tells IPyHOP to create an empty state object named 'foo'.
To put variables and values into it, you should do assignments such as foo.var1 = val1
"""
def __init__(self, name: str):
self.__name__ = name
# ****************************** Class Method Declaration ****************************************** #
def __str__(self):
if self:
var_str = "\r{state_name}.{var_name} = {var_value}\n"
state_str = ""
for name, val in self.__dict__.items():
if name != "__name__":
_str = var_str.format(state_name=self.__name__, var_name=name, var_value=val)
_str = '\n\t\t'.join(_str[i:i+120] for i in range(0, len(_str), 120))
state_str += _str
return state_str[:-1]
else:
return "False"
# ****************************** Class Method Declaration ****************************************** #
def __repr__(self):
return str(self.__class__) + ", " + self.__name__
# ****************************** Class Method Declaration ****************************************** #
def update(self, state):
self.__dict__.update(state.__dict__)
return self
# ****************************** Class Method Declaration ****************************************** #
def copy(self):
return deepcopy(self)
# ****************************************** Class Declaration End ****************************************** #
# ****************************************** Demo / Test Routine ****************************************** #
if __name__ == '__main__':
print("Test instantiation of State class ...")
test_state = State('test_state')
test_state.test_var_1 = {'key1': 'val1'}
test_state.test_var_2 = {'key1': 0}
test_state.test_var_3 = {'key2': {'key3': 5}, 'key3': {'key2': 5}}
print(test_state)
"""
Author(s): Yash Bansod
Repository: https://github.com/YashBansod/IPyHOP
"""
| 39.953125
| 120
| 0.431756
| 1,578
| 0.617129
| 0
| 0
| 0
| 0
| 0
| 0
| 1,536
| 0.600704
|
4974d677e63b39744893c4f6fa71c6ce00ac7913
| 2,240
|
py
|
Python
|
ckanext/scheming/logic.py
|
vrk-kpa/ckanext-scheming
|
b82e20e04acdc4a71163675f843ac9be74f29d41
|
[
"MIT"
] | null | null | null |
ckanext/scheming/logic.py
|
vrk-kpa/ckanext-scheming
|
b82e20e04acdc4a71163675f843ac9be74f29d41
|
[
"MIT"
] | null | null | null |
ckanext/scheming/logic.py
|
vrk-kpa/ckanext-scheming
|
b82e20e04acdc4a71163675f843ac9be74f29d41
|
[
"MIT"
] | 1
|
2021-12-15T12:50:40.000Z
|
2021-12-15T12:50:40.000Z
|
from ckantoolkit import get_or_bust, side_effect_free, ObjectNotFound
from ckanext.scheming.helpers import (
scheming_dataset_schemas, scheming_get_dataset_schema,
scheming_group_schemas, scheming_get_group_schema,
scheming_organization_schemas, scheming_get_organization_schema,
)
@side_effect_free
def scheming_dataset_schema_list(context, data_dict):
'''
Return a list of dataset types customized with the scheming extension
'''
return list(scheming_dataset_schemas())
@side_effect_free
def scheming_dataset_schema_show(context, data_dict):
'''
Return the scheming schema for a given dataset type
:param type: the dataset type
:param expanded: True to expand presets (default)
'''
t = get_or_bust(data_dict, 'type')
expanded = data_dict.get('expanded', True)
s = scheming_get_dataset_schema(t, expanded)
if s is None:
raise ObjectNotFound()
return s
@side_effect_free
def scheming_group_schema_list(context, data_dict):
'''
Return a list of group types customized with the scheming extension
'''
return list(scheming_group_schemas())
@side_effect_free
def scheming_group_schema_show(context, data_dict):
'''
Return the scheming schema for a given group type
:param type: the group type
:param expanded: True to expand presets (default)
'''
t = get_or_bust(data_dict, 'type')
expanded = data_dict.get('expanded', True)
s = scheming_get_group_schema(t, expanded)
if s is None:
raise ObjectNotFound()
return s
@side_effect_free
def scheming_organization_schema_list(context, data_dict):
'''
Return a list of organization types customized with the scheming extension
'''
return list(scheming_organization_schemas())
@side_effect_free
def scheming_organization_schema_show(context, data_dict):
'''
Return the scheming schema for a given organization type
:param type: the organization type
:param expanded: True to expand presets (default)
'''
t = get_or_bust(data_dict, 'type')
expanded = data_dict.get('expanded', True)
s = scheming_get_organization_schema(t, expanded)
if s is None:
raise ObjectNotFound()
return s
| 28
| 78
| 0.729018
| 0
| 0
| 0
| 0
| 1,923
| 0.858482
| 0
| 0
| 780
| 0.348214
|
49755e37e2029b777679857be7a2f1b70a206d0d
| 2,700
|
py
|
Python
|
omnithinker/api/nytimes.py
|
stuycs-softdev-fall-2013/proj2-pd6-04-omnithinker
|
53bf397ce2f67e7d5c5689486ab75475e99b0eba
|
[
"MIT",
"BSD-3-Clause"
] | 1
|
2022-01-18T02:03:15.000Z
|
2022-01-18T02:03:15.000Z
|
omnithinker/api/nytimes.py
|
stuycs-softdev-fall-2013/proj2-pd6-04-omnithinker
|
53bf397ce2f67e7d5c5689486ab75475e99b0eba
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
omnithinker/api/nytimes.py
|
stuycs-softdev-fall-2013/proj2-pd6-04-omnithinker
|
53bf397ce2f67e7d5c5689486ab75475e99b0eba
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/python
import json
from urllib import urlopen
# http://api.nytimes.com/svc/search/v2/articlesearch.json?fq=Obama&FACET_FIELD=day_of_week&BEGIN_DATE=19000101
# &API-KEY=5772CD9A42F195C96DA0E930A7182688:14:68439177
# The original link is above. What happens is because we don't specify an end date, the panda article, which was
# coincidentally published today, becomes the first article that we see and gives us keywords like zoo.
# If we add an end date before then, then we can filter it out.
def ReturnRelatedTopics(Topic):
NYT_API_URL = 'http://api.nytimes.com/svc/search/v2/articlesearch'
API_KEY = "5772CD9A42F195C96DA0E930A7182688:14:68439177"
FORMAT = "json"
FQ = str(Topic)
FACET_FIELD = "day_of_week"
BEGIN_DATE = str(19000101)
END_DATE = str(20131208)
url = ("%s.%s?fq=%s&FACET_FIELD=%s&BEGIN_DATE=%s&END_DATE=%s&API-KEY=%s") % (NYT_API_URL, FORMAT, FQ, FACET_FIELD, BEGIN_DATE, END_DATE, API_KEY)
response = urlopen(url)
Json_Data = json.loads(response.read())
RELTOPICS = list()
for y in Json_Data["response"]["docs"]:
for x in y:
if x == "keywords":
for a in y[x]:
RELTOPICS.append(a["value"])
RELTOPICS.pop(0)
RELTOPICS.pop(0)
RELTOPICS.pop(0)
return RELTOPICS
class Nytimes():
def __init__(self, Topic):
NYT_API_URL = 'http://api.nytimes.com/svc/search/v2/articlesearch'
API_KEY = "5772CD9A42F195C96DA0E930A7182688:14:68439177"
FORMAT = "json"
FQ = str(Topic)
FACET_FIELD = "day_of_week"
BEGIN_DATE = str(19000101)
END_DATE = str(20131208)
url = ("%s.%s?fq=%s&FACET_FIELD=%s&BEGIN_DATE=%s&END_DATE=%s&API-KEY=%s") % (NYT_API_URL, FORMAT, FQ, FACET_FIELD, BEGIN_DATE, END_DATE, API_KEY)
response = urlopen(url)
self.Json_Data = json.loads(response.read())
URL = list()
TITLE = list()
SNIPPET = list()
Counter = 0
for x in self.Json_Data["response"]["docs"]:
#print x
URL.append(x["web_url"])
TITLE.append(x["headline"]["main"])
SNIPPET.append(x["snippet"])
#print(URL)
#print(TITLE)
#print(SNIPPET)
self.Data = zip(URL, TITLE, SNIPPET)
self.counter = 0
#print(Data)
def getArticle(self):
try:
self.counter += 1
return self.Data[self.counter - 1]
except:
return list()
#End of class
if __name__ == '__main__':
#FindArticles("Obama")
print ReturnRelatedTopics("airplane")
| 35.526316
| 153
| 0.605185
| 1,309
| 0.484815
| 0
| 0
| 0
| 0
| 0
| 0
| 1,021
| 0.378148
|
4975838c1788d4788a4a9397bb1062a6a910a29e
| 694
|
py
|
Python
|
tests/test_pydantic_integration.py
|
bsnacks000/yearmonth
|
c6a6084931e6cc4696de5f8a7f8e48ceca83b944
|
[
"MIT"
] | null | null | null |
tests/test_pydantic_integration.py
|
bsnacks000/yearmonth
|
c6a6084931e6cc4696de5f8a7f8e48ceca83b944
|
[
"MIT"
] | null | null | null |
tests/test_pydantic_integration.py
|
bsnacks000/yearmonth
|
c6a6084931e6cc4696de5f8a7f8e48ceca83b944
|
[
"MIT"
] | null | null | null |
from typing import List
from yearmonth.yearmonth import YearMonth
import pydantic
class MyModel(pydantic.BaseModel):
ym: YearMonth
def test_pydantic_validators():
MyModel(ym=(2021, 1))
MyModel(ym='2021-01')
MyModel(ym=('2021', '01'))
def test_pydantic_schema():
schema = MyModel.schema()
check_examples = ['2021-01', (2021, 1)]
check_string = 'ISO 8601 compliant reduced precision calendar date'
assert check_examples == schema['properties']['ym']['examples']
assert check_string == schema['properties']['ym']['description']
def test_pydantic_json():
m = MyModel(ym='2021-01')
assert m.json() == '{"ym": {"year": 2021, "month": 1}}'
| 22.387097
| 71
| 0.665706
| 52
| 0.074928
| 0
| 0
| 0
| 0
| 0
| 0
| 180
| 0.259366
|
4975be83811ebc74df1baade17e5a1895d1cf649
| 353
|
py
|
Python
|
C_D_Playlist.py
|
fairoz-ahmed/Casper_Player
|
f71a26002907e474a9274771565ce781beddcca4
|
[
"MIT"
] | null | null | null |
C_D_Playlist.py
|
fairoz-ahmed/Casper_Player
|
f71a26002907e474a9274771565ce781beddcca4
|
[
"MIT"
] | null | null | null |
C_D_Playlist.py
|
fairoz-ahmed/Casper_Player
|
f71a26002907e474a9274771565ce781beddcca4
|
[
"MIT"
] | null | null | null |
import tkinter.messagebox
from tkinter import *
from tkinter import ttk
from tkinter import filedialog
import threading
from pygame import mixer
from mutagen.mp3 import MP3
import os
import easygui
import time
import playlist_window as pw
import Main as main
#from PIL import ImageTk,Image
def redirect(path):
main.play_music(path)
| 20.764706
| 31
| 0.78187
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 31
| 0.087819
|
49777977c495be3e64d10459c0324e75b00b5f3b
| 569
|
py
|
Python
|
docker-image/render-template.py
|
osism/generics
|
2dd914f2338c2d60d1595d7cdc4db0c107a9fb47
|
[
"Apache-2.0"
] | null | null | null |
docker-image/render-template.py
|
osism/generics
|
2dd914f2338c2d60d1595d7cdc4db0c107a9fb47
|
[
"Apache-2.0"
] | 3
|
2020-12-10T09:57:02.000Z
|
2020-12-10T09:57:17.000Z
|
docker-image/render-template.py
|
osism/travis
|
2dd914f2338c2d60d1595d7cdc4db0c107a9fb47
|
[
"Apache-2.0"
] | null | null | null |
import os
import sys
import jinja2
import yaml
with open(".information.yml") as fp:
information = yaml.safe_load(fp)
loader = jinja2.FileSystemLoader(searchpath="")
environment = jinja2.Environment(loader=loader, keep_trailing_newline=True)
template = environment.get_template(sys.argv[1])
result = template.render({
"docker_image_name": information.get("docker_image_name", "NONE"),
"readme_note": information.get("readme_note", None),
"versions": information.get("versions", ["latest"])
})
with open(sys.argv[1], "w+") as fp:
fp.write(result)
| 27.095238
| 75
| 0.72935
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 122
| 0.214411
|
49788254641401f0ac3bea81c52abecf9425c9b7
| 58
|
py
|
Python
|
test/__init__.py
|
stungkit/tfidf_matcher
|
24182504d21f1eb978839b700f1c402c6288df2f
|
[
"MIT"
] | 13
|
2020-02-24T18:29:15.000Z
|
2021-12-28T09:41:35.000Z
|
test/__init__.py
|
stungkit/tfidf_matcher
|
24182504d21f1eb978839b700f1c402c6288df2f
|
[
"MIT"
] | null | null | null |
test/__init__.py
|
stungkit/tfidf_matcher
|
24182504d21f1eb978839b700f1c402c6288df2f
|
[
"MIT"
] | 3
|
2020-07-21T04:32:45.000Z
|
2021-10-21T11:00:56.000Z
|
# AUTHOR: Louis Tsiattalou
# DESCRIPTION: Init for Tests.
| 19.333333
| 30
| 0.758621
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 56
| 0.965517
|
4978db654876ffc9e3f0801f73bab29baba94038
| 29,541
|
py
|
Python
|
isitek.py
|
will-bainbridge/ISITEK
|
53e90e0511bbd7cd08614b943c1286c56adbee5e
|
[
"MIT"
] | 3
|
2018-06-26T15:04:46.000Z
|
2019-09-14T09:23:44.000Z
|
isitek.py
|
will-bainbridge/ISITEK
|
53e90e0511bbd7cd08614b943c1286c56adbee5e
|
[
"MIT"
] | null | null | null |
isitek.py
|
will-bainbridge/ISITEK
|
53e90e0511bbd7cd08614b943c1286c56adbee5e
|
[
"MIT"
] | 3
|
2016-11-28T12:19:37.000Z
|
2020-02-04T00:18:56.000Z
|
#!/usr/bin/python
################################################################################
import numpy
import os
import cPickle as pickle
import scipy.misc
import scipy.sparse
import scipy.sparse.linalg
import scipy.special
import sys
import time
class Struct:
def __init__(self, **keywords):
self.__dict__.update(keywords)
class Timer(object):
def __init__(self, name=None, multiline=False):
self.name = name
self.multiline = multiline
def __enter__(self):
self.start = time.time()
if self.name:
print '%s ...' % self.name ,
if self.multiline:
print
sys.stdout.flush()
def __exit__(self, type, value, traceback):
if self.multiline:
print ' ...' ,
print 'done in %.3f s' % (time.time() - self.start)
################################################################################
def nodegrid(a,b):
return [ x.T for x in numpy.meshgrid(a,b) ]
def dot_sequence(*args):
if len(args) == 1: return args[0]
else: return numpy.dot( args[0] , dot_sequence(*args[1:]) )
def string_multiple_replace(string,dict):
for s,r in dict.iteritems():
string = string.replace(s,r)
return string
################################################################################
def read_data_file(data_filename):
file = open(data_filename,'rb')
data = pickle.load(file)
file.close()
node = data['node']
face = data['face']
element = data['element']
boundary = data['boundary']
u = data['u']
order = data['order']
return node,face,element,boundary,u,order
#------------------------------------------------------------------------------#
def read_input_file(input_filename):
geometry_filename = []
order = []
boundary = []
initial = []
term = []
wind = []
iterations = []
mesh_size = []
constant = []
file = open(input_filename,'r')
for line in file.readlines():
lineparts = line.split()
if len(lineparts) >= 2 and lineparts[0] == 'geometry_filename':
geometry_filename = lineparts[1]
if len(lineparts) >= 2 and lineparts[0] == 'order':
order = numpy.array([ int(x) for x in lineparts[1:] ])
if len(lineparts) >= 4 and lineparts[0] == 'boundary':
boundary.append(Struct(
face = sum([ list(z) if len(z) == 1 else range(*z) for z in [ tuple( int(y) for y in x.split(':') ) for x in lineparts[1].split(',') ] ],[]) ,
variable = int(lineparts[2]) ,
condition = tuple(sum([ x == y for x in lineparts[3] ]) for y in 'nt') ,
value = float(lineparts[4]) if len(lineparts) >= 5 else 0.0 ))
if len(lineparts) >= 2 and lineparts[0] == 'initial':
initial = lineparts[1:]
if len(lineparts) >= 2 and lineparts[0] == 'constant':
constant = lineparts[1]
if len(lineparts) >= 6 and lineparts[0] == 'term':
term.append(Struct(
equation = int(lineparts[1]) ,
variable = [ int(x) for x in lineparts[2].split(',') ] ,
direction = lineparts[3] ,
differential = [ tuple( sum([ x == y for x in z ]) for y in 'xy' ) for z in lineparts[4].split(',') ] ,
power = [ int(x) for x in lineparts[5].split(',') ] ,
constant = lineparts[6] ,
method = lineparts[7] ))
if len(lineparts) >= 2 and lineparts[0] == 'wind':
wind = eval( 'lambda n,u,v:' + lineparts[1] , {'numpy':numpy} , {} )
if len(lineparts) >= 2 and lineparts[0] == 'iterations':
iterations = int(lineparts[1])
if len(lineparts) >= 2 and lineparts[0] == 'mesh_size':
mesh_size = int(lineparts[1])
file.close()
if len(constant):
constant = dict([ (y[0],float(y[1])) for y in [ x.split('=') for x in constant.split(';') ] ])
else:
constant = {}
if len(term):
for i in range(0,len(term)):
term[i].constant = eval(term[i].constant,{},constant)
if len(initial):
replace = {'pi':'numpy.pi','cos(':'numpy.cos(','sin(':'numpy.sin('}
for i in range(0,len(initial)):
initial[i] = eval( 'lambda x,y: numpy.ones(x.shape)*(' + string_multiple_replace(initial[i],replace) + ')' , {'numpy':numpy} , constant )
return geometry_filename,order,boundary,initial,term,wind,iterations,mesh_size
#------------------------------------------------------------------------------#
def element_sequential_indices(e,element,face):
n = len(element[e].face)
polyline = numpy.array([ list(face[f].node) for f in element[e].face ])
polynode = numpy.unique(polyline)
ones = numpy.ones((n,1))
connect = 1*(ones*polyline[:,0] == (ones*polynode).T) + 2*(ones*polyline[:,1] == (ones*polynode).T)
side = [0]*n
vertex = [0]*n
for i in range(1,n):
temp = connect[connect[:,side[i-1]] == (int(not vertex[i-1])+1),:].flatten() * (numpy.arange(0,n) != side[i-1])
side[i] = temp.nonzero()[0][0]
vertex[i] = temp[side[i]]-1
return [side,vertex]
#------------------------------------------------------------------------------#
def read_geometry(geometry_filename):
# read the geometry file
file = open(geometry_filename,'r')
data = file.readlines()
file.close()
# generate the mesh structures
i = 0
while i < len(data):
if data[i].strip().split()[0] == 'NODES':
nn = int(data[i].strip().split()[1])
node = [ Struct(x=(0.0,0.0)) for _ in range(0,nn) ]
for n in range(0,nn):
node[n].x = tuple( [ float(x) for x in data[i+1+n].strip().split() ] )
i += nn
elif data[i].strip().split()[0] == 'FACES':
nf = int(data[i].strip().split()[1])
face = [ Struct(node=(0,0),border=[],size=1.0,normal=(0.0,0.0),centre=(0.0,0.0),boundary=[],Q=[]) for temp in range(0,nf) ]
for f in range(0,nf):
face[f].node = tuple( [ int(x) for x in data[i+1+f].strip().split() ] )
i += nf
elif data[i].strip().split()[0] == 'CELLS' or data[i].strip().split()[0] == 'ELEMENTS':
ne = int(data[i].strip().split()[1])
element = [ Struct(face=[],orientation=[],size=1.0,area=0.0,centre=(0.0,0.0),unknown=[],V=[],P=[],Q=[],W=[],X=[]) for temp in range(0,ne) ]
for e in range(0,ne):
element[e].face = [ int(x) for x in data[i+1+e].strip().split() ]
i += ne
else:
i += 1
# generate borders
for e in range(0,ne):
for f in element[e].face:
face[f].border.append(e)
# additional element geometry
for e in range(0,ne):
s,t = element_sequential_indices(e,element,face)
index = [ face[element[e].face[i]].node[j] for i,j in zip(s,t) ]
cross = [ node[index[i-1]].x[0]*node[index[i]].x[1]-node[index[i]].x[0]*node[index[i-1]].x[1] for i in range(0,len(element[e].face)) ]
element[e].area = 0.5*sum(cross)
element[e].centre = tuple([ sum([ (node[index[i-1]].x[j]+node[index[i]].x[j])*cross[i] for i in range(0,len(element[e].face)) ])/(6*element[e].area) for j in range(0,2) ])
element[e].orientation = [ 2*t[i]-1 for i in s ]
if element[e].area < 0.0:
element[e].area = -element[e].area
element[e].orientation = [ -x for x in element[e].orientation ]
element[e].size = numpy.sqrt(element[e].area)
# additional face geometry
for f in range(0,nf):
face[f].normal = ( -node[face[f].node[1]].x[1]+node[face[f].node[0]].x[1] , +node[face[f].node[1]].x[0]-node[face[f].node[0]].x[0] )
face[f].size = 0.5*numpy.sqrt(numpy.dot(face[f].normal,face[f].normal))
face[f].centre = tuple([ 0.5*(node[face[f].node[1]].x[i]+node[face[f].node[0]].x[i]) for i in range(0,2) ])
# return
return node,face,element
#------------------------------------------------------------------------------#
def assign_boundaries():
nv = len(order)
for f in range(0,len(face)):
face[f].boundary = [ [] for v in range(0,nv) ]
for b in range(0,len(boundary)):
for f in boundary[b].face:
face[f].boundary[boundary[b].variable].append(b)
#------------------------------------------------------------------------------#
def generate_unknowns():
nv = len(order)
np = order*(order+1)/2
nu = 0
# number by element then variable
# > gives a more diagonally dominant system
for e in range(0,len(element)):
element[e].unknown = [ [] for v in range(0,nv) ]
for v in range(0,nv):
element[e].unknown[v] = range(nu,nu+np[v])
nu += np[v]
## number by variable then element
## > gives a system with visible blocks corresponding to equations
#for e in range(0,len(element)): element[e].unknown = [ [] for v in range(0,nv) ]
#for v in range(0,nv):
# for e in range(0,len(element)):
# element[e].unknown[v] = range(nu,nu+np[v])
# nu += np[v]
return numpy.zeros(nu)
#------------------------------------------------------------------------------#
def generate_constants(order):
max_order = max(order)
ng = 2*max_order-1
gauss_locations,gauss_weights = [ x.real for x in scipy.special.orthogonal.p_roots(ng) ]
#nh = 7
#hammer_locations = numpy.array([
# [0.101286507323456,0.101286507323456],[0.797426958353087,0.101286507323456],[0.101286507323456,0.797426958353087],
# [0.470142064105115,0.470142064105115],[0.059715871789770,0.470142064105115],[0.470142064105115,0.059715871789770],
# [0.333333333333333,0.333333333333333]])
#hammer_weights = 0.5 * numpy.array([
# 0.125939180544827,0.125939180544827,0.125939180544827,0.132394152788506,0.132394152788506,0.132394152788506,
# 0.225000000000000])
#nh = 9
#hammer_locations = numpy.array([
# [0.437525248383384,0.437525248383384],[0.124949503233232,0.437525248383384],[0.437525248383384,0.124949503233232],
# [0.165409927389841,0.037477420750088],[0.037477420750088,0.165409927389841],[0.797112651860071,0.165409927389841],
# [0.165409927389841,0.797112651860071],[0.037477420750088,0.797112651860071],[0.797112651860071,0.037477420750088]])
#hammer_weights = 0.5 * numpy.array([
# 0.205950504760887,0.205950504760887,0.205950504760887,0.063691414286223,0.063691414286223,0.063691414286223,
# 0.063691414286223,0.063691414286223,0.063691414286223])
nh = 12
hammer_locations = numpy.array([
[0.063089014491502,0.063089014491502],[0.873821971016996,0.063089014491502],[0.063089014491502,0.873821971016996],
[0.249286745170910,0.249286745170910],[0.501426509658179,0.249286745170910],[0.249286745170910,0.501426509658179],
[0.310352451033785,0.053145049844816],[0.053145049844816,0.310352451033785],[0.636502499121399,0.310352451033785],
[0.310352451033785,0.636502499121399],[0.053145049844816,0.636502499121399],[0.636502499121399,0.053145049844816]])
hammer_weights = 0.5 * numpy.array([
0.050844906370207,0.050844906370207,0.050844906370207,0.116786275726379,0.116786275726379,0.116786275726379,
0.082851075618374,0.082851075618374,0.082851075618374,0.082851075618374,0.082851075618374,0.082851075618374])
taylor_coefficients = numpy.array([])
taylor_powers = numpy.zeros((0,2),dtype=int)
for i in range(0,2*max_order):
taylor_coefficients = numpy.append(taylor_coefficients,scipy.misc.comb(i*numpy.ones(i+1),numpy.arange(0,i+1))/scipy.misc.factorial(i))
taylor_powers = numpy.append(taylor_powers,numpy.array([range(i,-1,-1),range(0,i+1)],dtype=int).T,axis=0)
powers_taylor = numpy.zeros((2*max_order,2*max_order),dtype=int)
for i in range(0,taylor_powers.shape[0]): powers_taylor[taylor_powers[i][0]][taylor_powers[i][1]] = i
factorial = scipy.misc.factorial(numpy.arange(0,2*max_order))
return gauss_locations,gauss_weights,hammer_locations,hammer_weights,taylor_coefficients,taylor_powers,powers_taylor,factorial
#------------------------------------------------------------------------------#
def basis(x,y,element,n,differential):
if taylor_powers[n,0] < differential[0] or taylor_powers[n,1] < differential[1]:
return numpy.zeros(x.shape)
p = taylor_powers[n]
q = taylor_powers[n]-differential
constant = taylor_coefficients[n] / numpy.power( element.size , sum(p) )
constant = constant * factorial[p[0]] * factorial[p[1]] / ( factorial[q[0]] * factorial[q[1]] )
return constant * numpy.power(x-element.centre[0],q[0]) * numpy.power(y-element.centre[1],q[1])
#------------------------------------------------------------------------------#
def derivative_transform_matrix(A,order):
n = order*(order+1)/2
D = numpy.zeros((n,n))
D[0,0] = 1.0
for i in range(0,order-1):
old = numpy.nonzero(numpy.sum(taylor_powers,axis=1) == i)[0]
temp = numpy.append( taylor_powers[old,:] + [1,0] , taylor_powers[old[taylor_powers[old,0] == 0],:] + [0,1] , axis=0 )
new = powers_taylor[temp[:,0],temp[:,1]]
index = nodegrid(old,old)
D[nodegrid(new,new)] = numpy.append(
A[0,0] * numpy.append( D[index] , numpy.zeros((i+1,1)) , axis=1 ) +
A[0,1] * numpy.append( numpy.zeros((i+1,1)) , D[index] , axis=1 ) ,
A[1,0] * numpy.append( D[old[-1],[old]] , [[0]] , axis=1 ) +
A[1,1] * numpy.append( [[0]] , D[old[-1],[old]] , axis=1 ) , axis=0 )
return D
#------------------------------------------------------------------------------#
def calculate_element_matrices():
nf = len(face)
ne = len(element)
nv = len(order)
max_order = max(order)
np = numpy.array([ len(x) for x in element[0].unknown ])
max_np = max(np)
ng = len(gauss_weights)
nh = len(hammer_weights)
# initialise
if do.pre:
for e in range(0,ne):
element[e].V = numpy.zeros((max_np,max_np))
element[e].P = numpy.zeros((max_np,(len(element[e].face)-2)*nh,max_np))
element[e].Q = [ numpy.zeros((ng,max_np)) for i in range(0,len(element[e].face)) ]
element[e].W = numpy.zeros((len(element[e].face)-2)*nh)
element[e].X = numpy.zeros(((len(element[e].face)-2)*nh,2))
for f in range(0,nf):
face[f].Q = [ [] for v in range(0,nv) ]
# element vandermonde matrices
if do.pre:
for e in range(0,ne):
for i in range(0,max_np):
for j in range(0,max_np):
element[e].V[i,j] = basis(numpy.array(element[e].centre[0]),numpy.array(element[e].centre[1]),element[e],i,taylor_powers[j])
# triangulation and element area quadrature
for e in range(0,ne):
# triangulate
nt = len(element[e].face)-2
v = numpy.zeros((nt,3),dtype=int)
v[:,0] = face[element[e].face[0]].node[0]
j = 0
for i in range(0,len(element[e].face)):
f = element[e].face[i]
o = int(element[e].orientation[i] < 0)
v[j][1:] = numpy.array(face[f].node)[[1-o,o]]
j += not any(v[j][1:] == v[j][0])
if j >= nt: break
# integration locations in and area of the triangles
element[e].X = numpy.zeros(((len(element[e].face)-2)*nh,2))
area = numpy.zeros(nt)
for i in range(0,nt):
d = numpy.array([ [ node[v[i][j]].x[k] - node[v[i][0]].x[k] for k in range(0,2) ] for j in range(1,3) ])
element[e].X[i*nh:(i+1)*nh] = ( numpy.ones((nh,1))*node[v[i][0]].x +
hammer_locations[:,0][numpy.newaxis].T*d[0] +
hammer_locations[:,1][numpy.newaxis].T*d[1] )
area[i] = numpy.cross(d[0,:],d[1,:])
# integration weights
element[e].W = (numpy.array([area]).T*hammer_weights).flatten()
# element FEM numerics matrices
if do.pre:
for e in range(0,ne):
# basis function values at the integration points
for i in range(0,max_np):
for j in range(0,max_np):
element[e].P[i][:,j] = basis(element[e].X[:,0],element[e].X[:,1],element[e],j,taylor_powers[i])
# element DG-FEM numerics matrices
if do.pre:
for e in range(0,ne):
for i in range(0,len(element[e].face)):
f = element[e].face[i]
# integration locations along the face
temp = gauss_locations[numpy.newaxis].T
x = 0.5*(1.0-temp)*node[face[f].node[0]].x + 0.5*(1.0+temp)*node[face[f].node[1]].x
# basis function values at the integration points
for j in range(0,max_np):
element[e].Q[i][:,j] = basis(x[:,0],x[:,1],element[e],j,[0,0])
# face IDG-FEM numerics matrices
for f in range(0,nf):
# adjacent element and boundaries
a = numpy.array(face[f].border)
na = len(a)
b = numpy.array(face[f].boundary,dtype=object)
nb = [ len(i) for i in b ]
if do.pre or (do.re and any(b)):
# rotation to face coordinates
R = numpy.array([[-face[f].normal[0],-face[f].normal[1]],[face[f].normal[1],-face[f].normal[0]]])
R /= numpy.sqrt(numpy.dot(face[f].normal,face[f].normal))
# face locations
x = 0.5*(1.0-gauss_locations[numpy.newaxis].T)*node[face[f].node[0]].x + 0.5*(1.0+gauss_locations[numpy.newaxis].T)*node[face[f].node[1]].x
y = face[f].centre + numpy.dot( x - face[f].centre , R.T )
w = gauss_weights
# adjacent integration locations
xa = [ element[a[i]].X for i in range(0,na) ]
ya = [ face[f].centre + numpy.dot( xa[i] - face[f].centre , R.T ) for i in range(0,na) ]
wa = numpy.append(element[a[0]].W,element[a[1]].W) if na == 2 else element[a[0]].W
for v in range(0,nv):
# face basis indices
temp = nodegrid(range(0,2*order[v]),range(0,2*order[v])) # NOTE # not sufficient for boundary faces with 2 bordering elements
face_taylor = powers_taylor[ numpy.logical_and( temp[0] + na*temp[1] < na*order[v] + nb[v] , temp[1] < order[v] ) ]
# number of interpolation unknowns
ni = len(face_taylor)
# matrices
P = numpy.zeros((na*nh,na*np[v]))
for j in range(0,np[v]):
for k in range(0,na):
P[k*nh:(1+k)*nh,j+k*np[v]] = basis(xa[k][:,0],xa[k][:,1],element[a[k]],j,[0,0])
Q = numpy.zeros((na*nh,ni))
for j in range(0,ni):
for k in range(0,na):
Q[k*nh:(k+1)*nh,j] = basis(ya[k][:,0],ya[k][:,1],face[f],face_taylor[j],[0,0])
A = dot_sequence( P.T , numpy.diag(wa) , Q )
B = dot_sequence( P.T , numpy.diag(wa) , P )
# boundary parts
if nb[v]:
dA = numpy.zeros((nb[v]*order[v],ni))
for i in range(0,nb[v]):
for j in range(0,ni):
for k in range(0,order[v]):
dA[k+i*order[v],j] = basis(
numpy.array(face[f].centre[0]),
numpy.array(face[f].centre[1]),
face[f],face_taylor[j],
[ sum(temp) for temp in zip([0,k],boundary[b[v][i]].condition) ])
dB = numpy.zeros((nb[v]*order[v],nb[v]))
for i in range(0,nb[v]): dB[i*order[v],i] = 1.0
A = numpy.append( A , dA , axis=0 )
B = numpy.append( numpy.append( B , numpy.zeros((B.shape[0],nb[v])) , axis=1 ) ,
numpy.append( numpy.zeros((nb[v]*order[v],B.shape[1])) , dB , axis=1 ) ,
axis=0 )
# solve interpolation problem
D = numpy.linalg.solve(A,B)
# interpolated values
F = numpy.zeros((ng,ni))
face[f].Q[v] = numpy.zeros((np[v],ng,D.shape[1]))
for j in range(0,np[v]):
for k in range(0,ni):
F[:,k] = basis(y[:,0],y[:,1],face[f],face_taylor[k],taylor_powers[j])
face[f].Q[v][j] = numpy.dot( F , D )
# transform differentials to x and y
T = derivative_transform_matrix(numpy.linalg.inv(R),order[v])
for j in range(0,ng): face[f].Q[v][:,j] = numpy.dot( T , face[f].Q[v][:,j] )
#------------------------------------------------------------------------------#
def initialise_unknowns():
ne = len(element)
np = [ len(x) for x in element[0].unknown ]
nv = len(order)
max_order = max(order)
max_order_sq = max_order*max_order
max_np = max(np)
for e in range(0,ne):
x = element[e].centre
delta = numpy.linspace(-0.1*element[e].size/2,0.1*element[e].size/2,max_order)
dx = [ temp.flatten() for temp in nodegrid(delta,delta) ]
p = [ taylor_powers[0:max_np,i] for i in range(0,2) ]
M = ((numpy.ones((max_np,1)) * dx[0]).T ** (numpy.ones((max_order_sq,1)) * p[0]) *
(numpy.ones((max_np,1)) * dx[1]).T ** (numpy.ones((max_order_sq,1)) * p[1]) *
(numpy.ones((max_order_sq,1)) * (scipy.misc.comb(p[0]+p[1],p[0])/scipy.misc.factorial(p[0]+p[1]))))
inv_M = numpy.linalg.pinv(M)
inv_V = numpy.linalg.inv(element[e].V)
for v in range(0,nv):
u[element[e].unknown[v]] = dot_sequence( inv_V , inv_M , initial[v](dx[0]+x[0],dx[1]+x[1]) )[0:np[v]]
#------------------------------------------------------------------------------#
def generate_system():
ne = len(element)
ng = len(gauss_weights)
nh = len(hammer_weights)
np = [ len(x) for x in element[0].unknown ]
nt = len(term)
nv = len(order)
max_np = max(np)
sum_np = sum(np)
sum_np_sq = sum_np*sum_np
# local dense jacobian
L = Struct(i=[],x=[])
# csr system jacobian
J = Struct(p=[],i=[],x=[])
J.p = numpy.zeros(u.shape[0]+1,dtype=int)
for e in range(0,ne):
temp = sum_np
for f in element[e].face: temp += sum_np*(len(face[f].border) == 2)
J.p[numpy.array(sum(element[e].unknown,[]))+1] = temp
J.p = numpy.cumsum(J.p)
J.i = numpy.zeros(J.p[-1],dtype=int)
J.x = numpy.zeros(J.p[-1])
# function vector
F = numpy.zeros(u.shape)
for e in range(0,ne):
# number of faces
nf = len(element[e].face)
# adjacent elements
adj = - numpy.ones(nf,dtype=int)
for i in range(0,nf):
temp = numpy.array(face[element[e].face[i]].border)
temp = temp[temp != e]
if len(temp): adj[i] = temp[0]
n_adj = sum(adj >= 0)
i_adj = numpy.arange(0,nf)[adj >= 0]
# local matrices to add to the system
L.i = numpy.zeros((sum_np,(1+n_adj)*sum_np),dtype=int)
L.i[:,0:sum_np] = numpy.tile( sum(element[e].unknown,[]) , (sum_np,1) )
for i in range(0,n_adj): L.i[:,(i+1)*sum_np:(i+2)*sum_np] = numpy.tile( sum(element[adj[i_adj[i]]].unknown,[]) , (sum_np,1) )
L.x = numpy.zeros(L.i.shape)
# indices into the local matrices
index_e = [ numpy.arange(sum(np[:v]),sum(np[:v+1]))[numpy.newaxis] for v in range(0,nv) ]
index_a = [ [] for i in range(0,nf) ]
for i in range(0,n_adj):
index_a[i_adj[i]] = [ numpy.array([
range(sum(np[:v]),sum(np[:v+1])) +
range((i+1)*sum_np+sum(np[:v]),(i+1)*sum_np+sum(np[:v+1])) ])
for v in range(0,nv) ]
# loop over terms
for t in range(0,nt):
# numbers of variables in the term product sequence
ns = len(term[t].variable)
# direction index
direction = powers_taylor[int(term[t].direction == 'x'),int(term[t].direction == 'y')]
# powers
P = numpy.array(term[t].power)[numpy.newaxis].T
# equation matrix
A = - term[t].constant * dot_sequence( element[e].P[direction][:,0:np[term[t].equation]].T , numpy.diag(element[e].W) )
# calculate the coefficients and values
B = [ [] for s in range(0,ns) ]
X = numpy.zeros((ns,nh))
for s,v in zip(range(0,ns),term[t].variable):
B[s] = element[e].P[powers_taylor[term[t].differential[s]]][:,0:np[v]]
X[s,:] = numpy.dot( B[s] , u[element[e].unknown[v]] )
# add to the local jacobian
Y = X ** P
for s,v in zip(range(0,ns),term[t].variable):
temp = numpy.copy(Y)
temp[s,:] = P[s] * X[s,:] ** (P[s]-1)
L.x[index_e[term[t].equation].T,index_e[v]] += dot_sequence( A , numpy.diag(numpy.prod(temp,axis=0)) , B[s] )
# add to the function vector
F[element[e].unknown[term[t].equation]] += numpy.dot( A , numpy.prod(Y,axis=0) )
# continue if not a flux term
if term[t].direction != 'x' and term[t].direction != 'y': continue
# face components
for i in range(0,nf):
f = element[e].face[i]
a = adj[i]
b = numpy.array(face[f].boundary,dtype=object)
# face normal
normal = element[e].orientation[i] * numpy.array(face[f].normal)
# corresponding face index
if a >= 0: j = numpy.arange(0,len(element[a].face))[numpy.array(element[a].face) == f]
# wind
if a >= 0 and ('u' in term[t].method):
ui = [ dot_sequence( gauss_weights , element[e].Q[i][:,0:np[v]] , u[element[e].unknown[v]] ) for v in range(0,nv) ]
uo = [ dot_sequence( gauss_weights , element[a].Q[j][:,0:np[v]] , u[element[a].unknown[v]] ) for v in range(0,nv) ]
w = wind( normal , ui , uo )
else:
w = True
# equation matrix
A = normal[term[t].direction == 'y'] * term[t].constant * dot_sequence(
element[e].Q[i][:,0:np[term[t].equation]].T , numpy.diag(0.5*gauss_weights) )
# calculate the coefficients and values
B = [ [] for s in range(0,ns) ]
X = numpy.zeros((ns,ng))
for s,v in zip(range(0,ns),term[t].variable):
# where there is an adjacent element
if a >= 0:
# interpolated flux
if term[t].method[s] == 'i' or len(b[v]):
if face[f].border[0] == e: temp = numpy.array(range(0,2*np[v]))
else: temp = numpy.array(range(np[v],2*np[v])+range(0,np[v]))
B[s] = face[f].Q[v][powers_taylor[term[t].differential[s]]][:,temp]
# averaged flux
elif term[t].method[s] == 'a':
B[s] = 0.5*numpy.append(element[e].Q[i][:,0:np[v]],element[a].Q[j][:,0:np[v]],axis=1)
# upwind flux
elif term[t].method[s] == 'u':
B[s] = numpy.zeros((ng,2*np[v]))
if w: B[s][:,0:np[v]] += element[e].Q[i][:,0:np[v]]
else: B[s][:,np[v]:2*np[v]] += element[a].Q[j][:,0:np[v]]
# values
X[s,:] = numpy.dot( B[s] , numpy.append(u[element[e].unknown[v]],u[element[a].unknown[v]]) )
# interpolated flux where there is no adjacent element
else:
B[s] = face[f].Q[v][powers_taylor[term[t].differential[s]]][:,0:np[v]]
X[s,:] = numpy.dot( B[s] , u[element[e].unknown[v]] )
# interpolated flux at boundaries
if len(b[v]):
for k in range(0,len(b[v])):
X[s,:] += boundary[b[v][k]].value * face[f].Q[v][powers_taylor[term[t].differential[s]]][:,(1+(a>=0))*np[v]+k]
# add to the local jacobian
Y = X ** P
for s,v in zip(range(0,ns),term[t].variable):
temp = numpy.copy(Y)
temp[s,:] = P[s] * X[s,:] ** (P[s]-1)
L.x[index_e[term[t].equation].T,index_a[i][v] if a >= 0 else index_e[v]] += dot_sequence(
A , numpy.diag(numpy.prod(temp,axis=0)) , B[s] )
# add to the function vector
F[element[e].unknown[term[t].equation]] += numpy.dot( A , numpy.prod(Y,axis=0) )
# add dense local jacobian to csr global jacobian
index = sum( nodegrid( J.p[sum(element[e].unknown,[])] , numpy.arange(0,L.i.shape[1]) ) ).flatten()
J.i[index] = L.i.flatten()
J.x[index] = L.x.flatten()
# return the global system
return [ scipy.sparse.csr_matrix((J.x,J.i,J.p)) , F ]
#------------------------------------------------------------------------------#
def write_display_file(display_filename,n):
nv = len(order)
np = numpy.array([ len(x) for x in element[0].unknown ])
Q = numpy.linalg.inv(numpy.array([[1,-1,-1,1],[1,1,-1,-1],[1,1,1,1],[1,-1,1,-1]]))
file = open(display_filename,'w')
for e in range(0,len(element)):
s,t = element_sequential_indices(e,element,face)
for i in range(0,len(element[e].face)):
quad = numpy.array( [ element[e].centre ,
face[element[e].face[s[i-1]]].centre ,
node[face[element[e].face[s[i]]].node[t[i]]].x ,
face[element[e].face[s[i]]].centre ] )
a = numpy.dot(Q,quad)
mesh = numpy.append( numpy.mgrid[0:n+1,0:n+1]*(2.0/n)-1.0 , numpy.zeros((nv,n+1,n+1)) , axis=0 )
mesh[0:2] = [ a[0,j] + a[1,j]*mesh[0] + a[2,j]*mesh[1] + a[3,j]*mesh[0]*mesh[1] for j in range(0,2) ]
for j in range(0,max(np)):
phi = basis(mesh[0],mesh[1],element[e],j,[0,0])
for v in numpy.arange(0,nv)[j < np]:
mesh[2+v] += u[element[e].unknown[v][j]]*phi
file.write( '\n\n'.join([ '\n'.join([ ' '.join(['%e']*(2+nv)) % tuple(mesh[:,i,j]) for j in range(0,n+1) ]) for i in range(0,n+1) ]) + '\n\n\n' )
file.close()
#------------------------------------------------------------------------------#
def write_data_file(data_filename):
file = open(data_filename,'wb')
pickle.dump({'node':node,'face':face,'element':element,'boundary':boundary,'order':order,'u':u},file,protocol=pickle.HIGHEST_PROTOCOL)
file.close()
################################################################################
path = sys.argv[1]
action = sys.argv[2].lower()
directory = os.path.dirname(path)
name = os.path.basename(path)
input_filename = directory + os.sep + name + '.input'
data_filename = directory + os.sep + name + '.data'
display_filename = directory + os.sep + name + '.display'
do = Struct(pre = 'p' in action , re = 'r' in action , init = 'i' in action , solve = 's' in action , display = 'd' in action )
#------------------------------------------------------------------------------#
if not do.pre:
with Timer('reading data from "%s"' % data_filename):
node,face,element,boundary,u,order = read_data_file(data_filename)
with Timer('reading input from "%s"' % input_filename):
input_data = read_input_file(input_filename)
if do.pre:
geometry_filename = directory + os.sep + input_data[0]
order = input_data[1]
if do.pre or do.re:
boundary = input_data[2]
if do.init:
initial = input_data[3]
if do.solve:
for i in range(0,len(boundary)): boundary[i].value = input_data[2][i].value
term = input_data[4]
wind = input_data[5]
iterations = input_data[6]
if do.display:
mesh_size = input_data[7]
with Timer('generating constants'):
(gauss_locations,gauss_weights,
hammer_locations,hammer_weights,
taylor_coefficients,taylor_powers,powers_taylor,
factorial) = generate_constants(order)
if do.pre:
with Timer('reading and processing geometry from "%s"' % geometry_filename):
node,face,element = read_geometry(geometry_filename)
with Timer('generating unknowns'):
u = generate_unknowns()
if do.pre or do.re:
with Timer('assigning boundaries to faces'):
assign_boundaries()
with Timer('calculating element matrices'):
calculate_element_matrices()
if do.init:
with Timer('initialising the unknowns'):
initialise_unknowns()
if do.solve:
with Timer('iterating',True):
index = [ numpy.zeros(u.shape,dtype=bool) for v in range(0,len(order)) ]
for e in range(0,len(element)):
for v in range(0,len(order)):
index[v][element[e].unknown[v]] = True
for i in range(0,iterations):
J,f = generate_system()
print ' ' + ' '.join([ '%.4e' % numpy.max(numpy.abs(f[i])) for i in index ])
u += scipy.sparse.linalg.spsolve(J,-f)
if do.display:
with Timer('saving display to "%s"' % display_filename):
write_display_file(display_filename,mesh_size)
if do.pre or do.re or do.init or do.solve:
with Timer('saving data to "%s"' % data_filename):
write_data_file(data_filename)
################################################################################
| 34.35
| 173
| 0.597982
| 484
| 0.016384
| 0
| 0
| 0
| 0
| 0
| 0
| 5,473
| 0.185268
|
497a5c9b65658e4fea7858123fdca1c39b46407f
| 2,343
|
py
|
Python
|
holobot/framework/kernel.py
|
rexor12/holobot
|
89b7b416403d13ccfeee117ef942426b08d3651d
|
[
"MIT"
] | 1
|
2021-05-24T00:17:46.000Z
|
2021-05-24T00:17:46.000Z
|
holobot/framework/kernel.py
|
rexor12/holobot
|
89b7b416403d13ccfeee117ef942426b08d3651d
|
[
"MIT"
] | 41
|
2021-03-24T22:50:09.000Z
|
2021-12-17T12:15:13.000Z
|
holobot/framework/kernel.py
|
rexor12/holobot
|
89b7b416403d13ccfeee117ef942426b08d3651d
|
[
"MIT"
] | null | null | null |
from holobot.framework.lifecycle import LifecycleManagerInterface
from holobot.sdk import KernelInterface
from holobot.sdk.database import DatabaseManagerInterface
from holobot.sdk.integration import IntegrationInterface
from holobot.sdk.ioc.decorators import injectable
from holobot.sdk.logging import LogInterface
from holobot.sdk.system import EnvironmentInterface
from holobot.sdk.utils import when_all
from typing import Tuple
import asyncio
@injectable(KernelInterface)
class Kernel(KernelInterface):
def __init__(self,
log: LogInterface,
database_manager: DatabaseManagerInterface,
environment: EnvironmentInterface,
integrations: Tuple[IntegrationInterface, ...],
lifecycle_manager: LifecycleManagerInterface) -> None:
super().__init__()
self.__event_loop = asyncio.get_event_loop()
self.__log = log.with_name("Framework", "Kernel")
self.__database_manager = database_manager
self.__environment = environment
self.__integrations = integrations
self.__lifecycle_manager = lifecycle_manager
def run(self):
self.__log.info(f"Starting application... {{ Version = {self.__environment.version} }}")
self.__event_loop.run_until_complete(self.__database_manager.upgrade_all())
self.__event_loop.run_until_complete(self.__lifecycle_manager.start_all())
self.__log.debug(f"Starting integrations... {{ Count = {len(self.__integrations)} }}")
integration_tasks = tuple([self.__event_loop.create_task(integration.start()) for integration in self.__integrations])
self.__log.info(f"Started integrations. {{ Count = {len(integration_tasks)} }}")
try:
self.__log.info("Application started.")
self.__event_loop.run_forever()
except KeyboardInterrupt:
self.__log.info("Shutting down due to keyboard interrupt...")
for integration in self.__integrations:
self.__event_loop.run_until_complete(integration.stop())
finally:
self.__event_loop.run_until_complete(when_all(integration_tasks))
self.__event_loop.run_until_complete(self.__lifecycle_manager.stop_all())
self.__event_loop.stop()
self.__event_loop.close()
self.__log.info("Successful shutdown.")
| 45.057692
| 126
| 0.722151
| 1,864
| 0.795561
| 0
| 0
| 1,893
| 0.807939
| 0
| 0
| 309
| 0.131882
|
497a5f4c2e39ef62c200675216c42fbc21c52436
| 34
|
py
|
Python
|
tests/snmp/test_base.py
|
zohassadar/netdisc
|
9ce4d5c2b0f30d36e71118ffbe6b7ffd93e0dfc8
|
[
"MIT"
] | null | null | null |
tests/snmp/test_base.py
|
zohassadar/netdisc
|
9ce4d5c2b0f30d36e71118ffbe6b7ffd93e0dfc8
|
[
"MIT"
] | null | null | null |
tests/snmp/test_base.py
|
zohassadar/netdisc
|
9ce4d5c2b0f30d36e71118ffbe6b7ffd93e0dfc8
|
[
"MIT"
] | null | null | null |
from netdisc.snmp import snmpbase
| 17
| 33
| 0.852941
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
497aef1b3a2cad12da85ea306e770352bb104646
| 13,063
|
py
|
Python
|
venv/lib/python2.7/site-packages/ansible/modules/storage/netapp/na_ontap_svm.py
|
haind27/test01
|
7f86c0a33eb0874a6c3f5ff9a923fd0cfc8ef852
|
[
"MIT"
] | 37
|
2017-08-15T15:02:43.000Z
|
2021-07-23T03:44:31.000Z
|
venv/lib/python2.7/site-packages/ansible/modules/storage/netapp/na_ontap_svm.py
|
haind27/test01
|
7f86c0a33eb0874a6c3f5ff9a923fd0cfc8ef852
|
[
"MIT"
] | 12
|
2018-01-10T05:25:25.000Z
|
2021-11-28T06:55:48.000Z
|
venv/lib/python2.7/site-packages/ansible/modules/storage/netapp/na_ontap_svm.py
|
haind27/test01
|
7f86c0a33eb0874a6c3f5ff9a923fd0cfc8ef852
|
[
"MIT"
] | 49
|
2017-08-15T09:52:13.000Z
|
2022-03-21T17:11:54.000Z
|
#!/usr/bin/python
# (c) 2018, NetApp, Inc
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: na_ontap_svm
short_description: Manage NetApp Ontap svm
extends_documentation_fragment:
- netapp.na_ontap
version_added: '2.6'
author: Sumit Kumar (sumit4@netapp.com), Archana Ganesan (garchana@netapp.com)
description:
- Create, modify or delete svm on NetApp Ontap
options:
state:
description:
- Whether the specified SVM should exist or not.
choices: ['present', 'absent']
default: 'present'
name:
description:
- The name of the SVM to manage.
required: true
new_name:
description:
- New name of the SVM to be renamed
root_volume:
description:
- Root volume of the SVM. Required when C(state=present).
root_volume_aggregate:
description:
- The aggregate on which the root volume will be created.
- Required when C(state=present).
root_volume_security_style:
description:
- Security Style of the root volume.
- When specified as part of the vserver-create,
this field represents the security style for the Vserver root volume.
- When specified as part of vserver-get-iter call,
this will return the list of matching Vservers.
- The 'unified' security style, which applies only to Infinite Volumes,
cannot be applied to a Vserver's root volume.
- Required when C(state=present)
choices: ['unix', 'ntfs', 'mixed', 'unified']
allowed_protocols:
description:
- Allowed Protocols.
- When specified as part of a vserver-create,
this field represent the list of protocols allowed on the Vserver.
- When part of vserver-get-iter call,
this will return the list of Vservers
which have any of the protocols specified
as part of the allowed-protocols.
- When part of vserver-modify,
this field should include the existing list
along with new protocol list to be added to prevent data disruptions.
- Possible values
- nfs NFS protocol,
- cifs CIFS protocol,
- fcp FCP protocol,
- iscsi iSCSI protocol,
- ndmp NDMP protocol,
- http HTTP protocol,
- nvme NVMe protocol
aggr_list:
description:
- List of aggregates assigned for volume operations.
- These aggregates could be shared for use with other Vservers.
- When specified as part of a vserver-create,
this field represents the list of aggregates
that are assigned to the Vserver for volume operations.
- When part of vserver-get-iter call,
this will return the list of Vservers
which have any of the aggregates specified as part of the aggr-list.
'''
EXAMPLES = """
- name: Create SVM
na_ontap_svm:
state: present
name: ansibleVServer
root_volume: vol1
root_volume_aggregate: aggr1
root_volume_security_style: mixed
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
"""
RETURN = """
"""
import traceback
import ansible.module_utils.netapp as netapp_utils
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppOntapSVM(object):
def __init__(self):
self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
self.argument_spec.update(dict(
state=dict(required=False, choices=[
'present', 'absent'], default='present'),
name=dict(required=True, type='str'),
new_name=dict(required=False, type='str'),
root_volume=dict(type='str'),
root_volume_aggregate=dict(type='str'),
root_volume_security_style=dict(type='str', choices=['unix',
'ntfs',
'mixed',
'unified'
]),
allowed_protocols=dict(type='list'),
aggr_list=dict(type='list')
))
self.module = AnsibleModule(
argument_spec=self.argument_spec,
supports_check_mode=True
)
p = self.module.params
# set up state variables
self.state = p['state']
self.name = p['name']
self.new_name = p['new_name']
self.root_volume = p['root_volume']
self.root_volume_aggregate = p['root_volume_aggregate']
self.root_volume_security_style = p['root_volume_security_style']
self.allowed_protocols = p['allowed_protocols']
self.aggr_list = p['aggr_list']
if HAS_NETAPP_LIB is False:
self.module.fail_json(
msg="the python NetApp-Lib module is required")
else:
self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
def get_vserver(self):
"""
Checks if vserver exists.
:return:
True if vserver found
False if vserver is not found
:rtype: bool
"""
vserver_info = netapp_utils.zapi.NaElement('vserver-get-iter')
query_details = netapp_utils.zapi.NaElement.create_node_with_children(
'vserver-info', **{'vserver-name': self.name})
query = netapp_utils.zapi.NaElement('query')
query.add_child_elem(query_details)
vserver_info.add_child_elem(query)
result = self.server.invoke_successfully(vserver_info,
enable_tunneling=False)
vserver_details = None
if (result.get_child_by_name('num-records') and
int(result.get_child_content('num-records')) >= 1):
attributes_list = result.get_child_by_name('attributes-list')
vserver_info = attributes_list.get_child_by_name('vserver-info')
aggr_list = list()
''' vserver aggr-list can be empty by default'''
get_list = vserver_info.get_child_by_name('aggr-list')
if get_list is not None:
aggregates = get_list.get_children()
for aggr in aggregates:
aggr_list.append(aggr.get_content())
protocols = list()
'''allowed-protocols is not empty by default'''
get_protocols = vserver_info.get_child_by_name(
'allowed-protocols').get_children()
for protocol in get_protocols:
protocols.append(protocol.get_content())
vserver_details = {'name': vserver_info.get_child_content(
'vserver-name'),
'aggr_list': aggr_list,
'allowed_protocols': protocols}
return vserver_details
def create_vserver(self):
options = {'vserver-name': self.name, 'root-volume': self.root_volume}
if self.root_volume_aggregate is not None:
options['root-volume-aggregate'] = self.root_volume_aggregate
if self.root_volume_security_style is not None:
options['root-volume-security-style'] = self.root_volume_security_style
vserver_create = netapp_utils.zapi.NaElement.create_node_with_children(
'vserver-create', **options)
try:
self.server.invoke_successfully(vserver_create,
enable_tunneling=False)
except netapp_utils.zapi.NaApiError as e:
self.module.fail_json(msg='Error provisioning SVM %s \
with root volume %s on aggregate %s: %s'
% (self.name, self.root_volume,
self.root_volume_aggregate, to_native(e)),
exception=traceback.format_exc())
def delete_vserver(self):
vserver_delete = netapp_utils.zapi.NaElement.create_node_with_children(
'vserver-destroy', **{'vserver-name': self.name})
try:
self.server.invoke_successfully(vserver_delete,
enable_tunneling=False)
except netapp_utils.zapi.NaApiError as e:
self.module.fail_json(msg='Error deleting SVM %s \
with root volume %s on aggregate %s: %s'
% (self.name, self.root_volume,
self.root_volume_aggregate, to_native(e)),
exception=traceback.format_exc())
def rename_vserver(self):
vserver_rename = netapp_utils.zapi.NaElement.create_node_with_children(
'vserver-rename', **{'vserver-name': self.name,
'new-name': self.new_name})
try:
self.server.invoke_successfully(vserver_rename,
enable_tunneling=False)
except netapp_utils.zapi.NaApiError as e:
self.module.fail_json(msg='Error renaming SVM %s: %s'
% (self.name, to_native(e)),
exception=traceback.format_exc())
def modify_vserver(self, allowed_protocols, aggr_list):
vserver_modify = netapp_utils.zapi.NaElement.create_node_with_children(
'vserver-modify', **{'vserver-name': self.name})
if allowed_protocols:
allowed_protocols = netapp_utils.zapi.NaElement(
'allowed-protocols')
for protocol in self.allowed_protocols:
allowed_protocols.add_new_child('protocol', protocol)
vserver_modify.add_child_elem(allowed_protocols)
if aggr_list:
aggregates = netapp_utils.zapi.NaElement('aggr-list')
for aggr in self.aggr_list:
aggregates.add_new_child('aggr-name', aggr)
vserver_modify.add_child_elem(aggregates)
try:
self.server.invoke_successfully(vserver_modify,
enable_tunneling=False)
except netapp_utils.zapi.NaApiError as e:
self.module.fail_json(msg='Error modifying SVM %s: %s'
% (self.name, to_native(e)),
exception=traceback.format_exc())
def apply(self):
changed = False
vserver_details = self.get_vserver()
if vserver_details is not None:
results = netapp_utils.get_cserver(self.server)
cserver = netapp_utils.setup_ontap_zapi(
module=self.module, vserver=results)
netapp_utils.ems_log_event("na_ontap_svm", cserver)
rename_vserver = False
modify_protocols = False
modify_aggr_list = False
obj = open('vserver-log', 'a')
if vserver_details is not None:
if self.state == 'absent':
changed = True
elif self.state == 'present':
if self.new_name is not None and self.new_name != self.name:
rename_vserver = True
changed = True
if self.allowed_protocols is not None:
self.allowed_protocols.sort()
vserver_details['allowed_protocols'].sort()
if self.allowed_protocols != vserver_details['allowed_protocols']:
modify_protocols = True
changed = True
if self.aggr_list is not None:
self.aggr_list.sort()
vserver_details['aggr_list'].sort()
if self.aggr_list != vserver_details['aggr_list']:
modify_aggr_list = True
changed = True
else:
if self.state == 'present':
changed = True
if changed:
if self.module.check_mode:
pass
else:
if self.state == 'present':
if vserver_details is None:
self.create_vserver()
else:
if rename_vserver:
self.rename_vserver()
if modify_protocols or modify_aggr_list:
self.modify_vserver(
modify_protocols, modify_aggr_list)
elif self.state == 'absent':
self.delete_vserver()
self.module.exit_json(changed=changed)
def main():
v = NetAppOntapSVM()
v.apply()
if __name__ == '__main__':
main()
| 37.645533
| 86
| 0.580724
| 9,396
| 0.719283
| 0
| 0
| 0
| 0
| 0
| 0
| 4,510
| 0.34525
|
497c36a9409e9932ee77efb6c8843fae9cedceac
| 1,200
|
py
|
Python
|
prompts/wizard_of_wikipedia.py
|
andreamad8/FSB
|
a81593590189fa5ad1cc37c5857f974effd9750a
|
[
"MIT"
] | 53
|
2021-10-11T03:24:14.000Z
|
2022-03-30T15:17:23.000Z
|
prompts/wizard_of_wikipedia.py
|
andreamad8/FSB
|
a81593590189fa5ad1cc37c5857f974effd9750a
|
[
"MIT"
] | 1
|
2021-12-26T22:48:38.000Z
|
2022-01-15T18:05:32.000Z
|
prompts/wizard_of_wikipedia.py
|
andreamad8/FSB
|
a81593590189fa5ad1cc37c5857f974effd9750a
|
[
"MIT"
] | 5
|
2022-01-27T09:07:39.000Z
|
2022-03-04T08:58:23.000Z
|
def convert_sample_to_shot_wow(sample, with_knowledge=True):
prefix = "Dialogue:\n"
assert len(sample["dialogue"]) == len(sample["meta"])
for turn, meta in zip(sample["dialogue"],sample["meta"]):
prefix += f"User: {turn[0]}" +"\n"
if with_knowledge:
if len(meta)>0:
prefix += f"KB: {meta[0]}" +"\n"
else:
prefix += f"KB: None" +"\n"
if turn[1] == "":
prefix += f"Assistant:"
return prefix
else:
prefix += f"Assistant: {turn[1]}" +"\n"
return prefix
def convert_sample_to_shot_wow_interact(sample, with_knowledge=True):
prefix = "Dialogue:\n"
assert len(sample["dialogue"]) == len(sample["KB_wiki"])
for turn, meta in zip(sample["dialogue"],sample["KB_wiki"]):
prefix += f"User: {turn[0]}" +"\n"
if with_knowledge:
if len(meta)>0:
prefix += f"KB: {meta[0]}" +"\n"
else:
prefix += f"KB: None" +"\n"
if turn[1] == "":
prefix += f"Assistant:"
return prefix
else:
prefix += f"Assistant: {turn[1]}" +"\n"
return prefix
| 33.333333
| 69
| 0.500833
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 294
| 0.245
|
497d558f6807d6cee34934135fc08d3e5e24fbf5
| 487
|
py
|
Python
|
server/apps/api/notice/migrations/0003_alter_event_priority.py
|
NikitaGrishchenko/csp-tender-hack-server
|
56055f51bf472f0f1e56b419a48d993cc91e0f3a
|
[
"MIT"
] | null | null | null |
server/apps/api/notice/migrations/0003_alter_event_priority.py
|
NikitaGrishchenko/csp-tender-hack-server
|
56055f51bf472f0f1e56b419a48d993cc91e0f3a
|
[
"MIT"
] | null | null | null |
server/apps/api/notice/migrations/0003_alter_event_priority.py
|
NikitaGrishchenko/csp-tender-hack-server
|
56055f51bf472f0f1e56b419a48d993cc91e0f3a
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.9 on 2021-11-27 12:21
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('notice', '0002_auto_20211127_0236'),
]
operations = [
migrations.AlterField(
model_name='event',
name='priority',
field=models.IntegerField(choices=[(1, 'Низкий приоритет'), (2, 'Средний приоритет'), (3, 'Высокий приоритет')], verbose_name='Приоритет'),
),
]
| 25.631579
| 151
| 0.616016
| 450
| 0.828729
| 0
| 0
| 0
| 0
| 0
| 0
| 220
| 0.405157
|
497e1c5d29374050c770b786c91bc5c1ccabcd85
| 650
|
py
|
Python
|
gdpr_assist/app_settings.py
|
mserrano07/django-gdpr-assist
|
3c23d0aadadc676c128ef57aebc36570f3936ff1
|
[
"BSD-3-Clause"
] | null | null | null |
gdpr_assist/app_settings.py
|
mserrano07/django-gdpr-assist
|
3c23d0aadadc676c128ef57aebc36570f3936ff1
|
[
"BSD-3-Clause"
] | null | null | null |
gdpr_assist/app_settings.py
|
mserrano07/django-gdpr-assist
|
3c23d0aadadc676c128ef57aebc36570f3936ff1
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Settings
"""
from yaa_settings import AppSettings
class PrivacySettings(AppSettings):
# Name of the model attribute for a privacy definition
GDPR_PRIVACY_CLASS_NAME = "PrivacyMeta"
# Name of the model attribute for the privacy definition instance
GDPR_PRIVACY_INSTANCE_NAME = "_privacy_meta"
# Internal name for the GDPR log database
GDPR_LOG_DATABASE_NAME = "gdpr_log"
# Whether to write to log database during anonymisation.
GDPR_LOG_ON_ANONYMISE = True
# Disable anonymise_db command by default - we don't want people running it
# on production by accident
GDPR_CAN_ANONYMISE_DATABASE = False
| 28.26087
| 79
| 0.755385
| 593
| 0.912308
| 0
| 0
| 0
| 0
| 0
| 0
| 372
| 0.572308
|
497f0f54faebc451ce2dc9315e86227db65fd970
| 2,382
|
py
|
Python
|
config-tests/test_server_details.py
|
mozilla-services/kinto-integration-tests
|
ec5199f5e9c7452c78d8f6fb41dcaa02504f34f7
|
[
"Apache-2.0"
] | 2
|
2017-09-01T19:41:43.000Z
|
2018-11-08T14:42:00.000Z
|
config-tests/test_server_details.py
|
Kinto/kinto-integration-tests
|
ec5199f5e9c7452c78d8f6fb41dcaa02504f34f7
|
[
"Apache-2.0"
] | 89
|
2017-01-25T21:44:26.000Z
|
2021-01-01T08:39:07.000Z
|
config-tests/test_server_details.py
|
mozilla-services/kinto-integration-tests
|
ec5199f5e9c7452c78d8f6fb41dcaa02504f34f7
|
[
"Apache-2.0"
] | 6
|
2017-03-14T13:40:38.000Z
|
2020-04-03T15:32:57.000Z
|
import pytest
import requests
def aslist_cronly(value):
""" Split the input on lines if it's a valid string type"""
if isinstance(value, str):
value = filter(None, [x.strip() for x in value.splitlines()])
return list(value)
def aslist(value, flatten=True):
""" Return a list of strings, separating the input based on newlines
and, if flatten=True (the default), also split on spaces within
each line."""
values = aslist_cronly(value)
if not flatten:
return values
result = []
for value in values:
subvalues = value.split()
result.extend(subvalues)
return result
@pytest.fixture(scope="module")
def api_url(conf, env, request):
api_url = 'dist_api'
if 'settings' in request.node.keywords:
api_url = 'settings_api'
elif 'webextensions' in request.node.keywords:
api_url = 'webextensions_api'
return conf.get(env, api_url)
@pytest.mark.dist
@pytest.mark.settings
@pytest.mark.webextensions
def test_version(conf, env, api_url):
data = requests.get(api_url + '__version__').json()
fields = set(data.keys())
expected_fields = {"source", "commit", "version", "build"}
assert len(fields ^ expected_fields) == 0
@pytest.mark.dist
@pytest.mark.settings
@pytest.mark.webextensions
def test_heartbeat(conf, env, api_url):
res = requests.get(api_url + '__heartbeat__')
assert res.status_code == 200
data = res.json()
fields = set(data.keys())
expected_fields = set(aslist(conf.get(env, 'heartbeat_fields')))
assert len(fields ^ expected_fields) == 0
@pytest.mark.dist
@pytest.mark.settings
@pytest.mark.webextensions
def test_server_info(conf, env, api_url):
res = requests.get(api_url)
data = res.json()
fields = set(data.keys())
expected_fields = {
"url", "project_docs", "project_name", "capabilities",
"project_version", "settings", "http_api_version"
}
assert len(fields ^ expected_fields) == 0
@pytest.mark.dist
@pytest.mark.settings
@pytest.mark.webextensions
def test_contribute(conf, env, api_url):
res = requests.get(api_url + 'contribute.json')
data = res.json()
fields = set(data.keys())
expected_fields = {
"keywords", "participate", "repository",
"description", "urls", "name",
}
assert len(fields ^ expected_fields) == 0
| 25.891304
| 72
| 0.665407
| 0
| 0
| 0
| 0
| 1,717
| 0.720823
| 0
| 0
| 544
| 0.22838
|
49803c62b083c02f67f3cea8900cbba0f19179c1
| 635
|
py
|
Python
|
tests/db/test_factory.py
|
albertteoh/data_pipeline
|
a99f1c7412375b3e9f4115108fcdde517b2e71a6
|
[
"Apache-2.0"
] | null | null | null |
tests/db/test_factory.py
|
albertteoh/data_pipeline
|
a99f1c7412375b3e9f4115108fcdde517b2e71a6
|
[
"Apache-2.0"
] | null | null | null |
tests/db/test_factory.py
|
albertteoh/data_pipeline
|
a99f1c7412375b3e9f4115108fcdde517b2e71a6
|
[
"Apache-2.0"
] | null | null | null |
import pytest
import data_pipeline.db.factory as dbfactory
import data_pipeline.constants.const as const
from data_pipeline.db.exceptions import UnsupportedDbTypeError
@pytest.mark.parametrize("dbtype, expect_class", [
(const.ORACLE, "OracleDb"),
(const.MSSQL, "MssqlDb"),
(const.POSTGRES, "PostgresDb"),
(const.GREENPLUM, "GreenplumDb"),
(const.FILE, "FileDb"),
])
def test_build_oracledb(dbtype, expect_class):
db = dbfactory.build(dbtype)
assert type(db).__name__ == expect_class
def test_build_unsupported():
with pytest.raises(ImportError):
db = dbfactory.build("AnUnsupportedDatabase")
| 30.238095
| 62
| 0.738583
| 0
| 0
| 0
| 0
| 342
| 0.538583
| 0
| 0
| 97
| 0.152756
|
49806a87d676d3fa46db3e3b6f5f01048f4d408e
| 5,142
|
py
|
Python
|
etl/data_extraction/scrapers/sozialeinsatz.py
|
Betadinho/einander-helfen
|
272f11397d80ab5267f39a7b36734495f1c00b0c
|
[
"MIT"
] | 7
|
2020-04-23T20:16:11.000Z
|
2022-01-04T14:57:16.000Z
|
etl/data_extraction/scrapers/sozialeinsatz.py
|
Betadinho/einander-helfen
|
272f11397d80ab5267f39a7b36734495f1c00b0c
|
[
"MIT"
] | 361
|
2020-04-23T17:20:14.000Z
|
2022-03-02T11:29:45.000Z
|
etl/data_extraction/scrapers/sozialeinsatz.py
|
Betadinho/einander-helfen
|
272f11397d80ab5267f39a7b36734495f1c00b0c
|
[
"MIT"
] | 1
|
2021-11-29T06:02:52.000Z
|
2021-11-29T06:02:52.000Z
|
import math
import re
from data_extraction.scraper import Scraper
class SozialeinsatzScraper(Scraper):
"""Scrapes the website www.sozialeinsatz.de."""
base_url = 'https://www.sozialeinsatz.de'
debug = True
def parse(self, response, url):
"""Handles the soupified response of a detail page in the predefined way and returns it"""
self.logger.debug('parse()')
content = response.find('div', {'id': 'content'})
title = content.find('h2')
if title.text == 'Error 404':
return None
task = content.find('h2', string=re.compile(r'Stellenbeschreibung.*')).findNext('p')
organization = title.findNext('div', {'class': 'row'}).find('p')
contact = content.find('h2', string=re.compile(r'Ansprechpartner.*')).findNext('p')
details = content.find('h2', string=re.compile(r'Details.*')).findNext('p')
category_string = details.find('strong', string=re.compile(r'Aufgaben.*')).nextSibling
categories = [x.strip() for x in category_string.split(',')]
categories.append(title.find('acronym')['title'])
timing = details.find('strong', string=re.compile(r'Zeitraum.*')).nextSibling
location = None
location_p = content.find('h2', string=re.compile(r'Einsatzort.*')).findNext('p')
if location_p.a is not None and 'q=' in location_p.a['href']:
location = location_p.a['href'].split('q=')[1]
zipcode = None
if location is not None:
if len(re.findall(r'(\d{5})', location)) > 0:
zipcode = re.findall(r'(\d{5})', location)[0]
parsed_object = {
'title': title.text.strip(),
'categories': categories,
'location': location,
'task': task.decode_contents().strip(),
'target_group': None,
'prerequisites': None,
'language_skills': None,
'timing': timing.strip(),
'effort': None,
'opportunities': None,
'organization': organization.decode_contents().strip() if organization is not None else None,
'contact': contact.decode_contents().strip() if contact is not None else None,
'link': url or None,
'source': 'www.sozialeinsatz.de',
'geo_location': None,
}
parsed_object['post_struct'] = {
'title': parsed_object['title'],
'categories': parsed_object['categories'],
'location': {
'country': 'Deutschland',
'zipcode': zipcode,
'city': None,
'street': None,
},
'task': None,
'target_group': None,
'prerequisites': parsed_object['prerequisites'],
'language_skills': parsed_object['language_skills'],
'timing': parsed_object['timing'],
'effort': None,
'opportunities': None,
'organization': None,
'contact': None,
'link': parsed_object['link'],
'source': parsed_object['source'],
'geo_location': parsed_object['geo_location'],
}
return parsed_object
def add_urls(self):
"""Adds all URLs of detail pages, found on the search pages, for the crawl function to scrape"""
self.logger.debug('add_urls()')
import time
index = 1
index_max = None
search_page_url = f'{self.base_url}/stellenangebote/finden?Stellenangebot_page={index}'
next_page_url = search_page_url
while next_page_url:
response = self.soupify(next_page_url)
# Get tags of individual results
detail_a_tags = response.findAll('a', {'class': 'morelink'})
# Get maximum number of pages
if index_max is None:
summary_text = response.find('div', {'class': 'summary'}).text
entries = int(re.findall(r'(\d+).?$', summary_text)[0])
index_max = math.ceil(entries / 25.0)
self.logger.debug(f'Fetched {len(detail_a_tags)} URLs from {next_page_url} [{index}/{index_max}]')
self.update_fetching_progress(index, index_max)
# Iterate links and add, if not already found
for link_tag in detail_a_tags:
current_link = self.base_url + link_tag['href']
if current_link in self.urls:
self.logger.debug(f'func: add_urls, page_index: {index},'
f' search_page: {search_page_url}, '
f'duplicate_index: {current_link}, '
f'duplicate_index: {self.urls.index(current_link)}')
else:
self.urls.append(current_link)
# Get next result page
if index < index_max:
index += 1
next_page_url = f'{self.base_url}/stellenangebote/finden?Stellenangebot_page={index}'
else:
next_page_url = None
time.sleep(self.delay)
| 36.992806
| 110
| 0.556593
| 5,072
| 0.986387
| 0
| 0
| 0
| 0
| 0
| 0
| 1,595
| 0.310191
|
4980cf418b1fec3383b451b2c9e98a8148676569
| 1,671
|
py
|
Python
|
fitbenchmarking/parsing/base_parser.py
|
arm61/fitbenchmarking
|
c745c684e3ca4895a666eb863426746d8f06636c
|
[
"BSD-3-Clause"
] | null | null | null |
fitbenchmarking/parsing/base_parser.py
|
arm61/fitbenchmarking
|
c745c684e3ca4895a666eb863426746d8f06636c
|
[
"BSD-3-Clause"
] | null | null | null |
fitbenchmarking/parsing/base_parser.py
|
arm61/fitbenchmarking
|
c745c684e3ca4895a666eb863426746d8f06636c
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Implements the base Parser as a Context Manager.
"""
from abc import ABCMeta, abstractmethod
class Parser:
"""
Base abstract class for a parser.
Further parsers should inherit from this and override the abstract parse()
method.
"""
__metaclass__ = ABCMeta
def __init__(self, filename):
"""
Store the filename for use by enter.
:param filename: The path to the file to be parsed
:type filename: string
"""
self._filename = filename
self.file = None
self.fitting_problem = None
def __enter__(self):
"""
Called when used as a context manager.
Opens the file ready for parsing.
"""
self.file = open(self._filename, 'r')
return self
def __exit__(self, exc_type, exc_value, traceback):
"""
Called when used as a context manager.
Closes the file.
:param exc_type: Used if an exception occurs. Contains the
exception type.
:type exc_type: type
:param exc_value: Used if an exception occurs. Contains the exception
value.
:type exc_value: Exception
:param traceback: Used if an exception occurs. Contains the exception
traceback.
:type traceback: traceback
"""
try:
self.file.close()
except AttributeError:
pass
@abstractmethod
def parse(self):
"""
Parse the file into a FittingProblem.
:returns: The parsed problem
:rtype: FittingProblem
"""
raise NotImplementedError
| 26.109375
| 78
| 0.581089
| 1,570
| 0.939557
| 0
| 0
| 209
| 0.125075
| 0
| 0
| 1,101
| 0.658887
|
498189a8b987526464b2fd92c5dba221e497e78b
| 10,223
|
py
|
Python
|
src/offline/news/item-feature-update-batch/src/item-feature-update-batch.py
|
shenshaoyong/recommender-system-dev-workshop-code
|
ce422627181472ad513f473b65bf42410c46304a
|
[
"Apache-2.0"
] | 1
|
2021-07-14T09:15:40.000Z
|
2021-07-14T09:15:40.000Z
|
src/offline/news/item-feature-update-batch/src/item-feature-update-batch.py
|
shenshaoyong/recommender-system-dev-workshop-code
|
ce422627181472ad513f473b65bf42410c46304a
|
[
"Apache-2.0"
] | null | null | null |
src/offline/news/item-feature-update-batch/src/item-feature-update-batch.py
|
shenshaoyong/recommender-system-dev-workshop-code
|
ce422627181472ad513f473b65bf42410c46304a
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import print_function
# from tqdm import tqdm
import argparse
import glob
import os
import pickle
import boto3
import numpy as np
import pandas as pd
import encoding
import kg
# tqdm.pandas()
# pandarallel.initialize(progress_bar=True)
# bucket = os.environ.get("BUCKET_NAME", " ")
# raw_data_folder = os.environ.get("RAW_DATA", " ")
# logger = logging.getLogger()
# logger.setLevel(logging.INFO)
# tqdm_notebook().pandas()
########################################
# 从s3同步数据
########################################
def sync_s3(file_name_list, s3_folder, local_folder):
for f in file_name_list:
print("file preparation: download src key {} to dst key {}".format(os.path.join(
s3_folder, f), os.path.join(local_folder, f)))
s3client.download_file(bucket, os.path.join(
s3_folder, f), os.path.join(local_folder, f))
def write_to_s3(filename, bucket, key):
print("upload s3://{}/{}".format(bucket, key))
with open(filename, 'rb') as f: # Read in binary mode
# return s3client.upload_fileobj(f, bucket, key)
return s3client.put_object(
ACL='bucket-owner-full-control',
Bucket=bucket,
Key=key,
Body=f
)
def write_str_to_s3(content, bucket, key):
print("write s3://{}/{}, content={}".format(bucket, key, content))
s3client.put_object(Body=str(content).encode(
"utf8"), Bucket=bucket, Key=key, ACL='bucket-owner-full-control')
parser = argparse.ArgumentParser()
parser.add_argument('--bucket', type=str)
parser.add_argument('--prefix', type=str)
parser.add_argument("--region", type=str, help="aws region")
args, _ = parser.parse_known_args()
print("args:", args)
region = None
if args.region:
region = args.region
print("region:", args.region)
boto3.setup_default_session(region_name=args.region)
bucket = args.bucket
prefix = args.prefix
print("bucket={}".format(bucket))
print("prefix='{}'".format(prefix))
s3client = boto3.client('s3')
out_s3_path = "s3://{}/{}/feature/content/inverted-list".format(bucket, prefix)
local_folder = 'info'
if not os.path.exists(local_folder):
os.makedirs(local_folder)
file_name_list = ['complete_dkn_word_embedding.npy']
s3_folder = '{}/model/rank/content/dkn_embedding_latest/'.format(prefix)
sync_s3(file_name_list, s3_folder, local_folder)
file_name_list = ['item.csv']
s3_folder = '{}/system/item-data'.format(prefix)
sync_s3(file_name_list, s3_folder, local_folder)
file_name_list = ['entities_dbpedia.dict', 'relations_dbpedia.dict',
'kg_dbpedia.txt', 'entities_dbpedia_train.dict',
'relations_dbpedia_train.dict', 'kg_dbpedia_train.txt',
]
s3_folder = '{}/model/meta_files/'.format(prefix)
sync_s3(file_name_list, s3_folder, local_folder)
df_filter_item = pd.read_csv('info/item.csv', sep='_!_',
names=['news_id', 'type_code', 'type', 'title', 'keywords', 'popularity', 'new'])
complete_dkn_word_embed = np.load("info/complete_dkn_word_embedding.npy")
# prepare model for batch process
meta_file_prefix = "{}/model/meta_files".format(prefix)
os.environ['GRAPH_BUCKET'] = bucket
os.environ['KG_DBPEDIA_KEY'] = '{}/kg_dbpedia.txt'.format(meta_file_prefix)
os.environ['KG_ENTITY_KEY'] = '{}/entities_dbpedia.dict'.format(
meta_file_prefix)
os.environ['KG_RELATION_KEY'] = '{}/relations_dbpedia.dict'.format(
meta_file_prefix)
os.environ['KG_DBPEDIA_TRAIN_KEY'] = '{}/kg_dbpedia_train.txt'.format(
meta_file_prefix)
os.environ['KG_ENTITY_TRAIN_KEY'] = '{}/entities_dbpedia_train.dict'.format(
meta_file_prefix)
os.environ['KG_RELATION_TRAIN_KEY'] = '{}/relations_dbpedia_train.dict'.format(
meta_file_prefix)
os.environ['KG_ENTITY_INDUSTRY_KEY'] = '{}/entity_industry.txt'.format(
meta_file_prefix)
os.environ['KG_VOCAB_KEY'] = '{}/vocab.json'.format(meta_file_prefix)
os.environ['DATA_INPUT_KEY'] = ''
os.environ['TRAIN_OUTPUT_KEY'] = '{}/model/rank/content/dkn_embedding_latest/'.format(
prefix)
kg_path = os.environ['GRAPH_BUCKET']
dbpedia_key = os.environ['KG_DBPEDIA_KEY']
entity_key = os.environ['KG_ENTITY_KEY']
relation_key = os.environ['KG_RELATION_KEY']
dbpedia_train_key = os.environ['KG_DBPEDIA_TRAIN_KEY']
entity_train_key = os.environ['KG_ENTITY_TRAIN_KEY']
relation_train_key = os.environ['KG_RELATION_TRAIN_KEY']
entity_industry_key = os.environ['KG_ENTITY_INDUSTRY_KEY']
vocab_key = os.environ['KG_VOCAB_KEY']
data_input_key = os.environ['DATA_INPUT_KEY']
train_output_key = os.environ['TRAIN_OUTPUT_KEY']
env = {
'GRAPH_BUCKET': kg_path,
'KG_DBPEDIA_KEY': dbpedia_key,
'KG_ENTITY_KEY': entity_key,
'KG_RELATION_KEY': relation_key,
'KG_DBPEDIA_TRAIN_KEY': dbpedia_train_key,
'KG_ENTITY_TRAIN_KEY': entity_train_key,
'KG_RELATION_TRAIN_KEY': relation_train_key,
'KG_ENTITY_INDUSTRY_KEY': entity_industry_key,
'KG_VOCAB_KEY': vocab_key,
'DATA_INPUT_KEY': data_input_key,
'TRAIN_OUTPUT_KEY': train_output_key
}
print("Kg env: {}".format(env))
graph = kg.Kg(env, region=region) # Where we keep the model when it's loaded
model = encoding.encoding(graph, env, region=region)
news_id_news_feature_dict = {}
map_words = {}
map_entities = {}
def analyze_map(raw_idx, map_dict, filter_idx):
for idx in raw_idx:
if idx == 0:
filter_idx.append(0)
else:
if idx not in map_dict.keys():
map_dict[idx] = len(map_dict) + 1
filter_idx.append(map_dict[idx])
for row in df_filter_item.iterrows():
item_row = row[1]
program_id = str(item_row['news_id'])
title_result = model[item_row['title']]
current_words = title_result[0]
current_entities = title_result[1]
filter_words = []
filter_entities = []
analyze_map(current_words, map_words, filter_words)
analyze_map(current_entities, map_entities, filter_entities)
# filter entities & filter words
program_dict = {
'entities': filter_entities,
'words': filter_words
}
news_id_news_feature_dict[program_id] = program_dict
# clean data for graph train
# path = '/home/ec2-user/workplace/recommender-system-solution/src/offline/news/item-feature-update-batch/aws-gcr-rs-sol-demo-ap-southeast-1-522244679887/sample-data/model/meta_files'
path = "info"
entities_dbpedia = os.path.join(path, 'entities_dbpedia.dict')
relations_dbpedia = os.path.join(path, 'relations_dbpedia.dict')
kg_dbpedia = os.path.join(path, 'kg_dbpedia.txt')
entities_dbpedia_train_path = os.path.join(path, 'entities_dbpedia_train.dict')
relations_dbpedia_train_path = os.path.join(
path, 'relations_dbpedia_train.dict')
kg_dbpedia_train_path = os.path.join(path, 'kg_dbpedia_train.txt')
entities_dbpedia_f = pd.read_csv(
entities_dbpedia, header=None, names=['e', 'e_name'])
relations_dbpedia_f = pd.read_csv(
relations_dbpedia, header=None, names=['e', 'e_name'])
kg_dbpedia_f = pd.read_csv(kg_dbpedia, delimiter='\t',
header=None, names=['h', 'r', 't'])
# map_entities -> train_entites
# constrcut from entites:
entities_dbpedia_slim = {}
relations_dbpedia_slim = {}
entities_dbpedia_train = {}
relations_dbpedia_train = {}
entities_dbpedia_train[0] = '0'
relations_dbpedia_train[0] = '0'
new_list_kg = []
def analyze_map_hrt(idx, map_dict, raw_content, train_dict):
# 原始实体从0开始,所以需要归位进行寻找
idx_test = idx - 1
if idx_test not in map_dict.keys():
map_dict[idx_test] = len(map_dict) + 1
filter_content = raw_content[raw_content.e == idx_test]
train_dict[len(map_dict)] = filter_content['e_name'].values[0]
return map_dict[idx_test]
for raw_entity, new_idx in map_entities.items():
entity_id = raw_entity
map_head_id = analyze_map_hrt(
entity_id, entities_dbpedia_slim, entities_dbpedia_f, entities_dbpedia_train)
kg_found_pd = kg_dbpedia_f[kg_dbpedia_f.h == entity_id]
# print(kg_found_pd)
for found_row in kg_found_pd.iterrows():
relation_id = found_row[1]['r']
tail_id = found_row[1]['t']
map_relation_id = analyze_map_hrt(relation_id, relations_dbpedia_slim, relations_dbpedia_f,
relations_dbpedia_train)
map_tail_id = analyze_map_hrt(
tail_id, entities_dbpedia_slim, entities_dbpedia_f, entities_dbpedia_train)
# create new kg : h-r-t
kg_row = {}
kg_row['h'] = map_head_id
kg_row['r'] = map_relation_id
kg_row['t'] = map_tail_id
new_list_kg.append(kg_row)
kg_dbpedia_slim = pd.DataFrame(new_list_kg)
kg_dbpedia_slim.to_csv(kg_dbpedia_train_path, sep='\t',
header=False, index=False)
with open(entities_dbpedia_train_path, 'w') as f:
for key in entities_dbpedia_train.keys():
f.write("%s,%s\n" % (key, entities_dbpedia_train[key]))
with open(relations_dbpedia_train_path, 'w') as f:
for key in relations_dbpedia_train.keys():
f.write("%s,%s\n" % (key, relations_dbpedia_train[key]))
# slim version
list_word_embedding = []
list_word_embedding.append([0] * 300)
for raw_key, map_v in map_words.items():
list_word_embedding.append(complete_dkn_word_embed[raw_key])
file_name = 'info/dkn_word_embedding.npy'
with open(file_name, "wb") as f:
np.save(f, np.array(list_word_embedding))
write_to_s3(file_name,
bucket,
'{}/model/rank/content/dkn_embedding_latest/dkn_word_embedding.npy'.format(prefix))
write_to_s3(kg_dbpedia_train_path,
bucket,
'{}/kg_dbpedia_train.txt'.format(meta_file_prefix))
write_to_s3(entities_dbpedia_train_path,
bucket,
'{}/entities_dbpedia_train.dict'.format(meta_file_prefix))
write_to_s3(relations_dbpedia_train_path,
bucket,
'{}/relations_dbpedia_train.dict'.format(meta_file_prefix))
file_name = 'info/news_id_news_feature_dict.pickle'
out_file = open(file_name, 'wb')
pickle.dump(news_id_news_feature_dict, out_file)
out_file.close()
# s3_url = S3Uploader.upload(file_name, out_s3_path)
s3_url = write_to_s3(file_name, bucket,
'{}/feature/content/inverted-list/news_id_news_feature_dict.pickle'.format(prefix))
| 35.010274
| 183
| 0.702142
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,125
| 0.304314
|
498246054897849d72b07dc078d8b150091d7c85
| 5,054
|
py
|
Python
|
wirepas_backend_client/tools/utils.py
|
bencorrado/backend-client
|
628c9999f8d98b0c1e56d87bfd2dbf1ca1ea138c
|
[
"Apache-2.0"
] | null | null | null |
wirepas_backend_client/tools/utils.py
|
bencorrado/backend-client
|
628c9999f8d98b0c1e56d87bfd2dbf1ca1ea138c
|
[
"Apache-2.0"
] | null | null | null |
wirepas_backend_client/tools/utils.py
|
bencorrado/backend-client
|
628c9999f8d98b0c1e56d87bfd2dbf1ca1ea138c
|
[
"Apache-2.0"
] | 1
|
2021-03-12T17:20:56.000Z
|
2021-03-12T17:20:56.000Z
|
"""
Utils
=======
Contains multipurpose utilities for serializing objects and obtaining
arguments from the command line.
.. Copyright:
Copyright 2019 Wirepas Ltd under Apache License, Version 2.0.
See file LICENSE for full license details.
"""
import binascii
import datetime
import json
import threading
from google.protobuf import json_format
def deferred_thread(fn):
"""
Decorator to handle a request on its own Thread
to avoid blocking the calling Thread on I/O.
It creates a new Thread but it shouldn't impact the performances
as requests are not supposed to be really frequent (few per seconds)
"""
def wrapper(*args, **kwargs):
thread = threading.Thread(target=fn, args=args, kwargs=kwargs)
thread.start()
return thread
return wrapper
class JsonSerializer(json.JSONEncoder):
"""
JsonSerializer
Extends the JSONEncoder base class with object handlers
for bytearrya, datetime and proto.
"""
proto_as_json = False
sort_keys = True
indent = 4
def __init__(self, proto_as_json: bool = False, **kwargs):
super(JsonSerializer, self).__init__(**kwargs)
self.proto_as_json = proto_as_json
if "indent" in kwargs:
self.indent = kwargs["indent"]
if "sort_keys" in kwargs:
self.sort_keys = kwargs["sort_keys"]
def default(self, obj) -> str:
"""
Lookup table for serializing objects
Pylint complains about the method signature, but this is the
recommended way of implementing a custom JSON serialization as
seen in:
https://docs.python.org/3/library/json.html#json.JSONEncoder
"""
# pylint: disable=locally-disabled, method-hidden, arguments-differ
if isinstance(obj, (datetime.datetime, datetime.date)):
return obj.isoformat()
if isinstance(obj, (bytearray, bytes)):
return binascii.hexlify(obj)
if isinstance(obj, set):
return str(obj)
if hasattr(obj, "DESCRIPTOR"):
if self.proto_as_json is True:
pstr = json_format.MessageToJson(
obj, including_default_value_fields=True
)
else:
pstr = json_format.MessageToDict(
obj, including_default_value_fields=True
)
return pstr
return json.JSONEncoder.default(self, obj)
def serialize(self, obj, flatten=False):
""" returns a json representation of the object """
if flatten:
temp = dict()
for key, value in sorted(obj.items()):
if isinstance(value, dict):
for child_key, child_value in value.items():
temp[f"{key}.{child_key}"] = child_value
obj = temp
jobj = json.dumps(
obj,
cls=JsonSerializer,
sort_keys=self.sort_keys,
indent=self.indent,
)
return jobj
def flatten(input_dict, separator="/", prefix=""):
"""
Flattens a dictionary with nested dictionaries and lists
into a single dictionary.
The key compression is done using the chosen separator.
"""
output_dict = {}
def step(member, parent_key=""):
if isinstance(member, dict):
for key, value in member.items():
step(
value,
f"{parent_key}{separator}{key}"
if parent_key
else str(key),
)
elif isinstance(member, list):
for index, sublist in enumerate(member, start=0):
step(
sublist,
f"{parent_key}{separator}{index}"
if parent_key
else str(index),
)
else:
output_dict[f"{parent_key}"] = member
step(input_dict)
return output_dict
class Signal:
"""Wrapper around and exit signal"""
def __init__(self, signal=None):
super(Signal, self).__init__()
if signal is None:
signal = False
self.signal = signal
def is_set(self) -> bool:
""" Returns the state of the inner event or boolean """
try:
ret = self.signal.is_set()
except AttributeError:
ret = self.signal
return ret
def set(self) -> bool:
""" Sets the event or inner boolean """
try:
ret = self.signal.set()
except AttributeError:
self.signal = True
ret = True
return ret
def chunker(seq, size) -> list():
"""
Splits a sequence in multiple parts
Args:
seq ([]) : an array
size (int) : length of each array part
Returns:
array ([]) : a chunk of SEQ with given SIZE
"""
return (seq[pos : pos + size] for pos in range(0, len(seq), size))
| 25.917949
| 75
| 0.565295
| 2,910
| 0.575782
| 0
| 0
| 0
| 0
| 0
| 0
| 1,749
| 0.346063
|
4984d7b37bc39c03cdb2148c437346639993c3a9
| 25,733
|
py
|
Python
|
pysph/base/tree/point_tree.py
|
nauaneed/pysph-nav
|
66589021f453f25b77549f6f102b6afcc89e338d
|
[
"BSD-3-Clause"
] | 1
|
2022-03-15T11:48:17.000Z
|
2022-03-15T11:48:17.000Z
|
pysph/base/tree/point_tree.py
|
nauaneed/pysph-nav
|
66589021f453f25b77549f6f102b6afcc89e338d
|
[
"BSD-3-Clause"
] | null | null | null |
pysph/base/tree/point_tree.py
|
nauaneed/pysph-nav
|
66589021f453f25b77549f6f102b6afcc89e338d
|
[
"BSD-3-Clause"
] | null | null | null |
from pysph.base.tree.tree import Tree
from pysph.base.tree.helpers import ParticleArrayWrapper, get_helper, \
make_vec_dict, ctype_to_dtype, get_vector_dtype
from compyle.opencl import profile_kernel, DeviceWGSException, get_queue, \
named_profile, get_context
from compyle.array import Array
from pytools import memoize
import sys
import numpy as np
import pyopencl as cl
from pyopencl.scan import GenericScanKernel
import pyopencl.tools
from mako.template import Template
class IncompatibleTreesException(Exception):
pass
@named_profile('neighbor_count_prefix_sum', backend='opencl')
@memoize
def _get_neighbor_count_prefix_sum_kernel(ctx):
return GenericScanKernel(ctx, np.int32,
arguments="__global int *ary",
input_expr="ary[i]",
scan_expr="a+b", neutral="0",
output_statement="ary[i] = prev_item")
@memoize
def _get_macros_preamble(c_type, sorted, dim):
result = Template("""
#define IN_BOUNDS(X, MIN, MAX) ((X >= MIN) && (X < MAX))
#define NORM2(X, Y, Z) ((X)*(X) + (Y)*(Y) + (Z)*(Z))
#define NORM2_2D(X, Y) ((X)*(X) + (Y)*(Y))
#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
#define MAX(X, Y) ((X) > (Y) ? (X) : (Y))
#define AVG(X, Y) (((X) + (Y)) / 2)
#define ABS(X) ((X) > 0 ? (X) : -(X))
#define SQR(X) ((X) * (X))
typedef struct {
union {
float s0;
float x;
};
} float1;
typedef struct {
union {
double s0;
double x;
};
} double1;
% if sorted:
#define PID(idx) (idx)
% else:
#define PID(idx) (pids[idx])
% endif
char contains(${data_t}${dim} node_xmin1, ${data_t}${dim} node_xmax1,
${data_t}${dim} node_xmin2, ${data_t}${dim} node_xmax2)
{
// Check if node n1 contains node n2
char res = 1;
% for i in range(dim):
res = res && (node_xmin1.s${i} <= node_xmin2.s${i}) &&
(node_xmax1.s${i} >= node_xmax2.s${i});
% endfor
return res;
}
char contains_search(${data_t}${dim} node_xmin1,
${data_t}${dim} node_xmax1,
${data_t} node_hmax1,
${data_t}${dim} node_xmin2, ${data_t}${dim} node_xmax2)
{
// Check if node n1 contains node n2 with n1 having
// its search radius extension
${data_t} h = node_hmax1;
char res = 1;
%for i in range(dim):
res = res & (node_xmin1.s${i} - h <= node_xmin2.s${i}) &
(node_xmax1.s${i} + h >= node_xmax2.s${i});
%endfor
return res;
}
char intersects(${data_t}${dim} node_xmin1, ${data_t}${dim} node_xmax1,
${data_t} node_hmax1,
${data_t}${dim} node_xmin2, ${data_t}${dim} node_xmax2,
${data_t} node_hmax2) {
// Check if node n1 'intersects' node n2
${data_t} cdist;
${data_t} w1, w2, wavg = 0;
char res = 1;
${data_t} h = MAX(node_hmax1, node_hmax2);
% for i in range(dim):
cdist = fabs((node_xmin1.s${i} + node_xmax1.s${i}) / 2 -
(node_xmin2.s${i} + node_xmax2.s${i}) / 2);
w1 = fabs(node_xmin1.s${i} - node_xmax1.s${i});
w2 = fabs(node_xmin2.s${i} - node_xmax2.s${i});
wavg = AVG(w1, w2);
res &= (cdist - wavg <= h);
% endfor
return res;
}
""").render(data_t=c_type, sorted=sorted,
dim=dim)
return result
@memoize
def _get_node_bound_kernel_parameters(dim, data_t, xvars):
result = {}
result['setup'] = Template(
r"""
${data_t} xmin[${dim}] = {${', '.join(['INFINITY'] * dim)}};
${data_t} xmax[${dim}] = {${', '.join(['-INFINITY'] * dim)}};
${data_t} hmax = 0;
""").render(dim=dim, data_t=data_t)
result['args'] = Template(
r"""int *pids,
% for v in xvars:
${data_t} *${v},
% endfor
${data_t} *h,
${data_t} radius_scale,
${data_t}${dim} *node_xmin,
${data_t}${dim} *node_xmax,
${data_t} *node_hmax
""").render(dim=dim,
data_t=data_t,
xvars=xvars)
result['leaf_operation'] = Template(
r"""
for (int j=pbound.s0; j < pbound.s1; j++) {
int pid = PID(j);
% for d in range(dim):
xmin[${d}] = fmin(xmin[${d}], ${xvars[d]}[pid]);
xmax[${d}] = fmax(xmax[${d}], ${xvars[d]}[pid]);
% endfor
hmax = fmax(h[pid] * radius_scale, hmax);
}
""").render(dim=dim, xvars=xvars)
result['node_operation'] = Template(
r"""
% for i in range(2 ** dim):
% for d in range(dim):
xmin[${d}] = fmin(
xmin[${d}], node_xmin[child_offset + ${i}].s${d}
);
xmax[${d}] = fmax(
xmax[${d}], node_xmax[child_offset + ${i}].s${d}
);
% endfor
hmax = fmax(hmax, node_hmax[child_offset + ${i}]);
% endfor
""").render(dim=dim)
result['output_expr'] = Template(
"""
% for d in range(dim):
node_xmin[node_idx].s${d} = xmin[${d}];
node_xmax[node_idx].s${d} = xmax[${d}];
% endfor
node_hmax[node_idx] = hmax;
""").render(dim=dim)
return result
@memoize
def _get_leaf_neighbor_kernel_parameters(data_t, dim, args, setup, operation,
output_expr):
result = {
'setup': Template(r"""
${data_t}${dim} node_xmin1;
${data_t}${dim} node_xmax1;
${data_t} node_hmax1;
${data_t}${dim} node_xmin2;
${data_t}${dim} node_xmax2;
${data_t} node_hmax2;
node_xmin1 = node_xmin_dst[cid_dst];
node_xmax1 = node_xmax_dst[cid_dst];
node_hmax1 = node_hmax_dst[cid_dst];
%(setup)s;
""" % dict(setup=setup)).render(
data_t=data_t, dim=dim),
'node_operation': Template("""
node_xmin2 = node_xmin_src[cid_src];
node_xmax2 = node_xmax_src[cid_src];
node_hmax2 = node_hmax_src[cid_src];
if (!intersects(node_xmin1, node_xmax1, node_hmax1,
node_xmin2, node_xmax2, node_hmax2) &&
!contains(node_xmin2, node_xmax2, node_xmin1, node_xmax1)) {
flag = 0;
break;
}
""").render(data_t=data_t),
'leaf_operation': Template("""
node_xmin2 = node_xmin_src[cid_src];
node_xmax2 = node_xmax_src[cid_src];
node_hmax2 = node_hmax_src[cid_src];
if (intersects(node_xmin1, node_xmax1, node_hmax1,
node_xmin2, node_xmax2, node_hmax2) ||
contains_search(node_xmin1, node_xmax1, node_hmax1,
node_xmin2, node_xmax2)) {
%(operation)s;
}
""" % dict(operation=operation)).render(),
'output_expr': output_expr,
'args': Template("""
${data_t}${dim} *node_xmin_src, ${data_t}${dim} *node_xmax_src,
${data_t} *node_hmax_src,
${data_t}${dim} *node_xmin_dst, ${data_t}${dim} *node_xmax_dst,
${data_t} *node_hmax_dst,
""" + args).render(data_t=data_t, dim=dim)
}
return result
# Support for 1D
def register_custom_pyopencl_ctypes():
cl.tools.get_or_register_dtype('float1', np.dtype([('s0', np.float32)]))
cl.tools.get_or_register_dtype('double1', np.dtype([('s0', np.float64)]))
register_custom_pyopencl_ctypes()
class PointTree(Tree):
def __init__(self, pa, dim=2, leaf_size=32, radius_scale=2.0,
use_double=False, c_type='float'):
super(PointTree, self).__init__(pa.get_number_of_particles(), 2 ** dim,
leaf_size)
assert (1 <= dim <= 3)
self.max_depth = None
self.dim = dim
self.powdim = 2 ** self.dim
self.xvars = ('x', 'y', 'z')[:dim]
self.c_type = c_type
self.c_type_src = 'double' if use_double else 'float'
if use_double and c_type == 'float':
# Extend the search radius a little to account for rounding errors
radius_scale = radius_scale * (1 + 2e-7)
# y and z coordinates need to be present for 1D and z for 2D
# This is because the NNPS implementation below assumes them to be
# just set to 0.
self.pa = ParticleArrayWrapper(pa, self.c_type_src,
self.c_type, ('x', 'y', 'z', 'h'))
self.radius_scale = radius_scale
self.use_double = use_double
self.helper = get_helper('tree/point_tree.mako', self.c_type)
self.xmin = None
self.xmax = None
self.hmin = None
self.make_vec = make_vec_dict[c_type][self.dim]
self.ctx = get_context()
def set_index_function_info(self):
self.index_function_args = ["sfc"]
self.index_function_arg_ctypes = ["ulong"]
self.index_function_arg_dtypes = [np.uint64]
self.index_function_consts = ['mask', 'rshift']
self.index_function_const_ctypes = ['ulong', 'char']
self.index_code = "((sfc[i] & mask) >> rshift)"
def _calc_cell_size_and_depth(self):
self.cell_size = self.hmin * self.radius_scale * (1. + 1e-3)
# Logic from gpu_domain_manager.py
if self.cell_size < 1e-6:
self.cell_size = 1
# This lets the tree grow up to log2(128) = 7 layers beyond what it
# could have previously. Pretty arbitrary.
self.cell_size /= 128
max_width = max((self.xmax[i] - self.xmin[i]) for i in range(self.dim))
self.max_depth = int(np.ceil(np.log2(max_width / self.cell_size))) + 1
def _bin(self):
dtype = ctype_to_dtype(self.c_type)
fill_particle_data = self.helper.get_kernel("fill_particle_data",
dim=self.dim,
xvars=self.xvars)
pa_gpu = self.pa.gpu
args = [getattr(pa_gpu, v).dev for v in self.xvars]
args += [dtype(self.cell_size),
self.make_vec(*[self.xmin[i] for i in range(self.dim)]),
self.sfc.dev, self.pids.dev]
fill_particle_data(*args)
def get_index_constants(self, depth):
rshift = np.uint8(self.dim * (self.max_depth - depth - 1))
mask = np.uint64((2 ** self.dim - 1) << rshift)
return mask, rshift
def _adjust_domain_width(self):
# Convert width of domain to a power of 2 multiple of cell size
# (Optimal width for cells)
# Note that this makes sure the width in _all_ dimensions is the
# same. We want our nodes to be cubes ideally.
cell_size = self.hmin * self.radius_scale * (1. + 1e-5)
max_width = np.max(self.xmax - self.xmin)
new_width = cell_size * 2.0 ** int(
np.ceil(np.log2(max_width / cell_size)))
diff = (new_width - (self.xmax - self.xmin)) / 2
self.xmin -= diff
self.xmax += diff
def setup_build(self, xmin, xmax, hmin):
self._setup_build()
self.pa.sync()
self.xmin = np.array(xmin)
self.xmax = np.array(xmax)
self.hmin = hmin
self._adjust_domain_width()
self._calc_cell_size_and_depth()
self._bin()
def build(self, fixed_depth=None):
self._build(self.max_depth if fixed_depth is None else fixed_depth)
self._get_unique_cids_and_count()
def refresh(self, xmin, xmax, hmin, fixed_depth=None):
self.setup_build(xmin, xmax, hmin)
self.build(fixed_depth)
def _sort(self):
"""Set tree as being sorted
The particle array needs to be aligned by the caller!
"""
if not self.sorted:
self.pa.sync()
self.sorted = 1
###########################################################################
# General algorithms
###########################################################################
def set_node_bounds(self):
vector_data_t = get_vector_dtype(self.c_type, self.dim)
dtype = ctype_to_dtype(self.c_type)
self.node_xmin = self.allocate_node_prop(vector_data_t)
self.node_xmax = self.allocate_node_prop(vector_data_t)
self.node_hmax = self.allocate_node_prop(dtype)
params = _get_node_bound_kernel_parameters(self.dim, self.c_type,
self.xvars)
set_node_bounds = self.tree_bottom_up(
params['args'], params['setup'], params['leaf_operation'],
params['node_operation'], params['output_expr'],
preamble=_get_macros_preamble(self.c_type, self.sorted, self.dim)
)
set_node_bounds = profile_kernel(set_node_bounds, 'set_node_bounds',
backend='opencl')
pa_gpu = self.pa.gpu
dtype = ctype_to_dtype(self.c_type)
args = [self, self.pids.dev]
args += [getattr(pa_gpu, v).dev for v in self.xvars]
args += [pa_gpu.h.dev,
dtype(self.radius_scale),
self.node_xmin.dev, self.node_xmax.dev,
self.node_hmax.dev]
set_node_bounds(*args)
###########################################################################
# Nearest Neighbor Particle Search (NNPS)
###########################################################################
def _leaf_neighbor_operation(self, tree_src, args, setup, operation,
output_expr):
# Template for finding neighboring cids of a cell.
params = _get_leaf_neighbor_kernel_parameters(self.c_type, self.dim,
args,
setup, operation,
output_expr)
kernel = tree_src.leaf_tree_traverse(
params['args'], params['setup'], params['node_operation'],
params['leaf_operation'], params['output_expr'],
preamble=_get_macros_preamble(self.c_type, self.sorted, self.dim)
)
def callable(*args):
return kernel(tree_src, self,
tree_src.node_xmin.dev,
tree_src.node_xmax.dev,
tree_src.node_hmax.dev,
self.node_xmin.dev, self.node_xmax.dev,
self.node_hmax.dev,
*args)
return callable
def find_neighbor_cids(self, tree_src):
neighbor_cid_count = Array(np.uint32, n=self.unique_cid_count + 1,
backend='opencl')
find_neighbor_cid_counts = self._leaf_neighbor_operation(
tree_src,
args="uint2 *pbounds, int *cnt",
setup="int count=0",
operation="""
if (pbounds[cid_src].s0 < pbounds[cid_src].s1)
count++;
""",
output_expr="cnt[i] = count;"
)
find_neighbor_cid_counts = profile_kernel(
find_neighbor_cid_counts, 'find_neighbor_cid_count',
backend='opencl'
)
find_neighbor_cid_counts(tree_src.pbounds.dev,
neighbor_cid_count.dev)
neighbor_psum = _get_neighbor_count_prefix_sum_kernel(self.ctx)
neighbor_psum(neighbor_cid_count.dev)
total_neighbors = int(neighbor_cid_count.dev[-1].get())
neighbor_cids = Array(np.uint32, n=total_neighbors,
backend='opencl')
find_neighbor_cids = self._leaf_neighbor_operation(
tree_src,
args="uint2 *pbounds, int *cnt, int *neighbor_cids",
setup="int offset=cnt[i];",
operation="""
if (pbounds[cid_src].s0 < pbounds[cid_src].s1)
neighbor_cids[offset++] = cid_src;
""",
output_expr=""
)
find_neighbor_cids = profile_kernel(
find_neighbor_cids, 'find_neighbor_cids', backend='opencl')
find_neighbor_cids(tree_src.pbounds.dev,
neighbor_cid_count.dev, neighbor_cids.dev)
return neighbor_cid_count, neighbor_cids
# TODO?: 1D and 2D NNPS not properly supported here.
# Just assuming the other spatial coordinates (y and z in case of 1D,
# and z in case of 2D) are set to 0.
def find_neighbor_lengths_elementwise(self, neighbor_cid_count,
neighbor_cids, tree_src,
neighbor_count):
self.check_nnps_compatibility(tree_src)
pa_gpu_dst = self.pa.gpu
pa_gpu_src = tree_src.pa.gpu
dtype = ctype_to_dtype(self.c_type)
find_neighbor_counts = self.helper.get_kernel(
'find_neighbor_counts_elementwise', sorted=self.sorted
)
find_neighbor_counts(self.unique_cids_map.dev, tree_src.pids.dev,
self.pids.dev,
self.cids.dev,
tree_src.pbounds.dev, self.pbounds.dev,
pa_gpu_src.x.dev, pa_gpu_src.y.dev,
pa_gpu_src.z.dev,
pa_gpu_src.h.dev,
pa_gpu_dst.x.dev, pa_gpu_dst.y.dev,
pa_gpu_dst.z.dev,
pa_gpu_dst.h.dev,
dtype(self.radius_scale),
neighbor_cid_count.dev,
neighbor_cids.dev,
neighbor_count.dev)
def find_neighbors_elementwise(self, neighbor_cid_count, neighbor_cids,
tree_src, start_indices, neighbors):
self.check_nnps_compatibility(tree_src)
pa_gpu_dst = self.pa.gpu
pa_gpu_src = tree_src.pa.gpu
dtype = ctype_to_dtype(self.c_type)
find_neighbors = self.helper.get_kernel(
'find_neighbors_elementwise', sorted=self.sorted)
find_neighbors(self.unique_cids_map.dev, tree_src.pids.dev,
self.pids.dev,
self.cids.dev,
tree_src.pbounds.dev, self.pbounds.dev,
pa_gpu_src.x.dev, pa_gpu_src.y.dev, pa_gpu_src.z.dev,
pa_gpu_src.h.dev,
pa_gpu_dst.x.dev, pa_gpu_dst.y.dev, pa_gpu_dst.z.dev,
pa_gpu_dst.h.dev,
dtype(self.radius_scale),
neighbor_cid_count.dev,
neighbor_cids.dev,
start_indices.dev,
neighbors.dev)
def _is_valid_nnps_wgs(self):
# Max work group size can only be found by building the
# kernel.
try:
find_neighbor_counts = self.helper.get_kernel(
'find_neighbor_counts', sorted=self.sorted, wgs=self.leaf_size
)
find_neighbor = self.helper.get_kernel(
'find_neighbors', sorted=self.sorted, wgs=self.leaf_size
)
except DeviceWGSException:
return False
else:
return True
def find_neighbor_lengths(self, neighbor_cid_count, neighbor_cids,
tree_src, neighbor_count,
use_partitions=False):
self.check_nnps_compatibility(tree_src)
wgs = self.leaf_size
pa_gpu_dst = self.pa.gpu
pa_gpu_src = tree_src.pa.gpu
dtype = ctype_to_dtype(self.c_type)
def find_neighbor_counts_for_partition(partition_cids, partition_size,
partition_wgs, q=None):
find_neighbor_counts = self.helper.get_kernel(
'find_neighbor_counts', sorted=self.sorted, wgs=wgs
)
find_neighbor_counts(partition_cids.dev, tree_src.pids.dev,
self.pids.dev,
self.cids.dev,
tree_src.pbounds.dev, self.pbounds.dev,
pa_gpu_src.x.dev, pa_gpu_src.y.dev,
pa_gpu_src.z.dev,
pa_gpu_src.h.dev,
pa_gpu_dst.x.dev, pa_gpu_dst.y.dev,
pa_gpu_dst.z.dev,
pa_gpu_dst.h.dev,
dtype(self.radius_scale),
neighbor_cid_count.dev,
neighbor_cids.dev,
neighbor_count.dev,
gs=(partition_wgs * partition_size,),
ls=(partition_wgs,),
queue=(get_queue() if q is None else q))
if use_partitions and wgs > 32:
if wgs < 128:
wgs1 = 32
else:
wgs1 = 64
m1, n1 = self.get_leaf_size_partitions(0, wgs1)
find_neighbor_counts_for_partition(m1, n1, min(wgs, wgs1))
m2, n2 = self.get_leaf_size_partitions(wgs1, wgs)
find_neighbor_counts_for_partition(m2, n2, wgs)
else:
find_neighbor_counts_for_partition(
self.unique_cids, self.unique_cid_count, wgs)
def find_neighbors(self, neighbor_cid_count, neighbor_cids, tree_src,
start_indices, neighbors, use_partitions=False):
self.check_nnps_compatibility(tree_src)
wgs = self.leaf_size if self.leaf_size % 32 == 0 else \
self.leaf_size + 32 - self.leaf_size % 32
pa_gpu_dst = self.pa.gpu
pa_gpu_src = tree_src.pa.gpu
dtype = ctype_to_dtype(self.c_type)
def find_neighbors_for_partition(partition_cids, partition_size,
partition_wgs, q=None):
find_neighbors = self.helper.get_kernel('find_neighbors',
sorted=self.sorted,
wgs=wgs)
find_neighbors(partition_cids.dev, tree_src.pids.dev,
self.pids.dev,
self.cids.dev,
tree_src.pbounds.dev, self.pbounds.dev,
pa_gpu_src.x.dev, pa_gpu_src.y.dev, pa_gpu_src.z.dev,
pa_gpu_src.h.dev,
pa_gpu_dst.x.dev, pa_gpu_dst.y.dev, pa_gpu_dst.z.dev,
pa_gpu_dst.h.dev,
dtype(self.radius_scale),
neighbor_cid_count.dev,
neighbor_cids.dev,
start_indices.dev,
neighbors.dev,
gs=(partition_wgs * partition_size,),
ls=(partition_wgs,),
queue=(get_queue() if q is None else q))
if use_partitions and wgs > 32:
if wgs < 128:
wgs1 = 32
else:
wgs1 = 64
m1, n1 = self.get_leaf_size_partitions(0, wgs1)
fraction = (n1 / int(self.unique_cid_count))
if fraction > 0.3:
find_neighbors_for_partition(m1, n1, wgs1)
m2, n2 = self.get_leaf_size_partitions(wgs1, wgs)
assert (n1 + n2 == self.unique_cid_count)
find_neighbors_for_partition(m2, n2, wgs)
return
else:
find_neighbors_for_partition(
self.unique_cids, self.unique_cid_count, wgs)
def check_nnps_compatibility(self, tree):
"""Check if tree types and parameters are compatible for NNPS
Two trees must satisfy a few conditions so that NNPS can be performed
on one tree using the other as reference. In this case, the following
conditions must be satisfied -
1) Currently both should be instances of point_tree.PointTree
2) Both must have the same sortedness
3) Both must use the same floating-point datatype
4) Both must have the same leaf sizes
"""
if not isinstance(tree, PointTree):
raise IncompatibleTreesException(
"Both trees must be of the same type for NNPS"
)
if self.sorted != tree.sorted:
raise IncompatibleTreesException(
"Tree sortedness need to be the same for NNPS"
)
if self.c_type != tree.c_type or self.use_double != tree.use_double:
raise IncompatibleTreesException(
"Tree floating-point data types need to be the same for NNPS"
)
if self.leaf_size != tree.leaf_size:
raise IncompatibleTreesException(
"Tree leaf sizes need to be the same for NNPS (%d != %d)" % (
self.leaf_size, tree.leaf_size)
)
return
| 38.350224
| 80
| 0.522636
| 17,805
| 0.691913
| 0
| 0
| 7,176
| 0.278864
| 0
| 0
| 8,623
| 0.335095
|
49850af7a6ca8eea66c58c865c235297d9610189
| 2,815
|
py
|
Python
|
senti_analysis/data.py
|
hotbaby/sentiment-analysis
|
efb880870d905c4c02528d7d242ba06b90f0e259
|
[
"MIT"
] | null | null | null |
senti_analysis/data.py
|
hotbaby/sentiment-analysis
|
efb880870d905c4c02528d7d242ba06b90f0e259
|
[
"MIT"
] | 2
|
2020-09-25T21:17:58.000Z
|
2022-02-10T00:28:19.000Z
|
senti_analysis/data.py
|
hotbaby/sentiment-analysis
|
efb880870d905c4c02528d7d242ba06b90f0e259
|
[
"MIT"
] | null | null | null |
# encoding: utf8
import numpy as np
import pandas as pd
from collections import OrderedDict
from senti_analysis import config
from senti_analysis import constants
from senti_analysis.preprocess import (load_tokenizer, load_sentences,
encode_sentence, label_transform)
def load_data_set():
"""
Load data set.
:return: train_data_set, validation_data_set, test_data_set
"""
train_data_set = pd.read_csv(config.TRAIN_SET_PATH)
validation_data_set = pd.read_csv(config.VALIDATION_SET_PATH)
test_data_set = pd.read_csv(config.TEST_SET_PATH)
return train_data_set, validation_data_set, test_data_set
def x_data():
train_set = pd.read_csv(config.TRAIN_SET_PATH)
val_set = pd.read_csv(config.VALIDATION_SET_PATH)
tokenizer = load_tokenizer()
train_sentences, val_sentences, test_sentences = load_sentences()
x_train = encode_sentence(train_sentences, padding=True, max_length=config.MAX_SEQUENCE_LENGTH,
tokenizer=tokenizer)
x_val = encode_sentence(val_sentences, padding=True, max_length=config.MAX_SEQUENCE_LENGTH,
tokenizer=tokenizer)
return x_train, x_val
def load_val_data_set():
val_set = pd.read_csv(config.VALIDATION_SET_PATH)
tokenizer = load_tokenizer()
train_sentences, val_sentences, test_sentences = load_sentences()
x_val = encode_sentence(val_sentences, padding=True, max_length=config.MAX_SEQUENCE_LENGTH,
tokenizer=tokenizer)
train_set = pd.read_csv(config.TRAIN_SET_PATH)
val_set = pd.read_csv(config.VALIDATION_SET_PATH)
_, y_val = transform_y_data(train_set, val_set, constants.COLS)
return x_val, y_val
def transform_y_data(train_set, val_set, cols):
y_train = OrderedDict()
y_val = OrderedDict()
for col in cols:
y_train[col] = np.array(label_transform(train_set[col]))
y_val[col] = np.array(label_transform(val_set[col]))
return y_train, y_val
def y_data():
"""
generate y label data.
:return: train_label_data dict, validation_label_data dict
"""
train_set = pd.read_csv(config.TRAIN_SET_PATH)
val_set = pd.read_csv(config.VALIDATION_SET_PATH)
y_train, y_val = transform_y_data(train_set, val_set, constants.COLS)
return y_train, y_val
def validate_data():
val_set = pd.read_csv(config.VALIDATION_SET_PATH)
tokenizer = load_tokenizer()
train_sentences, val_sentences, test_sentences = load_sentences()
x_val = encode_sentence(val_sentences, padding=True,
max_length=config.MAX_SEQUENCE_LENGTH, tokenizer=tokenizer)
y_val = {}
for col in constants.COLS:
y_val[col] = np.array(label_transform(val_set[col]))
return x_val, y_val
| 30.597826
| 99
| 0.713677
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 211
| 0.074956
|
4985efb3cec903d0cb0d0b5c74721d37a531530f
| 93
|
py
|
Python
|
pyball/models/config/stats_group.py
|
SebastianDang/PyBall
|
d1965aa01477b5ee0db9c0463ec584a7e3997395
|
[
"MIT"
] | 74
|
2018-03-04T22:58:46.000Z
|
2021-07-06T12:28:50.000Z
|
pyball/models/config/stats_group.py
|
SebastianDang/PyBall
|
d1965aa01477b5ee0db9c0463ec584a7e3997395
|
[
"MIT"
] | 18
|
2018-03-10T19:17:54.000Z
|
2020-01-04T15:42:47.000Z
|
pyball/models/config/stats_group.py
|
SebastianDang/PyBall
|
d1965aa01477b5ee0db9c0463ec584a7e3997395
|
[
"MIT"
] | 13
|
2018-03-06T02:39:38.000Z
|
2020-01-17T04:38:53.000Z
|
from dataclasses import dataclass
@dataclass
class StatsGroup:
displayName: str = None
| 13.285714
| 33
| 0.774194
| 45
| 0.483871
| 0
| 0
| 56
| 0.602151
| 0
| 0
| 0
| 0
|
498724366b10f885fa79f500eaf773989a21c6f1
| 358
|
py
|
Python
|
tests/test_skeleton_says.py
|
thomascobb/skeleton-says
|
e2ea189e075a0847a6679dc066bad47ced5d397a
|
[
"Apache-2.0"
] | null | null | null |
tests/test_skeleton_says.py
|
thomascobb/skeleton-says
|
e2ea189e075a0847a6679dc066bad47ced5d397a
|
[
"Apache-2.0"
] | null | null | null |
tests/test_skeleton_says.py
|
thomascobb/skeleton-says
|
e2ea189e075a0847a6679dc066bad47ced5d397a
|
[
"Apache-2.0"
] | null | null | null |
from skeleton_says import say
skeleton_saying_hello = r"""
-------
( Hello )
-------
\
\ .-.
\(o.o)
|=|
__|__
//.=|=.\\
// .=|=. \\
\\ .=|=. //
\\(_=_)//
(:| |:)
|| ||
() ()
|| ||
|| ||
l42 ==' '==
"""
def test_say_command_says_hello() -> None:
assert say.say("Hello") == skeleton_saying_hello
| 13.259259
| 52
| 0.379888
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 211
| 0.589385
|
49880bf16640eed07e42f1ea42b7368e4b515269
| 1,710
|
py
|
Python
|
open_connect/accounts/tests/test_tasks.py
|
lpatmo/actionify_the_news
|
998d8ca6b35d0ef1b16efca70f50e59503f5a62d
|
[
"MIT"
] | 66
|
2015-11-30T20:35:38.000Z
|
2019-06-12T17:40:32.000Z
|
open_connect/accounts/tests/test_tasks.py
|
lpatmo/actionify_the_news
|
998d8ca6b35d0ef1b16efca70f50e59503f5a62d
|
[
"MIT"
] | 18
|
2015-11-30T22:03:05.000Z
|
2019-07-02T00:50:29.000Z
|
open_connect/accounts/tests/test_tasks.py
|
lpatmo/actionify_the_news
|
998d8ca6b35d0ef1b16efca70f50e59503f5a62d
|
[
"MIT"
] | 11
|
2015-11-30T20:56:01.000Z
|
2019-07-01T17:06:09.000Z
|
"""Tests for accounts tasks."""
from datetime import datetime
from unittest import TestCase
from django.conf import settings
from django.utils.timezone import now
from mock import patch
from model_mommy import mommy
from open_connect.accounts.models import Invite
from open_connect.accounts.tasks import (
render_and_send_invite_email
)
from open_connect.mailer.utils import unsubscribe_url
class TestRenderAndSendInviteEmail(TestCase):
"""Test render_and_send_invite_email"""
def test_invite_content(self):
"""Verify the email content is correct."""
invite = mommy.make('accounts.Invite')
self.assertFalse(invite.notified)
with patch('open_connect.accounts.tasks.send_email') as mock:
render_and_send_invite_email(invite.pk)
call_args = mock.call_args[1]
self.assertEqual(call_args['email'], invite.email)
self.assertEqual(call_args['from_email'], settings.DEFAULT_FROM_EMAIL)
# Confirm that the unsubscribe URL is in the message
self.assertIn(unsubscribe_url(invite.email), call_args['text'])
self.assertIn(unsubscribe_url(invite.email), call_args['html'])
invite = Invite.objects.get(pk=invite.pk)
self.assertIsInstance(invite.notified, datetime)
def test_no_duplicate_sends(self):
"""If the invite notification has already been sent, do nothing."""
# pylint: disable=assignment-from-none
invite = mommy.make('accounts.Invite', notified=now())
with patch('open_connect.accounts.tasks.send_email') as mock:
response = render_and_send_invite_email(invite.pk)
self.assertIsNone(response)
self.assertFalse(mock.called)
| 38
| 78
| 0.724561
| 1,310
| 0.766082
| 0
| 0
| 0
| 0
| 0
| 0
| 414
| 0.242105
|
49882b0d53f39e7e8ebf679902e5c955c3e1b55f
| 944
|
py
|
Python
|
tests/inputs/config.py
|
hsh-nids/python-betterproto
|
f5d3b48b1aa49fd64513907ed70124b32758ad3e
|
[
"MIT"
] | 708
|
2019-10-11T06:23:40.000Z
|
2022-03-31T09:39:08.000Z
|
tests/inputs/config.py
|
hsh-nids/python-betterproto
|
f5d3b48b1aa49fd64513907ed70124b32758ad3e
|
[
"MIT"
] | 302
|
2019-11-11T22:09:21.000Z
|
2022-03-29T11:21:04.000Z
|
tests/inputs/config.py
|
hsh-nids/python-betterproto
|
f5d3b48b1aa49fd64513907ed70124b32758ad3e
|
[
"MIT"
] | 122
|
2019-12-04T16:22:53.000Z
|
2022-03-20T09:31:10.000Z
|
# Test cases that are expected to fail, e.g. unimplemented features or bug-fixes.
# Remove from list when fixed.
xfail = {
"namespace_keywords", # 70
"googletypes_struct", # 9
"googletypes_value", # 9
"import_capitalized_package",
"example", # This is the example in the readme. Not a test.
}
services = {
"googletypes_response",
"googletypes_response_embedded",
"service",
"service_separate_packages",
"import_service_input_message",
"googletypes_service_returns_empty",
"googletypes_service_returns_googletype",
"example_service",
"empty_service",
}
# Indicate json sample messages to skip when testing that json (de)serialization
# is symmetrical becuase some cases legitimately are not symmetrical.
# Each key references the name of the test scenario and the values in the tuple
# Are the names of the json files.
non_symmetrical_json = {"empty_repeated": ("empty_repeated",)}
| 32.551724
| 81
| 0.733051
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 785
| 0.831568
|
4989cd340b09d2674ba44f9caf4ca76681a1034f
| 1,476
|
py
|
Python
|
examples/wagsley/wagsley/urls.py
|
Blogsley/blogsley
|
0ca17397af5d53c2fac3affb5eacec2f8d941d37
|
[
"MIT"
] | null | null | null |
examples/wagsley/wagsley/urls.py
|
Blogsley/blogsley
|
0ca17397af5d53c2fac3affb5eacec2f8d941d37
|
[
"MIT"
] | null | null | null |
examples/wagsley/wagsley/urls.py
|
Blogsley/blogsley
|
0ca17397af5d53c2fac3affb5eacec2f8d941d37
|
[
"MIT"
] | null | null | null |
from django.conf import settings
from django.urls import include, path, re_path
from django.contrib import admin
from ariadne_django.views import GraphQLView
from wagtail.admin import urls as wagtailadmin_urls
from wagtail.core import urls as wagtail_urls
from wagtail.documents import urls as wagtaildocs_urls
from puput import urls as puput_urls
from search import views as search_views
from wagsley.schema import schema
print(schema)
urlpatterns = [
path('django-admin/', admin.site.urls),
path('admin/', include(wagtailadmin_urls)),
path('documents/', include(wagtaildocs_urls)),
#path('search/', search_views.search, name='search'),
]
if settings.DEBUG:
from django.conf.urls.static import static
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
# Serve static and media files from development server
urlpatterns += staticfiles_urlpatterns()
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
urlpatterns = urlpatterns + [
path('graphql/', GraphQLView.as_view(schema=schema), name='graphql'),
path('accounts/', include('accounts.urls')),
path('accounts/', include('django.contrib.auth.urls')),
path('accounts/', include('allauth.urls')),
path('events/', include('events.urls')),
re_path(r'^comments/', include('django_comments_xtd.urls')),
path("", include(puput_urls)),
path("", include(wagtail_urls)),
path('', include('home.urls')),
]
| 28.941176
| 80
| 0.735095
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 327
| 0.221545
|
4989d46fdda2f05efd221caf77a2291b849c31f5
| 1,311
|
py
|
Python
|
tests/unit/core/test_certify_timestamp.py
|
sys-git/certifiable
|
a3c33c0d4f3ac2c53be9eded3fae633fa5f697f8
|
[
"MIT"
] | null | null | null |
tests/unit/core/test_certify_timestamp.py
|
sys-git/certifiable
|
a3c33c0d4f3ac2c53be9eded3fae633fa5f697f8
|
[
"MIT"
] | 311
|
2017-09-14T22:34:21.000Z
|
2022-03-27T18:30:17.000Z
|
tests/unit/core/test_certify_timestamp.py
|
sys-git/certifiable
|
a3c33c0d4f3ac2c53be9eded3fae633fa5f697f8
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `certifiable.core.certify_timestamp` method."""
import datetime
import unittest
from decimal import Decimal
from certifiable import CertifierTypeError
from certifiable.core import certify_timestamp
class CoreCertifyTimestampTestCase(unittest.TestCase):
"""Tests for `certifiable.core.certify_timestamp` method."""
def setUp(self):
"""Set up test fixtures, if any."""
def tearDown(self):
"""Tear down test fixtures, if any."""
def test_timestamp(self):
for i in [
datetime.datetime.utcnow(),
]:
self.assertIs(
certify_timestamp(
i,
required=True,
),
i,
)
def test_not_timestamp(self):
from tests import TestEnum1
for i in [
0,
True,
False,
3.4,
5L,
complex(6, 7),
Decimal(8),
datetime.date(2017, 11, 1),
TestEnum1.X,
]:
self.assertRaises(
CertifierTypeError,
certify_timestamp,
i,
required=True,
)
if __name__ == '__main__':
unittest.main()
| 22.220339
| 64
| 0.514111
| 999
| 0.762014
| 0
| 0
| 0
| 0
| 0
| 0
| 247
| 0.188406
|
498b4c183ee96795b8b620014ec7c0080e178c36
| 1,477
|
py
|
Python
|
rtc_handle_example/replace/com_replace_impl.py
|
takashi-suehiro/rtmtools
|
56ee92d3b3f2ea73d7fa78dfabe6a098e06f6215
|
[
"MIT"
] | null | null | null |
rtc_handle_example/replace/com_replace_impl.py
|
takashi-suehiro/rtmtools
|
56ee92d3b3f2ea73d7fa78dfabe6a098e06f6215
|
[
"MIT"
] | null | null | null |
rtc_handle_example/replace/com_replace_impl.py
|
takashi-suehiro/rtmtools
|
56ee92d3b3f2ea73d7fa78dfabe6a098e06f6215
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# -*- Python -*-
"""
\file com_replace_idl_examplefile.py
\brief Python example implementations generated from com_replace.idl
\date $Date$
"""
import omniORB
from omniORB import CORBA, PortableServer
import _GlobalIDL, _GlobalIDL__POA
class ComReplace_i (_GlobalIDL__POA.ComReplace):
"""
\class ComReplace_i
Example class implementing IDL interface ComReplace
"""
def __init__(self, repl_rtc):
"""
\brief standard constructor
Initialise member variables here
"""
self.rtc=repl_rtc
# int count_of_replaced_substring()
def replace_count(self):
#raise CORBA.NO_IMPLEMENT(0, CORBA.COMPLETED_NO)
# *** Implement me
# Must return: result
return self.rtc.repl_count
if __name__ == "__main__":
import sys
# Initialise the ORB
orb = CORBA.ORB_init(sys.argv)
# As an example, we activate an object in the Root POA
poa = orb.resolve_initial_references("RootPOA")
# Create an instance of a servant class
servant = ComReplace_i()
# Activate it in the Root POA
poa.activate_object(servant)
# Get the object reference to the object
objref = servant._this()
# Print a stringified IOR for it
print( orb.object_to_string(objref))
# Activate the Root POA's manager
poa._get_the_POAManager().activate()
# Run the ORB, blocking this thread
orb.run()
| 22.723077
| 69
| 0.666892
| 525
| 0.35545
| 0
| 0
| 0
| 0
| 0
| 0
| 797
| 0.539607
|
498d4cc3d6311bb103e45b049930a347b5d6e562
| 588
|
py
|
Python
|
pyknp_eventgraph/utils.py
|
ku-nlp/pyknp-eventgraph
|
927128ac41098bc45637b02a3c2420d345a41347
|
[
"BSD-3-Clause"
] | 7
|
2019-11-23T10:57:35.000Z
|
2021-01-03T22:40:13.000Z
|
pyknp_eventgraph/utils.py
|
ku-nlp/pyknp-eventgraph
|
927128ac41098bc45637b02a3c2420d345a41347
|
[
"BSD-3-Clause"
] | 1
|
2021-11-05T02:19:17.000Z
|
2021-11-05T02:19:17.000Z
|
pyknp_eventgraph/utils.py
|
ku-nlp/pyknp-eventgraph
|
927128ac41098bc45637b02a3c2420d345a41347
|
[
"BSD-3-Clause"
] | null | null | null |
from io import open
from typing import List
from pyknp import KNP, BList
def read_knp_result_file(filename: str) -> List[BList]:
"""Read a KNP result file.
Args:
filename: A filename.
Returns:
A list of :class:`pyknp.knp.blist.BList` objects.
"""
knp = KNP()
blists = []
with open(filename, "rt", encoding="utf-8", errors="replace") as f:
chunk = ""
for line in f:
chunk += line
if line.strip() == "EOS":
blists.append(knp.result(chunk))
chunk = ""
return blists
| 22.615385
| 71
| 0.55102
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 176
| 0.29932
|
498d7bdbff51b3b458f67d9c20042b421a42d945
| 2,272
|
py
|
Python
|
freshlybuiltimagebol/OCR_Printed_Text.py
|
komal3120/freshlybuiltimagebol
|
fc46f687e326d53ec485e74a943e45b786dad36d
|
[
"MIT"
] | 3
|
2020-08-01T10:27:58.000Z
|
2020-08-09T20:56:49.000Z
|
freshlybuiltimagebol/OCR_Printed_Text.py
|
komal3120/freshlybuiltimagebol
|
fc46f687e326d53ec485e74a943e45b786dad36d
|
[
"MIT"
] | null | null | null |
freshlybuiltimagebol/OCR_Printed_Text.py
|
komal3120/freshlybuiltimagebol
|
fc46f687e326d53ec485e74a943e45b786dad36d
|
[
"MIT"
] | 1
|
2020-06-28T18:02:52.000Z
|
2020-06-28T18:02:52.000Z
|
from cv2 import fastNlMeansDenoisingColored
from cv2 import cvtColor
from cv2 import bitwise_not,threshold,getRotationMatrix2D
from cv2 import warpAffine,filter2D,imread
from cv2 import THRESH_BINARY,COLOR_BGR2GRAY,THRESH_OTSU
from cv2 import INTER_CUBIC,BORDER_REPLICATE,minAreaRect
from numpy import column_stack,array,where
from matplotlib.pyplot import imshow,xticks,yticks
from pytesseract import image_to_string,pytesseract
from PIL import Image
class ImageProcess:
'''this function is removing noise from the image'''
def remove_noise(image):
image = fastNlMeansDenoisingColored(image,None,20,10,7,21)
return image
'''this function is removing skewness.
first, it calculate the angle and accordingly rotate image'''
def remove_skew(image):
in_gray = cvtColor(image, COLOR_BGR2GRAY)
in_gray = bitwise_not(in_gray)
thresh_pic = threshold(in_gray, 0, 255,THRESH_BINARY | THRESH_OTSU)[1]
coords_x_y = column_stack(where(thresh_pic > 0))
angle = minAreaRect(coords_x_y)[-1]
if angle < -45:
angle = -(90 + angle)
else:
angle = -angle
(h, w) = image.shape[:2]
center_of_pic = (w // 2, h // 2)
M = getRotationMatrix2D(center_of_pic, angle, 1.0)
image = warpAffine(image, M, (w, h),flags=INTER_CUBIC, borderMode=BORDER_REPLICATE)
return image
'''for removing blurness from the image,
this function increase sharpness of the image.'''
def shapness_blur(image):
sharpen_kernel = array([[-1,-1,-1], [-1,9,-1], [-1,-1,-1]])
image = filter2D(image, -1, sharpen_kernel)
return image
'''using pytesseract, this function extracting text from the image.'''
def to_text(image):
try:
pytesseract.tesseract_cmd = r"C:\Program Files\Tesseract-OCR\tesseract.exe"
string_from_image = image_to_string(image,lang='eng')
except Exception:
pytesseract.tesseract_cmd = r"C:\Program Files(x86)\Tesseract-OCR\tesseract.exe"
string_from_image = image_to_string(image,lang='eng')
return string_from_image
##plot image in output
def plot_image(image):
imshow(image)
xticks([])
yticks([])
| 38.508475
| 92
| 0.676496
| 1,816
| 0.799296
| 0
| 0
| 0
| 0
| 0
| 0
| 451
| 0.198504
|
498dafdb0fb28c8d01da1b1b893e4aaeb5ff08f2
| 5,944
|
py
|
Python
|
program/tests/integration_tests_output/graph/graph.py
|
alienbri/audaces-perps
|
aa5b0e14eae4944dd0a18af60a72b119ff17fd84
|
[
"MIT"
] | null | null | null |
program/tests/integration_tests_output/graph/graph.py
|
alienbri/audaces-perps
|
aa5b0e14eae4944dd0a18af60a72b119ff17fd84
|
[
"MIT"
] | null | null | null |
program/tests/integration_tests_output/graph/graph.py
|
alienbri/audaces-perps
|
aa5b0e14eae4944dd0a18af60a72b119ff17fd84
|
[
"MIT"
] | null | null | null |
import yaml
import matplotlib.pyplot as plt
import math
from jsonmerge import merge
from datetime import datetime
import plotly as ply
import pandas as pd
import plotly.express as px
TRANSFORM = False
PLOT_MEMORY = True
NB_INSTRUCTIONS = 1000
f_value_props = {
# [Color, MinOffset, MaxOffset]
"total_collateral": ["", 0, 1],
"total_fee_balance": ["", 0, 1],
"rebalancing_funds": ["#99cc99", 0, 0.5], #
# "rebalanced_v_coin": ["", 0, 1],
"v_coin_amount": ["", 0, 1],
"v_pc_amount": ["", 0, 1],
"open_shorts_v_coin": ["", 0, 1],
"open_longs_v_coin": ["", 0, 1], #
"insurance_fund": ["#808080", 0.2, 1.2],
"market_price": ["#008080", 0.5, 1.5],
"oracle_price": ["#99cc99", 0.5, 1.5],
"equilibrium_price": ["#ff8000", 0.5, 1], #
# "signer_nonce",
# "market_symbol",
# "oracle_address",
# "admin_address",
# "vault_address",
# "quote_decimals",
# "coin_decimals",
# "total_user_balances",
# "last_funding_timestamp",
# "last_recording_timestamp",
# "funding_samples_offset",
# "funding_samples",
# "funding_history_offset",
# "funding_history",
# "funding_balancing_factors",
# "number_of_instances",
}
m_value_props = {
"gc_list_lengths",
"page_full_ratios",
"longs_depths",
"shorts_depths"
}
market_state_line_header = "INFO - MarketDataPoint"
date_time = datetime.now().strftime("%d-%m-%Y_%H-%M-%S")
infile = open("../log/output.log")
outfile = open(
"../log/formatted_output_{}.log".format(date_time), "a")
market_data_json = []
for line in infile:
if (market_state_line_header in line) or ("DEBUG - Program" in line) or ("DEBUG - tx error:" in line) or ("INFO - Tree:" in line) or ("INFO - Initial Conditions:" in line) or ("INFO - Seed for this run:" in line):
outfile.write(line)
if market_state_line_header in line:
market_state_datapoint_str = line[len(
market_state_line_header):].replace("Instance", "").replace("PageInfo", "") # Stripping header
line_json = yaml.load(market_state_datapoint_str)
market_data_json.append(line_json)
# Extract
market_data = {}
value_names = list(f_value_props.keys())
for key in market_data_json[0]:
if key in value_names:
market_data[key] = [data_point[key] for data_point in market_data_json]
# Normalize
if TRANSFORM:
max_per_value = [max(market_data[key]) for key in value_names]
min_per_value = [min(market_data[key]) for key in value_names]
max_per_value[value_names.index(
"market_price")] = max_per_value[value_names.index("oracle_price")]
min_per_value[value_names.index(
"market_price")] = min_per_value[value_names.index("oracle_price")]
scaled_market_data = [[((1 - f_value_props[value_names[i]][1]) * (data_value_point - min_per_value[i]) / abs((max_per_value[i] / f_value_props[value_names[i]][2]) - min_per_value[i])) + f_value_props[value_names[i]][1] for data_value_point in market_data[value_names[i]]]
for i in range(len(value_names))]
else:
max_per_value = [max(market_data[key]) for key in value_names]
total_max = max(max_per_value)
scaling_factors = [int(round(math.log10(total_max / value_max)))
if value_max != 0 else 1 for value_max in max_per_value]
scaled_market_data = [[(10 ** scaling_factors[i]) * data_value_point for data_value_point in market_data[value_names[i]]]
for i in range(len(value_names))]
# Plotting
if PLOT_MEMORY:
nb_lines = min(len(market_data_json), NB_INSTRUCTIONS)
df = pd.DataFrame(market_data_json)
print(df.columns)
print(df.shape)
df["shorts_depths"] = [k[0] for k in df["shorts_depths"]]
df["longs_depths"] = [k[0] for k in df["longs_depths"]]
df["gc_list_lengths"] = [k[0] for k in df["gc_list_lengths"]]
for k in range(len(df["page_full_ratios"][0][0])):
df[f"page_{k}_full_ratio"] = [l[0][k] for l in df["page_full_ratios"]]
df.drop("page_full_ratios", axis=1)
df = df.stack().reset_index()
print(df)
fig = px.line(df, x="level_0", y=0, color="level_1")
fig.show()
# print([len(m["page_full_ratios"]) for m in market_data_json])
page_full_ratios = [
market_data_json[i]["page_full_ratios"][0] for i in range(nb_lines)]
longs_depths = [
market_data_json[i]["longs_depths"] for i in range(nb_lines)
]
shorts_depths = [
market_data_json[i]["shorts_depths"] for i in range(nb_lines)
]
for k in range(len(market_data_json[0]["page_full_ratios"][0])):
plt.plot([page_full_ratios[i][k] for i in range(nb_lines)], label=(
"page_full_ratios for page " + str(k)))
plt.plot()
gc_list_lenghts = [
market_data_json[i]["gc_list_lengths"][0] for i in range(nb_lines)] # TODO Mult instances
# plt.plot([gc_list_lenghts[i] for i in range(nb_lines)], label=(
# "gc_list_length"))
plt.plot(longs_depths, label=("longs_depths"))
plt.plot(shorts_depths, label=("shorts_depths"))
elif TRANSFORM:
for (i, key) in enumerate(value_names):
if f_value_props[key][0] != "":
plt.plot(scaled_market_data[i][:NB_INSTRUCTIONS], label=(
key + " x1e"), color=f_value_props[key][0])
else:
plt.plot(scaled_market_data[i][:NB_INSTRUCTIONS], label=(
key + " x1e"))
else:
for (i, key) in enumerate(value_names):
if f_value_props[key][0] != "":
plt.plot(scaled_market_data[i], label=(
key + " x1e" + str(scaling_factors[i])), color=f_value_props[key][0])
else:
plt.plot(scaled_market_data[i], label=(
key + " x1e"))
plt.legend(prop={'size': 15})
plt.show() # block=False)
# plt.savefig("../log/graph_{}.png".format(date_time), dpi=440)
# gc_list_lengths: [0], page_full_ratios: [[], [0.0, 0.0, 0.0, 0.0, 0.0]]
| 37.383648
| 275
| 0.640646
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,743
| 0.293237
|
498e87a7732f2915dd699629baa71c985e250298
| 143
|
py
|
Python
|
app/sett.py
|
AndreyKaBelka/MoneySaver
|
5ad2bc60379aeb0b02e71760e22b260bf29982e2
|
[
"Apache-2.0"
] | null | null | null |
app/sett.py
|
AndreyKaBelka/MoneySaver
|
5ad2bc60379aeb0b02e71760e22b260bf29982e2
|
[
"Apache-2.0"
] | 3
|
2020-12-12T23:33:36.000Z
|
2020-12-12T23:35:54.000Z
|
app/sett.py
|
AndreyKaBelka/MoneySaver
|
5ad2bc60379aeb0b02e71760e22b260bf29982e2
|
[
"Apache-2.0"
] | null | null | null |
class Settings:
BOT_KEY = ""
HOST_NAME = "127.0.0.1"
USER_NAME = "root"
USER_PASS = "Andrey171200"
SQL_NAME = "moneysaver"
| 20.428571
| 30
| 0.608392
| 142
| 0.993007
| 0
| 0
| 0
| 0
| 0
| 0
| 45
| 0.314685
|
498ebed60829fc81050f096acf226151f138af86
| 525
|
py
|
Python
|
oTree/consent/__init__.py
|
jleutgeb/privilege
|
2a4f15c98d94d9f1dbf1c4685c5e96d018d58abc
|
[
"MIT"
] | null | null | null |
oTree/consent/__init__.py
|
jleutgeb/privilege
|
2a4f15c98d94d9f1dbf1c4685c5e96d018d58abc
|
[
"MIT"
] | 11
|
2021-05-06T09:45:30.000Z
|
2022-03-01T17:48:35.000Z
|
oTree/consent/__init__.py
|
jleutgeb/privilege
|
2a4f15c98d94d9f1dbf1c4685c5e96d018d58abc
|
[
"MIT"
] | null | null | null |
from otree.api import *
c = Currency
doc = """
Simple Consent App
Players may only continue after clicking the consent button.
"""
class Constants(BaseConstants):
name_in_url = 'consent'
players_per_group = None
num_rounds = 1
class Subsession(BaseSubsession):
pass
class Group(BaseGroup):
pass
class Player(BasePlayer):
consent = models.BooleanField(choices=[[True, 'Ja']])
# PAGES
class Consent(Page):
form_model = "player"
form_fields = ["consent"]
page_sequence = [Consent]
| 14.583333
| 61
| 0.693333
| 340
| 0.647619
| 0
| 0
| 0
| 0
| 0
| 0
| 125
| 0.238095
|
498efc2d71a44fd1bc6d2b0987f9eff5df4001b1
| 1,192
|
py
|
Python
|
src/pytornado/_util.py
|
airinnova/pytornado
|
6127f45af60ab05f15b441bc134089a7e7a59669
|
[
"Linux-OpenIB"
] | 16
|
2019-08-13T18:49:14.000Z
|
2022-01-11T15:41:12.000Z
|
src/pytornado/_util.py
|
airinnova/pytornado
|
6127f45af60ab05f15b441bc134089a7e7a59669
|
[
"Linux-OpenIB"
] | 24
|
2019-09-11T14:48:01.000Z
|
2022-03-18T08:17:52.000Z
|
src/pytornado/_util.py
|
airinnova/pytornado
|
6127f45af60ab05f15b441bc134089a7e7a59669
|
[
"Linux-OpenIB"
] | 5
|
2019-09-20T18:45:45.000Z
|
2020-12-08T01:44:43.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------
# Copyright 2019-2020 Airinnova AB and the FramAT authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------
"""
Utils
"""
from numbers import Number
class Schemas:
any_int = {'type': int}
any_num = {'type': Number}
pos_int = {'type': int, '>': 0}
pos_number = {'type': Number, '>': 0}
string = {'type': str, '>': 0}
vector3x1 = {'type': list, 'min_len': 3, 'max_len': 3, 'item_types': Number}
vector6x1 = {'type': list, 'min_len': 6, 'max_len': 6, 'item_types': Number}
| 34.057143
| 80
| 0.59396
| 348
| 0.291946
| 0
| 0
| 0
| 0
| 0
| 0
| 904
| 0.758389
|
498f0ce62fa86447888328db5c5d83ceabc8b302
| 587
|
py
|
Python
|
test/application/test_auth.py
|
Ashaba/API-Monitor
|
533eb6698fcb5decb48f746784af6894844b3c69
|
[
"MIT"
] | null | null | null |
test/application/test_auth.py
|
Ashaba/API-Monitor
|
533eb6698fcb5decb48f746784af6894844b3c69
|
[
"MIT"
] | 22
|
2018-02-06T19:53:11.000Z
|
2021-04-30T20:35:01.000Z
|
test/application/test_auth.py
|
Ashaba/API-Monitor
|
533eb6698fcb5decb48f746784af6894844b3c69
|
[
"MIT"
] | null | null | null |
from test.base import BaseTestCase, user_payload
import json
class TestAuth(BaseTestCase):
def test_authenticate(self):
response = self.client.post('/auth', data=json.dumps(user_payload), content_type='application/json')
response_data = json.loads(response.data)
self.assert200(response)
self.assertEqual(response_data["status"], "success")
def test_auth_with_no_data(self):
response = self.client.post('/auth', content_type='application/json')
self.assert400(response)
response_data = json.loads(response.data)
self.assertEqual(response_data["status"], "fail")
| 32.611111
| 102
| 0.76661
| 523
| 0.890971
| 0
| 0
| 0
| 0
| 0
| 0
| 81
| 0.13799
|
498fe8e984fc4170d05d05875ef23082a63dec00
| 5,918
|
py
|
Python
|
JumpscaleCore/core/generator/JSGenerator.py
|
grimpy/jumpscaleX_core
|
c24d6d47fccc0801e578fedb376ef110f7a00bad
|
[
"Apache-2.0"
] | null | null | null |
JumpscaleCore/core/generator/JSGenerator.py
|
grimpy/jumpscaleX_core
|
c24d6d47fccc0801e578fedb376ef110f7a00bad
|
[
"Apache-2.0"
] | null | null | null |
JumpscaleCore/core/generator/JSGenerator.py
|
grimpy/jumpscaleX_core
|
c24d6d47fccc0801e578fedb376ef110f7a00bad
|
[
"Apache-2.0"
] | null | null | null |
import os
import fnmatch
from pathlib import Path
from jinja2 import Template
from .Metadata import Metadata
class JSGenerator:
def __init__(self, j):
"""
"""
self._j = j
self._generated = False
def _check_process_file(self, path):
bname = os.path.basename(path)
if bname.startswith("_"):
return False
IGNORE = ["/template", "JSLoader.py", "SystemFSDecorators.py", "FixerReplace"]
for item in IGNORE:
if path.find(item) != -1:
return False
return True
def lib_link(self, path):
"""
look for ".jumpscalemodules" and link the parent directory to the JSX lib dir
:param path:
:return:
"""
j = self._j
# can use j here because will never be used in first step
for path in j.sal.fs.listFilesInDir(path, True, filter=".jumpscalemodules"):
dpath = j.sal.fs.getDirName(path)
target = j.core.tools.text_replace("{DIR_BASE}/lib/jumpscale/%s" % j.sal.fs.getBaseName(dpath))
j.sal.fs.symlink(dpath, target, True)
def generate(self, methods_find=False, action_method=None, action_args={}, path=None):
"""
walk over all found jumpscale libraries
look for the classes where there is a __jslocation__ inside these are classes which need to be loaded
:param reset:
:return:
"""
self.md = Metadata(self._j)
# find the directory in which we have all repo's of threefoldtech
if path:
rootDir = path
else:
rootDir = os.path.dirname(self._j.core.dir_jumpscale.rstrip("/"))
p = Path(rootDir)
for dpath in p.iterdir():
if not dpath.is_dir():
continue
if dpath.name.startswith("."):
continue
for dpath2 in dpath.iterdir():
jsmodpath = os.path.join(os.fspath(dpath2), ".jumpscalemodules")
if not os.path.exists(jsmodpath):
continue
js_lib_path = os.path.join(os.fspath(dpath2))
# NOW WE HAVE FOUND A SET OF JUMPSCALE MODULES
jumpscale_repo_name = os.path.basename(dpath2)
for dirName, subdirList, fileList in os.walk(os.fspath(dpath2), followlinks=True):
if dirName.find("egg-info") != -1:
self._j.shell()
if dirName.find("Jumpscale/core") is not -1:
continue
if dirName.find("notebooks/") is not -1:
continue
# skip the core files, they don't need to be read
for item in fnmatch.filter(fileList, "*.py"):
path = os.path.join(dirName, item)
self._log("process", path)
if self._check_process_file(path):
# self._log("process_ok:")
self.md.jsmodule_get(
path=path,
jumpscale_repo_name=jumpscale_repo_name,
js_lib_path=js_lib_path,
methods_find=methods_find,
action_method=action_method,
action_args=action_args,
)
self.md.groups_load() # make sure we find all groups
# self._j.shell()
self._render()
self.report()
return action_args
def _log(self, cat, msg=""):
print("- %-15s %s" % (cat, msg))
pass
def _render(self):
# create the jumpscale dir if it does not exist yet
dpath = "%s/jumpscale/" % self._j.dirs.TMPDIR
if not os.path.exists(dpath):
os.makedirs(dpath)
# write the __init__ file otherwise cannot include
dpath = "%s/jumpscale/__init__.py" % self._j.dirs.TMPDIR
file = open(dpath, "w")
file.write("")
file.close()
if self._j.application._check_debug():
template_name = "template_jumpscale_debug.py"
else:
template_name = "template_jumpscale.py"
template_path = os.path.join(os.path.dirname(__file__), "templates", template_name)
template = Path(template_path).read_text()
t = Template(template)
C = t.render(md=self.md)
dpath = self._j.core.application._lib_generation_path
file = open(dpath, "w")
file.write(C)
file.close()
self._generated = True
def report(self):
"""
kosmos "j.core.jsgenerator.report()"
write reports to /tmp/jumpscale/code_report.md
:return:
"""
# if self._generated is False:
# self.generate()
for name, jsgroup in self.md.jsgroups.items():
path = "%s/jumpscale/code_report_%s.md" % (self._j.dirs.TMPDIR, jsgroup.name)
file = open(path, "w")
file.write(jsgroup.markdown)
file.close()
self.report_errors()
self.report_line_changes()
def report_errors(self):
out = ""
for cat, obj, error, trace in self._j.application.errors_init:
out += "## %s:%s\n\n" % (cat, obj)
out += "%s\n\n" % error
out += "%s\n\n" % trace
path = "%s/jumpscale/ERRORS_report.md" % (self._j.dirs.TMPDIR)
file = open(path, "w")
file.write(out)
file.close()
return len(self._j.application.errors_init)
def report_line_changes(self):
out = ""
for item in self.md.line_changes:
out += str(item)
path = "%s/jumpscale/LINECHANGES_report.md" % (self._j.dirs.TMPDIR)
file = open(path, "w")
file.write(out)
file.close()
| 34.811765
| 109
| 0.535992
| 5,804
| 0.980737
| 0
| 0
| 0
| 0
| 0
| 0
| 1,398
| 0.236228
|
49905454a4a778d8f4095622f9b3c6a78a737493
| 76,810
|
py
|
Python
|
h1/api/recovery_project_plan_api.py
|
hyperonecom/h1-client-python
|
4ce355852ba3120ec1b8f509ab5894a5c08da730
|
[
"MIT"
] | null | null | null |
h1/api/recovery_project_plan_api.py
|
hyperonecom/h1-client-python
|
4ce355852ba3120ec1b8f509ab5894a5c08da730
|
[
"MIT"
] | null | null | null |
h1/api/recovery_project_plan_api.py
|
hyperonecom/h1-client-python
|
4ce355852ba3120ec1b8f509ab5894a5c08da730
|
[
"MIT"
] | null | null | null |
"""
HyperOne
HyperOne API # noqa: E501
The version of the OpenAPI document: 0.1.0
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from h1.api_client import ApiClient, Endpoint as _Endpoint
from h1.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types
)
from h1.model.event import Event
from h1.model.inline_response400 import InlineResponse400
from h1.model.plan import Plan
from h1.model.recovery_project_plan_create import RecoveryProjectPlanCreate
from h1.model.recovery_project_plan_update import RecoveryProjectPlanUpdate
from h1.model.resource_service import ResourceService
from h1.model.tag import Tag
from h1.model.tag_array import TagArray
class RecoveryProjectPlanApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def __recovery_project_plan_create(
self,
project_id,
location_id,
recovery_project_plan_create,
**kwargs
):
"""Create recovery/plan # noqa: E501
Create plan # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.recovery_project_plan_create(project_id, location_id, recovery_project_plan_create, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
recovery_project_plan_create (RecoveryProjectPlanCreate):
Keyword Args:
x_idempotency_key (str): Idempotency key. [optional]
x_dry_run (str): Dry run. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Plan
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['recovery_project_plan_create'] = \
recovery_project_plan_create
return self.call_with_http_info(**kwargs)
self.recovery_project_plan_create = _Endpoint(
settings={
'response_type': (Plan,),
'auth': [
'BearerAuth'
],
'endpoint_path': '/recovery/{locationId}/project/{projectId}/plan',
'operation_id': 'recovery_project_plan_create',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'recovery_project_plan_create',
'x_idempotency_key',
'x_dry_run',
],
'required': [
'project_id',
'location_id',
'recovery_project_plan_create',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'recovery_project_plan_create':
(RecoveryProjectPlanCreate,),
'x_idempotency_key':
(str,),
'x_dry_run':
(str,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'x_idempotency_key': 'x-idempotency-key',
'x_dry_run': 'x-dry-run',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'recovery_project_plan_create': 'body',
'x_idempotency_key': 'header',
'x_dry_run': 'header',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__recovery_project_plan_create
)
def __recovery_project_plan_delete(
self,
project_id,
location_id,
plan_id,
**kwargs
):
"""Delete recovery/plan # noqa: E501
Delete plan # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.recovery_project_plan_delete(project_id, location_id, plan_id, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
plan_id (str): Plan Id
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['plan_id'] = \
plan_id
return self.call_with_http_info(**kwargs)
self.recovery_project_plan_delete = _Endpoint(
settings={
'response_type': None,
'auth': [
'BearerAuth'
],
'endpoint_path': '/recovery/{locationId}/project/{projectId}/plan/{planId}',
'operation_id': 'recovery_project_plan_delete',
'http_method': 'DELETE',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'plan_id',
],
'required': [
'project_id',
'location_id',
'plan_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'plan_id':
(str,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'plan_id': 'planId',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'plan_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__recovery_project_plan_delete
)
def __recovery_project_plan_event_get(
self,
project_id,
location_id,
plan_id,
event_id,
**kwargs
):
"""Get recovery/plan.event # noqa: E501
Get recovery/plan.event # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.recovery_project_plan_event_get(project_id, location_id, plan_id, event_id, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
plan_id (str): Plan Id
event_id (str): eventId
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Event
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['plan_id'] = \
plan_id
kwargs['event_id'] = \
event_id
return self.call_with_http_info(**kwargs)
self.recovery_project_plan_event_get = _Endpoint(
settings={
'response_type': (Event,),
'auth': [
'BearerAuth'
],
'endpoint_path': '/recovery/{locationId}/project/{projectId}/plan/{planId}/event/{eventId}',
'operation_id': 'recovery_project_plan_event_get',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'plan_id',
'event_id',
],
'required': [
'project_id',
'location_id',
'plan_id',
'event_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'plan_id':
(str,),
'event_id':
(str,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'plan_id': 'planId',
'event_id': 'eventId',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'plan_id': 'path',
'event_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__recovery_project_plan_event_get
)
def __recovery_project_plan_event_list(
self,
project_id,
location_id,
plan_id,
**kwargs
):
"""List recovery/plan.event # noqa: E501
List recovery/plan.event # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.recovery_project_plan_event_list(project_id, location_id, plan_id, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
plan_id (str): Plan Id
Keyword Args:
limit (float): $limit. [optional] if omitted the server will use the default value of 100
skip (float): $skip. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
[Event]
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['plan_id'] = \
plan_id
return self.call_with_http_info(**kwargs)
self.recovery_project_plan_event_list = _Endpoint(
settings={
'response_type': ([Event],),
'auth': [
'BearerAuth'
],
'endpoint_path': '/recovery/{locationId}/project/{projectId}/plan/{planId}/event',
'operation_id': 'recovery_project_plan_event_list',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'plan_id',
'limit',
'skip',
],
'required': [
'project_id',
'location_id',
'plan_id',
],
'nullable': [
],
'enum': [
],
'validation': [
'limit',
]
},
root_map={
'validations': {
('limit',): {
'inclusive_maximum': 1000,
'inclusive_minimum': 1,
},
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'plan_id':
(str,),
'limit':
(float,),
'skip':
(float,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'plan_id': 'planId',
'limit': '$limit',
'skip': '$skip',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'plan_id': 'path',
'limit': 'query',
'skip': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__recovery_project_plan_event_list
)
def __recovery_project_plan_get(
self,
project_id,
location_id,
plan_id,
**kwargs
):
"""Get recovery/plan # noqa: E501
Returns a single plan # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.recovery_project_plan_get(project_id, location_id, plan_id, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
plan_id (str): Plan Id
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Plan
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['plan_id'] = \
plan_id
return self.call_with_http_info(**kwargs)
self.recovery_project_plan_get = _Endpoint(
settings={
'response_type': (Plan,),
'auth': [
'BearerAuth'
],
'endpoint_path': '/recovery/{locationId}/project/{projectId}/plan/{planId}',
'operation_id': 'recovery_project_plan_get',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'plan_id',
],
'required': [
'project_id',
'location_id',
'plan_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'plan_id':
(str,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'plan_id': 'planId',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'plan_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__recovery_project_plan_get
)
def __recovery_project_plan_list(
self,
project_id,
location_id,
**kwargs
):
"""List recovery/plan # noqa: E501
List plan # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.recovery_project_plan_list(project_id, location_id, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
Keyword Args:
name (str): Filter by name. [optional]
tag_value (str): Filter by tag.value. [optional]
tag_key (str): Filter by tag.key. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
[Plan]
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
return self.call_with_http_info(**kwargs)
self.recovery_project_plan_list = _Endpoint(
settings={
'response_type': ([Plan],),
'auth': [
'BearerAuth'
],
'endpoint_path': '/recovery/{locationId}/project/{projectId}/plan',
'operation_id': 'recovery_project_plan_list',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'name',
'tag_value',
'tag_key',
],
'required': [
'project_id',
'location_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'name':
(str,),
'tag_value':
(str,),
'tag_key':
(str,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'name': 'name',
'tag_value': 'tag.value',
'tag_key': 'tag.key',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'name': 'query',
'tag_value': 'query',
'tag_key': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__recovery_project_plan_list
)
def __recovery_project_plan_service_get(
self,
project_id,
location_id,
plan_id,
service_id,
**kwargs
):
"""Get recovery/plan.service # noqa: E501
Get recovery/plan.service # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.recovery_project_plan_service_get(project_id, location_id, plan_id, service_id, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
plan_id (str): Plan Id
service_id (str): serviceId
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
ResourceService
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['plan_id'] = \
plan_id
kwargs['service_id'] = \
service_id
return self.call_with_http_info(**kwargs)
self.recovery_project_plan_service_get = _Endpoint(
settings={
'response_type': (ResourceService,),
'auth': [
'BearerAuth'
],
'endpoint_path': '/recovery/{locationId}/project/{projectId}/plan/{planId}/service/{serviceId}',
'operation_id': 'recovery_project_plan_service_get',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'plan_id',
'service_id',
],
'required': [
'project_id',
'location_id',
'plan_id',
'service_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'plan_id':
(str,),
'service_id':
(str,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'plan_id': 'planId',
'service_id': 'serviceId',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'plan_id': 'path',
'service_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__recovery_project_plan_service_get
)
def __recovery_project_plan_service_list(
self,
project_id,
location_id,
plan_id,
**kwargs
):
"""List recovery/plan.service # noqa: E501
List recovery/plan.service # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.recovery_project_plan_service_list(project_id, location_id, plan_id, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
plan_id (str): Plan Id
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
[ResourceService]
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['plan_id'] = \
plan_id
return self.call_with_http_info(**kwargs)
self.recovery_project_plan_service_list = _Endpoint(
settings={
'response_type': ([ResourceService],),
'auth': [
'BearerAuth'
],
'endpoint_path': '/recovery/{locationId}/project/{projectId}/plan/{planId}/service',
'operation_id': 'recovery_project_plan_service_list',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'plan_id',
],
'required': [
'project_id',
'location_id',
'plan_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'plan_id':
(str,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'plan_id': 'planId',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'plan_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__recovery_project_plan_service_list
)
def __recovery_project_plan_tag_create(
self,
project_id,
location_id,
plan_id,
tag,
**kwargs
):
"""Create recovery/plan.tag # noqa: E501
Create recovery/plan.tag # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.recovery_project_plan_tag_create(project_id, location_id, plan_id, tag, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
plan_id (str): Plan Id
tag (Tag):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Tag
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['plan_id'] = \
plan_id
kwargs['tag'] = \
tag
return self.call_with_http_info(**kwargs)
self.recovery_project_plan_tag_create = _Endpoint(
settings={
'response_type': (Tag,),
'auth': [
'BearerAuth'
],
'endpoint_path': '/recovery/{locationId}/project/{projectId}/plan/{planId}/tag',
'operation_id': 'recovery_project_plan_tag_create',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'plan_id',
'tag',
],
'required': [
'project_id',
'location_id',
'plan_id',
'tag',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'plan_id':
(str,),
'tag':
(Tag,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'plan_id': 'planId',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'plan_id': 'path',
'tag': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__recovery_project_plan_tag_create
)
def __recovery_project_plan_tag_delete(
self,
project_id,
location_id,
plan_id,
tag_id,
**kwargs
):
"""Delete recovery/plan.tag # noqa: E501
Delete recovery/plan.tag # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.recovery_project_plan_tag_delete(project_id, location_id, plan_id, tag_id, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
plan_id (str): Plan Id
tag_id (str): tagId
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['plan_id'] = \
plan_id
kwargs['tag_id'] = \
tag_id
return self.call_with_http_info(**kwargs)
self.recovery_project_plan_tag_delete = _Endpoint(
settings={
'response_type': None,
'auth': [
'BearerAuth'
],
'endpoint_path': '/recovery/{locationId}/project/{projectId}/plan/{planId}/tag/{tagId}',
'operation_id': 'recovery_project_plan_tag_delete',
'http_method': 'DELETE',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'plan_id',
'tag_id',
],
'required': [
'project_id',
'location_id',
'plan_id',
'tag_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'plan_id':
(str,),
'tag_id':
(str,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'plan_id': 'planId',
'tag_id': 'tagId',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'plan_id': 'path',
'tag_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__recovery_project_plan_tag_delete
)
def __recovery_project_plan_tag_get(
self,
project_id,
location_id,
plan_id,
tag_id,
**kwargs
):
"""Get recovery/plan.tag # noqa: E501
Get recovery/plan.tag # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.recovery_project_plan_tag_get(project_id, location_id, plan_id, tag_id, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
plan_id (str): Plan Id
tag_id (str): tagId
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Tag
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['plan_id'] = \
plan_id
kwargs['tag_id'] = \
tag_id
return self.call_with_http_info(**kwargs)
self.recovery_project_plan_tag_get = _Endpoint(
settings={
'response_type': (Tag,),
'auth': [
'BearerAuth'
],
'endpoint_path': '/recovery/{locationId}/project/{projectId}/plan/{planId}/tag/{tagId}',
'operation_id': 'recovery_project_plan_tag_get',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'plan_id',
'tag_id',
],
'required': [
'project_id',
'location_id',
'plan_id',
'tag_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'plan_id':
(str,),
'tag_id':
(str,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'plan_id': 'planId',
'tag_id': 'tagId',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'plan_id': 'path',
'tag_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__recovery_project_plan_tag_get
)
def __recovery_project_plan_tag_list(
self,
project_id,
location_id,
plan_id,
**kwargs
):
"""List recovery/plan.tag # noqa: E501
List recovery/plan.tag # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.recovery_project_plan_tag_list(project_id, location_id, plan_id, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
plan_id (str): Plan Id
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
[Tag]
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['plan_id'] = \
plan_id
return self.call_with_http_info(**kwargs)
self.recovery_project_plan_tag_list = _Endpoint(
settings={
'response_type': ([Tag],),
'auth': [
'BearerAuth'
],
'endpoint_path': '/recovery/{locationId}/project/{projectId}/plan/{planId}/tag',
'operation_id': 'recovery_project_plan_tag_list',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'plan_id',
],
'required': [
'project_id',
'location_id',
'plan_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'plan_id':
(str,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'plan_id': 'planId',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'plan_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__recovery_project_plan_tag_list
)
def __recovery_project_plan_tag_put(
self,
project_id,
location_id,
plan_id,
tag_array,
**kwargs
):
"""Replace recovery/plan.tag # noqa: E501
Replace recovery/plan.tag # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.recovery_project_plan_tag_put(project_id, location_id, plan_id, tag_array, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
plan_id (str): Plan Id
tag_array (TagArray):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
[Tag]
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['plan_id'] = \
plan_id
kwargs['tag_array'] = \
tag_array
return self.call_with_http_info(**kwargs)
self.recovery_project_plan_tag_put = _Endpoint(
settings={
'response_type': ([Tag],),
'auth': [
'BearerAuth'
],
'endpoint_path': '/recovery/{locationId}/project/{projectId}/plan/{planId}/tag',
'operation_id': 'recovery_project_plan_tag_put',
'http_method': 'PUT',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'plan_id',
'tag_array',
],
'required': [
'project_id',
'location_id',
'plan_id',
'tag_array',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'plan_id':
(str,),
'tag_array':
(TagArray,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'plan_id': 'planId',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'plan_id': 'path',
'tag_array': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__recovery_project_plan_tag_put
)
def __recovery_project_plan_update(
self,
project_id,
location_id,
plan_id,
recovery_project_plan_update,
**kwargs
):
"""Update recovery/plan # noqa: E501
Returns modified plan # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.recovery_project_plan_update(project_id, location_id, plan_id, recovery_project_plan_update, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
plan_id (str): Plan Id
recovery_project_plan_update (RecoveryProjectPlanUpdate):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Plan
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['plan_id'] = \
plan_id
kwargs['recovery_project_plan_update'] = \
recovery_project_plan_update
return self.call_with_http_info(**kwargs)
self.recovery_project_plan_update = _Endpoint(
settings={
'response_type': (Plan,),
'auth': [
'BearerAuth'
],
'endpoint_path': '/recovery/{locationId}/project/{projectId}/plan/{planId}',
'operation_id': 'recovery_project_plan_update',
'http_method': 'PATCH',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'plan_id',
'recovery_project_plan_update',
],
'required': [
'project_id',
'location_id',
'plan_id',
'recovery_project_plan_update',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'plan_id':
(str,),
'recovery_project_plan_update':
(RecoveryProjectPlanUpdate,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'plan_id': 'planId',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'plan_id': 'path',
'recovery_project_plan_update': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__recovery_project_plan_update
)
| 36.524013
| 137
| 0.442833
| 75,970
| 0.989064
| 0
| 0
| 0
| 0
| 0
| 0
| 41,418
| 0.539227
|
4992878d55b3a8da195170f6eea9d69be14347a9
| 2,059
|
py
|
Python
|
days/day5.py
|
vanHavel/AdventOfCode2021
|
a83ee21cffff56ba3f49de7af5113bf0b11fea7a
|
[
"MIT"
] | null | null | null |
days/day5.py
|
vanHavel/AdventOfCode2021
|
a83ee21cffff56ba3f49de7af5113bf0b11fea7a
|
[
"MIT"
] | null | null | null |
days/day5.py
|
vanHavel/AdventOfCode2021
|
a83ee21cffff56ba3f49de7af5113bf0b11fea7a
|
[
"MIT"
] | null | null | null |
from collections import defaultdict
from typing import List, Tuple
from aocd import get_data, submit
DAY = 5
YEAR = 2021
def part1(data: str) -> str:
segments = read(data)
covered = defaultdict(int)
for segment in segments:
x1, y1, x2, y2 = segment
if x1 != x2 and y1 != y2:
continue
if x1 == x2:
for y in range(min(y1, y2), max(y1, y2)+1):
covered[(x1, y)] += 1
elif y1 == y2:
for x in range(min(x1, x2), max(x1, x2)+1):
covered[(x, y1)] += 1
ans = 0
for point in covered:
if covered[point] > 1:
ans += 1
return str(ans)
def part2(data: str) -> str:
segments = read(data)
covered = defaultdict(int)
for segment in segments:
x1, y1, x2, y2 = segment
if x1 == x2:
for y in range(min(y1, y2), max(y1, y2)+1):
covered[(x1, y)] += 1
elif y1 == y2:
for x in range(min(x1, x2), max(x1, x2)+1):
covered[(x, y1)] += 1
else:
if x1 > x2:
x1, y1, x2, y2 = x2, y2, x1, y1
if y1 < y2:
for x in range(x1, x2+1):
covered[(x, y1+(x-x1))] += 1
else:
for x in range(x1, x2+1):
covered[(x, y1-(x-x1))] += 1
ans = 0
for point in covered:
if covered[point] > 1:
ans += 1
return str(ans)
def read(data: str) -> List[Tuple[int, int, int, int]]:
lines = data.splitlines()
segments = []
for line in lines:
left, right = line.split('->')
x1, y1 = left.split(',')
x2, y2 = right.split(',')
segments.append((int(x1), int(y1), int(x2), int(y2)))
return segments
if __name__ == '__main__':
input_data = get_data(day=DAY, year=YEAR)
ans1 = part1(input_data)
print(ans1)
#submit(answer=ans1, day=DAY, year=YEAR, part=1)
ans2 = part2(input_data)
print(ans2)
submit(answer=ans2, day=DAY, year=YEAR, part=2)
| 27.092105
| 61
| 0.4949
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 68
| 0.033026
|
49936fb891f5aa506d6883922c089dfe1817eb4b
| 1,108
|
py
|
Python
|
215.kthLargestElementInAnArray2.py
|
ColinTing/Algorithm
|
02c8087503298f050deb0fbee6cb887b3aeb6592
|
[
"MIT"
] | null | null | null |
215.kthLargestElementInAnArray2.py
|
ColinTing/Algorithm
|
02c8087503298f050deb0fbee6cb887b3aeb6592
|
[
"MIT"
] | null | null | null |
215.kthLargestElementInAnArray2.py
|
ColinTing/Algorithm
|
02c8087503298f050deb0fbee6cb887b3aeb6592
|
[
"MIT"
] | null | null | null |
import random
class Solution:
def findKthLargest(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
if nums is None or len(nums)==0:
return nums
nums = self.quickSort(nums,0,len(nums)-1,k)
return nums[k-1]
def partition(self,nums,p,q):
ra = random.randint(p,q)
nums[p],nums[ra] = nums[ra],nums[p]
x = nums[p]
i = p
for j in range(p+1,q+1,1):
if nums[j]>=x: #左边是nums[j]而不是nums[i]
i+=1
nums[i],nums[j] = nums[j],nums[i]
nums[p],nums[i] = nums[i],nums[p]
return i
def quickSort(self,nums,p,q,k):
if p<=q:
r = self.partition(nums,p,q)
if r == k-1:
return nums
if r < k-1:
p = r+1
return self.quickSort(nums,p,q,k)
if r > k-1:
q = r-1
return self.quickSort(nums,p,q,k)
list = [3,2,3,1,2,4,5,5,6]
k = 4
s = Solution()
print(s.findKthLargest(list,k))
| 25.767442
| 51
| 0.445848
| 1,024
| 0.914286
| 0
| 0
| 0
| 0
| 0
| 0
| 119
| 0.10625
|
4994b9856023b95cccc4144927c2909950d9bad5
| 383
|
gyp
|
Python
|
binding.gyp
|
mceSystems/node-windows-pac-resolver
|
a1eaaa6b74d4e82218e6d975582aab121e12da6f
|
[
"MIT"
] | 1
|
2021-11-14T01:26:45.000Z
|
2021-11-14T01:26:45.000Z
|
binding.gyp
|
mceSystems/node-windows-pac-resolver
|
a1eaaa6b74d4e82218e6d975582aab121e12da6f
|
[
"MIT"
] | 1
|
2021-08-31T21:38:42.000Z
|
2021-08-31T21:38:42.000Z
|
binding.gyp
|
mceSystems/node-windows-pac-resolver
|
a1eaaa6b74d4e82218e6d975582aab121e12da6f
|
[
"MIT"
] | 1
|
2021-11-14T01:26:12.000Z
|
2021-11-14T01:26:12.000Z
|
{
"targets": [
{
"target_name": "binding",
"sources": [
"native\\winhttpBindings.cpp"
],
"include_dirs": [
"<!@(node -p \"require('node-addon-api').include\")"
],
"libraries": [
"WinHTTP.lib",
"-DelayLoad:node.exe"
],
"msbuild_settings": {
"ClCompile": {
"RuntimeLibrary": "MultiThreaded"
}
}
}
]
}
| 16.652174
| 68
| 0.48564
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 240
| 0.626632
|
4994cdca869fe06dd8910a681063b2822b7a3d86
| 2,122
|
py
|
Python
|
diplom_test/data_reader.py
|
CrackedSTone/algorithm-detects-liver-pathology
|
d52d08e4e6931b3502f083f20d6332f7b6839a3b
|
[
"Apache-2.0"
] | 8
|
2019-04-09T07:11:26.000Z
|
2020-02-27T16:51:26.000Z
|
diplom_test/data_reader.py
|
il-yanko/algorithm-detects-liver-pathology
|
d52d08e4e6931b3502f083f20d6332f7b6839a3b
|
[
"Apache-2.0"
] | null | null | null |
diplom_test/data_reader.py
|
il-yanko/algorithm-detects-liver-pathology
|
d52d08e4e6931b3502f083f20d6332f7b6839a3b
|
[
"Apache-2.0"
] | 2
|
2019-04-04T07:13:02.000Z
|
2020-02-06T04:58:34.000Z
|
import glob
import numpy as np
#import cv2
from PIL import Image
#import os.path
class ImgReader:
def __init__(self):
pass
@staticmethod
def read_directory(dir_path, file_format=None):
try:
images = [np.asarray(Image.open(img_path).convert('L'), dtype=np.uint8)
for img_path in glob.glob(dir_path + "*" + (("." + file_format) if file_format else ""))]
print("It was loaded", len(images), "images from", dir_path)
return images
except Exception as e:
print(e)
return
class DataReader:
def __init__(self):
pass
@staticmethod
def read_directory(dir_path, file_format=None):
try:
images = [np.asarray(np.genfromtxt(img_path, delimiter=','), dtype=np.float64)
for img_path in glob.glob(dir_path + "*" + (("." + file_format) if file_format else ""))]
print("It was loaded", len(images), "datafiles from", dir_path)
return images
except Exception as e:
print(e)
return
# ALTERNATIVE LOADER:
'''
# process RGB/grayscale
def rgb_to_gray(rgb):
# scalar product of colors with certain theoretical coefficients according to the YUV system
return np.dot(rgb[..., :3], [0.299, 0.587, 0.114]).round(3).astype(int)
# download folder BMP
def get_all_bmp(full_dir):
# to calculate number of files in the folder
file_number = len(next(os.walk(full_dir))[2])
# print(fileNumber, "files were found")
img_arr = list()
for i in range(1, file_number + 1):
img_arr.append(cv2.imread(full_dir + '/' + str(i) + ".bmp"))
print(len(img_arr), "images were downloaded")
return img_arr
def get_all_img_make_gray(cwd, folder_name):
path = cwd + "/" + folder_name
print("\nPath = ", path)
img_arr = get_all_bmp(path)
for i in range(len(img_arr)):
img_arr[i] = rgb_to_gray(img_arr[i])
return img_arr
'''
# test load .csv
'''
import os.path
cwd = os.getcwd()
a = cwd + "/glcm/auh/csv/"
data = DataReader.read_directory(a)
print(data[0])
'''
| 29.068493
| 111
| 0.615928
| 1,017
| 0.479265
| 0
| 0
| 898
| 0.423186
| 0
| 0
| 1,122
| 0.528746
|
49983ba3d7a780b5fb33eabb069b3531df6c3624
| 3,351
|
py
|
Python
|
docs/conf.py
|
arashbm/dag-python
|
a62761d516daf3a129f6a75359e1b09047ede6f2
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
arashbm/dag-python
|
a62761d516daf3a129f6a75359e1b09047ede6f2
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
arashbm/dag-python
|
a62761d516daf3a129f6a75359e1b09047ede6f2
|
[
"MIT"
] | null | null | null |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'Reticula'
copyright = '2022'
author = 'Arash Badie-Modiri'
# The full version, including alpha/beta/rc tags
release = '0.0.4'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinxcontrib.bibtex'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
primary_domain = None
nitpicky = True
rst_prolog = """
.. role:: py(code)
:language: python
:class: highlight
.. role:: cpp(code)
:language: cpp
:class: highlight
"""
# REs for Python signatures with types
import re
typed_py_re = re.compile(
r'''^ ([\w.]*\.)? # class name(s)
(\w+(?: \[[^\]]+\])?) \s* # thing name
(?: \(\s*(.*)\s*\) # optional: arguments
(?:\s* -> \s* (.*))? # return annotation
)? $ # and nothing more
''', re.VERBOSE)
import sphinx.domains.python
sphinx.domains.python.py_sig_re = typed_py_re
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'furo'
pygments_style = "sphinx"
pygments_dark_style = "monokai"
html_title = "Reticula"
import os.path
def read_icon(path: str):
with open(os.path.join(os.path.dirname(__file__), path), 'r') as f:
return f.read()
html_theme_options = {
"source_repository": "https://github.com/reticula-network/reticula-python",
"source_branch": "main",
"source_directory": "docs/",
"footer_icons": [
{
"name": "GitHub",
"url": "https://github.com/reticula-network",
"html": read_icon("github.svg"),
"class": "",
}, {
"name": "PyPi",
"url": "https://pypi.org/project/reticula/",
"html": read_icon("pypi.svg"),
"class": "",
},
],
}
bibtex_bibfiles = ['references.bib']
bibtex_default_style = 'unsrt'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
| 28.887931
| 79
| 0.617428
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,561
| 0.764249
|
4998582ea46c71688c285dfd2591280666ab63f8
| 1,455
|
py
|
Python
|
libs/cloner.py
|
Rookout/log-scanner
|
bd8b940660a9f40068151dfca514e85aa730bfc0
|
[
"Apache-2.0"
] | null | null | null |
libs/cloner.py
|
Rookout/log-scanner
|
bd8b940660a9f40068151dfca514e85aa730bfc0
|
[
"Apache-2.0"
] | 3
|
2021-05-05T18:30:21.000Z
|
2022-03-10T11:32:52.000Z
|
libs/cloner.py
|
Rookout/log-scanner
|
bd8b940660a9f40068151dfca514e85aa730bfc0
|
[
"Apache-2.0"
] | 1
|
2019-12-16T22:27:45.000Z
|
2019-12-16T22:27:45.000Z
|
import os
import sys
import shutil
import random
import stat
import string
import logging
from git import Repo
try: # macOS
BASE_CLONE_LOCATION = os.path.join(os.path.dirname(sys.modules['__main__'].__file__), "current_clone")
except: # Windows
BASE_CLONE_LOCATION = os.path.join(os.getcwd(), "current_clone")
try:
GITHUB_TOKEN = os.environ["GITHUB_TOKEN"]
except:
logging.basicConfig(level=logging.INFO, format="%(levelname)s - %(message)s")
logging.error("GITHUB_TOKEN must be supplied as environment variable")
quit()
def generate_random_key():
letters = string.ascii_lowercase
return ''.join(random.choice(letters) for i in range(12))
def clone_repository(repo_url):
current_clone_location = os.path.join(BASE_CLONE_LOCATION, generate_random_key())
creator = repo_url.split("/")[-2]
project_name = repo_url.split("/")[-1]
tokenized_repo_url = f"https://{GITHUB_TOKEN}:x-oauth-basic@github.com/{creator}/{project_name}"
os.makedirs(current_clone_location, exist_ok=True)
Repo.clone_from(tokenized_repo_url, current_clone_location)
return current_clone_location
# handles deleting readonly files with shutil
def remove_readonly(func, path, excinfo):
os.chmod(path, stat.S_IWRITE)
func(path)
def delete_currently_cloned_repository(current_clone_location):
if (os.path.exists(current_clone_location)):
shutil.rmtree(current_clone_location, onerror=remove_readonly)
| 30.957447
| 106
| 0.754639
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 282
| 0.193814
|
499a41cfbffd9bf9473869aaf693707dd595ba03
| 6,671
|
py
|
Python
|
tests/test_formDef.py
|
swhume/odmlib
|
597f71c60f4c6bd8639c92e9fc0ae71b8a5416a7
|
[
"MIT"
] | 9
|
2021-09-15T12:26:30.000Z
|
2022-03-30T10:14:14.000Z
|
tests/test_formDef.py
|
swhume/odmlib
|
597f71c60f4c6bd8639c92e9fc0ae71b8a5416a7
|
[
"MIT"
] | 1
|
2021-09-28T09:05:01.000Z
|
2021-09-28T09:05:01.000Z
|
tests/test_formDef.py
|
swhume/odmlib
|
597f71c60f4c6bd8639c92e9fc0ae71b8a5416a7
|
[
"MIT"
] | 1
|
2021-09-29T04:50:23.000Z
|
2021-09-29T04:50:23.000Z
|
from unittest import TestCase
import json
import odmlib.odm_1_3_2.model as ODM
class TestFormDef(TestCase):
def setUp(self) -> None:
attrs = self.set_formdef_attributes()
self.formdef = ODM.FormDef(**attrs)
def test_add_description(self):
tt1 = ODM.TranslatedText(_content="this is the first test description", lang="en")
tt2 = ODM.TranslatedText(_content="this is the second test description", lang="en")
self.formdef.Description = ODM.Description()
self.formdef.Description.TranslatedText = [tt1, tt2]
self.assertEqual(len(self.formdef.Description.TranslatedText), 2)
self.assertEqual(self.formdef.Description.TranslatedText[1]._content, 'this is the second test description')
def test_add_item_group_ref(self):
igr = ODM.ItemGroupRef(ItemGroupOID="ODM.IG.COMMON", Mandatory="Yes", OrderNumber=1)
self.formdef.ItemGroupRef.append(igr)
igr = ODM.ItemGroupRef(ItemGroupOID="ODM.IG.VS_GENERAL", Mandatory="Yes", OrderNumber=2)
self.formdef.ItemGroupRef.append(igr)
igr = ODM.ItemGroupRef(ItemGroupOID="ODM.IG.VS", Mandatory="Yes", OrderNumber=3)
self.formdef.ItemGroupRef.append(igr)
self.assertEqual(self.formdef.ItemGroupRef[0].ItemGroupOID, "ODM.IG.COMMON")
self.assertEqual(self.formdef.ItemGroupRef[2].OrderNumber, 3)
def test_append_item_group_ref(self):
fd = ODM.FormDef(OID="ODM.F.VS", Name="Vital Signs Form", Repeating="Yes")
fd.Description = ODM.Description()
fd.Description.TranslatedText.append(ODM.TranslatedText(_content="this is the first test description", lang="en"))
fd.ItemGroupRef.append(ODM.ItemGroupRef(ItemGroupOID="ODM.IG.COMMON", Mandatory="Yes", OrderNumber=1))
fd.ItemGroupRef.append(ODM.ItemGroupRef(ItemGroupOID="ODM.IG.VS_GENERAL", Mandatory="Yes", OrderNumber=2))
fd.ItemGroupRef.append(ODM.ItemGroupRef(ItemGroupOID="ODM.IG.VS", Mandatory="Yes", OrderNumber=3))
self.assertEqual(fd.ItemGroupRef[0].ItemGroupOID, "ODM.IG.COMMON")
self.assertEqual(fd.ItemGroupRef[2].OrderNumber, 3)
self.assertEqual(fd.Description.TranslatedText[0]._content, "this is the first test description")
def test_add_alias(self):
self.formdef.Alias.append(ODM.Alias(Context="SDTMIG", Name="VS"))
self.formdef.Alias.append(ODM.Alias(Context="CDASHIG", Name="VS"))
self.assertEqual(len(self.formdef.Alias), 2)
self.assertEqual(self.formdef.Alias[1].Context, "CDASHIG")
def test_add_not_alias(self):
item = ODM.ItemDef(OID="ODM.IT.VSPOS", Name="VS Position", DataType="text")
with self.assertRaises(TypeError):
self.formdef.Alias = [item]
self.formdef.Alias.append(ODM.Alias(Context="SDTMIG", Name="VS"))
# list accepts invalid objects
self.formdef.Alias.append(ODM.ItemDef(OID="ODM.IT.VSDT", Name="VS Date", DataType="text"))
self.assertEqual(len(self.formdef.Alias), 2)
self.assertEqual(self.formdef.Alias[0].Context, "SDTMIG")
def test_to_json(self):
attrs = self.set_formdef_attributes()
fd = ODM.FormDef(**attrs)
tt = ODM.TranslatedText(_content="this is the first test description", lang="en")
fd.Description = ODM.Description()
fd.Description.TranslatedText = [tt]
fd.ItemGroupRef.append(ODM.ItemGroupRef(ItemGroupOID="ODM.IG.COMMON", Mandatory="Yes", OrderNumber=1))
fd.ItemGroupRef.append(ODM.ItemGroupRef(ItemGroupOID="ODM.IG.VS_GENERAL", Mandatory="Yes", OrderNumber=2))
fd.ItemGroupRef.append(ODM.ItemGroupRef(ItemGroupOID="ODM.IG.VS", Mandatory="Yes", OrderNumber=3))
fd.Alias.append(ODM.Alias(Context="SDTMIG", Name="VS"))
fd_json = fd.to_json()
fd_dict = json.loads(fd_json)
print(fd_dict)
self.assertEqual(fd_dict["OID"], "ODM.F.VS")
self.assertDictEqual(fd_dict, self.expected_dict())
def test_to_dict(self):
attrs = self.set_formdef_attributes()
fd = ODM.FormDef(**attrs)
fd.Description = ODM.Description()
fd.Description.TranslatedText.append(ODM.TranslatedText(_content="this is the first test description", lang="en"))
fd.ItemGroupRef.append(ODM.ItemGroupRef(ItemGroupOID="ODM.IG.COMMON", Mandatory="Yes", OrderNumber=1))
fd.ItemGroupRef.append(ODM.ItemGroupRef(ItemGroupOID="ODM.IG.VS_GENERAL", Mandatory="Yes", OrderNumber=2))
fd.ItemGroupRef.append(ODM.ItemGroupRef(ItemGroupOID="ODM.IG.VS", Mandatory="Yes", OrderNumber=3))
fd.Alias.append(ODM.Alias(Context="SDTMIG", Name="VS"))
fd_dict = fd.to_dict()
self.assertEqual(fd_dict["OID"], "ODM.F.VS")
self.assertDictEqual(fd_dict, self.expected_dict())
def test_to_xml(self):
attrs = self.set_formdef_attributes()
fd = ODM.FormDef(**attrs)
fd.Description = ODM.Description()
fd.Description.TranslatedText.append(ODM.TranslatedText(_content="this is the first test description", lang="en"))
fd.ItemGroupRef.append(ODM.ItemGroupRef(ItemGroupOID="ODM.IG.COMMON", Mandatory="Yes", OrderNumber=1))
fd.ItemGroupRef.append(ODM.ItemGroupRef(ItemGroupOID="ODM.IG.VS_GENERAL", Mandatory="Yes", OrderNumber=2))
fd.ItemGroupRef.append(ODM.ItemGroupRef(ItemGroupOID="ODM.IG.VS", Mandatory="Yes", OrderNumber=3))
fd.Alias.append(ODM.Alias(Context="SDTMIG", Name="VS"))
fd_xml = fd.to_xml()
self.assertEqual(fd_xml.attrib["OID"], "ODM.F.VS")
igr = fd_xml.findall("ItemGroupRef")
self.assertEqual(len(igr), 3)
self.assertEqual(igr[0].attrib, {"ItemGroupOID": "ODM.IG.COMMON", "Mandatory": "Yes", "OrderNumber": "1"})
@staticmethod
def set_formdef_attributes():
"""
set some FormDef element attributes using test data
:return: dictionary with FormDef attribute information
"""
return {"OID": "ODM.F.VS", "Name": "Vital Signs Form", "Repeating": "Yes"}
@staticmethod
def expected_dict():
return {'OID': 'ODM.F.VS', 'Name': 'Vital Signs Form', 'Repeating': 'Yes',
'ItemGroupRef': [{'ItemGroupOID': 'ODM.IG.COMMON', 'Mandatory': 'Yes', 'OrderNumber': 1},
{'ItemGroupOID': 'ODM.IG.VS_GENERAL', 'Mandatory': 'Yes', 'OrderNumber': 2},
{'ItemGroupOID': 'ODM.IG.VS', 'Mandatory': 'Yes', 'OrderNumber': 3}],
'Description': {'TranslatedText':
[{'_content': 'this is the first test description', 'lang': 'en'}]},
'Alias': [{'Context': 'SDTMIG', 'Name': 'VS'}]}
| 57.017094
| 122
| 0.665867
| 6,589
| 0.987708
| 0
| 0
| 932
| 0.139709
| 0
| 0
| 1,533
| 0.229801
|
499a70e266d8579796d64d1f4d58f86d8e09e3c3
| 143
|
py
|
Python
|
src/Utilities/__init__.py
|
sigseg5/nometa-tg
|
7d0d9f0cf5d8fd98a3808c07a5c44d30f1b13032
|
[
"MIT"
] | 3
|
2020-12-15T07:44:58.000Z
|
2022-03-11T18:57:44.000Z
|
src/Utilities/__init__.py
|
sigseg5/nometa-tg
|
7d0d9f0cf5d8fd98a3808c07a5c44d30f1b13032
|
[
"MIT"
] | null | null | null |
src/Utilities/__init__.py
|
sigseg5/nometa-tg
|
7d0d9f0cf5d8fd98a3808c07a5c44d30f1b13032
|
[
"MIT"
] | null | null | null |
from src.Utilities import cmd_logger
from src.Utilities import metadata_worker
from src.Utilities import misc
from src.Utilities import runner
| 28.6
| 41
| 0.86014
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
499c8c68960d9d5e2ecf3da660784d02e54b3419
| 9,062
|
py
|
Python
|
db_eplusout_reader/processing/esofile_time.py
|
DesignBuilderSoftware/db-esofile-reader
|
a5a80a8069e7eeb30af8ceeca28eb33e9e4f7a50
|
[
"MIT"
] | 1
|
2021-07-15T14:16:10.000Z
|
2021-07-15T14:16:10.000Z
|
db_eplusout_reader/processing/esofile_time.py
|
DesignBuilderSoftware/db-esofile-reader
|
a5a80a8069e7eeb30af8ceeca28eb33e9e4f7a50
|
[
"MIT"
] | 1
|
2022-03-02T08:30:20.000Z
|
2022-03-08T07:57:57.000Z
|
db_eplusout_reader/processing/esofile_time.py
|
DesignBuilderSoftware/db-esofile-reader
|
a5a80a8069e7eeb30af8ceeca28eb33e9e4f7a50
|
[
"MIT"
] | null | null | null |
import calendar
import logging
from collections import namedtuple
from datetime import datetime, timedelta
from db_eplusout_reader.constants import RP, TS, A, D, H, M
from db_eplusout_reader.exceptions import LeapYearMismatch, StartDayMismatch
EsoTimestamp = namedtuple("EsoTimestamp", "month day hour end_minute")
def parse_eso_timestamp(year, month, day, hour, end_minute):
"""
Convert E+ time format to format acceptable by datetime module.
EnergyPlus date and time format is not compatible with
datetime.datetime module. This because hourly information
can be '24' and end minute can be '60' - which is not
allowed.
To get around the issue, logic is in place to
convert raw input into format as required for datetime
(or datetime like) module.
"""
if hour == 24 and end_minute == 60:
shifted_datetime = datetime(year, month, day, hour - 1)
corrected_datetime = shifted_datetime + timedelta(hours=1)
elif end_minute == 60:
# Convert last timestep of an hour
corrected_datetime = datetime(year, month, day, hour, 0)
elif hour == 0:
corrected_datetime = datetime(year, month, day, hour, end_minute)
else:
corrected_datetime = datetime(year, month, day, hour - 1, end_minute)
return corrected_datetime
def get_month_n_days_from_cumulative(monthly_cumulative_days):
"""
Transform consecutive number of days in monthly data to actual number of days.
EnergyPlus monthly results report a total consecutive number of days for each day.
Raw data reports table as 31, 59..., this function calculates and returns
actual number of days for each month 31, 28...
"""
old_num = monthly_cumulative_days.pop(0)
m_actual_days = [old_num]
for num in monthly_cumulative_days:
new_num = num - old_num
m_actual_days.append(new_num)
old_num += new_num
return m_actual_days
def find_num_of_days_annual(ann_num_of_days, rp_num_of_days):
"""Use runperiod data to calculate number of days for each annual period."""
days = rp_num_of_days[0] // len(ann_num_of_days)
return [days for _ in ann_num_of_days]
def get_num_of_days(cumulative_days):
"""Split num of days and date."""
num_of_days = {}
for table, values in cumulative_days.items():
if table == M:
# calculate actual number of days for monthly table
num_of_days[M] = get_month_n_days_from_cumulative(values)
else:
num_of_days[table] = values
# calculate number of days for annual table for
# an incomplete year run or multi year analysis
if A in cumulative_days.keys() and RP in cumulative_days.keys():
num_of_days[A] = find_num_of_days_annual(num_of_days[A], num_of_days[RP])
return num_of_days
def check_year_increment(first_step_data, current_step_data):
"""Check if year value should be incremented inside environment table."""
if first_step_data is current_step_data:
# do not increment first step
return False
return first_step_data >= current_step_data
def generate_datetime_dates(raw_dates, year):
"""Generate datetime index for a given period."""
dates = []
for i in range(0, len(raw_dates)):
# based on the first, current and previous
# steps decide if the year should be incremented
if check_year_increment(raw_dates[0], raw_dates[i]):
year += 1
# year can be incremented automatically when converting to datetime
date = parse_eso_timestamp(year, *raw_dates[i])
dates.append(date)
return dates
def update_start_dates(dates):
"""Set accurate first date for monthly+ tables."""
def set_start_date(orig, refs):
for ref in refs.values():
orig[0] = ref[0].replace(hour=0, minute=0)
return orig
timestep_to_monthly_dates = {k: dates[k] for k in dates if k in [TS, H, D, M]}
if timestep_to_monthly_dates:
for frequency in (M, A, RP):
if frequency in dates:
dates[frequency] = set_start_date(
dates[frequency], timestep_to_monthly_dates
)
return dates
def get_n_days_from_cumulative(cumulative_days):
"""Convert cumulative days to number of days pers step."""
if cumulative_days:
# Separate number of days data if any M to RP table is available
num_of_days = get_num_of_days(cumulative_days)
else:
num_of_days = None
return num_of_days
def validate_year(year, is_leap, date, day):
"""Check if date for given and day corresponds to specified year."""
if calendar.isleap(year) is is_leap:
test_datetime = datetime(year, date.month, date.day)
test_day = test_datetime.strftime("%A")
if day != test_day and day not in (
"SummerDesignDay",
"WinterDesignDay",
):
max_year = datetime.now().year + 10 # give some choices from future
suitable_years = get_allowed_years(
is_leap, date, day, max_year, n_samples=3
)
formatted_day = test_datetime.strftime("%Y-%m-%d")
raise StartDayMismatch(
"Start day '{}' for given day '{}'"
" does not correspond to real calendar day '{}'!"
"\nEither set 'year' kwarg as 'None' to identify year automatically"
" or use one of '{}'.".format(
day, formatted_day, test_day, suitable_years
)
)
else:
raise LeapYearMismatch(
"Specified year '{0}' does not match expected calendar data!"
" Outputs are reported for {1} year"
" but given year '{0}' is {2}."
" Either set 'year' kwarg as 'None' to seek year automatically"
" or use {1} year.".format(
year,
"leap" if is_leap else "standard",
"standard" if is_leap else "leap",
)
)
def is_leap_year_ts_to_d(raw_dates_arr):
"""Check if first year is leap based on timestep, hourly or daily data."""
for tup in raw_dates_arr:
if (tup.month, tup.day) == (2, 29):
return True
if check_year_increment(raw_dates_arr[0], tup):
# stop once first year is covered
return False
return False
def seek_year(is_leap, date, day, max_year):
"""Find first year matching given criteria."""
for year in range(max_year, 0, -1):
if day in ("SummerDesignDay", "WinterDesignDay"):
logging.info("Sizing simulation, setting year to 2002.")
year = 2002
break
if calendar.isleap(year) is is_leap:
test_datetime = datetime(year, date.month, date.day)
test_start_day = test_datetime.strftime("%A")
if day == test_start_day:
break
else:
raise ValueError(
"Failed to automatically find year for following arguments"
" is_leap='{}', date='{}' and day='{}'."
" It seems that there ins't a year between 0 - {} matching"
" date and day of week combination.".format(is_leap, date, day, max_year)
)
return year
def get_allowed_years(
is_leap,
first_date,
first_day,
max_year,
n_samples=4,
):
"""Get a sample of allowed years for given conditions."""
allowed_years = []
for _ in range(n_samples):
year = seek_year(is_leap, first_date, first_day, max_year)
max_year = year - 1
allowed_years.append(year)
return allowed_years
def get_lowest_frequency(all_frequencies):
"""Find the shortest frequency from given ones."""
return next((freq for freq in (TS, H, D, M, A, RP) if freq in all_frequencies))
def convert_raw_dates(raw_dates, year):
"""Transform raw E+ date and time data into datetime.datetime objects."""
dates = {}
for frequency, value in raw_dates.items():
dates[frequency] = generate_datetime_dates(value, year)
return dates
def convert_raw_date_data(
raw_dates, #: Dict[str, List[EsoTimestamp]],
days_of_week, #: Dict[str, List[str]],
year, #: Optional[int],
): # -> Dict[str, List[datetime]]:
"""Convert EnergyPlus dates into standard datetime format."""
lowest_frequency = get_lowest_frequency(list(raw_dates.keys()))
if lowest_frequency in {TS, H, D}:
lowest_frequency_values = raw_dates[lowest_frequency]
is_leap = is_leap_year_ts_to_d(lowest_frequency_values)
first_date = lowest_frequency_values[0]
first_day = days_of_week[lowest_frequency][0]
if year is None:
year = seek_year(is_leap, first_date, first_day, 2020)
else:
validate_year(year, is_leap, first_date, first_day)
else:
# allow any year defined or set EnergyPlus default 2002
year = year if year else 2002
dates = convert_raw_dates(raw_dates, year)
return update_start_dates(dates)
| 36.688259
| 86
| 0.647318
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,924
| 0.322666
|
499ce59557a4ca3973fb3d83ed14750b0515612a
| 772
|
py
|
Python
|
setup.py
|
EliRibble/parentopticon
|
8593d7f72fac9706f1bd8e8326ac932f5af95a32
|
[
"MIT"
] | null | null | null |
setup.py
|
EliRibble/parentopticon
|
8593d7f72fac9706f1bd8e8326ac932f5af95a32
|
[
"MIT"
] | null | null | null |
setup.py
|
EliRibble/parentopticon
|
8593d7f72fac9706f1bd8e8326ac932f5af95a32
|
[
"MIT"
] | null | null | null |
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="parentopticon",
version="0.0.1",
author="Eli Ribble",
author_email="junk@theribbles.org",
description="A system for controlling kids access to computers.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/eliribble/parentopticon",
packages=setuptools.find_packages(),
install_requires = [
"arrow==0.15.5",
"chryso==2.1",
"flask==1.1.2",
"flask-login==0.5.0",
"Jinja2==2.11.1",
"psutil==5.6.6",
"requests==2.23.0",
"toml==0.10.0",
],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
| 24.125
| 66
| 0.682642
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 418
| 0.541451
|
499cfa9ec9626bc8ee462071e912f59d22f18419
| 11,701
|
py
|
Python
|
src/race/src/my_lane_detection/slidewindow_ver2.py
|
young43/ISCC_2020
|
2a7187410bceca901bd87b753a91fd35b73ca036
|
[
"MIT"
] | 3
|
2020-11-13T04:59:27.000Z
|
2021-04-02T06:36:03.000Z
|
src/race/src/my_lane_detection/slidewindow_ver2.py
|
yongbeomkwak/ISCC_2021
|
7e7e5a8a14b9ed88e1cfbe2ee585fe24e4701015
|
[
"MIT"
] | null | null | null |
src/race/src/my_lane_detection/slidewindow_ver2.py
|
yongbeomkwak/ISCC_2021
|
7e7e5a8a14b9ed88e1cfbe2ee585fe24e4701015
|
[
"MIT"
] | 5
|
2020-09-13T09:06:16.000Z
|
2021-06-19T02:31:23.000Z
|
import cv2
import numpy as np
import matplotlib.pyplot as plt
from findpoint import FindPoint
class LineDetector:
def __init__(self,img):
self.frame = None
self.leftx = None
self.rightx = None
# self.output = None
self.frame = 0
self.frame_list = []
self.findpoint = FindPoint(img)
def sliding_window(self,x_start_L,x_start_R,img):
x_location = None
out_img = np.dstack((img,img,img))
height = img.shape[0]
width = img.shape[1]
window_height = 5
nwindows = 30
nonzero = img.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
point_list_left = list()
point_list_right = list()
margin = 20
minpix = 10
left_lane_inds = []
right_lane_inds = []
good_left_inds = \
((nonzerox >= x_start_L-20) & (nonzeroy >= 300)& (nonzeroy <= 400) & (nonzerox <= x_start_L+20)).nonzero()[
0]
good_right_inds = ((nonzerox >= x_start_R-40) & (nonzeroy >= 300)& (nonzeroy <= 400) & (
nonzerox <= x_start_R+20)).nonzero()[0]
line_exist_flag = None
y_current = None
x_current = None
good_center_inds = None
p_cut = None
# check the minpix before left start line
# if minpix is enough on left, draw left, then draw right depends on left
# else draw right, then draw left depends on right
# lx_current = 120
# ly_current = 350
# rx_current = 550
# ry_current = 350
if len(good_left_inds) > minpix and len(good_right_inds) > minpix:
line_flag = 3
lx_current = np.int(np.mean(nonzerox[good_left_inds]))
ly_current = np.int(np.mean(nonzeroy[good_left_inds]))
rx_current = np.int(np.mean(nonzerox[good_right_inds]))
ry_current = np.int(np.mean(nonzeroy[good_right_inds]))
elif len(good_left_inds) > minpix:
line_flag = 1
lx_current = np.int(np.mean(nonzerox[good_left_inds]))
ly_current = np.int(np.mean(nonzeroy[good_left_inds]))
rx_current = None
ry_current = None
max_y = y_current
elif len(good_right_inds) > minpix:
line_flag = 2
rx_current = nonzerox[good_right_inds[np.argmax(nonzeroy[good_right_inds])]]
ry_current = np.int(np.max(nonzeroy[good_right_inds]))
lx_current = None
ly_current = None
else:
line_flag = 4
# rx_current
# ry_current
# if line_flag ==3:
# for i in range(len(good_left_inds)):
# cv2.circle(out_img, (nonzerox[good_left_inds[i]], nonzeroy[good_left_inds[i]]), 1, (0, 255, 0), -1)
# for i in range(len(good_right_inds)):
# cv2.circle(out_img, (nonzerox[good_right_inds[i]], nonzeroy[good_right_inds[i]]), 1, (255,0, 0), -1)
# for window in range(0, nwindows):
# print('x',x_location)
if line_flag != 4:
# it's just for visualization of the valid inds in the region
for i in range(len(good_left_inds)):
cv2.circle(out_img, (nonzerox[good_left_inds[i]], nonzeroy[good_left_inds[i]]), 1, (0, 255, 0), -1)
for i in range(len(good_right_inds)):
cv2.circle(out_img, (nonzerox[good_right_inds[i]], nonzeroy[good_right_inds[i]]), 1, (255,0, 0), -1)
# window sliding and draw
# print(lx_current)
# print(rx_current)
for window in range(0, nwindows):
# if lx_current and rx_current:
# # print(line_flag)
# cv2.circle(out_img,(lx_current,ly_current-window*window_height-3),3,(0,0,255),-1)
# cv2.circle(out_img,(rx_current,ry_current-window*window_height-3),3,(0,0,255),-1)
# mean_x = (lx_current + rx_current)/2
# cv2.circle(out_img,(mean_x,ry_current-window*window_height-3),3,(0,255,255),-1)
# point_list_left.append((lx_current, ly_current-window*window_height-3))
# point_list_right.append((rx_current,ry_current-window*window_height-3))
if lx_current and rx_current:
cv2.circle(out_img,(lx_current,ly_current-window*window_height-3),3,(0,0,255),-1)
cv2.circle(out_img,(rx_current,ry_current-window*window_height-3),3,(0,0,255),-1)
mean_x = (lx_current + rx_current)/2
cv2.circle(out_img,(mean_x,ry_current-window*window_height-3),3,(0,255,255),-1)
point_list_left.append((lx_current, ly_current-window*window_height-3))
point_list_right.append((rx_current,ry_current-window*window_height-3))
elif lx_current:
cv2.circle(out_img,(lx_current,ly_current-window*window_height-3),3,(0,0,255),-1)
mean_x = (lx_current + width/2)
cv2.circle(out_img,(mean_x,ly_current-window*window_height-3),3,(0,255,255),-1)
point_list_left.append((lx_current, ly_current-window*window_height-3))
elif rx_current:
# cv2.circle(out_img,(lx_current,ly_current-window*window_height-3),3,(0,0,255),-1)
cv2.circle(out_img,(rx_current,ry_current-window*window_height-3),3,(0,0,255),-1)
mean_x = (rx_current-width/2)/2
cv2.circle(out_img,(mean_x,ry_current-window*window_height-3),3,(0,255,255),-1)
# point_list_left.append((lx_current, ly_current-window*window_height-3))
point_list_right.append((rx_current,ry_current-window*window_height-3))
if line_flag == 3:
l_win_y_low = ly_current - (window + 1) * window_height
l_win_y_high = ly_current - (window) * window_height
l_win_x_low = lx_current - margin
l_win_x_high = lx_current + margin
r_win_y_low = ry_current - (window + 1) * window_height
r_win_y_high = ry_current - (window) * window_height
r_win_x_low = rx_current - margin
r_win_x_high = rx_current + margin
# draw rectangle
# 0.33 is for width of the road
cv2.rectangle(out_img, (l_win_x_low, l_win_y_low), (l_win_x_high, l_win_y_high), (0, 255, 0), 1)
cv2.rectangle(out_img, (r_win_x_low, r_win_y_low), (r_win_x_high, r_win_y_high), (255,0, 0), 1)
good_left_inds = ((nonzeroy >= l_win_y_low) & (nonzeroy < l_win_y_high) & (nonzerox >= l_win_x_low) & (
nonzerox < l_win_x_high)).nonzero()[0]
good_right_inds = ((nonzeroy >= r_win_y_low) & (nonzeroy < r_win_y_high) & (nonzerox >= r_win_x_low) & (
nonzerox < r_win_x_high)).nonzero()[0]
# check num of indicies in square and put next location to current
if len(good_left_inds) > minpix:
lx_current = np.int(np.mean(nonzerox[good_left_inds]))
if len(good_right_inds) > minpix:
rx_current = np.int(np.mean(nonzerox[good_right_inds]))
# 338~344 is for recognize line which is yellow line in processed image(you can check in imshow)
# if (l_win_y_low >= 338 and l_win_y_low < 344) and (r_win_y_low >= 338 and r_win_y_low < 344):
# # 0.165 is the half of the road(0.33)
x_location = rx_current - lx_current + 75
elif line_flag == 1:
# rectangle x,y range init
win_y_low = ly_current - (window + 1) * window_height
win_y_high = ly_current - (window) * window_height
win_x_low = lx_current - margin
win_x_high = lx_current + margin
# draw rectangle
# 0.33 is for width of the road
cv2.rectangle(out_img, (win_x_low, win_y_low), (win_x_high, win_y_high), (0, 255, 0), 1)
# cv2.rectangle(out_img, (win_x_low + int(width * 0.33), win_y_low),
# (win_x_high + int(width * 0.33), win_y_high), (255, 0, 0), 1)
# indicies of dots in nonzerox in one square
good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_x_low) & (
nonzerox < win_x_high)).nonzero()[0]
# check num of indicies in square and put next location to current
if len(good_left_inds) > minpix:
# x_current = np.int(np.mean(nonzerox[good_left_inds]))
lx_current = np.int(np.mean(nonzerox[good_left_inds]))
# elif nonzeroy[left_lane_inds] != [] and nonzerox[left_lane_inds] != []:
# p_left = np.polyfit(nonzeroy[left_lane_inds], nonzerox[left_lane_inds], 2)
# x_current = np.int(np.polyval(p_left, win_y_high))
# # 338~344 is for recognize line which is yellow line in processed image(you can check in imshow)
# if win_y_low >= 338 and win_y_low < 344:
# # 0.165 is the half of the road(0.33)
# x_location = x_current + 180
elif line_flag ==2:
win_y_low = ry_current - (window + 1) * window_height
win_y_high = ry_current - (window) * window_height
win_x_low = rx_current - margin
win_x_high = rx_current + margin
# cv2.rectangle(out_img, (win_x_low , win_y_low),
# (win_x_high, win_y_high), (0, 255, 0), 1)
cv2.rectangle(out_img, (win_x_low, win_y_low), (win_x_high, win_y_high), (255, 0, 0), 1)
good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_x_low) & (
nonzerox < win_x_high)).nonzero()[0]
if len(good_right_inds) > minpix:
# x_current = np.int(np.mean(nonzerox[good_right_inds]))
rx_current = np.int(np.mean(nonzerox[good_right_inds]))
# elif nonzeroy[right_lane_inds] != [] and nonzerox[right_lane_inds] != []:
# p_right = np.polyfit(nonzeroy[right_lane_inds], nonzerox[right_lane_inds], 2)
# x_current = np.int(np.polyval(p_right, win_y_high))
# if win_y_low >= 338 and win_y_low < 344:
# # 0.165 is the half of the road(0.33)
# x_location = x_current - 250
# left_lane_inds.extend(good_left_inds)
# right_lane_inds.extend(good_right_inds)
# left_lane_inds = np.concatenate(left_lane_inds)
# right_lane_inds = np.concatenate(right_lane_inds)
# else:
return out_img, x_location, point_list_left, point_list_right
def main(self,img):
x_start_l,x_start_r = self.findpoint.findpoint(img)
output , x_location, point_list_left, point_list_right = self.sliding_window(x_start_l,x_start_r,img)
return output, x_location, point_list_left, point_list_right
| 52.470852
| 124
| 0.557217
| 11,604
| 0.99171
| 0
| 0
| 0
| 0
| 0
| 0
| 3,275
| 0.279891
|
499d165572daf46e08305c7a946da82bbf43582f
| 767
|
py
|
Python
|
broadcasts/managers.py
|
foolwealth/django-site-broadcasts
|
f870fbf96cde7ea29fc8179e71ab738d2192628f
|
[
"MIT"
] | 5
|
2016-08-08T07:31:53.000Z
|
2020-01-21T00:10:22.000Z
|
broadcasts/managers.py
|
foolwealth/django-site-broadcasts
|
f870fbf96cde7ea29fc8179e71ab738d2192628f
|
[
"MIT"
] | 2
|
2015-05-22T00:47:14.000Z
|
2018-08-15T19:07:21.000Z
|
broadcasts/managers.py
|
bennylope/django-site-broadcasts
|
0c7556462e7aa09a48ccce4ca8d0b4827a2ce190
|
[
"MIT"
] | 2
|
2015-05-21T23:23:16.000Z
|
2018-08-15T17:03:51.000Z
|
from django.db import models
from django.db.models import Q
from django.utils import timezone
class BroadcastManager(models.Manager):
"""
Manager class to show only active broadcast messages
"""
def active(self):
"""Return only active messages"""
return super(BroadcastManager, self).filter(is_published=True)
def current(self):
"""Return only current and active messages"""
return self.active().filter(end_time__gte=timezone.now()).filter(
Q(Q(start_time__lte=timezone.now()) | Q(start_time=None)))
def latest(self):
"""Return the broadcast message to display"""
try:
return self.current().order_by("end_time")[0]
except IndexError:
return None
| 29.5
| 73
| 0.65189
| 670
| 0.873533
| 0
| 0
| 0
| 0
| 0
| 0
| 201
| 0.26206
|
499e17c024651f588861f4597a8d8cf5d56a914e
| 11,114
|
py
|
Python
|
google/cloud/gkehub_v1/types/membership.py
|
googleapis/python-gke-hub
|
9f620c83af1da8f27fc6933716142164d26647f2
|
[
"Apache-2.0"
] | 3
|
2021-06-04T06:10:44.000Z
|
2021-12-30T02:19:30.000Z
|
google/cloud/gkehub_v1/types/membership.py
|
renovate-bot/python-gke-hub
|
9f620c83af1da8f27fc6933716142164d26647f2
|
[
"Apache-2.0"
] | 43
|
2021-03-16T14:10:35.000Z
|
2022-03-07T16:07:33.000Z
|
google/cloud/gkehub_v1/types/membership.py
|
renovate-bot/python-gke-hub
|
9f620c83af1da8f27fc6933716142164d26647f2
|
[
"Apache-2.0"
] | 3
|
2021-03-15T20:46:05.000Z
|
2022-01-29T08:11:13.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
__protobuf__ = proto.module(
package="google.cloud.gkehub.v1",
manifest={
"Membership",
"MembershipEndpoint",
"GkeCluster",
"KubernetesMetadata",
"MembershipState",
"Authority",
},
)
class Membership(proto.Message):
r"""Membership contains information about a member cluster.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
endpoint (google.cloud.gkehub_v1.types.MembershipEndpoint):
Optional. Endpoint information to reach this
member.
This field is a member of `oneof`_ ``type``.
name (str):
Output only. The full, unique name of this Membership
resource in the format
``projects/*/locations/*/memberships/{membership_id}``, set
during creation.
``membership_id`` must be a valid RFC 1123 compliant DNS
label:
1. At most 63 characters in length
2. It must consist of lower case alphanumeric characters or
``-``
3. It must start and end with an alphanumeric character
Which can be expressed as the regex:
``[a-z0-9]([-a-z0-9]*[a-z0-9])?``, with a maximum length of
63 characters.
labels (Sequence[google.cloud.gkehub_v1.types.Membership.LabelsEntry]):
Optional. GCP labels for this membership.
description (str):
Output only. Description of this membership, limited to 63
characters. Must match the regex:
``[a-zA-Z0-9][a-zA-Z0-9_\-\.\ ]*``
This field is present for legacy purposes.
state (google.cloud.gkehub_v1.types.MembershipState):
Output only. State of the Membership
resource.
create_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. When the Membership was created.
update_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. When the Membership was last
updated.
delete_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. When the Membership was deleted.
external_id (str):
Optional. An externally-generated and managed ID for this
Membership. This ID may be modified after creation, but this
is not recommended.
The ID must match the regex:
``[a-zA-Z0-9][a-zA-Z0-9_\-\.]*``
If this Membership represents a Kubernetes cluster, this
value should be set to the UID of the ``kube-system``
namespace object.
last_connection_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. For clusters using Connect, the
timestamp of the most recent connection
established with Google Cloud. This time is
updated every several minutes, not continuously.
For clusters that do not use GKE Connect, or
that have never connected successfully, this
field will be unset.
unique_id (str):
Output only. Google-generated UUID for this resource. This
is unique across all Membership resources. If a Membership
resource is deleted and another resource with the same name
is created, it gets a different unique_id.
authority (google.cloud.gkehub_v1.types.Authority):
Optional. How to identify workloads from this
Membership. See the documentation on Workload
Identity for more details:
https://cloud.google.com/kubernetes-
engine/docs/how-to/workload-identity
"""
endpoint = proto.Field(
proto.MESSAGE, number=4, oneof="type", message="MembershipEndpoint",
)
name = proto.Field(proto.STRING, number=1,)
labels = proto.MapField(proto.STRING, proto.STRING, number=2,)
description = proto.Field(proto.STRING, number=3,)
state = proto.Field(proto.MESSAGE, number=5, message="MembershipState",)
create_time = proto.Field(proto.MESSAGE, number=6, message=timestamp_pb2.Timestamp,)
update_time = proto.Field(proto.MESSAGE, number=7, message=timestamp_pb2.Timestamp,)
delete_time = proto.Field(proto.MESSAGE, number=8, message=timestamp_pb2.Timestamp,)
external_id = proto.Field(proto.STRING, number=9,)
last_connection_time = proto.Field(
proto.MESSAGE, number=10, message=timestamp_pb2.Timestamp,
)
unique_id = proto.Field(proto.STRING, number=11,)
authority = proto.Field(proto.MESSAGE, number=12, message="Authority",)
class MembershipEndpoint(proto.Message):
r"""MembershipEndpoint contains information needed to contact a
Kubernetes API, endpoint and any additional Kubernetes metadata.
Attributes:
gke_cluster (google.cloud.gkehub_v1.types.GkeCluster):
Optional. GKE-specific information. Only
present if this Membership is a GKE cluster.
kubernetes_metadata (google.cloud.gkehub_v1.types.KubernetesMetadata):
Output only. Useful Kubernetes-specific
metadata.
"""
gke_cluster = proto.Field(proto.MESSAGE, number=1, message="GkeCluster",)
kubernetes_metadata = proto.Field(
proto.MESSAGE, number=2, message="KubernetesMetadata",
)
class GkeCluster(proto.Message):
r"""GkeCluster contains information specific to GKE clusters.
Attributes:
resource_link (str):
Immutable. Self-link of the GCP resource for
the GKE cluster. For example:
//container.googleapis.com/projects/my-
project/locations/us-west1-a/clusters/my-cluster
Zonal clusters are also supported.
"""
resource_link = proto.Field(proto.STRING, number=1,)
class KubernetesMetadata(proto.Message):
r"""KubernetesMetadata provides informational metadata for
Memberships representing Kubernetes clusters.
Attributes:
kubernetes_api_server_version (str):
Output only. Kubernetes API server version string as
reported by ``/version``.
node_provider_id (str):
Output only. Node providerID as reported by the first node
in the list of nodes on the Kubernetes endpoint. On
Kubernetes platforms that support zero-node clusters (like
GKE-on-GCP), the node_count will be zero and the
node_provider_id will be empty.
node_count (int):
Output only. Node count as reported by
Kubernetes nodes resources.
vcpu_count (int):
Output only. vCPU count as reported by
Kubernetes nodes resources.
memory_mb (int):
Output only. The total memory capacity as
reported by the sum of all Kubernetes nodes
resources, defined in MB.
update_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. The time at which these details were last
updated. This update_time is different from the
Membership-level update_time since EndpointDetails are
updated internally for API consumers.
"""
kubernetes_api_server_version = proto.Field(proto.STRING, number=1,)
node_provider_id = proto.Field(proto.STRING, number=2,)
node_count = proto.Field(proto.INT32, number=3,)
vcpu_count = proto.Field(proto.INT32, number=4,)
memory_mb = proto.Field(proto.INT32, number=5,)
update_time = proto.Field(
proto.MESSAGE, number=100, message=timestamp_pb2.Timestamp,
)
class MembershipState(proto.Message):
r"""MembershipState describes the state of a Membership resource.
Attributes:
code (google.cloud.gkehub_v1.types.MembershipState.Code):
Output only. The current state of the
Membership resource.
"""
class Code(proto.Enum):
r"""Code describes the state of a Membership resource."""
CODE_UNSPECIFIED = 0
CREATING = 1
READY = 2
DELETING = 3
UPDATING = 4
SERVICE_UPDATING = 5
code = proto.Field(proto.ENUM, number=1, enum=Code,)
class Authority(proto.Message):
r"""Authority encodes how Google will recognize identities from
this Membership. See the workload identity documentation for
more details: https://cloud.google.com/kubernetes-
engine/docs/how-to/workload-identity
Attributes:
issuer (str):
Optional. A JSON Web Token (JWT) issuer URI. ``issuer`` must
start with ``https://`` and be a valid URL with length <2000
characters.
If set, then Google will allow valid OIDC tokens from this
issuer to authenticate within the workload_identity_pool.
OIDC discovery will be performed on this URI to validate
tokens from the issuer.
Clearing ``issuer`` disables Workload Identity. ``issuer``
cannot be directly modified; it must be cleared (and
Workload Identity disabled) before using a new issuer (and
re-enabling Workload Identity).
workload_identity_pool (str):
Output only. The name of the workload identity pool in which
``issuer`` will be recognized.
There is a single Workload Identity Pool per Hub that is
shared between all Memberships that belong to that Hub. For
a Hub hosted in {PROJECT_ID}, the workload pool format is
``{PROJECT_ID}.hub.id.goog``, although this is subject to
change in newer versions of this API.
identity_provider (str):
Output only. An identity provider that reflects the
``issuer`` in the workload identity pool.
oidc_jwks (bytes):
Optional. OIDC verification keys for this Membership in JWKS
format (RFC 7517).
When this field is set, OIDC discovery will NOT be performed
on ``issuer``, and instead OIDC tokens will be validated
using this field.
"""
issuer = proto.Field(proto.STRING, number=1,)
workload_identity_pool = proto.Field(proto.STRING, number=2,)
identity_provider = proto.Field(proto.STRING, number=3,)
oidc_jwks = proto.Field(proto.BYTES, number=4,)
__all__ = tuple(sorted(__protobuf__.manifest))
| 40.414545
| 110
| 0.65557
| 10,114
| 0.910023
| 0
| 0
| 0
| 0
| 0
| 0
| 8,656
| 0.778838
|
499e67a21d0dc3cde30c8234f79e3aae5c8b02f5
| 1,728
|
py
|
Python
|
tests/test_tasks.py
|
alarig/peeringdb-py
|
917cda69f7bc05be008faa66875827d408328609
|
[
"Apache-2.0"
] | 71
|
2015-11-10T04:55:54.000Z
|
2022-02-25T20:03:48.000Z
|
tests/test_tasks.py
|
alarig/peeringdb-py
|
917cda69f7bc05be008faa66875827d408328609
|
[
"Apache-2.0"
] | 53
|
2016-01-29T12:15:38.000Z
|
2022-03-04T07:03:41.000Z
|
tests/test_tasks.py
|
alarig/peeringdb-py
|
917cda69f7bc05be008faa66875827d408328609
|
[
"Apache-2.0"
] | 28
|
2016-02-03T07:59:34.000Z
|
2022-02-27T19:25:06.000Z
|
# Units tests to directly cover both task wrapper modules -
# not possible with pytest parametrization
import pytest
import sys
from collections import defaultdict
from peeringdb import _tasks_sequential
TASKS_MODS = [_tasks_sequential]
# pre-async compat. import
if sys.version_info >= (3, 5):
from peeringdb import _tasks_async
TASKS_MODS.append(_tasks_async)
# dummy resources for task objects
class ResOne:
tag = "one"
class ResTwo:
tag = "two"
DATA_EXPECTED = {ResOne: [1, 2], ResTwo: [1, 2]}
# dummy context classes parameterized on tasks module
def make_context(tasks):
class Context:
def __init__(self):
self.jobs = defaultdict(dict)
self.db = {}
@tasks.run_task
def do_sync(self, res):
return tasks.gather(self.schedule(res))
def schedule(self, res):
return [self.set_job(res, k) for k in DATA_EXPECTED[res]]
def set_job(self, res, k):
job = self.jobs[res].get(k)
if not job:
job = tasks.UpdateTask(self._sync_impl(res, k), (res, k))
self.jobs[res][k] = job
return job
@tasks.wrap_generator
def _sync_impl(self, res, k):
d = self.db.setdefault(res, [])
# pretend ResOne has dependency on a ResTwo
if res is ResOne:
yield self.set_job(ResTwo, k)
d.append(k)
return Context
@pytest.mark.parametrize("tasks_mod", TASKS_MODS)
def test_basic(tasks_mod):
# generate class
Context = make_context(tasks_mod)
# do a dummy sync
ctx = Context()
for res in DATA_EXPECTED:
ctx.do_sync(res)
assert ctx.db == DATA_EXPECTED
| 24.338028
| 73
| 0.622106
| 886
| 0.512731
| 877
| 0.507523
| 627
| 0.362847
| 0
| 0
| 311
| 0.179977
|
499e8f87034a01b4664449514e2ad3632e9bb2a1
| 1,074
|
py
|
Python
|
dp/kadane.py
|
williamsmj/prakhar1989-algorithms
|
82e64ce9d451b33c1bce64a63276d6341a1f13b0
|
[
"WTFPL"
] | 2,797
|
2015-01-01T15:52:13.000Z
|
2022-03-28T20:52:37.000Z
|
dp/kadane.py
|
williamsmj/prakhar1989-algorithms
|
82e64ce9d451b33c1bce64a63276d6341a1f13b0
|
[
"WTFPL"
] | 35
|
2015-01-07T03:11:18.000Z
|
2021-06-27T09:09:55.000Z
|
dp/kadane.py
|
williamsmj/prakhar1989-algorithms
|
82e64ce9d451b33c1bce64a63276d6341a1f13b0
|
[
"WTFPL"
] | 887
|
2015-01-02T06:38:19.000Z
|
2022-03-26T20:33:11.000Z
|
"""
Problem: The maximum subarray problem is the task of finding the
contiguous subarray within a one-dimensional array of numbers
(containing at least one positive number) which has the largest sum.
Solution:
The recurrence relation that we solve at each step is the following -
Let S[i] = be the max value contigous subsequence till the ith element
of the array.
Then S[i] = max(A[i], A[i] + S[i - 1])
At each step, we have two options
1) We add the ith element to the sum till the i-1th elem
2) We start a new array starting at i
We take a max of both these options and accordingly build up the array.
"""
def max_value_contigous_subsequence(arr):
A = [arr[0]] + [0] * (len(arr) - 1)
max_to_here = arr[0]
for i in range(1, len(arr)):
A[i] = max(arr[i], arr[i] + A[i-1])
max_to_here = max(max_to_here, A[i])
return max_to_here
if __name__ == "__main__":
x = [-2, -3, 4, -1, -2, 1, 5, -3]
y = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
z = [-1, 3, -5, 4, 6, -1, 2, -7, 13, -3]
print map(max_value_contigous_subsequence, [x, y, z])
| 33.5625
| 71
| 0.645251
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 622
| 0.579143
|
499ea6990d99f7681e517c981073364d93c42de3
| 3,215
|
py
|
Python
|
online_recommend/full_main.py
|
hfhfn/db_recommend
|
3a9f03157bb81e295f8cff30fbc7ad2a8cfdf963
|
[
"MIT"
] | null | null | null |
online_recommend/full_main.py
|
hfhfn/db_recommend
|
3a9f03157bb81e295f8cff30fbc7ad2a8cfdf963
|
[
"MIT"
] | null | null | null |
online_recommend/full_main.py
|
hfhfn/db_recommend
|
3a9f03157bb81e295f8cff30fbc7ad2a8cfdf963
|
[
"MIT"
] | null | null | null |
from user_portrait import SaveUserProfile
from action_profile_recall import save_inverted_table, SaveUserRecall
from movie_recall import SaveMovieRecall
from movie_portrait import save_topic_weights_normal, save_predata, save_textrank, save_cut_words, save_tfidf, \
save_topK_idf_textrank, save_topK_tfidf_textrank, save_keyword_weights, save_topic_words, save_movie_profile, \
save_topic_weights, get_cv_idf_model
from stat_factor import save_movie_hot_sort, save_movie_hot_factor, save_movie_time, save_movie_year_factor, \
save_movie_score_factor
from action_similar_recall import SaveUserSimilarRecall
from utils import user_recall_db
from content_recall import Update
def movie_protrait_run():
# save_predata()
save_cut_words()
get_cv_idf_model() # 保存cv和idf模型,基于全量数据,需定期更新
# save_textrank()
# save_tfidf()
# save_topK_idf_textrank()
# save_topK_tfidf_textrank()
# save_keyword_weights()
# save_topic_words()
# save_movie_profile()
# save_topic_weights()
# save_topic_weights_normal()
def filter_factor_run(cate_id):
save_movie_hot_sort()
save_movie_hot_factor()
save_movie_score_factor()
save_movie_time()
save_movie_year_factor(cate_id)
pass
def movie_recall_run(channel, cate_id):
mr = SaveMovieRecall(channel=channel, cate_id=cate_id)
# mr.save_movie_vector() # 有默认参数refit为True,默认重新训练模型,False为集群加载已训练好的模型
# mr.save_bkmeans_cluster() # 有默认参数refit为True, 默认重新训练模型,False为集群加载已训练好的模型
# mr.save_cos_similar(start_group=0) # 有默认参数start_group=0, 中间报错可以接着序号运行,修改start_group参数就好
# mr.save_movie_recall()
# mr.save_movie_filter_version_recall()
# mr.save_movie_filter_hot_score_year()
mr.save_movie_latest_recall()
def user_profile_run():
up = SaveUserProfile()
Update().update_user_history(update=False, cal_history=False)
up.save_action_weight()
# up.save_action_weight_normal()
# up.save_action_topic_weight() # 基于 topic_weights
# up.save_action_topic_sort()
# up.save_user_profile()
def user_profile_recall_run(cate_id):
save_inverted_table(cate_id) # 基于 topic_weights
ur = SaveUserRecall(cate_id)
# ur.save_pre_user_recall()
# ur.save_pre2_user_recall()
# ur.save_pre3_user_recall()
# ur.save_user_recall()
ur.save_user_tmp_recall_topK()
ur.save_user_filter_history_recall()
ur.save_user_filter_version_recall()
ur.save_user_recall_hot_score_year_factor()
ur.save_user_profile_latest_recall()
# # 单独生成cate_history, 用于导入mysql作为测试的历史行为
# ur.save_user_history_cate()
# # 以下两个方法基于 merge_action 一般只需要在更新用户历史数据时重新运行,统计行为数据
# ur.save_action_stat()
# ur.save_action_stat_cate()
def user_similar_recall_run(cate_id):
usr = SaveUserSimilarRecall(cate_id)
# usr.save_user_similar_recall()
usr.save_filter_same_recall()
usr.save_filter_history_recall()
usr.save_user_similar_latest_recall()
if __name__ == '__main__':
# 除了上面基于 merge_action 的 3个方法, 其他的方法遵循从上往下的继承链, 前面的改变需要重新运行后面的方法
# movie_protrait_run()
# filter_factor_run()
# movie_recall_run()
user_profile_run()
# user_profile_recall_run()
# user_similar_recall_run()
pass
| 33.489583
| 115
| 0.765163
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,629
| 0.451372
|
499ebc213eb730a6668f7fe2c42632f4551f69a9
| 1,962
|
py
|
Python
|
libcst/codemod/commands/strip_strings_from_types.py
|
rowillia/LibCST
|
621d9a949a57a9100b7f2d1465ebd32aaeddb05c
|
[
"Apache-2.0"
] | null | null | null |
libcst/codemod/commands/strip_strings_from_types.py
|
rowillia/LibCST
|
621d9a949a57a9100b7f2d1465ebd32aaeddb05c
|
[
"Apache-2.0"
] | null | null | null |
libcst/codemod/commands/strip_strings_from_types.py
|
rowillia/LibCST
|
621d9a949a57a9100b7f2d1465ebd32aaeddb05c
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
# pyre-strict
from typing import Union
import libcst
import libcst.matchers as m
from libcst import parse_expression
from libcst.codemod import VisitorBasedCodemodCommand
from libcst.codemod.visitors import AddImportsVisitor
from libcst.metadata import QualifiedNameProvider
class StripStringsCommand(VisitorBasedCodemodCommand):
DESCRIPTION: str = "Converts string type annotations to 3.7-compatible forward references."
METADATA_DEPENDENCIES = (QualifiedNameProvider,)
# We want to gate the SimpleString visitor below to only SimpleStrings inside
# an Annotation.
@m.call_if_inside(m.Annotation())
# We also want to gate the SimpleString visitor below to ensure that we don't
# erroneously strip strings from a Literal.
@m.call_if_not_inside(
m.Subscript(
# We could match on value=m.Name("Literal") here, but then we might miss
# instances where people are importing typing_extensions directly, or
# importing Literal as an alias.
value=m.MatchMetadataIfTrue(
QualifiedNameProvider,
lambda qualnames: any(
qualname.name == "typing_extensions.Literal"
for qualname in qualnames
),
)
)
)
def leave_SimpleString(
self, original_node: libcst.SimpleString, updated_node: libcst.SimpleString
) -> Union[libcst.SimpleString, libcst.BaseExpression]:
AddImportsVisitor.add_needed_import(self.context, "__future__", "annotations")
# Just use LibCST to evaluate the expression itself, and insert that as the
# annotation.
return parse_expression(
updated_node.evaluated_value, config=self.module.config_for_parsing
)
| 38.470588
| 95
| 0.700306
| 1,504
| 0.766565
| 0
| 0
| 1,190
| 0.606524
| 0
| 0
| 785
| 0.400102
|
49a041d58cf1e03640f9ec85a2adef02ee0d008f
| 1,309
|
py
|
Python
|
nasa_fevo/InMemoryCache.py
|
lradomski10m/nasa-fevo
|
92cc11097766e94346bc2b0b0819e9191f8b04bf
|
[
"MIT"
] | null | null | null |
nasa_fevo/InMemoryCache.py
|
lradomski10m/nasa-fevo
|
92cc11097766e94346bc2b0b0819e9191f8b04bf
|
[
"MIT"
] | null | null | null |
nasa_fevo/InMemoryCache.py
|
lradomski10m/nasa-fevo
|
92cc11097766e94346bc2b0b0819e9191f8b04bf
|
[
"MIT"
] | null | null | null |
from typing import Dict, Union
from nasa_fevo.Cache import Cache
from datetime import datetime
CACHE_EXPIRATION_TIMER_MINUTES = 10
# very simple in-memory cache
# meant for small # of items
class InMemoryCache(Cache):
def __init__(self):
self.store: Dict[str, object] = {}
def get(self, key: str) -> Union[object, None]:
val_exp = self.store.get(key, None)
if val_exp is None:
return None
else:
print(f"Cache hit: {key}")
return val_exp[0]
def put(self, key: str, value: object, expiration: datetime) -> None:
if value is None:
raise ValueError("Value mustn't be None")
self.store[key] = (value, expiration)
def purge_expired(self) -> None:
print("Trying to purge ...")
keys_to_delete = []
now = datetime.now()
for item in self.store.items():
key = item[0]
# value = item[1][0]
expiration = item[1][1]
if expiration < now:
# print(f"Purging cache: key={key}, now={now} > exp={expiration}")
keys_to_delete.append(key)
for key in keys_to_delete:
del self.store[key]
def clear(self, key: str):
if key in self.store:
del self.store[key]
| 29.088889
| 82
| 0.571429
| 1,116
| 0.852559
| 0
| 0
| 0
| 0
| 0
| 0
| 206
| 0.157372
|
49a08ee15b6bd0370e65813bd6b2e298574e430e
| 5,079
|
py
|
Python
|
get_embeddings.py
|
PauPerezT/WEBERT
|
e189f84de14de6d4bae785e48c8a36eb1afaa46f
|
[
"Apache-1.1"
] | 3
|
2020-07-28T10:00:44.000Z
|
2021-01-25T17:48:01.000Z
|
get_embeddings.py
|
PauPerezT/WEBERT
|
e189f84de14de6d4bae785e48c8a36eb1afaa46f
|
[
"Apache-1.1"
] | 3
|
2020-12-07T18:45:16.000Z
|
2020-12-07T18:45:27.000Z
|
get_embeddings.py
|
PauPerezT/WEBERT
|
e189f84de14de6d4bae785e48c8a36eb1afaa46f
|
[
"Apache-1.1"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 27 20:45:40 2020
@author: P.A. Perez-Toro
"""
#%%Libraries
import argparse
from utils import create_fold,str2bool
import csv
from tqdm import tqdm
import os
import gc
import numpy as np
import pandas as pd
from WEBERT import BERT, BETO, SciBERT
#%%
if __name__ == '__main__':
parser = argparse.ArgumentParser(add_help=True)
parser.add_argument('-f','--files_path', default='./texts/',help='File folder of the set of documents', action="store")
parser.add_argument('-sv','--save_path', default='./bert_embeddings/',help='Path to save the embeddings', action="store")
parser.add_argument('-bm','--bert_model', default='Bert',help='Choose between three different BERT models: Bert, Beto and SciBert. By default BERT', choices=('Bert','Beto', 'SciBert'))
parser.add_argument('-d','--dynamic', type=str2bool, nargs='?',const=False, default=True, help='Boolean value to get dynamic features= True. By default True.', choices=(True, False))
parser.add_argument('-st','--static', type=str2bool, nargs='?',const=True, default=False, help='Boolean value to get static features= True from the embeddings such as mean, standard deviation, kurtosis, skeweness, min and max. By default False.', choices=(True, False))
parser.add_argument('-l','--language', default='english',help='Chosen language (only available for BERT model). Here is available only english or spanish. By default english.', choices=('english', 'spanish'))
parser.add_argument('-sw','--stopwords', type=str2bool, nargs='?',const=True, default=False, help='Boolean value, set True if you want to remove stopwords, By default False.' , choices=(True, False))
parser.add_argument('-m','--model', default='base', help='Bert models, two options base and large. By default base.', choices=('base', 'large'))
parser.add_argument('-ca','--cased', type=str2bool, nargs='?',const=True, default=False, help='Boolean value for cased= True o lower-cased= False models. By defaul False.', choices=(True, False))
parser.add_argument('-cu','--cuda', type=str2bool, nargs='?', const=True, default=False, help='Boolean value for using cuda to compute the embeddings (True). By defaul False.', choices=(True, False))
#parser.print_help()
args = parser.parse_args()
files_path=args.files_path
save_path=args.save_path
bert_model=str(args.bert_model)
language=str(args.language)
stopwords=args.stopwords
model=str(args.model)
cased=args.cased
dynamic=args.dynamic
static=args.static
cuda=args.cuda
files=np.hstack(sorted([f for f in os.listdir(files_path) if f.endswith('.txt')]))
file_names=np.hstack([".".join(f.split(".")[:-1]) for f in files ])
folder_path_static=save_path+'/Static/'
folder_path=save_path+'/Dynamic/'
create_fold(folder_path)
create_fold(folder_path_static)
j=0
neurons=768
if (model=='large') & (bert_model!='SciBert'):
neurons=1024
if static:
labelstf=[]
labelstf.append('File')
for n in range (neurons):
labelstf.append('Avg Neuron'+str(n+1))
for n in range (neurons):
labelstf.append('STD Neuron'+str(n+1))
for n in range (neurons):
labelstf.append('Skew Neuron'+str(n+1))
for n in range (neurons):
labelstf.append('Kurt Neuron'+str(n+1))
for n in range (neurons):
labelstf.append('Min Neuron'+str(n+1))
for n in range (neurons):
labelstf.append('Max Neuron'+str(n+1))
with open(folder_path_static+bert_model+'_Static_Features.csv', 'w', newline='') as csvfile:
writer = csv.writer(csvfile)
writer.writerow(labelstf)
pbar=tqdm(files)
for file in pbar:
pbar.set_description("Processing %s" % file)
data = pd.read_csv(files_path+'/'+file, sep='\t', header=None)
file_name=file_names[j]
data_input=list(data[0])
if bert_model=='Bert':
bert=BERT(data_input,file_name, language=language, stopwords=stopwords,
model=model, cased=cased, cuda=cuda)
elif bert_model=='Beto':
bert=BETO(data_input,file_name, stopwords=stopwords,
model=model, cased=cased, cuda=cuda)
elif bert_model=='SciBert':
bert=SciBERT(data_input,file_name, stopwords=stopwords,
cased=cased, cuda=cuda)
j+=1
if static:
data_stat=bert.get_bert_embeddings(folder_path, dynamic=dynamic, static=static)
with open(folder_path_static+bert_model+'_Static_Features.csv', 'a') as f:
writer = csv.writer(f)
writer.writerow(np.hstack((file_name, data_stat)))
gc.collect()
else:
bert.get_bert_embeddings(folder_path, dynamic=dynamic, static=static)
gc.collect()
| 40.309524
| 273
| 0.638905
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,444
| 0.284308
|
49a17ebec39db4cc9cf78ab25d40d4459000d689
| 264
|
py
|
Python
|
AiSD_03/Zadanie_7.py
|
DLQuake/Algorytmy_i_struktury_danych
|
210d0b4e868e5cc9d6aa730a2297d8074e4d52a1
|
[
"MIT"
] | null | null | null |
AiSD_03/Zadanie_7.py
|
DLQuake/Algorytmy_i_struktury_danych
|
210d0b4e868e5cc9d6aa730a2297d8074e4d52a1
|
[
"MIT"
] | null | null | null |
AiSD_03/Zadanie_7.py
|
DLQuake/Algorytmy_i_struktury_danych
|
210d0b4e868e5cc9d6aa730a2297d8074e4d52a1
|
[
"MIT"
] | null | null | null |
# Zaimplementować funkcję n_sums(n: int) -> listint, która zwróci wszystkie n-cyfrowe liczby o takich samych sumach na indeksach parzystych i nieparzystych. Przykładowo, dla 3 cyfr będą to liczby m.in. 198, 220, 891, 990
def n_sums(n: int):
print(n_sums(3))
| 44
| 220
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 227
| 0.837638
|
49a34879fe64e92596a7c6eaecaaa74f1636d0c6
| 2,327
|
py
|
Python
|
wsgiservice/xmlserializer.py
|
beekpr/wsgiservice
|
9ba21060ff19cbff984424b184a5b2829fe644bb
|
[
"BSD-2-Clause"
] | 1
|
2018-01-19T10:44:15.000Z
|
2018-01-19T10:44:15.000Z
|
wsgiservice/xmlserializer.py
|
beekpr/wsgiservice
|
9ba21060ff19cbff984424b184a5b2829fe644bb
|
[
"BSD-2-Clause"
] | 2
|
2015-10-12T07:53:57.000Z
|
2016-06-17T11:13:08.000Z
|
wsgiservice/xmlserializer.py
|
beekpr/wsgiservice
|
9ba21060ff19cbff984424b184a5b2829fe644bb
|
[
"BSD-2-Clause"
] | null | null | null |
"""Helper to convert Python data structures into XML. Used so we can return
intuitive data from resource methods which are usable as JSON but can also be
returned as XML.
"""
import re
from xml.sax.saxutils import escape as xml_escape
# Regular expression matching all the illegal XML characters.
RE_ILLEGAL_XML = re.compile(
u'([\u0000-\u0008\u000b-\u000c\u000e-\u001f\ufffe-\uffff])|([%s-%s][^%s-%s])|([^%s-%s][%s-%s])|([%s-%s]$)|(^[%s-%s])' % \
(unichr(0xd800),unichr(0xdbff),unichr(0xdc00),unichr(0xdfff),
unichr(0xd800),unichr(0xdbff),unichr(0xdc00),unichr(0xdfff),
unichr(0xd800),unichr(0xdbff),unichr(0xdc00),unichr(0xdfff)))
def dumps(obj, root_tag):
"""Serialize :arg:`obj` to an XML :class:`str`.
"""
xml = _get_xml_value(obj)
if xml:
# Remove invalid XML
xml = RE_ILLEGAL_XML.sub('', xml)
if root_tag is None:
return xml
else:
root = root_tag
return '<' + root + '>' + xml + '</' + root + '>'
def _get_xml_value(value):
"""Convert an individual value to an XML string. Calls itself
recursively for dictionaries and lists.
Uses some heuristics to convert the data to XML:
- In dictionaries, the keys become the tag name.
- In lists the tag name is 'child' with an order-attribute giving
the list index.
- All other values are included as is.
All values are escaped to fit into the XML document.
:param value: The value to convert to XML.
:type value: Any valid Python value
:rtype: string
"""
retval = []
if isinstance(value, dict):
for key, value in value.iteritems():
retval.append('<' + xml_escape(str(key)) + '>')
retval.append(_get_xml_value(value))
retval.append('</' + xml_escape(str(key)) + '>')
elif isinstance(value, list):
for key, value in enumerate(value):
retval.append('<child order="' + xml_escape(str(key)) + '">')
retval.append(_get_xml_value(value))
retval.append('</child>')
elif isinstance(value, bool):
retval.append(xml_escape(str(value).lower()))
elif isinstance(value, unicode):
retval.append(xml_escape(value.encode('utf-8')))
else:
retval.append(xml_escape(str(value)))
return "".join(retval)
| 35.8
| 125
| 0.627417
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,030
| 0.44263
|
b8c51a5a3052b41343351c2e050b600648c80729
| 45,700
|
py
|
Python
|
sql/query.py
|
real-fire/archer
|
8e9e82a51125859c61d23496ad0cab0a4bbc5181
|
[
"Apache-2.0"
] | null | null | null |
sql/query.py
|
real-fire/archer
|
8e9e82a51125859c61d23496ad0cab0a4bbc5181
|
[
"Apache-2.0"
] | null | null | null |
sql/query.py
|
real-fire/archer
|
8e9e82a51125859c61d23496ad0cab0a4bbc5181
|
[
"Apache-2.0"
] | null | null | null |
import re
import simplejson as json
from django.core.urlresolvers import reverse
from django.db.models import Q, Min, F, Sum
from django.db import connection
from django.conf import settings
from django.db.models.functions import Concat
from django.views.decorators.csrf import csrf_exempt
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse, HttpResponseRedirect
from django.core import serializers
from django.db import transaction
from datetime import date
from django.db.models import Value as V
import datetime
import time
from sql.extend_json_encoder import ExtendJSONEncoder
from .aes_decryptor import Prpcrypt
from .sendmail import MailSender
from .dao import Dao
from .const import WorkflowDict
from .inception import InceptionDao
from .models import users, master_config, slave_config, QueryPrivilegesApply, QueryPrivileges, QueryLog, SlowQuery, \
SlowQueryHistory
from .data_masking import Masking
from .workflow import Workflow
from .permission import role_required, superuser_required
if settings.ALIYUN_RDS_MANAGE:
from .aliyun_function import slowquery_review as aliyun_rds_slowquery_review, \
slowquery_review_history as aliyun_rds_slowquery_review_history
dao = Dao()
prpCryptor = Prpcrypt()
inceptionDao = InceptionDao()
datamasking = Masking()
workflowOb = Workflow()
mailSenderOb = MailSender()
# 查询权限申请用于工作流审核回调
def query_audit_call_back(workflow_id, workflow_status):
# 更新业务表状态
apply_info = QueryPrivilegesApply()
apply_info.apply_id = workflow_id
apply_info.status = workflow_status
apply_info.save(update_fields=['status'])
# 审核通过插入权限信息,批量插入,减少性能消耗
if workflow_status == WorkflowDict.workflow_status['audit_success']:
apply_queryset = QueryPrivilegesApply.objects.get(apply_id=workflow_id)
# 库权限
if apply_queryset.priv_type == 1:
insertlist = [QueryPrivileges(
user_name=apply_queryset.user_name,
cluster_name=apply_queryset.cluster_name, db_name=db_name,
table_name=apply_queryset.table_list, valid_date=apply_queryset.valid_date,
limit_num=apply_queryset.limit_num, priv_type=apply_queryset.priv_type) for db_name in
apply_queryset.db_list.split(',')]
# 表权限
elif apply_queryset.priv_type == 2:
insertlist = [QueryPrivileges(
user_name=apply_queryset.user_name,
cluster_name=apply_queryset.cluster_name, db_name=apply_queryset.db_list,
table_name=table_name, valid_date=apply_queryset.valid_date,
limit_num=apply_queryset.limit_num, priv_type=apply_queryset.priv_type) for table_name in
apply_queryset.table_list.split(',')]
QueryPrivileges.objects.bulk_create(insertlist)
# 查询权限校验
def query_priv_check(loginUserOb, cluster_name, dbName, sqlContent, limit_num):
finalResult = {'status': 0, 'msg': 'ok', 'data': {}}
# 检查用户是否有该数据库/表的查询权限
loginUser = loginUserOb.username
if loginUserOb.is_superuser == 1 or loginUserOb.role == 'DBA':
user_limit_num = getattr(settings, 'ADMIN_QUERY_LIMIT')
if int(limit_num) == 0:
limit_num = int(user_limit_num)
else:
limit_num = min(int(limit_num), int(user_limit_num))
pass
# 查看表结构和执行计划,inception会报错,故单独处理,explain直接跳过不做校验
elif re.match(r"^show\s+create\s+table", sqlContent.lower()):
tb_name = re.sub('^show\s+create\s+table', '', sqlContent, count=1, flags=0).strip()
# 先判断是否有整库权限
db_privileges = QueryPrivileges.objects.filter(user_name=loginUser, cluster_name=cluster_name,
db_name=dbName, priv_type=1,
valid_date__gte=datetime.datetime.now(), is_deleted=0)
# 无整库权限再验证表权限
if len(db_privileges) == 0:
tb_privileges = QueryPrivileges.objects.filter(user_name=loginUser, cluster_name=cluster_name,
db_name=dbName, table_name=tb_name, priv_type=2,
valid_date__gte=datetime.datetime.now(), is_deleted=0)
if len(tb_privileges) == 0:
finalResult['status'] = 1
finalResult['msg'] = '你无' + dbName + '.' + tb_name + '表的查询权限!请先到查询权限管理进行申请'
return finalResult
# sql查询, 可以校验到表级权限
else:
# 首先使用inception的语法树打印获取查询涉及的的表
table_ref_result = datamasking.query_table_ref(sqlContent + ';', cluster_name, dbName)
# 正确解析拿到表数据,可以校验表权限
if table_ref_result['status'] == 0:
table_ref = table_ref_result['data']
# 获取表信息,校验是否拥有全部表查询权限
QueryPrivilegesOb = QueryPrivileges.objects.filter(user_name=loginUser, cluster_name=cluster_name)
# 先判断是否有整库权限
for table in table_ref:
db_privileges = QueryPrivilegesOb.filter(db_name=table['db'], priv_type=1,
valid_date__gte=datetime.datetime.now(),
is_deleted=0)
# 无整库权限再验证表权限
if len(db_privileges) == 0:
tb_privileges = QueryPrivilegesOb.filter(db_name=table['db'], table_name=table['table'],
valid_date__gte=datetime.datetime.now(), is_deleted=0)
if len(tb_privileges) == 0:
finalResult['status'] = 1
finalResult['msg'] = '你无' + table['db'] + '.' + table['table'] + '表的查询权限!请先到查询权限管理进行申请'
return finalResult
# 获取表数据报错,检查配置文件是否允许继续执行,并进行库权限校验
else:
table_ref = None
# 校验库权限,防止inception的语法树打印错误时连库权限也未做校验
privileges = QueryPrivileges.objects.filter(user_name=loginUser, cluster_name=cluster_name, db_name=dbName,
valid_date__gte=datetime.datetime.now(),
is_deleted=0)
if len(privileges) == 0:
finalResult['status'] = 1
finalResult['msg'] = '你无' + dbName + '数据库的查询权限!请先到查询权限管理进行申请'
return finalResult
if settings.CHECK_QUERY_ON_OFF:
return table_ref_result
else:
pass
# 获取查询涉及表的最小limit限制
if table_ref:
db_list = [table_info['db'] for table_info in table_ref]
table_list = [table_info['table'] for table_info in table_ref]
user_limit_num = QueryPrivileges.objects.filter(user_name=loginUser,
cluster_name=cluster_name,
db_name__in=db_list,
table_name__in=table_list,
valid_date__gte=datetime.datetime.now(),
is_deleted=0).aggregate(Min('limit_num'))['limit_num__min']
if user_limit_num is None:
# 如果表没获取到则获取涉及库的最小limit限制
user_limit_num = QueryPrivileges.objects.filter(user_name=loginUser,
cluster_name=cluster_name,
db_name=dbName,
valid_date__gte=datetime.datetime.now(),
is_deleted=0).aggregate(Min('limit_num'))[
'limit_num__min']
else:
# 如果表没获取到则获取涉及库的最小limit限制
user_limit_num = QueryPrivileges.objects.filter(user_name=loginUser,
cluster_name=cluster_name,
db_name=dbName,
valid_date__gte=datetime.datetime.now(),
is_deleted=0).aggregate(Min('limit_num'))['limit_num__min']
if int(limit_num) == 0:
limit_num = user_limit_num
else:
limit_num = min(int(limit_num), user_limit_num)
finalResult['data'] = limit_num
return finalResult
# 获取所有集群名称
@csrf_exempt
def getClusterList(request):
slaves = slave_config.objects.all().order_by('cluster_name')
result = {'status': 0, 'msg': 'ok', 'data': []}
# 获取所有集群名称
listAllClusterName = [slave.cluster_name for slave in slaves]
result['data'] = listAllClusterName
return HttpResponse(json.dumps(result), content_type='application/json')
# 获取集群里面的数据库集合
@csrf_exempt
def getdbNameList(request):
clusterName = request.POST.get('cluster_name')
is_master = request.POST.get('is_master')
result = {'status': 0, 'msg': 'ok', 'data': []}
if is_master:
try:
master_info = master_config.objects.get(cluster_name=clusterName)
except Exception:
result['status'] = 1
result['msg'] = '找不到对应的主库配置信息,请配置'
return HttpResponse(json.dumps(result), content_type='application/json')
try:
# 取出该集群主库的连接方式,为了后面连进去获取所有databases
listDb = dao.getAlldbByCluster(master_info.master_host, master_info.master_port, master_info.master_user,
prpCryptor.decrypt(master_info.master_password))
# 要把result转成JSON存进数据库里,方便SQL单子详细信息展示
result['data'] = listDb
except Exception as msg:
result['status'] = 1
result['msg'] = str(msg)
else:
try:
slave_info = slave_config.objects.get(cluster_name=clusterName)
except Exception:
result['status'] = 1
result['msg'] = '找不到对应的从库配置信息,请配置'
return HttpResponse(json.dumps(result), content_type='application/json')
try:
# 取出该集群的连接方式,为了后面连进去获取所有databases
listDb = dao.getAlldbByCluster(slave_info.slave_host, slave_info.slave_port, slave_info.slave_user,
prpCryptor.decrypt(slave_info.slave_password))
# 要把result转成JSON存进数据库里,方便SQL单子详细信息展示
result['data'] = listDb
except Exception as msg:
result['status'] = 1
result['msg'] = str(msg)
return HttpResponse(json.dumps(result), content_type='application/json')
# 获取数据库的表集合
@csrf_exempt
def getTableNameList(request):
clusterName = request.POST.get('cluster_name')
db_name = request.POST.get('db_name')
is_master = request.POST.get('is_master')
result = {'status': 0, 'msg': 'ok', 'data': []}
if is_master:
try:
master_info = master_config.objects.get(cluster_name=clusterName)
except Exception:
result['status'] = 1
result['msg'] = '找不到对应的主库配置信息,请配置'
return HttpResponse(json.dumps(result), content_type='application/json')
try:
# 取出该集群主库的连接方式,为了后面连进去获取所有的表
listTb = dao.getAllTableByDb(master_info.master_host, master_info.master_port, master_info.master_user,
prpCryptor.decrypt(master_info.master_password), db_name)
# 要把result转成JSON存进数据库里,方便SQL单子详细信息展示
result['data'] = listTb
except Exception as msg:
result['status'] = 1
result['msg'] = str(msg)
else:
try:
slave_info = slave_config.objects.get(cluster_name=clusterName)
except Exception:
result['status'] = 1
result['msg'] = '找不到对应的从库配置信息,请配置'
return HttpResponse(json.dumps(result), content_type='application/json')
try:
# 取出该集群从库的连接方式,为了后面连进去获取所有的表
listTb = dao.getAllTableByDb(slave_info.slave_host, slave_info.slave_port, slave_info.slave_user,
prpCryptor.decrypt(slave_info.slave_password), db_name)
# 要把result转成JSON存进数据库里,方便SQL单子详细信息展示
result['data'] = listTb
except Exception as msg:
result['status'] = 1
result['msg'] = str(msg)
return HttpResponse(json.dumps(result), content_type='application/json')
# 获取表里面的字段集合
@csrf_exempt
def getColumnNameList(request):
clusterName = request.POST.get('cluster_name')
db_name = request.POST.get('db_name')
tb_name = request.POST.get('tb_name')
is_master = request.POST.get('is_master')
result = {'status': 0, 'msg': 'ok', 'data': []}
if is_master:
try:
master_info = master_config.objects.get(cluster_name=clusterName)
except Exception:
result['status'] = 1
result['msg'] = '找不到对应的主库配置信息,请配置'
return HttpResponse(json.dumps(result), content_type='application/json')
try:
# 取出该集群主库的连接方式,为了后面连进去获取所有字段
listCol = dao.getAllColumnsByTb(master_info.master_host, master_info.master_port, master_info.master_user,
prpCryptor.decrypt(master_info.master_password), db_name, tb_name)
# 要把result转成JSON存进数据库里,方便SQL单子详细信息展示
result['data'] = listCol
except Exception as msg:
result['status'] = 1
result['msg'] = str(msg)
else:
try:
slave_info = slave_config.objects.get(cluster_name=clusterName)
except Exception:
result['status'] = 1
result['msg'] = '找不到对应的从库配置信息,请配置'
return HttpResponse(json.dumps(result), content_type='application/json')
try:
# 取出该集群的连接方式,为了后面连进去获取表的所有字段
listCol = dao.getAllColumnsByTb(slave_info.slave_host, slave_info.slave_port, slave_info.slave_user,
prpCryptor.decrypt(slave_info.slave_password), db_name, tb_name)
# 要把result转成JSON存进数据库里,方便SQL单子详细信息展示
result['data'] = listCol
except Exception as msg:
result['status'] = 1
result['msg'] = str(msg)
return HttpResponse(json.dumps(result), content_type='application/json')
# 获取查询权限申请列表
@csrf_exempt
def getqueryapplylist(request):
# 获取用户信息
loginUser = request.session.get('login_username', False)
loginUserOb = users.objects.get(username=loginUser)
limit = int(request.POST.get('limit'))
offset = int(request.POST.get('offset'))
limit = offset + limit
# 获取搜索参数
search = request.POST.get('search')
if search is None:
search = ''
# 获取列表数据,申请人只能查看自己申请的数据,管理员可以看到全部数据
if loginUserOb.is_superuser == 1 or loginUserOb.role == 'DBA':
applylist = QueryPrivilegesApply.objects.all().filter(title__contains=search).order_by('-apply_id')[
offset:limit].values(
'apply_id', 'title', 'cluster_name', 'db_list', 'priv_type', 'table_list', 'limit_num', 'valid_date',
'user_name', 'status', 'create_time'
)
applylistCount = QueryPrivilegesApply.objects.all().filter(title__contains=search).count()
else:
applylist = QueryPrivilegesApply.objects.filter(user_name=loginUserOb.username).filter(
title__contains=search).order_by('-apply_id')[offset:limit].values(
'apply_id', 'title', 'cluster_name', 'db_list', 'priv_type', 'table_list', 'limit_num', 'valid_date',
'user_name', 'status', 'create_time'
)
applylistCount = QueryPrivilegesApply.objects.filter(user_name=loginUserOb.username).filter(
title__contains=search).count()
# QuerySet 序列化
rows = [row for row in applylist]
result = {"total": applylistCount, "rows": rows}
# 返回查询结果
return HttpResponse(json.dumps(result, cls=ExtendJSONEncoder, bigint_as_string=True),
content_type='application/json')
# 申请查询权限
@csrf_exempt
def applyforprivileges(request):
title = request.POST['title']
cluster_name = request.POST['cluster_name']
priv_type = request.POST['priv_type']
db_name = request.POST['db_name']
valid_date = request.POST['valid_date']
limit_num = request.POST['limit_num']
try:
workflow_remark = request.POST['apply_remark']
except Exception:
workflow_remark = ''
# 获取用户信息
loginUser = request.session.get('login_username', False)
# 服务端参数校验
result = {'status': 0, 'msg': 'ok', 'data': []}
if int(priv_type) == 1:
db_list = request.POST['db_list']
if title is None or cluster_name is None or db_list is None or valid_date is None or limit_num is None:
result['status'] = 1
result['msg'] = '请填写完整'
return HttpResponse(json.dumps(result), content_type='application/json')
elif int(priv_type) == 2:
table_list = request.POST['table_list']
if title is None or cluster_name is None or db_name is None or valid_date is None or table_list is None or limit_num is None:
result['status'] = 1
result['msg'] = '请填写完整'
return HttpResponse(json.dumps(result), content_type='application/json')
# 判断是否需要限制到表级别的权限
# 库权限
if int(priv_type) == 1:
db_list = db_list.split(',')
# 检查申请账号是否已拥整个库的查询权限
own_dbs = QueryPrivileges.objects.filter(cluster_name=cluster_name, user_name=loginUser, db_name__in=db_list,
valid_date__gte=datetime.datetime.now(), priv_type=1,
is_deleted=0).values('db_name')
own_db_list = [table_info['db_name'] for table_info in own_dbs]
if own_db_list is None:
pass
else:
for db_name in db_list:
if db_name in own_db_list:
result['status'] = 1
result['msg'] = '你已拥有' + cluster_name + '集群' + db_name + '库的全部查询权限,不能重复申请'
return HttpResponse(json.dumps(result), content_type='application/json')
# 表权限
elif int(priv_type) == 2:
table_list = table_list.split(',')
# 检查申请账号是否已拥有该表的查询权限
own_tables = QueryPrivileges.objects.filter(cluster_name=cluster_name, user_name=loginUser, db_name=db_name,
table_name__in=table_list, valid_date__gte=datetime.datetime.now(),
priv_type=2, is_deleted=0).values('table_name')
own_table_list = [table_info['table_name'] for table_info in own_tables]
if own_table_list is None:
pass
else:
for table_name in table_list:
if table_name in own_table_list:
result['status'] = 1
result['msg'] = '你已拥有' + cluster_name + '集群' + db_name + '.' + table_name + '表的查询权限,不能重复申请'
return HttpResponse(json.dumps(result), content_type='application/json')
# 使用事务保持数据一致性
try:
with transaction.atomic():
# 保存申请信息到数据库
applyinfo = QueryPrivilegesApply()
applyinfo.title = title
applyinfo.user_name = loginUser
applyinfo.cluster_name = cluster_name
if int(priv_type) == 1:
applyinfo.db_list = ','.join(db_list)
applyinfo.table_list = ''
elif int(priv_type) == 2:
applyinfo.db_list = db_name
applyinfo.table_list = ','.join(table_list)
applyinfo.priv_type = int(priv_type)
applyinfo.valid_date = valid_date
applyinfo.status = WorkflowDict.workflow_status['audit_wait'] # 待审核
applyinfo.limit_num = limit_num
applyinfo.create_user = loginUser
applyinfo.save()
apply_id = applyinfo.apply_id
# 调用工作流插入审核信息,查询权限申请workflow_type=2
auditresult = workflowOb.addworkflowaudit(request, WorkflowDict.workflow_type['query'], apply_id,
title, loginUser, workflow_remark)
if auditresult['status'] == 0:
# 更新业务表审核状态,判断是否插入权限信息
query_audit_call_back(apply_id, auditresult['data']['workflow_status'])
except Exception as msg:
result['status'] = 1
result['msg'] = str(msg)
else:
result = auditresult
return HttpResponse(json.dumps(result), content_type='application/json')
# 用户的查询权限管理
@csrf_exempt
def getuserprivileges(request):
user_name = request.POST.get('user_name')
limit = int(request.POST.get('limit'))
offset = int(request.POST.get('offset'))
limit = offset + limit
# 获取搜索参数
search = request.POST.get('search')
if search is None:
search = ''
# 判断权限,除了管理员外其他人只能查看自己的权限信息
result = {'status': 0, 'msg': 'ok', 'data': []}
loginUser = request.session.get('login_username', False)
loginUserOb = users.objects.get(username=loginUser)
# 获取用户的权限数据
if loginUserOb.is_superuser == 1 or loginUserOb.role == 'DBA':
if user_name != 'all':
privilegeslist = QueryPrivileges.objects.all().filter(user_name=user_name, is_deleted=0,
table_name__contains=search,
valid_date__gte=datetime.datetime.now()).order_by(
'-privilege_id')[offset:limit]
privilegeslistCount = QueryPrivileges.objects.all().filter(user_name=user_name, is_deleted=0,
table_name__contains=search,
valid_date__gte=datetime.datetime.now()).count()
else:
privilegeslist = QueryPrivileges.objects.all().filter(is_deleted=0, table_name__contains=search,
valid_date__gte=datetime.datetime.now()).order_by(
'-privilege_id')[offset:limit]
privilegeslistCount = QueryPrivileges.objects.all().filter(is_deleted=0,
table_name__contains=search,
valid_date__gte=datetime.datetime.now()).count()
else:
privilegeslist = QueryPrivileges.objects.filter(user_name=loginUserOb.username, is_deleted=0).filter(
table_name__contains=search).order_by('-privilege_id')[offset:limit]
privilegeslistCount = QueryPrivileges.objects.filter(user_name=loginUserOb.username, is_deleted=0).filter(
table_name__contains=search).count()
# QuerySet 序列化
privilegeslist = serializers.serialize("json", privilegeslist)
privilegeslist = json.loads(privilegeslist)
privilegeslist_result = []
for i in range(len(privilegeslist)):
privilegeslist[i]['fields']['id'] = privilegeslist[i]['pk']
privilegeslist_result.append(privilegeslist[i]['fields'])
result = {"total": privilegeslistCount, "rows": privilegeslist_result}
# 返回查询结果
return HttpResponse(json.dumps(result), content_type='application/json')
# 变更权限信息
@csrf_exempt
@superuser_required
def modifyqueryprivileges(request):
privilege_id = request.POST.get('privilege_id')
type = request.POST.get('type')
result = {'status': 0, 'msg': 'ok', 'data': []}
# type=1删除权限,type=2变更权限
privileges = QueryPrivileges()
if int(type) == 1:
# 删除权限
privileges.privilege_id = int(privilege_id)
privileges.is_deleted = 1
privileges.save(update_fields=['is_deleted'])
return HttpResponse(json.dumps(result), content_type='application/json')
elif int(type) == 2:
# 变更权限
valid_date = request.POST.get('valid_date')
limit_num = request.POST.get('limit_num')
privileges.privilege_id = int(privilege_id)
privileges.valid_date = valid_date
privileges.limit_num = limit_num
privileges.save(update_fields=['valid_date', 'limit_num'])
return HttpResponse(json.dumps(result), content_type='application/json')
# 查询权限审核
@csrf_exempt
def queryprivaudit(request):
apply_id = int(request.POST['apply_id'])
audit_status = int(request.POST['audit_status'])
audit_remark = request.POST.get('audit_remark')
if audit_remark is None:
audit_remark = ''
# 获取用户信息
loginUser = request.session.get('login_username', False)
# 使用事务保持数据一致性
try:
with transaction.atomic():
# 获取audit_id
audit_id = workflowOb.auditinfobyworkflow_id(workflow_id=apply_id,
workflow_type=WorkflowDict.workflow_type['query']).audit_id
# 调用工作流接口审核
auditresult = workflowOb.auditworkflow(audit_id, audit_status, loginUser, audit_remark)
# 按照审核结果更新业务表审核状态
auditInfo = workflowOb.auditinfo(audit_id)
if auditInfo.workflow_type == WorkflowDict.workflow_type['query']:
# 更新业务表审核状态,插入权限信息
query_audit_call_back(auditInfo.workflow_id, auditresult['data']['workflow_status'])
# 给拒绝和审核通过的申请人发送邮件
if settings.MAIL_ON_OFF == "on":
email_reciver = users.objects.get(username=auditInfo.create_user).email
email_content = "发起人:" + auditInfo.create_user + "\n审核人:" + auditInfo.audit_users \
+ "\n工单地址:" + request.scheme + "://" + request.get_host() + "/workflowdetail/" \
+ str(audit_id) + "\n工单名称: " + auditInfo.workflow_title \
+ "\n审核备注: " + audit_remark
if auditresult['data']['workflow_status'] == WorkflowDict.workflow_status['audit_success']:
email_title = "工单审核通过 # " + str(auditInfo.audit_id)
mailSenderOb.sendEmail(email_title, email_content, [email_reciver])
elif auditresult['data']['workflow_status'] == WorkflowDict.workflow_status['audit_reject']:
email_title = "工单被驳回 # " + str(auditInfo.audit_id)
mailSenderOb.sendEmail(email_title, email_content, [email_reciver])
except Exception as msg:
context = {'errMsg': msg}
return render(request, 'error.html', context)
return HttpResponseRedirect(reverse('sql:queryapplydetail', args=(apply_id,)))
# 获取SQL查询结果
@csrf_exempt
def query(request):
cluster_name = request.POST.get('cluster_name')
sqlContent = request.POST.get('sql_content')
dbName = request.POST.get('db_name')
limit_num = request.POST.get('limit_num')
finalResult = {'status': 0, 'msg': 'ok', 'data': {}}
# 服务器端参数验证
if sqlContent is None or dbName is None or cluster_name is None or limit_num is None:
finalResult['status'] = 1
finalResult['msg'] = '页面提交参数可能为空'
return HttpResponse(json.dumps(finalResult), content_type='application/json')
sqlContent = sqlContent.strip()
if sqlContent[-1] != ";":
finalResult['status'] = 1
finalResult['msg'] = 'SQL语句结尾没有以;结尾,请重新修改并提交!'
return HttpResponse(json.dumps(finalResult), content_type='application/json')
# 获取用户信息
loginUser = request.session.get('login_username', False)
loginUserOb = users.objects.get(username=loginUser)
# 过滤注释语句和非查询的语句
sqlContent = ''.join(
map(lambda x: re.compile(r'(^--\s+.*|^/\*.*\*/;\s*$)').sub('', x, count=1),
sqlContent.splitlines(1))).strip()
# 去除空行
sqlContent = re.sub('[\r\n\f]{2,}', '\n', sqlContent)
sql_list = sqlContent.strip().split('\n')
for sql in sql_list:
if re.match(r"^select|^show|^explain", sql.lower()):
break
else:
finalResult['status'] = 1
finalResult['msg'] = '仅支持^select|^show|^explain语法,请联系管理员!'
return HttpResponse(json.dumps(finalResult), content_type='application/json')
# 取出该集群的连接方式,查询只读账号,按照分号截取第一条有效sql执行
slave_info = slave_config.objects.get(cluster_name=cluster_name)
sqlContent = sqlContent.strip().split(';')[0]
# 查询权限校验,以及limit_num获取
priv_check_info = query_priv_check(loginUserOb, cluster_name, dbName, sqlContent, limit_num)
if priv_check_info['status'] == 0:
limit_num = priv_check_info['data']
else:
return HttpResponse(json.dumps(priv_check_info), content_type='application/json')
if re.match(r"^explain", sqlContent.lower()):
limit_num = 0
# 对查询sql增加limit限制
if re.match(r"^select", sqlContent.lower()):
if re.search(r"limit\s+(\d+)$", sqlContent.lower()) is None:
if re.search(r"limit\s+\d+\s*,\s*(\d+)$", sqlContent.lower()) is None:
sqlContent = sqlContent + ' limit ' + str(limit_num)
sqlContent = sqlContent + ';'
# 执行查询语句,统计执行时间
t_start = time.time()
sql_result = dao.mysql_query(slave_info.slave_host, slave_info.slave_port, slave_info.slave_user,
prpCryptor.decrypt(slave_info.slave_password), str(dbName), sqlContent, limit_num)
t_end = time.time()
cost_time = "%5s" % "{:.4f}".format(t_end - t_start)
sql_result['cost_time'] = cost_time
# 数据脱敏,同样需要检查配置,是否开启脱敏,语法树解析是否允许出错继续执行
t_start = time.time()
if settings.DATA_MASKING_ON_OFF:
# 仅对查询语句进行脱敏
if re.match(r"^select", sqlContent.lower()):
try:
masking_result = datamasking.data_masking(cluster_name, dbName, sqlContent, sql_result)
except Exception:
if settings.CHECK_QUERY_ON_OFF:
finalResult['status'] = 1
finalResult['msg'] = '脱敏数据报错,请联系管理员'
return HttpResponse(json.dumps(finalResult), content_type='application/json')
else:
if masking_result['status'] != 0:
if settings.CHECK_QUERY_ON_OFF:
return HttpResponse(json.dumps(masking_result), content_type='application/json')
t_end = time.time()
masking_cost_time = "%5s" % "{:.4f}".format(t_end - t_start)
sql_result['masking_cost_time'] = masking_cost_time
finalResult['data'] = sql_result
# 成功的查询语句记录存入数据库
if sql_result.get('Error'):
pass
else:
query_log = QueryLog()
query_log.username = loginUser
query_log.db_name = dbName
query_log.cluster_name = cluster_name
query_log.sqllog = sqlContent
if int(limit_num) == 0:
limit_num = int(sql_result['effect_row'])
else:
limit_num = min(int(limit_num), int(sql_result['effect_row']))
query_log.effect_row = limit_num
query_log.cost_time = cost_time
# 防止查询超时
try:
query_log.save()
except:
connection.close()
query_log.save()
# 返回查询结果
return HttpResponse(json.dumps(finalResult, cls=ExtendJSONEncoder, bigint_as_string=True),
content_type='application/json')
# 获取sql查询记录
@csrf_exempt
def querylog(request):
# 获取用户信息
loginUser = request.session.get('login_username', False)
loginUserOb = users.objects.get(username=loginUser)
limit = int(request.POST.get('limit'))
offset = int(request.POST.get('offset'))
limit = offset + limit
# 获取搜索参数
search = request.POST.get('search')
if search is None:
search = ''
# 查询个人记录,超管查看所有数据
if loginUserOb.is_superuser == 1 or loginUserOb.role == 'DBA':
sql_log_count = QueryLog.objects.all().filter(Q(sqllog__contains=search) | Q(username__contains=search)).count()
sql_log_list = QueryLog.objects.all().filter(
Q(sqllog__contains=search) | Q(username__contains=search)).order_by(
'-id')[offset:limit]
else:
sql_log_count = QueryLog.objects.filter(username=loginUser).filter(
Q(sqllog__contains=search) | Q(username__contains=search)).count()
sql_log_list = QueryLog.objects.filter(username=loginUser).filter(
Q(sqllog__contains=search) | Q(username__contains=search)).order_by('-id')[offset:limit]
# QuerySet 序列化
sql_log_list = serializers.serialize("json", sql_log_list)
sql_log_list = json.loads(sql_log_list)
sql_log = [log_info['fields'] for log_info in sql_log_list]
result = {"total": sql_log_count, "rows": sql_log}
# 返回查询结果
return HttpResponse(json.dumps(result), content_type='application/json')
# 获取SQL执行计划
@csrf_exempt
def explain(request):
if request.is_ajax():
sqlContent = request.POST.get('sql_content')
clusterName = request.POST.get('cluster_name')
dbName = request.POST.get('db_name')
else:
sqlContent = request.POST['sql_content']
clusterName = request.POST['cluster_name']
dbName = request.POST.get('db_name')
finalResult = {'status': 0, 'msg': 'ok', 'data': []}
# 服务器端参数验证
if sqlContent is None or clusterName is None:
finalResult['status'] = 1
finalResult['msg'] = '页面提交参数可能为空'
return HttpResponse(json.dumps(finalResult), content_type='application/json')
sqlContent = sqlContent.rstrip()
if sqlContent[-1] != ";":
finalResult['status'] = 1
finalResult['msg'] = 'SQL语句结尾没有以;结尾,请重新修改并提交!'
return HttpResponse(json.dumps(finalResult), content_type='application/json')
# 过滤非查询的语句
if re.match(r"^explain", sqlContent.lower()):
pass
else:
finalResult['status'] = 1
finalResult['msg'] = '仅支持explain开头的语句,请检查'
return HttpResponse(json.dumps(finalResult), content_type='application/json')
# 取出该集群的连接方式,按照分号截取第一条有效sql执行
masterInfo = master_config.objects.get(cluster_name=clusterName)
sqlContent = sqlContent.strip().split(';')[0]
# 执行获取执行计划语句
sql_result = dao.mysql_query(masterInfo.master_host, masterInfo.master_port, masterInfo.master_user,
prpCryptor.decrypt(masterInfo.master_password), str(dbName), sqlContent)
finalResult['data'] = sql_result
# 返回查询结果
return HttpResponse(json.dumps(finalResult, cls=ExtendJSONEncoder, bigint_as_string=True),
content_type='application/json')
# 获取SQL慢日志统计
@csrf_exempt
def slowquery_review(request):
cluster_name = request.POST.get('cluster_name')
# 判断是RDS还是其他实例
cluster_info = master_config.objects.get(cluster_name=cluster_name)
if settings.ALIYUN_RDS_MANAGE:
# 调用阿里云慢日志接口
result = aliyun_rds_slowquery_review(request)
else:
StartTime = request.POST.get('StartTime')
EndTime = request.POST.get('EndTime')
DBName = request.POST.get('db_name')
limit = int(request.POST.get('limit'))
offset = int(request.POST.get('offset'))
limit = offset + limit
# 时间处理
EndTime = datetime.datetime.strptime(EndTime, '%Y-%m-%d') + datetime.timedelta(days=1)
# DBName非必传
if DBName:
# 获取慢查数据
slowsql_obj = SlowQuery.objects.filter(
slowqueryhistory__hostname_max=(cluster_info.master_host + ':' + str(cluster_info.master_port)),
slowqueryhistory__db_max=DBName,
slowqueryhistory__ts_min__range=(StartTime, EndTime),
last_seen__range=(StartTime, EndTime)
).annotate(CreateTime=F('last_seen'),
SQLId=F('checksum'),
DBName=F('slowqueryhistory__db_max'), # 数据库
SQLText=F('fingerprint'), # SQL语句
).values(
'CreateTime', 'SQLId', 'DBName', 'SQLText'
).annotate(
MySQLTotalExecutionCounts=Sum('slowqueryhistory__ts_cnt'), # 执行总次数
MySQLTotalExecutionTimes=Sum('slowqueryhistory__query_time_sum'), # 执行总时长
ParseTotalRowCounts=Sum('slowqueryhistory__rows_examined_sum'), # 扫描总行数
ReturnTotalRowCounts=Sum('slowqueryhistory__rows_sent_sum'), # 返回总行数
).order_by('-MySQLTotalExecutionCounts')[offset:limit] # 执行总次数倒序排列
slowsql_obj_count = SlowQuery.objects.filter(
slowqueryhistory__hostname_max=(cluster_info.master_host + ':' + str(cluster_info.master_port)),
slowqueryhistory__db_max=DBName,
slowqueryhistory__ts_min__range=(StartTime, EndTime),
last_seen__range=(StartTime, EndTime)
).annotate(CreateTime=F('last_seen'),
SQLId=F('checksum'),
DBName=F('slowqueryhistory__db_max'), # 数据库
SQLText=F('fingerprint'), # SQL语句
).values(
'CreateTime', 'SQLId', 'DBName', 'SQLText'
).annotate(
MySQLTotalExecutionCounts=Sum('slowqueryhistory__ts_cnt'), # 执行总次数
MySQLTotalExecutionTimes=Sum('slowqueryhistory__query_time_sum'), # 执行总时长
ParseTotalRowCounts=Sum('slowqueryhistory__rows_examined_sum'), # 扫描总行数
ReturnTotalRowCounts=Sum('slowqueryhistory__rows_sent_sum'), # 返回总行数
).count()
else:
# 获取慢查数据
slowsql_obj = SlowQuery.objects.filter(
slowqueryhistory__hostname_max=(cluster_info.master_host + ':' + str(cluster_info.master_port)),
slowqueryhistory__ts_min__range=(StartTime, EndTime),
last_seen__range=(StartTime, EndTime)
).annotate(CreateTime=F('last_seen'),
SQLId=F('checksum'),
DBName=F('slowqueryhistory__db_max'), # 数据库
SQLText=F('fingerprint'), # SQL语句
).values(
'CreateTime', 'SQLId', 'DBName', 'SQLText'
).annotate(
MySQLTotalExecutionCounts=Sum('slowqueryhistory__ts_cnt'), # 执行总次数
MySQLTotalExecutionTimes=Sum('slowqueryhistory__query_time_sum'), # 执行总时长
ParseTotalRowCounts=Sum('slowqueryhistory__rows_examined_sum'), # 扫描总行数
ReturnTotalRowCounts=Sum('slowqueryhistory__rows_sent_sum'), # 返回总行数
).order_by('-MySQLTotalExecutionCounts')[offset:limit] # 执行总次数倒序排列
slowsql_obj_count = SlowQuery.objects.filter(
slowqueryhistory__hostname_max=(cluster_info.master_host + ':' + str(cluster_info.master_port)),
slowqueryhistory__ts_min__range=(StartTime, EndTime),
last_seen__range=(StartTime, EndTime)
).annotate(CreateTime=F('last_seen'),
SQLId=F('checksum'),
DBName=F('slowqueryhistory__db_max'), # 数据库
SQLText=F('fingerprint'), # SQL语句
).values(
'CreateTime', 'SQLId', 'DBName', 'SQLText'
).annotate(
MySQLTotalExecutionCounts=Sum('slowqueryhistory__ts_cnt'), # 执行总次数
MySQLTotalExecutionTimes=Sum('slowqueryhistory__query_time_sum'), # 执行总时长
ParseTotalRowCounts=Sum('slowqueryhistory__rows_examined_sum'), # 扫描总行数
ReturnTotalRowCounts=Sum('slowqueryhistory__rows_sent_sum'), # 返回总行数
).count()
# QuerySet 序列化
SQLSlowLog = [SlowLog for SlowLog in slowsql_obj]
result = {"total": slowsql_obj_count, "rows": SQLSlowLog}
# 返回查询结果
return HttpResponse(json.dumps(result, cls=ExtendJSONEncoder, bigint_as_string=True),
content_type='application/json')
# 获取SQL慢日志明细
@csrf_exempt
def slowquery_review_history(request):
cluster_name = request.POST.get('cluster_name')
# 判断是RDS还是其他实例
cluster_info = master_config.objects.get(cluster_name=cluster_name)
if settings.ALIYUN_RDS_MANAGE:
# 调用阿里云慢日志接口
result = aliyun_rds_slowquery_review_history(request)
else:
StartTime = request.POST.get('StartTime')
EndTime = request.POST.get('EndTime')
DBName = request.POST.get('db_name')
SQLId = request.POST.get('SQLId')
limit = int(request.POST.get('limit'))
offset = int(request.POST.get('offset'))
# 时间处理
EndTime = datetime.datetime.strptime(EndTime, '%Y-%m-%d') + datetime.timedelta(days=1)
limit = offset + limit
# SQLId、DBName非必传
if SQLId:
# 获取慢查明细数据
slowsql_record_obj = SlowQueryHistory.objects.filter(
hostname_max=(cluster_info.master_host + ':' + str(cluster_info.master_port)),
checksum=int(SQLId),
ts_min__range=(StartTime, EndTime)
).annotate(ExecutionStartTime=F('ts_min'), # 执行开始时间
DBName=F('db_max'), # 数据库名
HostAddress=Concat(V('\''), 'user_max', V('\''), V('@'), V('\''), 'client_max', V('\'')), # 用户名
SQLText=F('sample'), # SQL语句
QueryTimes=F('query_time_sum'), # 执行时长(秒)
LockTimes=F('lock_time_sum'), # 锁定时长(秒)
ParseRowCounts=F('rows_examined_sum'), # 解析行数
ReturnRowCounts=F('rows_sent_sum') # 返回行数
).values(
'ExecutionStartTime', 'DBName', 'HostAddress', 'SQLText', 'QueryTimes', 'LockTimes', 'ParseRowCounts',
'ReturnRowCounts'
)[offset:limit]
slowsql_obj_count = SlowQueryHistory.objects.filter(
hostname_max=(cluster_info.master_host + ':' + str(cluster_info.master_port)),
checksum=int(SQLId),
ts_min__range=(StartTime, EndTime)
).count()
else:
if DBName:
# 获取慢查明细数据
slowsql_record_obj = SlowQueryHistory.objects.filter(
hostname_max=(cluster_info.master_host + ':' + str(cluster_info.master_port)),
db_max=DBName,
ts_min__range=(StartTime, EndTime)
).annotate(ExecutionStartTime=F('ts_min'), # 执行开始时间
DBName=F('db_max'), # 数据库名
HostAddress=Concat(V('\''), 'user_max', V('\''), V('@'), V('\''), 'client_max', V('\'')), # 用户名
SQLText=F('sample'), # SQL语句
QueryTimes=F('query_time_sum'), # 执行时长(秒)
LockTimes=F('lock_time_sum'), # 锁定时长(秒)
ParseRowCounts=F('rows_examined_sum'), # 解析行数
ReturnRowCounts=F('rows_sent_sum') # 返回行数
).values(
'ExecutionStartTime', 'DBName', 'HostAddress', 'SQLText', 'QueryTimes', 'LockTimes',
'ParseRowCounts',
'ReturnRowCounts'
)[offset:limit] # 执行总次数倒序排列
slowsql_obj_count = SlowQueryHistory.objects.filter(
hostname_max=(cluster_info.master_host + ':' + str(cluster_info.master_port)),
db_max=DBName,
ts_min__range=(StartTime, EndTime)
).count()
else:
# 获取慢查明细数据
slowsql_record_obj = SlowQueryHistory.objects.filter(
hostname_max=(cluster_info.master_host + ':' + str(cluster_info.master_port)),
ts_min__range=(StartTime, EndTime)
).annotate(ExecutionStartTime=F('ts_min'), # 执行开始时间
DBName=F('db_max'), # 数据库名
HostAddress=F('user_max'), # 用户名
SQLText=F('sample'), # SQL语句
QueryTimes=F('query_time_sum'), # 执行时长(秒)
LockTimes=F('lock_time_sum'), # 锁定时长(秒)
ParseRowCounts=F('rows_examined_sum'), # 解析行数
ReturnRowCounts=F('rows_sent_sum') # 返回行数
).values(
'ExecutionStartTime', 'DBName', 'HostAddress', 'SQLText', 'QueryTimes', 'LockTimes',
'ParseRowCounts',
'ReturnRowCounts'
)[offset:limit] # 执行总次数倒序排列
slowsql_obj_count = SlowQueryHistory.objects.filter(
hostname_max=(cluster_info.master_host + ':' + str(cluster_info.master_port)),
ts_min__range=(StartTime, EndTime)
).count()
# QuerySet 序列化
SQLSlowRecord = [SlowRecord for SlowRecord in slowsql_record_obj]
result = {"total": slowsql_obj_count, "rows": SQLSlowRecord}
# 返回查询结果
return HttpResponse(json.dumps(result, cls=ExtendJSONEncoder, bigint_as_string=True),
content_type='application/json')
# 获取邮件抄送人
@csrf_exempt
def getEmailCc(request):
result = {'status': 0, 'msg': 'ok', 'data': []}
try:
listDb = [username['display'] for username in
users.objects.exclude(role__icontains="DBA").exclude(username__icontains="root").values('username', 'display')]
result['data'] = listDb
except Exception as e:
result['status'] = 1
result['msg'] = str(e)
return HttpResponse(json.dumps(result), content_type='application/json')
| 44.197292
| 136
| 0.596521
| 0
| 0
| 0
| 0
| 39,870
| 0.803118
| 0
| 0
| 11,775
| 0.237189
|
b8c640f9283d5b83c08e12647497d33055a9e83f
| 13,671
|
py
|
Python
|
pyTooling/CLIAbstraction/__init__.py
|
pyTooling/pyTooling.CLIAbstraction
|
3b17490ae729e126799328198a814b6c741b1ac7
|
[
"Apache-2.0"
] | null | null | null |
pyTooling/CLIAbstraction/__init__.py
|
pyTooling/pyTooling.CLIAbstraction
|
3b17490ae729e126799328198a814b6c741b1ac7
|
[
"Apache-2.0"
] | 8
|
2021-12-19T19:58:31.000Z
|
2022-03-02T10:45:16.000Z
|
pyTooling/CLIAbstraction/__init__.py
|
pyTooling/pyTooling.CLIAbstraction
|
3b17490ae729e126799328198a814b6c741b1ac7
|
[
"Apache-2.0"
] | null | null | null |
# ==================================================================================================================== #
# _____ _ _ ____ _ ___ _ _ _ _ _ #
# _ __ _ |_ _|__ ___ | (_)_ __ __ _ / ___| | |_ _| / \ | |__ ___| |_ _ __ __ _ ___| |_(_) ___ _ __ #
# | '_ \| | | || |/ _ \ / _ \| | | '_ \ / _` || | | | | | / _ \ | '_ \/ __| __| '__/ _` |/ __| __| |/ _ \| '_ \ #
# | |_) | |_| || | (_) | (_) | | | | | | (_| || |___| |___ | | / ___ \| |_) \__ \ |_| | | (_| | (__| |_| | (_) | | | | #
# | .__/ \__, ||_|\___/ \___/|_|_|_| |_|\__, (_)____|_____|___/_/ \_\_.__/|___/\__|_| \__,_|\___|\__|_|\___/|_| |_| #
# |_| |___/ |___/ #
# ==================================================================================================================== #
# Authors: #
# Patrick Lehmann #
# #
# License: #
# ==================================================================================================================== #
# Copyright 2017-2022 Patrick Lehmann - Bötzingen, Germany #
# Copyright 2007-2016 Technische Universität Dresden - Germany, Chair of VLSI-Design, Diagnostics and Architecture #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# #
# SPDX-License-Identifier: Apache-2.0 #
# ==================================================================================================================== #
#
"""Basic abstraction layer for executables."""
__author__ = "Patrick Lehmann"
__email__ = "Paebbels@gmail.com"
__copyright__ = "2014-2022, Patrick Lehmann"
__license__ = "Apache License, Version 2.0"
__version__ = "0.4.0"
__keywords__ = ["abstract", "executable", "cli", "cli arguments"]
from pathlib import Path
from platform import system
from shutil import which as shutil_which
from subprocess import (
Popen as Subprocess_Popen,
PIPE as Subprocess_Pipe,
STDOUT as Subprocess_StdOut
)
from typing import Dict, Optional, ClassVar, Type, List, Tuple, Iterator, Generator
from pyTooling.Decorators import export
from pyTooling.Exceptions import ExceptionBase, PlatformNotSupportedException
from pyAttributes import Attribute
from .Argument import (
CommandLineArgument, ExecutableArgument,
NamedAndValuedArgument, ValuedArgument, PathArgument,
PathListArgument, NamedTupledArgument
)
from .ValuedFlag import ValuedFlag
@export
class CLIAbstractionException(ExceptionBase):
pass
@export
class DryRunException(CLIAbstractionException):
"""This exception is raised if an executable is launched while in dry-run mode."""
@export
class CLIArgument(Attribute):
"""An attribute to annotate nested classes as an CLI argument."""
@export
class Program:
"""Represent a simple command line interface (CLI) executable (program or script)."""
_platform: str #: Current platform the executable runs on (Linux, Windows, ...)
_executableNames: ClassVar[Dict[str, str]] #: Dictionary of platform specific executable names.
_executablePath: Path #: The path to the executable (binary, script, ...).
_dryRun: bool #: True, if program shall run in *dry-run mode*.
__cliOptions__: ClassVar[Dict[Type[CommandLineArgument], int]] #: List of all possible CLI options.
__cliParameters__: Dict[Type[CommandLineArgument], Optional[CommandLineArgument]] #: List of all CLI parameters (used CLI options).
def __init_subclass__(cls, *args, **kwargs):
"""
Whenever a subclass is derived from :class:``Program``, all nested classes declared within ``Program`` and which are
marked with pyAttribute ``CLIOption`` are collected and then listed in the ``__cliOptions__`` dictionary.
"""
super().__init_subclass__(*args, **kwargs)
# register all available CLI options (nested classes marked with attribute 'CLIOption')
cls.__cliOptions__: Dict[Type[CommandLineArgument], int] = {}
order: int = 0
for option in CLIArgument.GetClasses(scope=cls):
cls.__cliOptions__[option] = order
order += 1
def __init__(self, executablePath: Path = None, binaryDirectoryPath: Path = None, dryRun: bool = False):
self._platform = system()
self._dryRun = dryRun
if executablePath is not None:
if isinstance(executablePath, Path):
if not executablePath.exists():
if dryRun:
self.LogDryRun(f"File check for '{executablePath}' failed. [SKIPPING]")
else:
raise CLIAbstractionException(f"Program '{executablePath}' not found.") from FileNotFoundError(executablePath)
else:
raise TypeError(f"Parameter 'executablePath' is not of type 'Path'.")
elif binaryDirectoryPath is not None:
if isinstance(binaryDirectoryPath, Path):
if not binaryDirectoryPath.exists():
if dryRun:
self.LogDryRun(f"Directory check for '{binaryDirectoryPath}' failed. [SKIPPING]")
else:
raise CLIAbstractionException(f"Binary directory '{binaryDirectoryPath}' not found.") from FileNotFoundError(binaryDirectoryPath)
try:
executablePath = binaryDirectoryPath / self._executableNames[self._platform]
except KeyError:
raise CLIAbstractionException(f"Program is not supported on platform '{self._platform}'.") from PlatformNotSupportedException(self._platform)
if not executablePath.exists():
if dryRun:
self.LogDryRun(f"File check for '{executablePath}' failed. [SKIPPING]")
else:
raise CLIAbstractionException(f"Program '{executablePath}' not found.") from FileNotFoundError(executablePath)
else:
raise TypeError(f"Parameter 'binaryDirectoryPath' is not of type 'Path'.")
else:
try:
executablePath = Path(self._executableNames[self._platform])
except KeyError:
raise CLIAbstractionException(f"Program is not supported on platform '{self._platform}'.") from PlatformNotSupportedException(self._platform)
resolvedExecutable = shutil_which(str(executablePath))
if dryRun:
if resolvedExecutable is None:
pass
# XXX: log executable not found in PATH
# self.LogDryRun(f"Which '{executablePath}' failed. [SKIPPING]")
else:
fullExecutablePath = Path(resolvedExecutable)
if not fullExecutablePath.exists():
pass
# XXX: log executable not found
# self.LogDryRun(f"File check for '{fullExecutablePath}' failed. [SKIPPING]")
else:
if resolvedExecutable is None:
raise CLIAbstractionException(f"Program could not be found in PATH.") from FileNotFoundError(executablePath)
fullExecutablePath = Path(resolvedExecutable)
if not fullExecutablePath.exists():
raise CLIAbstractionException(f"Program '{fullExecutablePath}' not found.") from FileNotFoundError(fullExecutablePath)
# TODO: log found executable in PATH
# TODO: check if found executable has execute permissions
# raise ValueError(f"Neither parameter 'executablePath' nor 'binaryDirectoryPath' was set.")
self._executablePath = executablePath
self.__cliParameters__ = {}
@staticmethod
def _NeedsParameterInitialization(key):
return issubclass(key, (ValuedFlag, ValuedArgument, NamedAndValuedArgument, NamedTupledArgument, PathArgument, PathListArgument))
def __getitem__(self, key):
"""Access to a CLI parameter by CLI option (key must be of type :class:`CommandLineArgument`), which is already used."""
if not issubclass(key, CommandLineArgument):
raise TypeError(f"Key '{key}' is not a subclass of 'CommandLineArgument'.")
# TODO: is nested check
return self.__cliParameters__[key]
def __setitem__(self, key, value):
if not issubclass(key, CommandLineArgument):
raise TypeError(f"Key '{key}' is not a subclass of 'CommandLineArgument'.")
elif key not in self.__cliOptions__:
raise KeyError(f"Option '{key}' is not allowed on executable '{self.__class__.__name__}'")
elif key in self.__cliParameters__:
raise KeyError(f"Option '{key}' is already set to a value.")
if self._NeedsParameterInitialization(key):
self.__cliParameters__[key] = key(value)
else:
self.__cliParameters__[key] = key()
@property
def Path(self) -> Path:
return self._executablePath
def ToArgumentList(self) -> List[str]:
result: List[str] = []
result.append(str(self._executablePath))
def predicate(item: Tuple[Type[CommandLineArgument], int]) -> int:
return self.__cliOptions__[item[0]]
for key, value in sorted(self.__cliParameters__.items(), key=predicate):
param = value.AsArgument()
if isinstance(param, str):
result.append(param)
elif isinstance(param, (Tuple, List)):
result += param
else:
raise TypeError(f"") # XXX: needs error message
return result
def __repr__(self):
return "[" + ", ".join([f"\"{item}\"" for item in self.ToArgumentList()]) + "]"
def __str__(self):
return " ".join([f"\"{item}\"" for item in self.ToArgumentList()])
# @export
# class Environment:
# def __init__(self):
# self.Variables = {}
@export
class Executable(Program): # (ILogable):
"""Represent a CLI executable derived from :class:`Program`, that adds an abstraction of :class:`subprocess.Popen`."""
_BOUNDARY = "====== BOUNDARY pyTooling.CLIAbstraction BOUNDARY ======"
_environment: Dict[str, str] = None
_process: Subprocess_Popen = None
_iterator: Iterator = None
def __init__(self, executablePath: Path = None, binaryDirectoryPath: Path = None, dryRun: bool = False): #, environment: Environment = None):
super().__init__(executablePath, binaryDirectoryPath, dryRun)
def StartProcess(self):
# start child process
if self._dryRun:
self.LogDryRun(f"Start process: {self!r}")
return
if (self._environment is not None):
envVariables = self._environment.Variables
else:
envVariables = None
# FIXME: verbose log start process
# FIXME: debug log - parameter list
try:
self._process = Subprocess_Popen(
self.ToArgumentList(),
stdin=Subprocess_Pipe,
stdout=Subprocess_Pipe,
stderr=Subprocess_StdOut,
env=envVariables,
universal_newlines=True,
bufsize=256
)
except OSError as ex:
raise CLIAbstractionException(f"Error while launching a process for '{self._executablePath}'.") from ex
def Send(self, line: str, end: str="\n") -> None:
try:
self._process.stdin.write(line + end)
self._process.stdin.flush()
except Exception as ex:
raise CLIAbstractionException(f"") from ex # XXX: need error message
# This is TCL specific ...
# def SendBoundary(self):
# self.Send("puts \"{0}\"".format(self._pyIPCMI_BOUNDARY))
def GetLineReader(self) -> Generator[str, None, None]:
if self._dryRun:
raise DryRunException() # XXX: needs a message
try:
for line in iter(self._process.stdout.readline, ""): # FIXME: can it be improved?
yield line[:-1]
except Exception as ex:
raise CLIAbstractionException() from ex # XXX: need error message
# finally:
# self._process.terminate()
def Terminate(self):
self._process.terminate()
# This is TCL specific
# def ReadUntilBoundary(self, indent=0):
# __indent = " " * indent
# if (self._iterator is None):
# self._iterator = iter(self.GetReader())
#
# for line in self._iterator:
# print(__indent + line)
# if (self._pyIPCMI_BOUNDARY in line):
# break
# self.LogDebug("Quartus II is ready")
| 44.676471
| 147
| 0.575744
| 8,904
| 0.65121
| 384
| 0.028085
| 8,944
| 0.654136
| 0
| 0
| 7,151
| 0.523002
|
b8c7afa99f880ad851ed3d1e2b329906d0d376a5
| 1,601
|
py
|
Python
|
ingest_to_dynamodb/lambda_function.py
|
fladdimir/csa-simulation-based-sc-forecast
|
80f176a783496f8859609f63b56c6199a73d9909
|
[
"MIT"
] | 2
|
2020-11-04T17:34:38.000Z
|
2021-08-13T07:55:23.000Z
|
ingest_to_dynamodb/lambda_function.py
|
fladdimir/csa-simulation-based-sc-forecast
|
80f176a783496f8859609f63b56c6199a73d9909
|
[
"MIT"
] | null | null | null |
ingest_to_dynamodb/lambda_function.py
|
fladdimir/csa-simulation-based-sc-forecast
|
80f176a783496f8859609f63b56c6199a73d9909
|
[
"MIT"
] | 2
|
2021-05-28T02:55:44.000Z
|
2021-08-03T13:56:10.000Z
|
import base64
import json
import logging
import os
from decimal import Decimal
import boto3
"""
environment variables:
export AWS_ENDPOINT=http://localhost:4566
export TABLE_NAME=table_xy
# for direct local execution:
export AWS_DEFAULT_REGION=localhost
export AWS_ACCESS_KEY_ID=access_key_id
export AWS_SECRET_ACCESS_KEY=secret_access_key
"""
AWS_ENDPOINT = os.getenv("AWS_ENDPOINT")
TABLE_NAME = os.getenv("TABLE_NAME")
# localstack specific url processing
LOCALSTACK_HOSTNAME = "LOCALSTACK_HOSTNAME"
if LOCALSTACK_HOSTNAME in AWS_ENDPOINT:
localstack_hostname = os.getenv(LOCALSTACK_HOSTNAME, "localstack_main")
AWS_ENDPOINT = AWS_ENDPOINT.replace(LOCALSTACK_HOSTNAME, localstack_hostname)
dynamodb = boto3.resource("dynamodb", endpoint_url=AWS_ENDPOINT)
table = dynamodb.Table(TABLE_NAME)
def handler(event, context):
datas = [record["kinesis"]["data"] for record in event["Records"]]
bodies = [base64.b64decode(data) for data in datas]
deserialized_bodies = [json.loads(body) for body in bodies]
for data in deserialized_bodies:
update_item(data)
def update_item(item: dict):
# e.g.: data = {"order_name": "entity_1", "attribute": "time_of_acceptance", "value": 0.0, "timestamp": 0.0}
logging.info(f"updating item: {item}")
table.update_item(
Key={"order_name": item["order_name"]},
UpdateExpression=f"SET {item['attribute']} = :value, last_update = :ts",
ExpressionAttributeValues={
":value": Decimal(item["value"]),
":ts": Decimal(item["timestamp"]),
},
)
| 30.788462
| 112
| 0.715178
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 650
| 0.405996
|
b8c91e32bf4a536211d6e1b856f0e33473d42a4f
| 3,816
|
py
|
Python
|
modules/sfp_psbdmp.py
|
IronFireFA/spiderfoot
|
e75428e7584666de52a20b0d2f1fb80dffd6f39c
|
[
"MIT"
] | null | null | null |
modules/sfp_psbdmp.py
|
IronFireFA/spiderfoot
|
e75428e7584666de52a20b0d2f1fb80dffd6f39c
|
[
"MIT"
] | null | null | null |
modules/sfp_psbdmp.py
|
IronFireFA/spiderfoot
|
e75428e7584666de52a20b0d2f1fb80dffd6f39c
|
[
"MIT"
] | null | null | null |
# -------------------------------------------------------------------------------
# Name: sfp_psbdmp
# Purpose: Query psbdmp.cc for potentially hacked e-mail addresses.
#
# Author: Steve Micallef <steve@binarypool.com>
#
# Created: 21/11/2016
# Copyright: (c) Steve Micallef
# Licence: MIT
# -------------------------------------------------------------------------------
import json
import re
from spiderfoot import SpiderFootEvent, SpiderFootPlugin
class sfp_psbdmp(SpiderFootPlugin):
meta = {
'name': "Psbdmp",
'summary': "Check psbdmp.cc (PasteBin Dump) for potentially hacked e-mails and domains.",
'flags': [],
'useCases': ["Footprint", "Investigate", "Passive"],
'categories': ["Leaks, Dumps and Breaches"],
'dataSource': {
'website': "https://psbdmp.cc/",
'model': "FREE_NOAUTH_UNLIMITED",
'references': [
"https://psbdmp.cc/"
],
'favIcon': "",
'logo': "",
'description': "Search dump(s) by some word.\n"
"Search dump(s) by email.\n"
"Search dump(s) by domain.\n"
"Search dump(s) from specific date.",
}
}
opts = {
}
optdescs = {
}
results = None
def setup(self, sfc, userOpts=dict()):
self.sf = sfc
self.results = self.tempStorage()
for opt in list(userOpts.keys()):
self.opts[opt] = userOpts[opt]
def watchedEvents(self):
return ["EMAILADDR", "DOMAIN_NAME", "INTERNET_NAME"]
def producedEvents(self):
return ["LEAKSITE_URL", "LEAKSITE_CONTENT"]
def query(self, qry):
ret = None
if "@" in qry:
url = "https://psbdmp.cc/api/search/email/" + qry
else:
url = "https://psbdmp.cc/api/search/domain/" + qry
res = self.sf.fetchUrl(url, timeout=15, useragent="SpiderFoot")
if res['code'] == "403" or res['content'] is None:
self.info("Unable to fetch data from psbdmp.cc right now.")
return None
try:
ret = json.loads(res['content'])
except Exception as e:
self.error(f"Error processing JSON response from psbdmp.cc: {e}")
return None
ids = list()
if 'count' not in ret:
return None
if ret['count'] <= 0:
return None
for d in ret['data']:
ids.append("https://pastebin.com/" + d['id'])
return ids
def handleEvent(self, event):
eventName = event.eventType
srcModuleName = event.module
eventData = event.data
self.debug(f"Received event, {eventName}, from {srcModuleName}")
if eventData in self.results:
self.debug(f"Skipping {eventData}, already checked.")
return
self.results[eventData] = True
data = self.query(eventData)
if data is None:
return
for n in data:
e = SpiderFootEvent("LEAKSITE_URL", n, self.__name__, event)
self.notifyListeners(e)
res = self.sf.fetchUrl(
n,
timeout=self.opts['_fetchtimeout'],
useragent=self.opts['_useragent']
)
if res['content'] is None:
self.debug(f"Ignoring {n} as no data returned")
continue
if re.search(
r"[^a-zA-Z\-\_0-9]" + re.escape(eventData) + r"[^a-zA-Z\-\_0-9]",
res['content'],
re.IGNORECASE
) is None:
continue
evt = SpiderFootEvent("LEAKSITE_CONTENT", res['content'], self.__name__, e)
self.notifyListeners(evt)
# End of sfp_psbdmp class
| 28.058824
| 97
| 0.508124
| 3,305
| 0.86609
| 0
| 0
| 0
| 0
| 0
| 0
| 1,454
| 0.381027
|
b8c9483c89fccb1526f7a1b94d89843858f14cf3
| 3,216
|
py
|
Python
|
dcr/scenarios/agent-bvt/test_agent_basics.py
|
sshedi/WALinuxAgent
|
99d07d29b7843293588bec4b961e4ef2d1daabb2
|
[
"Apache-2.0"
] | null | null | null |
dcr/scenarios/agent-bvt/test_agent_basics.py
|
sshedi/WALinuxAgent
|
99d07d29b7843293588bec4b961e4ef2d1daabb2
|
[
"Apache-2.0"
] | null | null | null |
dcr/scenarios/agent-bvt/test_agent_basics.py
|
sshedi/WALinuxAgent
|
99d07d29b7843293588bec4b961e4ef2d1daabb2
|
[
"Apache-2.0"
] | null | null | null |
import os
import re
import socket
from dotenv import load_dotenv
from dcr.scenario_utils.common_utils import execute_command_and_raise_on_error
from dcr.scenario_utils.models import get_vm_data_from_env
def test_agent_version():
stdout, _ = execute_command_and_raise_on_error(['waagent', '-version'], timeout=30)
# release_file contains:
# AGENT_VERSION = 'x.y.z'
load_dotenv()
expected_version = os.environ.get("AGENTVERSION")
if "Goal state agent: {0}".format(expected_version) not in stdout:
raise Exception("expected version {0} not found".format(expected_version))
return stdout
def check_hostname():
vm_name = get_vm_data_from_env().name
stdout, _ = execute_command_and_raise_on_error(['hostname'], timeout=30)
if vm_name.lower() != stdout.lower():
raise Exception("Hostname does not match! Expected: {0}, found: {1}".format(vm_name, stdout.strip()))
return stdout
def check_ns_lookup():
hostname, _ = execute_command_and_raise_on_error(['hostname'], timeout=30)
ip = socket.gethostbyname(hostname)
msg = "Resolved IP: {0}".format(ip)
print(msg)
return msg
def check_root_login():
stdout, _ = execute_command_and_raise_on_error(['cat', '/etc/shadow'], timeout=30)
root_passwd_line = next(line for line in stdout.splitlines() if 'root' in line)
print(root_passwd_line)
root_passwd = root_passwd_line.split(":")[1]
if any(val in root_passwd for val in ("!", "*", "x")):
return 'root login disabled'
else:
raise Exception('root login appears to be enabled: {0}'.format(root_passwd))
def check_agent_processes():
daemon_pattern = r'.*python.*waagent -daemon$'
handler_pattern = r'.*python.*-run-exthandlers'
status_pattern = r'^(\S+)\s+'
std_out, _ = execute_command_and_raise_on_error(['ps', 'axo', 'stat,args'], timeout=30)
daemon = False
ext_handler = False
agent_processes = [line for line in std_out.splitlines() if 'python' in line]
for process in agent_processes:
if re.match(daemon_pattern, process):
daemon = True
elif re.match(handler_pattern, process):
ext_handler = True
else:
continue
status = re.match(status_pattern, process).groups(1)[0]
if not(status.startswith('S') or status.startswith('R')):
raise Exception('process is not running: {0}'.format(process))
if not daemon:
raise Exception('daemon process not found:\n\n{0}'.format(std_out))
if not ext_handler:
raise Exception('extension handler process not found:\n\n{0}'.format(std_out))
return 'expected processes found running'
def check_sudoers(user):
found = False
root = '/etc/sudoers.d/'
for f in os.listdir(root):
sudoers = os.path.join(root, f)
with open(sudoers) as fh:
for entry in fh.readlines():
if entry.startswith(user) and 'ALL=(ALL)' in entry:
print('entry found: {0}'.format(entry))
found = True
if not found:
raise Exception('user {0} not found'.format(user))
return "Found user {0} in list of sudoers".format(user)
| 30.923077
| 109
| 0.661692
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 670
| 0.208333
|
b8ca7c27c5d04fb6e63bdc64ba80458973c7d303
| 9,033
|
py
|
Python
|
src/DrawingEpisodes.py
|
Benykoz/simcom
|
ffe1c3636ef65a037a34e71d5cbcdb2e483d5b93
|
[
"MIT"
] | null | null | null |
src/DrawingEpisodes.py
|
Benykoz/simcom
|
ffe1c3636ef65a037a34e71d5cbcdb2e483d5b93
|
[
"MIT"
] | null | null | null |
src/DrawingEpisodes.py
|
Benykoz/simcom
|
ffe1c3636ef65a037a34e71d5cbcdb2e483d5b93
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
#
# This file includes mainly a class "randomEpisode" that:
# - draws localization of vehicle
# - draws number of rocks
# - draws position of each rock
# - save in a json file
# Author: Michele
# Project: SmartLoader - Innovation
import json
import random
from geometry_msgs.msg import PoseStamped, Quaternion, Vector3
import math
from math import pi as pi
import src.Unity2RealWorld as toRW
import os
def deleteFileIfExists(filename):
if os.path.exists(filename):
os.remove(filename)
else:
print("The file does not exist")
def find(name, path):
for root, dirs, files in os.walk(path):
if name in files or name in dirs:
return os.path.join(root, name)
def determinePathToConfig():
user=os.getenv("HOME")
simcomloc = find("simcom", user)
confpath = simcomloc+"/config"
return confpath
class randomEpisode:
actual_seed=0
# data = {}
# data['Objects'] = []
# NumberOfRocks = 0
# VehiclePosition= PoseStamped()
def __init__(self, typeOfRand, newseed):
data = {}
data['Objects'] = []
NumberOfRocks = 0
VehiclePosition = PoseStamped()
if newseed != 0:
actual_seed = random.seed(None,2)
if typeOfRand == "verysimple":
NumberOfRocks = random.randint(1,10)
else:
NumberOfRocks = random.randint(1,10)
VehiclePosition.pose.position.x = random.uniform(0,500)
VehiclePosition.pose.position.y = 0
VehiclePosition.pose.position.z = random.uniform(0,500)
euler_orient = Vector3()
euler_orient.x = 0
euler_orient.y = random.uniform(-pi,pi)
euler_orient.z = 0 #random.uniform(-pi,pi)
quat_orient = toRW.euler_to_quaternion(euler_orient.x, euler_orient.y, euler_orient.z)
VehiclePosition.pose.orientation.x = quat_orient[0] #random.uniform(-1,1)
VehiclePosition.pose.orientation.y = quat_orient[1] #random.uniform(-1,1)
VehiclePosition.pose.orientation.z = quat_orient[2] #random.uniform(-1,1)
VehiclePosition.pose.orientation.w = quat_orient[3] #random.uniform(-1,1)
data['Objects'].append({
'Name': 'BobCat',
'Id': 'BobCat',
'Position':
{
'x': VehiclePosition.pose.position.x,
'y': VehiclePosition.pose.position.y,
'z': VehiclePosition.pose.position.z
},
'Rotation':
{
'x': VehiclePosition.pose.orientation.x,
'y': VehiclePosition.pose.orientation.y,
'z': VehiclePosition.pose.orientation.z,
'w': VehiclePosition.pose.orientation.w
},
'Scale':
{
'x': 1,
'y': 1,
'z': 1
}
})
BobcatX = VehiclePosition.pose.position.x
BobcatZ = VehiclePosition.pose.position.z
XMin = BobcatX - 1
XMax = BobcatX + 1
ZMin = BobcatZ - 1.5
ZMax = BobcatZ + 1.5
for i in range(NumberOfRocks):
id = (i+1).__str__()
eulerRot = Vector3()
eulerRot.x = 0
eulerRot.y = random.uniform(-pi, pi)
eulerRot.z = 0 #random.uniform(-pi, pi)
quatRot = toRW.euler_to_quaternion(eulerRot.x, eulerRot.y, eulerRot.z)
data['Objects'].append({
'Name': 'Rock',
'Id': id,
'Position':
{
"x": random.uniform(XMin,XMax),
"y": 0,
"z": random.uniform(ZMin,ZMax)
},
"Rotation":
{
"x": quatRot[0], #random.uniform(-1,1),
"y": quatRot[1], #random.uniform(-1,1),
"z": quatRot[2], #random.uniform(-1,1),
"w": quatRot[3] #random.uniform(-1,1)
},
"Scale":
{
"x": 0.01,
"y": 0.01,
"z": 0.01
}
})
# deleteFileIfExists('/home/sload/ws/interfaces/src/simcom/config/InitialScene.json')
filepath = determinePathToConfig()+"/InitialScene.json"
with open(filepath, 'w') as outfile:
json.dump(data, outfile)
class MultipleRocksEpisode:
# actual_seed=0
# data = {}
# data['Objects'] = []
# NumberOfRocks = 0
# VehiclePosition= PoseStamped()
def __init__(self, newseed, NumberOfRocks, marker):
actual_seed = 0
data = {}
data['Objects'] = []
VehiclePosition = PoseStamped()
if newseed != 0:
actual_seed = random.seed(None,2)
VehiclePosition.pose.position.x = 250
VehiclePosition.pose.position.y = 0
VehiclePosition.pose.position.z = 250
euler_orient = Vector3()
euler_orient.x = 0
euler_orient.y = pi/2 #random.uniform(-pi,pi)
euler_orient.z = 0 #random.uniform(-pi,pi)
quat_orient = toRW.euler_to_quaternion(euler_orient.x, euler_orient.y, euler_orient.z)
VehiclePosition.pose.orientation.x = quat_orient[0] #random.uniform(-1,1)
VehiclePosition.pose.orientation.y = quat_orient[1] #random.uniform(-1,1)
VehiclePosition.pose.orientation.z = quat_orient[2] #random.uniform(-1,1)
VehiclePosition.pose.orientation.w = quat_orient[3] #random.uniform(-1,1)
data['Objects'].append({
'Name': 'BobCat',
'Id': 'BobCat',
'Position':
{
'x': VehiclePosition.pose.position.x,
'y': VehiclePosition.pose.position.y,
'z': VehiclePosition.pose.position.z
},
'Rotation':
{
'x': VehiclePosition.pose.orientation.x,
'y': VehiclePosition.pose.orientation.y,
'z': VehiclePosition.pose.orientation.z,
'w': VehiclePosition.pose.orientation.w
},
'Scale':
{
'x': 1,
'y': 1,
'z': 1
}
})
for i in range(NumberOfRocks):
id = (i+1).__str__()
eulerRot = Vector3()
eulerRot.x = 0
eulerRot.y = random.uniform(-pi, pi)
eulerRot.z = 0 #random.uniform(-pi, pi)
quatRot = toRW.euler_to_quaternion(eulerRot.x, eulerRot.y, eulerRot.z)
data['Objects'].append({
'Name': 'Rock',
'Id': id,
'Position':
{
"x": 253,
"y": 0,
"z": 250 + random.uniform(-0.5,0.5)
},
"Rotation":
{
"x": quatRot[0], #random.uniform(-1,1),
"y": quatRot[1], #random.uniform(-1,1),
"z": quatRot[2], #random.uniform(-1,1),
"w": quatRot[3] #random.uniform(-1,1)
},
"Scale":
{
"x": 0.25,
"y": 0.25,
"z": 0.25
}
})
if marker:
id = (NumberOfRocks+1).__str__()
eulerRot = Vector3()
eulerRot.x = 0
eulerRot.y = random.uniform(-pi, pi)
eulerRot.z = 0 #random.uniform(-pi, pi)
quatRot = toRW.euler_to_quaternion(eulerRot.x, eulerRot.y, eulerRot.z)
data['Objects'].append({
'Name': 'Rock',
'Id': id,
'Position':
{
# "x": 250 + random.uniform(XMin,XMax),
# "x": 258 + random.uniform(-1, 8),
"x": 250 + random.uniform(6, 12),
"y": 0,
"z": 250
},
"Rotation":
{
"x": quatRot[0], #random.uniform(-1,1),
"y": quatRot[1], #random.uniform(-1,1),
"z": quatRot[2], #random.uniform(-1,1),
"w": quatRot[3] #random.uniform(-1,1)
},
"Scale":
{
"x": 0.1,
"y": 0.1,
"z": 0.1
}
})
filepath = determinePathToConfig()+"/InitialScene.json"
with open(filepath,'w') as outfile:
json.dump(data, outfile)
if __name__ == '__main__':
for j in range(3):
scenario = recorderEpisode(j)
| 35.14786
| 94
| 0.469169
| 8,029
| 0.888852
| 0
| 0
| 0
| 0
| 0
| 0
| 1,771
| 0.196059
|
b8cbd20dcd81315e2ca364311bd80d356f50ed2d
| 587
|
py
|
Python
|
gimmemotifs/commands/logo.py
|
littleblackfish/gimmemotifs
|
913a6e5db378493155273e2c0f8ab0dc11ab219e
|
[
"MIT"
] | null | null | null |
gimmemotifs/commands/logo.py
|
littleblackfish/gimmemotifs
|
913a6e5db378493155273e2c0f8ab0dc11ab219e
|
[
"MIT"
] | null | null | null |
gimmemotifs/commands/logo.py
|
littleblackfish/gimmemotifs
|
913a6e5db378493155273e2c0f8ab0dc11ab219e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# Copyright (c) 2009-2016 Simon van Heeringen <simon.vanheeringen@gmail.com>
#
# This module is free software. You can redistribute it and/or modify it under
# the terms of the MIT License, see the file COPYING included with this
# distribution.
from gimmemotifs.motif import pwmfile_to_motifs
def logo(args):
inputfile = args.pwmfile
motifs = pwmfile_to_motifs(inputfile)
if args.ids:
ids = args.ids.split(",")
motifs = [m for m in motifs if m.id in ids]
for motif in motifs:
motif.to_img(motif.id, fmt="PNG")
| 29.35
| 79
| 0.688245
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 272
| 0.463373
|