hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3d06b4ab911ac4dcdf7380ebba49a7d481ddeffe | 1,813 | py | Python | python/paddle/framework/__init__.py | Ray2020BD/Paddle | 994087188816575d456c2f9c2a6c90aad83b4e71 | [
"Apache-2.0"
] | 2 | 2020-12-09T16:09:59.000Z | 2020-12-09T16:10:02.000Z | python/paddle/framework/__init__.py | Ray2020BD/Paddle | 994087188816575d456c2f9c2a6c90aad83b4e71 | [
"Apache-2.0"
] | null | null | null | python/paddle/framework/__init__.py | Ray2020BD/Paddle | 994087188816575d456c2f9c2a6c90aad83b4e71 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: import framework api under this directory
__all__ = [
'create_parameter', 'ParamAttr', 'CPUPlace', 'CUDAPlace', 'CUDAPinnedPlace',
'get_default_dtype', 'set_default_dtype'
]
__all__ += [
'grad', 'LayerList', 'load', 'save', 'to_variable', 'no_grad',
'DataParallel'
]
from . import random
from .random import seed
from .framework import get_default_dtype
from .framework import set_default_dtype
from ..fluid.framework import ComplexVariable #DEFINE_ALIAS
from ..fluid.param_attr import ParamAttr #DEFINE_ALIAS
# from ..fluid.layers.tensor import create_global_var #DEFINE_ALIAS
from ..fluid.layers.tensor import create_parameter #DEFINE_ALIAS
from ..fluid.core import CPUPlace #DEFINE_ALIAS
from ..fluid.core import CUDAPlace #DEFINE_ALIAS
from ..fluid.core import CUDAPinnedPlace #DEFINE_ALIAS
from ..fluid.core import VarBase #DEFINE_ALIAS
from paddle.fluid import core #DEFINE_ALIAS
from ..fluid.dygraph.base import no_grad #DEFINE_ALIAS
from ..fluid.dygraph.base import to_variable #DEFINE_ALIAS
from ..fluid.dygraph.base import grad #DEFINE_ALIAS
from .io import save
from .io import load
from ..fluid.dygraph.parallel import DataParallel #DEFINE_ALIAS
| 38.574468 | 80 | 0.772201 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: import framework api under this directory
__all__ = [
'create_parameter', 'ParamAttr', 'CPUPlace', 'CUDAPlace', 'CUDAPinnedPlace',
'get_default_dtype', 'set_default_dtype'
]
__all__ += [
'grad', 'LayerList', 'load', 'save', 'to_variable', 'no_grad',
'DataParallel'
]
from . import random
from .random import seed
from .framework import get_default_dtype
from .framework import set_default_dtype
from ..fluid.framework import ComplexVariable #DEFINE_ALIAS
from ..fluid.param_attr import ParamAttr #DEFINE_ALIAS
# from ..fluid.layers.tensor import create_global_var #DEFINE_ALIAS
from ..fluid.layers.tensor import create_parameter #DEFINE_ALIAS
from ..fluid.core import CPUPlace #DEFINE_ALIAS
from ..fluid.core import CUDAPlace #DEFINE_ALIAS
from ..fluid.core import CUDAPinnedPlace #DEFINE_ALIAS
from ..fluid.core import VarBase #DEFINE_ALIAS
from paddle.fluid import core #DEFINE_ALIAS
from ..fluid.dygraph.base import no_grad #DEFINE_ALIAS
from ..fluid.dygraph.base import to_variable #DEFINE_ALIAS
from ..fluid.dygraph.base import grad #DEFINE_ALIAS
from .io import save
from .io import load
from ..fluid.dygraph.parallel import DataParallel #DEFINE_ALIAS
| 0 | 0 | 0 |
5c7b170332c963d2c748af8230525d7348d1ce37 | 1,851 | py | Python | Toolkits/Discovery/meta/searx/searx/engines/translated.py | roscopecoltran/SniperKit-Core | 4600dffe1cddff438b948b6c22f586d052971e04 | [
"MIT"
] | 4 | 2018-09-07T15:35:24.000Z | 2019-03-27T09:48:12.000Z | Toolkits/Discovery/meta/searx/searx/engines/translated.py | roscopecoltran/SniperKit-Core | 4600dffe1cddff438b948b6c22f586d052971e04 | [
"MIT"
] | 371 | 2020-03-04T21:51:56.000Z | 2022-03-31T20:59:11.000Z | searx/engines/translated.py | xu1991/open | 5398dab4ba669b3ca87d9fe26eb24431c45f153e | [
"CC0-1.0"
] | 3 | 2019-06-18T19:57:17.000Z | 2020-11-06T03:55:08.000Z | """
MyMemory Translated
@website https://mymemory.translated.net/
@provide-api yes (https://mymemory.translated.net/doc/spec.php)
@using-api yes
@results JSON
@stable yes
@parse url, title, content
"""
import re
from sys import version_info
from searx.utils import is_valid_lang
if version_info[0] == 3:
unicode = str
categories = ['general']
url = u'http://api.mymemory.translated.net/get?q={query}&langpair={from_lang}|{to_lang}{key}'
web_url = u'http://mymemory.translated.net/en/{from_lang}/{to_lang}/{query}'
weight = 100
parser_re = re.compile(u'.*?([a-z]+)-([a-z]+) (.{2,})$', re.I)
api_key = ''
| 26.826087 | 93 | 0.590492 | """
MyMemory Translated
@website https://mymemory.translated.net/
@provide-api yes (https://mymemory.translated.net/doc/spec.php)
@using-api yes
@results JSON
@stable yes
@parse url, title, content
"""
import re
from sys import version_info
from searx.utils import is_valid_lang
if version_info[0] == 3:
unicode = str
categories = ['general']
url = u'http://api.mymemory.translated.net/get?q={query}&langpair={from_lang}|{to_lang}{key}'
web_url = u'http://mymemory.translated.net/en/{from_lang}/{to_lang}/{query}'
weight = 100
parser_re = re.compile(u'.*?([a-z]+)-([a-z]+) (.{2,})$', re.I)
api_key = ''
def request(query, params):
m = parser_re.match(unicode(query, 'utf8'))
if not m:
return params
from_lang, to_lang, query = m.groups()
from_lang = is_valid_lang(from_lang)
to_lang = is_valid_lang(to_lang)
if not from_lang or not to_lang:
return params
if api_key:
key_form = '&key=' + api_key
else:
key_form = ''
params['url'] = url.format(from_lang=from_lang[1],
to_lang=to_lang[1],
query=query,
key=key_form)
params['query'] = query
params['from_lang'] = from_lang
params['to_lang'] = to_lang
return params
def response(resp):
results = []
results.append({
'url': web_url.format(
from_lang=resp.search_params['from_lang'][2],
to_lang=resp.search_params['to_lang'][2],
query=resp.search_params['query']),
'title': '[{0}-{1}] {2}'.format(
resp.search_params['from_lang'][1],
resp.search_params['to_lang'][1],
resp.search_params['query']),
'content': resp.json()['responseData']['translatedText']
})
return results
| 1,164 | 0 | 46 |
a3ad10fbcabe9ffec09099e0d17108ec8407f036 | 10,854 | py | Python | trainers/saver.py | DorTsur/dine_ndt | 3a07064b1d37da12c36e679a9b1de6a32ae42689 | [
"MIT"
] | 1 | 2022-03-29T03:09:52.000Z | 2022-03-29T03:09:52.000Z | trainers/saver.py | DorTsur/dine_ndt | 3a07064b1d37da12c36e679a9b1de6a32ae42689 | [
"MIT"
] | null | null | null | trainers/saver.py | DorTsur/dine_ndt | 3a07064b1d37da12c36e679a9b1de6a32ae42689 | [
"MIT"
] | null | null | null | import numpy as np
np.set_printoptions(formatter={'float': lambda x: "{0:0.3f}".format(x)})
import matplotlib.pyplot as plt
import tensorflow as tf
import logging
import os
from scipy.io import savemat
from scipy.stats import norm
logger = logging.getLogger("logger")
###################################
####### HISTOGRAM OBJECTS #########
###################################
| 31.46087 | 129 | 0.550028 | import numpy as np
np.set_printoptions(formatter={'float': lambda x: "{0:0.3f}".format(x)})
import matplotlib.pyplot as plt
import tensorflow as tf
import logging
import os
from scipy.io import savemat
from scipy.stats import norm
logger = logging.getLogger("logger")
class Visualizer(object):
def __init__(self, config):
self.config = config
self.save_path = os.path.join(config.tensor_board_dir, 'visual')
def reset_state(self):
pass
def update_state(self, *args):
pass
def visualize(self):
pass
def save_raw_data(self):
pass
class DVVisualizer(Visualizer):
def __init__(self, config):
# Class for saving DV potentials values
super().__init__(config)
self.t_y_list = list()
self.t_xy_list = list()
def reset_state(self):
self.t_y_list = list()
self.t_xy_list = list()
def update_state(self, data):
self.t_y_list.append(data['t_y'])
self.t_xy_list.append(data['t_xy'])
def convert_lists_to_np(self):
t_y = [y[0] for y in self.t_y_list]
t_y = tf.concat(t_y, axis=1)
t_y_ = [y[1] for y in self.t_y_list]
t_y_ = tf.concat(t_y_, axis=1)
t_xy = [xy[0] for xy in self.t_xy_list]
t_xy = tf.concat(t_xy, axis=1)
t_xy_ = [xy[1] for xy in self.t_xy_list]
t_xy_ = tf.concat(t_xy_, axis=1)
return t_y, t_y_, t_xy, t_xy_
def save(self, name=None, save_dv=False):
save_dict = {}
if save_dv:
t_y, t_y_, t_xy, t_xy_ = self.convert_lists_to_np()
save_dict["t_y"] = t_y.numpy()
save_dict["t_xy"] = t_xy.numpy()
file_name = name if name is not None else 'raw_data_latest.mat'
savemat(os.path.join(self.config.tensor_board_dir, 'visual',
file_name), save_dict)
def histogram(self, x):
return
class DINE_NDT_vis(DVVisualizer):
def __init__(self, config):
super().__init__(config)
self.x_list = list()
self.y_list = list()
def reset_state(self):
super().reset_state()
self.x_list = list()
self.y_list = list()
def update_state(self, data):
super().update_state(data)
self.x_list.append(data['x'])
self.y_list.append(['y'])
def convert_lists_to_np(self):
t_y, t_y_, t_xy, t_xy_ = super().convert_lists_to_np()
x_n = [x for x in self.x_list]
x_np = tf.concat(x_n, axis=1)
y_n = [y for y in self.y_list]
y_np = tf.concat(y_n, axis=1)
return t_y, t_y_, t_xy, t_xy_, x_np, y_np
def save(self, models=None, path=None, name=None, save_dv=False):
save_dict = {}
if save_dv:
t_y, t_y_, t_xy, t_xy_, x, y = self.convert_lists_to_np()
save_dict["t_y"] = t_y.numpy()
save_dict["t_xy"] = t_xy.numpy()
save_dict["x"] = x.numpy()
save_dict["y"] = y.numpy()
file_name = name if name is not None else 'raw_data_latest.mat'
savemat(os.path.join(self.config.tensor_board_dir, 'visual',
file_name), save_dict)
self.save_models(models, path)
def save_models(self, models, path):
def save_recursively(models, path):
for model in models:
if isinstance(models[model], dict):
save_recursively(models[model], path)
else:
path = os.path.join(path, model, model)
# if model == 'ndt':
# models[model].save(filepath=os.path.join(path, "enc_model"))
# # models[model].save_weights(filepath=os.path.join(path, model + "weights_h5.h5"),save_format="h5")
models[model].save_weights(filepath=os.path.join(path, model, "weights_tf", "weights"),
save_format="tf")
save_recursively(models, path)
class MINE_vis(Visualizer):
def __init__(self, config):
# Class for saving DV potentials values
super().__init__(config)
self.t_list = list()
self.config = config
def reset_state(self):
self.t_list = list()
def update_state(self, data):
self.t_list.append(data['t'])
def convert_lists_to_np(self):
t = [y[0] for y in self.t_list]
t = tf.concat(t, axis=1)
t_ = [y[1] for y in self.t_list]
t_ = tf.concat(t_, axis=1)
return t, t_
def save(self, models=None, path=None, name=None, save_dv=False):
save_dict = {}
if save_dv:
t, t_ = self.convert_lists_to_np()
save_dict["t"] = t.numpy()
save_dict["t_"] = t_.numpy()
file_name = name if name is not None else 'raw_data_latest.mat'
savemat(os.path.join(self.config.tensor_board_dir, 'visual',
file_name), save_dict)
self.save_models(models, path)
def histogram(self, x):
return
def save_models(self, models, path):
def save_recursively(models, path):
for model in models:
if isinstance(models[model], dict):
save_recursively(models[model], path)
else:
path = os.path.join(path, model, model)
# if model == 'ndt':
# models[model].save(filepath=os.path.join(path, "enc_model"))
# # models[model].save_weights(filepath=os.path.join(path, model + "weights_h5.h5"),save_format="h5")
models[model].save_weights(filepath=os.path.join(path, model, "weights_tf", "weights"),
save_format="tf")
save_recursively(models, path)
def evaluate_ndt(self, ndt_model, path, epoch):
self.evaluate_ndt_struct(ndt_model, path, epoch)
self.evaluate_ndt_hist(ndt_model, path, epoch)
def evaluate_ndt_struct(self, ndt_model, path, epoch):
# obtain model input and output (for uniform p)
p = tf.expand_dims(tf.linspace(start=0., stop=1., num=self.config.batch_size), axis=-1)
x = ndt_model(p, training=False)
theo = norm.ppf(p)
# convert to numpy
xn, pn = x[0].numpy(), p.numpy()
data = {"p": pn,
"x": xn,
"theo": theo}
savemat(os.path.join(path, f"NDT_struct_data_epoch_{epoch}"), data)
# plot the mapping:
plt.figure()
plt.plot(pn, xn, 'bo', label="NDT")
plt.plot(pn, theo, label="Theoretical")
plt.legend()
plt.title("NDT mapping vs. Gaussian inverse")
plt.savefig(os.path.join(path, f"NDT structure for epoch {epoch}"))
# save p and x
def evaluate_ndt_hist(self, ndt_model,path, epoch):
ul = []
xl = []
for i in range(self.config.repeat_uniform):
ul.append(tf.random.uniform(shape=[self.config.batch_size, self.config.x_dim]))
xl.append(ndt_model(ul[i]))
u = tf.concat(ul, axis=0)
x = tf.concat(xl, axis=0)
un, xn = tf.squeeze(x[0]).numpy(), tf.squeeze(u).numpy()
data = {"u": un,
"x": xn}
savemat(os.path.join(path, f"NDT_hist_data_epoch_{epoch}"), data)
fig, axs = plt.subplots(2)
axs[0].set_title('Input Histogram')
axs[0].hist(un, bins=35)
axs[1].set_title('Output Histogram')
axs[1].hist(xn, bins=35)
plt.savefig(os.path.join(path, f"NDT mapping for epoch {epoch}"))
fig, axs = plt.subplots(2)
axs[0].set_title('Input Histogram')
axs[0].hist(un, density=True, bins=35)
axs[1].set_title('Output Histogram')
axs[1].hist(xn, density=True, bins=35)
plt.savefig(os.path.join(path, f"NDT mapping for epoch {epoch} with density"))
###################################
####### HISTOGRAM OBJECTS #########
###################################
class Figure(object):
def __init__(self, name='fig', **kwargs):
self.name = name
self.fig_data = list()
def reset_states(self):
self.fig_data = list()
def set_data(self, *args, **kwargs):
pass
def aggregate_data(self):
if isinstance(self.fig_data, list):
return np.concatenate(self.fig_data, axis=0)
else:
return self.fig_data
def update_state(self, data):
self.fig_data.append(data)
def plot(self, save=None):
pass
class Histogram2d(Figure):
def __init__(self, name, **kwargs):
super(Histogram2d, self).__init__(name, **kwargs)
def aggregate_data(self):
try:
data = np.concatenate(self.fig_data, axis=1)
except ValueError:
return None
return data
# return np.reshape(data, [-1, data.shape[-1]]) # - ziv's line
def plot(self, save=None, save_path="./visual", save_name="fig.png"):
data = self.aggregate_data()
if data is None:
logger.info("no data aggregated at visualizer")
return
plt.figure()
data_hist = np.reshape(data, newshape=[np.prod(data.shape[:-1]),data.shape[-1]])
d = plt.hist2d(data_hist[100:, 0], data_hist[100:, 1], bins=50)
plt.title(self.name)
bins = d[0]
edges = d[1]
if save:
plt.savefig(os.path.join(save_path, save_name))
savemat(os.path.join(save_path, self.name + '_raw_data.mat'),
{"bins": bins,
"edges": edges,
"data": data})
plt.close()
class Histogram(Figure):
def __init__(self, name, **kwargs):
super(Histogram, self).__init__(name, **kwargs)
def aggregate_data(self):
try:
data = np.concatenate(self.fig_data, axis=0)
except ValueError:
return None
return np.reshape(data, [-1, data.shape[-1]])
def plot(self, save=None, save_path="./", save_name="fig.png"):
data = self.aggregate_data()
if data is None:
logger.info("no data aggregated at visualizer")
return
plt.figure()
d = plt.hist(data[100:], bins=np.linspace(np.min(data), np.max(data), 200))
plt.title(self.name)
bins = d[0]
edges = d[1]
if save:
plt.savefig(os.path.join(save_path, save_name))
savemat(os.path.join(save_path, self.name + '_raw_data.mat'),
{"bins": bins,
"edges": edges})
plt.close()
| 9,193 | 40 | 1,232 |
01663bf4078a66ec2427bcc5e0c3d8ce2b545a84 | 627 | py | Python | src/igvfd/tests/fixtures/schemas/phenotype_term.py | IGVF-DACC/igvfd | 432d711a3a245182fc797eef21580158c1a713e6 | [
"MIT"
] | 1 | 2022-01-20T23:10:34.000Z | 2022-01-20T23:10:34.000Z | src/igvfd/tests/fixtures/schemas/phenotype_term.py | IGVF-DACC/igvfd | 432d711a3a245182fc797eef21580158c1a713e6 | [
"MIT"
] | 8 | 2022-02-24T00:34:29.000Z | 2022-03-30T01:02:47.000Z | src/igvfd/tests/fixtures/schemas/phenotype_term.py | IGVF-DACC/igvfd | 432d711a3a245182fc797eef21580158c1a713e6 | [
"MIT"
] | null | null | null | import pytest
@pytest.fixture
@pytest.fixture
@pytest.fixture
| 22.392857 | 83 | 0.653907 | import pytest
@pytest.fixture
def phenotype_term_alzheimers(testapp):
item = {
'term_id': 'DOID:10652',
'term_name': 'Alzheimer\'s disease'
}
return testapp.post_json('/phenotype_term', item, status=201).json['@graph'][0]
@pytest.fixture
def phenotype_term_myocardial_infarction(testapp):
item = {
'term_id': 'HP:0001658',
'term_name': 'Myocardial infarction'
}
return testapp.post_json('/phenotype_term', item, status=201).json['@graph'][0]
@pytest.fixture
def phenotype_term_incomplete(testapp):
item = {
'term_id': 'DOID:10652'
}
return item
| 493 | 0 | 66 |
8c42c5b8b22563c12121d93d45b7c9495d732cda | 2,013 | py | Python | thundra/wrappers/aws_lambda/lambda_application_info_provider.py | sturmianseq/thundra-agent-python | 4cee02d790eb7b8e4dea4e2e9dcd1f67533b1c56 | [
"Apache-2.0"
] | 22 | 2018-03-05T20:02:46.000Z | 2021-04-09T12:00:18.000Z | thundra/wrappers/aws_lambda/lambda_application_info_provider.py | sturmianseq/thundra-agent-python | 4cee02d790eb7b8e4dea4e2e9dcd1f67533b1c56 | [
"Apache-2.0"
] | 13 | 2018-03-26T07:57:57.000Z | 2021-06-29T14:22:52.000Z | thundra/wrappers/aws_lambda/lambda_application_info_provider.py | thundra-io/thundra-agent-python | 448e18c17d8730c381b2e2a773782cf80c5a7cfb | [
"Apache-2.0"
] | 3 | 2021-08-07T14:19:23.000Z | 2021-12-08T15:35:40.000Z | import uuid
from thundra import constants, utils
from thundra.application.application_info_provider import ApplicationInfoProvider
| 41.9375 | 112 | 0.718828 | import uuid
from thundra import constants, utils
from thundra.application.application_info_provider import ApplicationInfoProvider
class LambdaApplicationInfoProvider(ApplicationInfoProvider):
def __init__(self):
log_stream_name = utils.get_env_variable(constants.AWS_LAMBDA_LOG_STREAM_NAME)
function_version = utils.get_env_variable(constants.AWS_LAMBDA_FUNCTION_VERSION)
function_name = utils.get_env_variable(constants.AWS_LAMBDA_FUNCTION_NAME)
region = utils.get_env_variable(constants.AWS_REGION, default='')
application_instance_id = str(uuid.uuid4())
if log_stream_name and len(log_stream_name.split(']')) >= 2:
application_instance_id = log_stream_name.split(']')[1]
self.application_info = {
'applicationId': '',
'applicationInstanceId': application_instance_id,
'applicationName': function_name,
'applicationVersion': function_version,
'applicationRegion': region
}
def get_application_info(self):
return self.application_info
def get_application_tags(self):
return self.application_info.get('applicationTags', {}).copy()
@staticmethod
def get_application_id(context, application_name=None):
arn = getattr(context, constants.CONTEXT_INVOKED_FUNCTION_ARN, '')
region = utils.get_aws_region_from_arn(arn)
if not region:
region = 'local'
account_no = 'sam_local' if utils.sam_local_debugging() else utils.get_aws_account_no(arn)
function_name = application_name if application_name else utils.get_aws_function_name(arn)
application_id_template = 'aws:lambda:{region}:{account_no}:{function_name}'
return application_id_template.format(region=region, account_no=account_no, function_name=function_name)
def update(self, opts):
filtered_opts = {k: v for k, v in opts.items() if v is not None}
self.application_info.update(filtered_opts)
| 1,664 | 193 | 23 |
64bb519d49f3762d4ca019caf415129bc32a8609 | 1,539 | py | Python | src/spaceone/inventory/info/resource_group_info.py | xellos00/inventory | e2831f2f09b5b72623f735a186264987d41954ab | [
"Apache-2.0"
] | 9 | 2020-06-04T23:01:38.000Z | 2021-06-03T03:38:59.000Z | src/spaceone/inventory/info/resource_group_info.py | xellos00/inventory | e2831f2f09b5b72623f735a186264987d41954ab | [
"Apache-2.0"
] | 10 | 2020-08-20T01:34:30.000Z | 2022-03-14T04:59:48.000Z | src/spaceone/inventory/info/resource_group_info.py | xellos00/inventory | e2831f2f09b5b72623f735a186264987d41954ab | [
"Apache-2.0"
] | 9 | 2020-06-08T22:03:02.000Z | 2021-12-06T06:12:30.000Z | import functools
import logging
from spaceone.api.inventory.v1 import resource_group_pb2
from spaceone.core.pygrpc.message_type import *
from spaceone.core import utils
from spaceone.inventory.model.resource_group_model import ResourceGroup, Resource
__all__ = ['ResourceGroupInfo', 'ResourceGroupsInfo']
_LOGGER = logging.getLogger(__name__)
| 34.2 | 113 | 0.665367 | import functools
import logging
from spaceone.api.inventory.v1 import resource_group_pb2
from spaceone.core.pygrpc.message_type import *
from spaceone.core import utils
from spaceone.inventory.model.resource_group_model import ResourceGroup, Resource
__all__ = ['ResourceGroupInfo', 'ResourceGroupsInfo']
_LOGGER = logging.getLogger(__name__)
def ResourceInfo(resource: Resource):
info = {
'resource_type': resource.resource_type,
'filter': change_list_value_type(resource.filter),
'keyword': resource.keyword
}
return resource_group_pb2.Resource(**info)
def ResourceGroupInfo(rg_vo: ResourceGroup, minimal=False):
info = {
'resource_group_id': rg_vo.resource_group_id,
'name': rg_vo.name,
'project_id': rg_vo.project_id
}
if not minimal:
info.update({
'resources': list(map(ResourceInfo, rg_vo.resources)),
'options': change_struct_type(rg_vo.options),
'tags': change_struct_type(utils.tags_to_dict(rg_vo.tags)),
'domain_id': rg_vo.domain_id,
'created_at': utils.datetime_to_iso8601(rg_vo.created_at),
})
return resource_group_pb2.ResourceGroupInfo(**info)
def ResourceGroupsInfo(rg_vos, total_count, **kwargs):
return resource_group_pb2.ResourceGroupsInfo(results=list(map(functools.partial(ResourceGroupInfo, **kwargs),
rg_vos)),
total_count=total_count)
| 1,122 | 0 | 69 |
6cce9677a9e96b4643f4b6f0a83e95d7bfdc56ba | 6,825 | py | Python | train.py | WANNA959/TrafficPrediction | 33d350f2d2ccbb9481d453d204e8c087aa493887 | [
"MIT"
] | null | null | null | train.py | WANNA959/TrafficPrediction | 33d350f2d2ccbb9481d453d204e8c087aa493887 | [
"MIT"
] | null | null | null | train.py | WANNA959/TrafficPrediction | 33d350f2d2ccbb9481d453d204e8c087aa493887 | [
"MIT"
] | 1 | 2021-01-06T18:28:01.000Z | 2021-01-06T18:28:01.000Z | """
Train the NN model.
"""
import sys
import _thread
import keras
import warnings
import argparse
import numpy as np
import pandas as pd
from data.data import process_data
from model import model
from keras.models import Model
from keras.callbacks import EarlyStopping
from tkinter import ttk, filedialog, dialog
import os
import tkinter
import tkinter.messagebox
warnings.filterwarnings("ignore")
file_path1=""
file_path2=""
modelName = None
def train_model(model, X_train, y_train, name, config,lag,callBack):
"""train
train a single model.
# Arguments
model: Model, NN model to train.
X_train: ndarray(number, lags), Input data for train.
y_train: ndarray(number, ), result data for train.
name: String, name of model.
config: Dict, parameter for train.
"""
model.compile(loss="mse", optimizer="rmsprop", metrics=['mape'])
# early = EarlyStopping(monitor='val_loss', patience=30, verbose=0, mode='auto')
hist = model.fit(
X_train, y_train,
batch_size=config["batch"],
epochs=config["epochs"],
validation_split=0.05,
callbacks=[callBack]
)
model.save('model/' + name + '-' + str(lag) + '.h5')
def train_allDense_model(model, X_train, y_train, name, config,lag,callBack):
"""train
train a single model.
# Arguments
model: Model, NN model to train.
X_train: ndarray(number, lags), Input data for train.
y_train: ndarray(number, ), result data for train.
name: String, name of model.
config: Dict, parameter for train.
"""
model.compile(loss="mse", optimizer="rmsprop",metrics=['mape'])
hist = model.fit(
X_train, y_train,
batch_size=config["batch"],
epochs=config["epochs"],
callbacks = [callBack]
)
model.save('model/' + name + '-' + str(lag) + '.h5')
lagIntStart = 0
lagIntEnd = 0
def open_file_train():
'''
打开文件
:return:
'''
file_path1 = filedialog.askopenfilename(title=u'选择训练集', initialdir=(os.path.expanduser('./data/100211data/100211_all_train.csv')))
fileStr1.set(file_path1)
print('打开文件:', file_path1)
window = tkinter.Tk()
window.title('入口') # 标题
window.geometry('600x400') # 窗口尺寸
if __name__ == '__main__':
runUI()
# main(sys.argv)
| 32.655502 | 134 | 0.651868 | """
Train the NN model.
"""
import sys
import _thread
import keras
import warnings
import argparse
import numpy as np
import pandas as pd
from data.data import process_data
from model import model
from keras.models import Model
from keras.callbacks import EarlyStopping
from tkinter import ttk, filedialog, dialog
import os
import tkinter
import tkinter.messagebox
warnings.filterwarnings("ignore")
file_path1=""
file_path2=""
modelName = None
def train_model(model, X_train, y_train, name, config,lag,callBack):
"""train
train a single model.
# Arguments
model: Model, NN model to train.
X_train: ndarray(number, lags), Input data for train.
y_train: ndarray(number, ), result data for train.
name: String, name of model.
config: Dict, parameter for train.
"""
model.compile(loss="mse", optimizer="rmsprop", metrics=['mape'])
# early = EarlyStopping(monitor='val_loss', patience=30, verbose=0, mode='auto')
hist = model.fit(
X_train, y_train,
batch_size=config["batch"],
epochs=config["epochs"],
validation_split=0.05,
callbacks=[callBack]
)
model.save('model/' + name + '-' + str(lag) + '.h5')
def train_allDense_model(model, X_train, y_train, name, config,lag,callBack):
"""train
train a single model.
# Arguments
model: Model, NN model to train.
X_train: ndarray(number, lags), Input data for train.
y_train: ndarray(number, ), result data for train.
name: String, name of model.
config: Dict, parameter for train.
"""
model.compile(loss="mse", optimizer="rmsprop",metrics=['mape'])
hist = model.fit(
X_train, y_train,
batch_size=config["batch"],
epochs=config["epochs"],
callbacks = [callBack]
)
model.save('model/' + name + '-' + str(lag) + '.h5')
def main(argv):
config = {"batch": 256, "epochs": 600}
file1 = 'data/100211data/100211_all_train.csv'
file2 = 'data/100211data/100211_all_test.csv'
#得到不同lag的lstm model
# for i in range(16,18,2):
# lag = i
# X_train, y_train, _, _, _ = process_data(file1, file2, lag)
# X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
# m = model.get_lstm([lag, 64, 64, 1])
# train_model(m, X_train, y_train, "lstm", config,lag)
#得到全连接神经网络训练model(lag=12
lag=12
X_train, y_train, _, _, _ = process_data(file1, file2, lag)
m = model.get_AllDense([lag, 64, 64, 1])
train_allDense_model(m, X_train, y_train , "AllDense" , config , lag)
lagIntStart = 0
lagIntEnd = 0
def start_train():
config = {"batch": 256, "epochs": 10}
file_path1=fileStr1.get()
file_path2='data/100211data/100211_all_test.csv'
if file_path1=="" or file_path2=="":
tkinter.messagebox.askokcancel(title='请选择文件~', message='请选择两个文件')
return
print("start_train")
callBack = keras.callbacks.LambdaCallback(
on_epoch_end=lambda epoch, logs: print("epoch",epoch)
)
needLstm =modelName.get()=="lstm" or modelName.get()=="all"
needAllDense = modelName.get()=="allDense" or modelName.get()=="all"
# _thread.start_new_thread(show_progress,())
if needLstm:
for i in range(lagIntStart.get(),lagIntEnd.get(),2):
lag = i
X_train, y_train, _, _, _ = process_data(file_path1, file_path2, lag)
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
m = model.get_lstm([lag, 64, 64, 1])
train_model(m, X_train, y_train, "lstm", config,lag,callBack)
if needAllDense:
for i in range(lagIntStart.get(), lagIntEnd.get(), 2):
lag=i
X_train, y_train, _, _, _ = process_data(file_path1, file_path2, lag)
m = model.get_AllDense([lag, 64, 64, 1])
train_allDense_model(m, X_train, y_train, "AllDense", config, lag,callBack)
tkinter.messagebox.askokcancel(title='ok~', message='训练完成,结果保存在model文件夹下')
return
def open_file_train():
'''
打开文件
:return:
'''
file_path1 = filedialog.askopenfilename(title=u'选择训练集', initialdir=(os.path.expanduser('./data/100211data/100211_all_train.csv')))
fileStr1.set(file_path1)
print('打开文件:', file_path1)
window = tkinter.Tk()
window.title('入口') # 标题
window.geometry('600x400') # 窗口尺寸
def runUI():
global lagIntStart
lagIntStart = tkinter.IntVar()
lagIntStart.set(4)
global lagIntEnd
lagIntEnd = tkinter.IntVar()
lagIntEnd.set(12)
global modelName
modelName = tkinter.StringVar()
frmL1 =tkinter.Frame( width=200, height=100,bg='blue')
# frmL2 =tkinter.Frame(width=200,height=100, bg='white')
frmM1 =tkinter.Frame(width=200, height=10, bg='white')
# frmM2 = tkinter.Frame(width=2000, height=10,bg='yellow')
frmL1.grid(row=0, column=0,padx=1,pady=1)
# frmL2.grid(row=1, column=0)
frmM1.grid(row=0,column=1)
# frmM2.grid(row=1,column=1)
#lag按钮
frm22 =tkinter.Frame()
frm21 =tkinter.Frame()
frm31 =tkinter.Frame()
frm32 =tkinter.Frame()
frm22.grid(row=2,column=1)
frm21.grid(row=2,column=0)
frm31.grid(row=3,column=0)
frm32.grid(row=3,column=1)
tkinter.Label(frm21,text='输入lag start').pack()
tkinter.Entry(frm22, textvariable=lagIntStart,width=40).pack()
tkinter.Label(frm31,text='输入lag end').pack()
tkinter.Entry(frm32, textvariable=lagIntEnd,width=40).pack()
#选择模型下拉框
frm41 = tkinter.Frame()
frm42 = tkinter.Frame()
frm41.grid(row=4,column=0)
frm42.grid(row=4,column=1,)
tkinter.Label(frm41, text='训练方法',).pack()
dropBopx = ttk.Combobox(frm42,width=30,textvariable=modelName,state='readonly')
dropBopx ['value'] = ('all', 'lstm', 'allDense')
dropBopx.pack()
dropBopx.current(0)
# 开始训练按钮
frm51=tkinter.Frame(width=30,height=10)
frm51.grid(row=5,column=0,columnspan=2)
frm52=tkinter.Frame(width=30,height=10)
frm52.grid(row=5,column=1)
# frmLB.grid(row=2,pady=4, column=0)
# frmRT.grid(row=0, column=1, rowspan=3, padx=2, pady=3)
global fileStr1
fileStr1= tkinter.StringVar()
global fileStr2
fileStr2=tkinter.StringVar()
tkinter.Entry(frmM1, textvariable=fileStr1,width=40).pack()
# tkinter.Entry(frmM2, textvariable=fileStr2,width=40).pack()
tkinter.Button(frmL1, text='打开训练集', width=18,bg='orange', command=open_file_train).pack()
# tkinter.Button(frmL2, text='打开测试集', width=18,bg='orange', command=open_file_test).pack()
tkinter.Button(frm51, text='开始训练', width=20, height=2,bg='orange', command=start_train).pack()
# tkinter.Button(frm31,text='开始训练',width=30,height =2,bg='orange',command = open_file_test).pack()
window.mainloop()
if __name__ == '__main__':
runUI()
# main(sys.argv)
| 4,602 | 0 | 68 |
7b3254d16854448bd39eb2e66be0671a02da0391 | 181 | py | Python | alg/ganite/ganite/utils/random.py | DaraOrange/mlforhealthlabpub | 9db861c850c94c6cf1f8bf75ed2ad8dcbd648aa3 | [
"BSD-3-Clause"
] | 171 | 2021-02-12T10:23:19.000Z | 2022-03-29T01:58:52.000Z | alg/ganite/ganite/utils/random.py | DaraOrange/mlforhealthlabpub | 9db861c850c94c6cf1f8bf75ed2ad8dcbd648aa3 | [
"BSD-3-Clause"
] | 4 | 2021-06-01T08:18:33.000Z | 2022-02-20T13:37:30.000Z | alg/ganite/ganite/utils/random.py | DaraOrange/mlforhealthlabpub | 9db861c850c94c6cf1f8bf75ed2ad8dcbd648aa3 | [
"BSD-3-Clause"
] | 93 | 2021-02-10T03:21:59.000Z | 2022-03-30T19:10:37.000Z | # stdlib
import random
# third party
import numpy as np
import torch
| 13.923077 | 42 | 0.712707 | # stdlib
import random
# third party
import numpy as np
import torch
def enable_reproducible_results() -> None:
    """Seed Python's, NumPy's and PyTorch's RNGs with 0 for deterministic runs."""
    seed = 0
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
| 87 | 0 | 23 |
bb5c3f9685695758521bbfa22ed771d05a96abe2 | 925 | py | Python | tests/test_utils.py | awoods/fcrepo-import-export-verify | 40126e69542d039bd52f338ec24bb6975c4939dd | [
"Apache-2.0"
] | 5 | 2017-12-05T17:57:00.000Z | 2018-08-22T18:11:24.000Z | tests/test_utils.py | awoods/fcrepo-import-export-verify | 40126e69542d039bd52f338ec24bb6975c4939dd | [
"Apache-2.0"
] | 31 | 2016-11-09T14:52:16.000Z | 2017-09-07T15:10:53.000Z | tests/test_utils.py | awoods/fcrepo-import-export-verify | 40126e69542d039bd52f338ec24bb6975c4939dd | [
"Apache-2.0"
] | 4 | 2016-11-08T18:54:47.000Z | 2017-05-17T12:47:15.000Z | from fcrepo_verify.utils import get_data_dir, replace_strings_in_file
from fcrepo_verify.constants import BAG_DATA_DIR
import os
import tempfile
config = MockConfig({})
config.dir = "/tmp"
| 23.717949 | 69 | 0.687568 | from fcrepo_verify.utils import get_data_dir, replace_strings_in_file
from fcrepo_verify.constants import BAG_DATA_DIR
import os
import tempfile
class MockConfig(dict):
    """Minimal config stand-in: a dict subclass, which (unlike plain dict)
    also accepts arbitrary attribute assignment (e.g. ``config.dir``,
    ``config.bag``) as used by the tests below."""
    pass
config = MockConfig({})
config.dir = "/tmp"
def test_get_data_dir():
    """With bag mode off, the data dir is the configured dir unchanged."""
    config.bag = False
    data_dir = get_data_dir(config)
    assert data_dir == "/tmp"
def test_get_data_dir_for_bag():
    """With bag mode on, the BagIt data subdirectory is appended."""
    config.bag = True
    data_dir = get_data_dir(config)
    assert data_dir == "/tmp" + BAG_DATA_DIR
def test_replace_strings_in_file():
    """replace_strings_in_file() substitutes every occurrence into a new file."""
    # mkstemp() returns an *open* OS-level file descriptor plus the path;
    # the original kept only the path and leaked the fd — close it here.
    fd, filename = tempfile.mkstemp()
    os.close(fd)
    with open(filename, "w") as source:
        source.write("test y\n")
        source.write("test z")
    newfile = replace_strings_in_file(filename, "test", "confirm")
    os.remove(filename)
    with open(newfile, "r") as dest:
        assert dest.readline().startswith("confirm y")
        assert dest.readline() == "confirm z"
    os.remove(newfile)
| 627 | 11 | 92 |
3bccb8316bd9339d883fc7862dee4e461aeb65f2 | 7,813 | py | Python | phi/vis/_dash/board.py | marc-gav/PhiFlow | b6186fd1503d040997b52d49aa18cd875267c27e | [
"MIT"
] | 556 | 2019-12-04T16:48:54.000Z | 2022-03-31T16:31:59.000Z | phi/vis/_dash/board.py | marc-gav/PhiFlow | b6186fd1503d040997b52d49aa18cd875267c27e | [
"MIT"
] | 26 | 2019-12-12T16:54:06.000Z | 2022-03-14T19:44:36.000Z | phi/vis/_dash/board.py | marc-gav/PhiFlow | b6186fd1503d040997b52d49aa18cd875267c27e | [
"MIT"
] | 93 | 2019-12-08T14:38:27.000Z | 2022-03-29T16:38:37.000Z | import logging
import os
import traceback
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Output, Input
from dash.exceptions import PreventUpdate
from plotly import graph_objects
from .dash_app import DashApp
from ._plotly_plots import plot_scalars
from .player_controls import STEP_COUNT, parse_step_count
from .._vis_base import display_name, gui_interrupt, benchmark
BENCHMARK_BUTTON = Input('benchmark-button', 'n_clicks')
PROFILE_BUTTON = Input('profile-button', 'n_clicks')
NO_BENCHMARK_TEXT = '*No benchmarks available.*'
NO_PROFILES_TEXT = '*No profiles available.*'
REFRESH_GRAPHS_BUTTON = Input('refresh-graphs-button', 'n_clicks')
TENSORBOARD_STATUS = Input('tensorboard-status', 'children')
| 42.461957 | 230 | 0.644823 | import logging
import os
import traceback
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Output, Input
from dash.exceptions import PreventUpdate
from plotly import graph_objects
from .dash_app import DashApp
from ._plotly_plots import plot_scalars
from .player_controls import STEP_COUNT, parse_step_count
from .._vis_base import display_name, gui_interrupt, benchmark
BENCHMARK_BUTTON = Input('benchmark-button', 'n_clicks')
PROFILE_BUTTON = Input('profile-button', 'n_clicks')
NO_BENCHMARK_TEXT = '*No benchmarks available.*'
NO_PROFILES_TEXT = '*No profiles available.*'
REFRESH_GRAPHS_BUTTON = Input('refresh-graphs-button', 'n_clicks')
def build_benchmark(dashapp):
    """Build the 'Benchmark' board section and register its Dash callback.

    :param dashapp: DashApp hosting the model and the Dash instance.
    :return: html.Div layout for the section.
    """
    assert isinstance(dashapp, DashApp)
    layout = html.Div([
        dcc.Markdown('## Benchmark'),
        html.Div([
            html.Button('Benchmark', id=BENCHMARK_BUTTON.component_id)
        ]),
        dcc.Markdown(children=NO_BENCHMARK_TEXT, id='run-statistics'),
    ])
    @dashapp.dash.callback(Output('run-statistics', 'children'), [BENCHMARK_BUTTON], [STEP_COUNT])
    def run_benchmark(n_clicks, step_count):
        """Run the model for the requested steps and report stats as Markdown."""
        requested_steps = parse_step_count(step_count, dashapp, default=1)
        if n_clicks is None:
            return NO_BENCHMARK_TEXT
        if dashapp.play_status:
            return '*Pause the vis before starting a benchmark.*'
        # --- Run benchmark ---
        completed_steps, time_elapsed = benchmark(dashapp.model, requested_steps)
        output = '### Benchmark Results\n'
        # Bug fix: the original shadowed the requested count with the completed
        # count and then tested `step_count != step_count` (always False), so a
        # prematurely stopped benchmark was never reported.
        if completed_steps != requested_steps:
            output += 'The benchmark was stopped prematurely. \n'
        output += 'Finished %d steps in %.03f seconds.' % (completed_steps, time_elapsed)
        output += ' \n*Average*: %.04f seconds per step, %.02f steps per second.' % (
            time_elapsed / completed_steps, completed_steps / time_elapsed)
        return output
    return layout
def build_tf_profiler(dashapp):
    """Build the 'TensorFlow Profiler' board section and register its callback.

    :param dashapp: DashApp hosting the model and the Dash instance.
    :return: html.Div layout for the section.
    """
    assert isinstance(dashapp, DashApp)
    layout = html.Div([
        dcc.Markdown('## TensorFlow Profiler'),
        html.Div([
            html.Button('Profile', id=PROFILE_BUTTON.component_id)
        ]),
        dcc.Markdown(children=NO_PROFILES_TEXT, id='profile-output'),
    ])
    @dashapp.dash.callback(Output('profile-output', 'children'), [PROFILE_BUTTON], [STEP_COUNT])
    def run_profile(n_clicks, step_count):
        """Profile a benchmarked run and report stats plus the timeline file.

        Renamed from `run_benchmark` (copy-paste from build_benchmark).
        """
        requested_steps = parse_step_count(step_count, dashapp, default=1)
        if n_clicks is None:
            return NO_PROFILES_TEXT
        if dashapp.play_status:
            return '*Pause the vis before starting a profiled run.*'
        # --- Profile ---
        with dashapp.model.session.profiler() as profiler:
            timeline_file = profiler.timeline_file
            completed_steps, time_elapsed = dashapp.model.benchmark(requested_steps)
        output = '### Profiling Results\n'
        # Bug fix: as in build_benchmark, the original compared step_count to
        # itself (always False), so a prematurely stopped run was never reported.
        if completed_steps != requested_steps:
            output += 'The profiling run was stopped prematurely. \n'
        output += 'Finished %d steps in %.03f seconds.' % (completed_steps, time_elapsed)
        output += ' \n*Average*: %.04f seconds per step, %.02f steps per second.' % (time_elapsed / completed_steps, completed_steps / time_elapsed)
        output += ' \nProfile saved. Open \n*chrome://tracing/* \n and load file \n *%s*' % timeline_file
        return output
    return layout
TENSORBOARD_STATUS = Input('tensorboard-status', 'children')
def build_tensorboard_launcher(dashapp):
    """Build the TensorBoard widget: a link once launched, else a launch button.

    :param dashapp: DashApp whose config stores the TensorBoard URL/port.
    :return: html.Div layout for the widget.
    """
    assert isinstance(dashapp, DashApp)
    layout = html.Div([
        html.Div(id='tensorboard-div'),
        # One-shot interval (max_intervals=1): fires once shortly after page
        # load to trigger the first render of tensorboard-div.
        dcc.Interval(id='tensorboard-init', interval=200, max_intervals=1),
        html.Div(style={'display': 'none'}, id=TENSORBOARD_STATUS.component_id),
    ])
    @dashapp.dash.callback(Output('tensorboard-div', 'children'), [Input('tensorboard-init', 'n_intervals'), TENSORBOARD_STATUS])
    def update(*_):
        """Show a link if a TensorBoard URL is known, else the launch button."""
        if 'tensorboard_url' in dashapp.config:
            return html.A('TensorBoard', href=dashapp.config['tensorboard_url'], id='tensorboard-href')
        else:
            return html.Button('Launch TensorBoard', id='launch-tensorboard')
    @dashapp.dash.callback(Output(TENSORBOARD_STATUS.component_id, TENSORBOARD_STATUS.component_property), [Input('launch-tensorboard', 'n_clicks')])
    def launch_tensorboard(clicks):
        """On first click, start TensorBoard and record its URL in the config."""
        if clicks:
            logging.info('Launching TensorBoard...')
            logdir = dashapp.model.session.summary_directory
            # Imported lazily so TF is only required when actually launching.
            import phi.tf._profiling as profiling
            url = profiling.launch_tensorboard(logdir, port=dashapp.config.get('tensorboard_port', None))
            dashapp.config['tensorboard_url'] = url
            logging.info('TensorBoard launched, URL: %s' % url)
            return 'running'
        else:
            # No click yet (e.g. Dash's initial callback invocation) — leave
            # the status div untouched.
            raise PreventUpdate()
    return layout
def build_system_controls(dashapp):
    """Build the 'Application' section with exit/kill buttons and callbacks.

    :param dashapp: DashApp providing exit_interrupt().
    :return: html.Div layout for the section.
    """
    assert isinstance(dashapp, DashApp)
    layout = html.Div([
        dcc.Markdown('## Application'),
        html.Button('Exit / Interrupt', id='exit-button'),
        html.Button('Kill', id='kill-button'),
    ])
    @dashapp.dash.callback(Output('kill-button', 'style'), [Input('kill-button', 'n_clicks')])
    def kill_application(n):
        """Hard-kill the whole process immediately.

        Renamed: the original defined two callbacks both named
        `exit_application`, shadowing this one.
        """
        if n:
            logging.info('DashGUI: Killing process...')
            os._exit(0)  # exit() does not work from Dash threads
    @dashapp.dash.callback(Output('exit-button', 'style'), [Input('exit-button', 'n_clicks')])
    def exit_application(n):
        """Request a graceful interrupt of the app."""
        if n:
            dashapp.exit_interrupt()
    return layout
def build_graph_view(dashapp):
    """Build the 'Graphs' section: scalar-curve plot with refresh/smooth/log controls.

    :param dashapp: DashApp whose model exposes curve_names / get_curve().
    :return: html.Div layout for the section.
    """
    layout = html.Div(style={'width': '90%', 'margin-left': 'auto', 'margin-right': 'auto'}, children=[
        html.H2("Graphs"),
        html.Div([
            html.Button('Refresh now', id=REFRESH_GRAPHS_BUTTON.component_id),
            dcc.Checklist(id='auto-refresh-checkbox', options=[{'label': 'Auto-refresh', 'value': 'refresh'}], value=['refresh'], style={'display': 'inline-block'}),
            dcc.Checklist(id='subplots-checkbox', options=[{'label': 'Subplots', 'value': 'subplots'}], value=[], style={'display': 'inline-block'}),
            html.Div(style={'display': 'inline-block', 'width': '200px'}, children=[
                dcc.Slider(id='smooth-slider', min=1, max=10, marks={1: 'Off', 5: '25 steps', 10: '100'}),
            ]),
            dcc.Checklist(id='log-graph-checkbox', options=[{'label': 'Log(x)', 'value': 'x'}, {'label': 'Log(y)', 'value': 'y'}], value=[], style={'display': 'inline-block'}),
        ]),
        # Periodic (5 s) refresh trigger; toggled by enable_auto_refresh below.
        dcc.Interval(id='graph-update', interval=5000, disabled=False),
        html.Div(id='graph-figure-container', style={'height': 600, 'width': '100%'}, children=[
            dcc.Graph(figure={}, id='board-graph', style={'height': '100%'})
        ])
    ])
    @dashapp.dash.callback(Output('board-graph', 'figure'), [Input('subplots-checkbox', 'value'), Input('smooth-slider', 'value'), Input('log-graph-checkbox', 'value'), REFRESH_GRAPHS_BUTTON, Input('graph-update', 'n_intervals')])
    def update_figure(subplots, smooth, log_scale, _n1, _n2):
        """Re-plot all model curves; on failure, show the error as the figure title."""
        curves = [dashapp.model.get_curve(n) for n in dashapp.model.curve_names]
        labels = [display_name(n) for n in dashapp.model.curve_names]
        try:
            # Slider value (1-10, may be None) is squared into the smoothing window.
            figure = plot_scalars(curves, labels, subplots=bool(subplots), log_scale=log_scale, smooth=(smooth or 1) ** 2)
            return figure
        except BaseException as err:
            traceback.print_exc()
            fig = graph_objects.Figure()
            fig.update_layout(title_text=repr(err))
            return fig
    @dashapp.dash.callback(Output('graph-update', 'disabled'), [Input('auto-refresh-checkbox', 'value')])
    def enable_auto_refresh(selected):
        """Disable the refresh Interval when the auto-refresh checkbox is cleared."""
        if selected:
            return False
        else:
            return True
    return layout
0d2754b160457013efda332fcce1032bc1173de1 | 3,570 | py | Python | bootstrap.py | Jselvam/Unique-files-generator | d4a5a58d89e3fd121b75e2b928c3aea81ed123b3 | [
"MIT"
] | null | null | null | bootstrap.py | Jselvam/Unique-files-generator | d4a5a58d89e3fd121b75e2b928c3aea81ed123b3 | [
"MIT"
] | null | null | null | bootstrap.py | Jselvam/Unique-files-generator | d4a5a58d89e3fd121b75e2b928c3aea81ed123b3 | [
"MIT"
] | null | null | null | from flask import Flask
from filesbuilder import FilesBuilder
from inputoutput import IO
#writeExcelFile
if __name__ == '__main__':
App = Bootstrap()
App.run() | 42 | 109 | 0.606723 | from flask import Flask
from filesbuilder import FilesBuilder
from inputoutput import IO
class Bootstrap:
    """Coordinates one file-generation request: cleans old output, then builds
    each requested file type via FilesBuilder and writes it via IO, appending
    progress/error lines to response['logs_messages']."""
    def __init__(self, request, response):
        """Store the request/response dicts and prepare a clean output folder.

        request  -- dict describing the requested 'file_type' and 'file_size'.
        response -- dict whose 'logs_messages' list receives progress logs.
        """
        self.request = request
        self.response = response
        self.response['logs_messages'].append('File builder initiated...\n')
        self.file_builder = FilesBuilder(request)
        self.io_object = IO(self.request)
        self.response['logs_messages'].append('Old files cleaned\n')
        self.io_object.cleanUpOldData()
        self.io_object.createFolders()
    def getPath(self, subfolder=False):
        """Return the output folder path (optionally the subfolder) from the IO helper."""
        return self.io_object.getFolderPath(subfolder)
    def _log(self, message):
        """Append one progress/log line to the response."""
        self.response['logs_messages'].append(message)
    def _content_length(self):
        """Content length to generate: small when a 'KB'-sized file was requested."""
        return 500 if 'KB' in self.request['file_size'] else 1000000
    def run(self, path):
        """Generate every requested file type into `path`, logging progress and errors."""
        if 'text' in self.request['file_type']:
            self._log('Building text file...\n')
            file_name, content = self.file_builder.buildTextFile(length=self._content_length())
            try:
                self.io_object.writeTextFile(file_name=file_name, path=path, content=content)
                self._log('Success!: Text file created\n')
            except Exception:
                self._log('Error: while writing text file\n')
        if 'pdf' in self.request['file_type']:
            # Bug fix: this branch previously logged 'Building text file...'.
            self._log('Building pdf file...\n')
            file_name, content = self.file_builder.buildPdfFile(length=self._content_length())
            try:
                self.io_object.writePdfFile(file_name=file_name, path=path, content=content)
                self._log('Success!: PDF file created\n')
            except Exception:
                self._log('Error: while writing pdf file\n')
        if 'xlsx' in self.request['file_type']:
            # Bug fix: this branch previously logged the text/PDF messages
            # (copy-paste), claiming a PDF was created when writing Excel.
            self._log('Building excel file...\n')
            file_name, content = self.file_builder.buildXlsFile(length=self._content_length())
            try:
                self.io_object.writeExcelFile(file_name=file_name, path=path, content=content)
                self._log('Success!: Excel file created\n')
            except Exception:
                self._log('Error: while writing excel file\n')
        if 'image' in self.request['file_type']:
            self._log('Building image files ..\n')
            width, height = 1920, 1080
            if 'KB' in self.request['file_size']:
                width, height = 400, 400
            png_file_name, jpg_file_name = self.file_builder.buildImageFile()
            try:
                self.io_object.writeImageFile(file_name=png_file_name, width=width, height=height, path=path)
                self._log('Success!: PNG file created.\n')
            except Exception:
                self._log('Error: while creating PNG\n')
            try:
                self.io_object.writeImageFile(file_name=jpg_file_name, width=width, height=height, path=path)
                self._log('Success! JPG file created\n')
            except Exception:
                self._log('Error: while creating JPG\n')
#writeExcelFile
if __name__ == '__main__':
App = Bootstrap()
App.run() | 3,300 | -5 | 104 |
3ec370bafd4b644cefa70981a0e9399f121e7a3a | 6,874 | py | Python | tests/test_emfetch.py | axonchisel/ax_metrics | a2db75c9ef9a9752997ccb112e8db68c1c8584a0 | [
"MIT"
] | 10 | 2016-08-26T18:57:28.000Z | 2021-09-19T19:21:16.000Z | tests/test_emfetch.py | axonchisel/ax_metrics | a2db75c9ef9a9752997ccb112e8db68c1c8584a0 | [
"MIT"
] | 1 | 2015-01-08T19:54:54.000Z | 2015-01-09T01:24:17.000Z | tests/test_emfetch.py | axonchisel/ax_metrics | a2db75c9ef9a9752997ccb112e8db68c1c8584a0 | [
"MIT"
] | 3 | 2015-01-08T23:32:58.000Z | 2016-09-23T02:38:26.000Z | """
Ax_Metrics - Test io.emfetch package
------------------------------------------------------------------------------
Author: Dan Kamins <dos at axonchisel dot net>
Copyright (c) 2014 Dan Kamins, AxonChisel.net
"""
# ----------------------------------------------------------------------------
import pytest
import axonchisel.metrics.foundation.chrono.timerange as timerange
from axonchisel.metrics.io.emfetch.interface import EMFetcher
from axonchisel.metrics.io.emfetch.base import EMFetcherBase
import axonchisel.metrics.io.emfetch.plugins.emf_random as emf_random
from axonchisel.metrics.io.emfetch.tmrange_time_t import TimeRange_time_t
# ----------------------------------------------------------------------------
class TestEMFetcher(object):
"""
Test general EMFetcher.
"""
#
# Setup / Teardown
#
#
# Tests
#
| 39.056818 | 94 | 0.610852 | """
Ax_Metrics - Test io.emfetch package
------------------------------------------------------------------------------
Author: Dan Kamins <dos at axonchisel dot net>
Copyright (c) 2014 Dan Kamins, AxonChisel.net
"""
# ----------------------------------------------------------------------------
import pytest
import axonchisel.metrics.foundation.chrono.timerange as timerange
from axonchisel.metrics.io.emfetch.interface import EMFetcher
from axonchisel.metrics.io.emfetch.base import EMFetcherBase
import axonchisel.metrics.io.emfetch.plugins.emf_random as emf_random
from axonchisel.metrics.io.emfetch.tmrange_time_t import TimeRange_time_t
# ----------------------------------------------------------------------------
class TestEMFetcher(object):
    """
    Test general EMFetcher.
    """
    #
    # Setup / Teardown
    #
    def setup_method(self, method):
        # Sample external-info dict consumed by the extinfo/format tests below.
        self.extinfo = {'a': 65, 'b': "LilB", 'special': { 'q': 34, 'z': 35 } }
    #
    # Tests
    #
    def test_base_not_impl(self, mdefs, tmranges):
        """Abstract EMFetcher refuses instantiation; each plugin_* hook raises
        NotImplementedError when reached via a subclass that skips __init__."""
        with pytest.raises(NotImplementedError):
            absbase = EMFetcher(mdefs[1])
        class FakeBase(EMFetcher):
            def __init__(self, mdef, extinfo=None):
                pass
        absbase = FakeBase(mdefs[1])
        with pytest.raises(NotImplementedError):
            absbase.plugin_create()
        with pytest.raises(NotImplementedError):
            absbase.plugin_destroy()
        with pytest.raises(NotImplementedError):
            absbase.plugin_fetch(tmranges[1])
    def test_bad_metricdef(self, mdefs):
        """Constructor rejects non-MetricDef args and an empty emfetch_id."""
        with pytest.raises(TypeError):
            emf_random.EMFetcher_random('Not MetricDef')
        mdefs[1].emfetch_id = ''
        with pytest.raises(ValueError):
            emf_random.EMFetcher_random(mdefs[1])
    def test_bad_timerange(self, mdefs):
        """fetch() rejects non-TimeRange args and a default-constructed TimeRange."""
        emf = emf_random.EMFetcher_random(mdefs[1])
        with pytest.raises(TypeError):
            emf.fetch('Not TimeRange')
        tmrange3 = timerange.TimeRange()
        with pytest.raises(ValueError):
            emf.fetch(tmrange3)
    def test_random(self, mdefs, tmranges):
        """Random fetcher yields values in [0, 100]; the 'round' option yields ints."""
        emf = emf_random.EMFetcher_random(mdefs[1])
        emf.plugin_create()
        for x in range(100):
            dpoint1 = emf.fetch(tmranges[1])
            assert (0 <= dpoint1.value <= 100)
        emf.configure(options={'random': {'round': True}})
        assert isinstance(emf.fetch(tmranges[1]).value, (int, long))
        emf.plugin_destroy()
    def test_bad_datapoint(self, mdefs, tmranges):
        """fetch() raises TypeError when a plugin returns a non-DataPoint value."""
        class EMFetcher_bad_datapoint(EMFetcherBase):
            def plugin_create(self): pass
            def plugin_destroy(self): pass
            def plugin_fetch(self, tmrange): return 'Not DataPoint'
        emf = EMFetcher_bad_datapoint(mdefs[1])
        with pytest.raises(TypeError):
            emf.fetch(tmranges[1])
    def test_plugin_option(self, mdefs):
        """plugin_option() resolves (dotted) option keys; default suppresses KeyError."""
        emf = emf_random.EMFetcher_random(mdefs[1])
        assert emf.plugin_option('foo') == 123
        assert emf.plugin_option('bar.zig') == "Zoom"
        with pytest.raises(KeyError):
            emf.plugin_option('BOGUS')
        assert emf.plugin_option('BOGUS', default="D") == "D"
    def test_plugin_extinfo(self, mdefs):
        """plugin_extinfo() resolves extinfo keys; default suppresses KeyError."""
        emf = emf_random.EMFetcher_random(mdefs[1], extinfo=self.extinfo)
        assert emf.plugin_extinfo('a') == 65
        assert emf.plugin_extinfo('b') == "LilB"
        with pytest.raises(KeyError):
            emf.plugin_extinfo('BOGUS')
        assert emf.plugin_extinfo('BOGUS', default="D") == "D"
    def test_format_str(self, mdefs, tmranges):
        """_format_str() expands {mdef.*} and {tmrange.*} format parameters."""
        emf = emf_random.EMFetcher_random(mdefs[1])
        emf.fetch(tmranges[2]) # (causes plugin to set its self._tmrange)
        assert emf._format_str("plain") == "plain"
        assert emf._format_str("My {mdef.table} here") == "My tblname here"
        assert emf._format_str("{tmrange.inc_begin:%Y-%m-%d}") == "2014-04-14"
        assert emf._format_str("{tmrange.exc_end:%Y-%m-%d %H:%M:%S}") == "2014-04-15 16:42:45"
        assert emf._format_str("{tmrange.exc_end:%s}") == "1397605365"
    def test_format_param_str(self, mdefs, tmranges):
        """Same expansions as test_format_str, with extinfo supplied to the fetcher."""
        emf = emf_random.EMFetcher_random(mdefs[1], extinfo=self.extinfo)
        emf.fetch(tmranges[2]) # (causes plugin to set its self._tmrange)
        assert emf._format_str("plain") == "plain"
        assert emf._format_str("My {mdef.table} here") == "My tblname here"
        assert emf._format_str("{tmrange.inc_begin:%Y-%m-%d}") == "2014-04-14"
        assert emf._format_str("{tmrange.exc_end:%Y-%m-%d %H:%M:%S}") == "2014-04-15 16:42:45"
        assert emf._format_str("{tmrange.exc_end:%s}") == "1397605365"
    def test_format_param_literal(self, mdefs, tmranges):
        """Doubled braces are emitted literally (escaped), not expanded."""
        emf = emf_random.EMFetcher_random(mdefs[1], extinfo=self.extinfo)
        emf.fetch(tmranges[2]) # (causes plugin to set its self._tmrange)
        assert emf._format_str('plain') == "plain"
        assert emf._format_str("My {{mdef.table}} here") == "My {mdef.table} here"
    def test_format_param_extinfo(self, mdefs, tmranges):
        """{extinfo[...]} lookups expand, including nested keys; unknown keys raise."""
        emf = emf_random.EMFetcher_random(mdefs[1], extinfo=self.extinfo)
        emf.fetch(tmranges[2]) # (causes plugin to set its self._tmrange)
        assert emf._format_str('{extinfo[a]}') == "65"
        assert emf._format_str('{extinfo[special][q]}') == "34"
        with pytest.raises(KeyError):
            emf._format_str('{extinfo[special][BOGUS]}')
    def test_format_param_bad(self, mdefs, tmranges):
        """Non-string templates raise TypeError; unknown params raise KeyError."""
        emf = emf_random.EMFetcher_random(mdefs[1], extinfo=self.extinfo)
        emf.fetch(tmranges[2]) # (causes plugin to set its self._tmrange)
        with pytest.raises(TypeError):
            emf._format_str(12345)
        with pytest.raises(TypeError):
            emf._format_str(None)
        with pytest.raises(KeyError):
            emf._format_str('{BOGUS}')
    def test_format_params(self, mdefs, tmranges):
        """A dict of templates formats value-by-value to the expected expansions."""
        emf = emf_random.EMFetcher_random(mdefs[1], extinfo=self.extinfo)
        emf.fetch(tmranges[2]) # (causes plugin to set its self._tmrange)
        params_spec = {
            'plain': "plainval",
            'mdef': "My {mdef.table} here",
            'lit': "{{mdef.table}}",
            'ext': '{extinfo[special][q]}',
        }
        params = dict()
        for k, v in params_spec.iteritems():
            params[k] = emf._format_str(v)
        assert params['plain'] == "plainval"
        assert params['mdef'] == "My tblname here"
        assert params['lit'] == "{mdef.table}"
        assert params['ext'] == "34"
    def test_tmrange_time_t(self, mdefs, tmranges):
        """TimeRange_time_t exposes the range bounds as Unix time_t integers."""
        tmrange = TimeRange_time_t(tmranges[2])
        assert tmrange.inc_begin_time_t == 1397518965
        assert tmrange.exc_begin_time_t == 1397518964
        assert tmrange.inc_end_time_t == 1397605364
        assert tmrange.exc_end_time_t == 1397605365
| 5,596 | 0 | 405 |
6b2eda2f899d72071839a00f76cb956aa9624cbe | 269 | py | Python | ex28.py | FernandaMakiHirose/programas-jupyter | 40ebfc820fefceb14293715104641ef184acfff4 | [
"MIT"
] | null | null | null | ex28.py | FernandaMakiHirose/programas-jupyter | 40ebfc820fefceb14293715104641ef184acfff4 | [
"MIT"
] | null | null | null | ex28.py | FernandaMakiHirose/programas-jupyter | 40ebfc820fefceb14293715104641ef184acfff4 | [
"MIT"
] | 1 | 2021-06-09T22:33:11.000Z | 2021-06-09T22:33:11.000Z | # Importando apenas uma funcionalidade da biblioteca - peça a raiz quadrada de um número e arredonde ele para cima.
from math import sqrt, ceil
n = float(input('Digite um número para ver a sua raiz quadrada: '))
print('A raiz quadrada de {} é {}' .format(n, sqrt(n)))
| 44.833333 | 115 | 0.728625 | # Importando apenas uma funcionalidade da biblioteca - peça a raiz quadrada de um número e arredonde ele para cima.
from math import sqrt, ceil
# Prompt for a number and print its square root.
# NOTE(review): the exercise header says the result should be rounded up, but
# `ceil` is imported and never used — confirm whether ceil() was intended here.
n = float(input('Digite um número para ver a sua raiz quadrada: '))
print('A raiz quadrada de {} é {}' .format(n, sqrt(n)))
| 0 | 0 | 0 |
4edcff1838d61f9aaf382ed08fc5c25c6cbf4f93 | 1,154 | py | Python | tests/test_string_operations.py | nathfroech/flake8_pylint_comparison | 1f6d5063b3055687e880b5b436346ce4b5ae95da | [
"MIT"
] | null | null | null | tests/test_string_operations.py | nathfroech/flake8_pylint_comparison | 1f6d5063b3055687e880b5b436346ce4b5ae95da | [
"MIT"
] | null | null | null | tests/test_string_operations.py | nathfroech/flake8_pylint_comparison | 1f6d5063b3055687e880b5b436346ce4b5ae95da | [
"MIT"
] | null | null | null | import pytest
from hamcrest import assert_that, contains_inanyorder
from tests.testing_utils import param_wrapper, run_flake8, run_pylint
strip_params = [
# code, flake8 rules, pylint rules
param_wrapper("s.strip('abca')", {'B005'}, set(), id='strip_string'),
param_wrapper(r"s.strip(r'\n\t ')", {'B005'}, set(), id='strip_raw_string'),
param_wrapper("s.lstrip('abca')", {'B005'}, set(), id='lstrip_string'),
param_wrapper(r"s.lstrip(r'\n\t ')", {'B005'}, set(), id='lstrip_raw_string'),
param_wrapper("s.rstrip('abca')", {'B005'}, set(), id='rstrip_string'),
param_wrapper(r"s.rstrip(r'\n\t ')", {'B005'}, set(), id='rstrip_raw_string'),
]
@pytest.mark.parametrize('content,flake8_errors,pylint_errors', strip_params)
| 44.384615 | 103 | 0.730503 | import pytest
from hamcrest import assert_that, contains_inanyorder
from tests.testing_utils import param_wrapper, run_flake8, run_pylint
strip_params = [
# code, flake8 rules, pylint rules
param_wrapper("s.strip('abca')", {'B005'}, set(), id='strip_string'),
param_wrapper(r"s.strip(r'\n\t ')", {'B005'}, set(), id='strip_raw_string'),
param_wrapper("s.lstrip('abca')", {'B005'}, set(), id='lstrip_string'),
param_wrapper(r"s.lstrip(r'\n\t ')", {'B005'}, set(), id='lstrip_raw_string'),
param_wrapper("s.rstrip('abca')", {'B005'}, set(), id='rstrip_string'),
param_wrapper(r"s.rstrip(r'\n\t ')", {'B005'}, set(), id='rstrip_raw_string'),
]
@pytest.mark.parametrize('content,flake8_errors,pylint_errors', strip_params)
def test_detects_strip_with_multicharacter_string(content, flake8_errors, pylint_errors, file_to_lint):
    """Each linter reports exactly the expected rule codes for the snippet."""
    file_to_lint.write_text(content)
    # Check flake8 first, then pylint, against their respective expectations.
    for run_linter, expected_errors in ((run_flake8, flake8_errors), (run_pylint, pylint_errors)):
        reported = run_linter(file_to_lint)
        assert_that(set(reported), contains_inanyorder(*expected_errors))
| 381 | 0 | 22 |
640dcfd22c816c6be0a699e8918f42f1b1b5baa7 | 1,096 | py | Python | tapis_cli/commands/taccapis/v2/profiles/show.py | bpachev/tapis-cli | c3128fb5b63ef74e06b737bbd95ef28fb24f0d32 | [
"BSD-3-Clause"
] | 8 | 2020-10-18T22:48:23.000Z | 2022-01-10T09:16:14.000Z | tapis_cli/commands/taccapis/v2/profiles/show.py | bpachev/tapis-cli | c3128fb5b63ef74e06b737bbd95ef28fb24f0d32 | [
"BSD-3-Clause"
] | 238 | 2019-09-04T14:37:54.000Z | 2020-04-15T16:24:24.000Z | tapis_cli/commands/taccapis/v2/profiles/show.py | bpachev/tapis-cli | c3128fb5b63ef74e06b737bbd95ef28fb24f0d32 | [
"BSD-3-Clause"
] | 5 | 2019-09-20T04:23:49.000Z | 2020-01-16T17:45:14.000Z | from tapis_cli.display import Verbosity
from tapis_cli.clients.services.mixins import Username
from . import API_NAME, SERVICE_VERSION
from .models import Profile
from .formatters import ProfilesFormatOne
__all__ = ['ProfilesShow']
| 31.314286 | 64 | 0.708029 | from tapis_cli.display import Verbosity
from tapis_cli.clients.services.mixins import Username
from . import API_NAME, SERVICE_VERSION
from .models import Profile
from .formatters import ProfilesFormatOne
__all__ = ['ProfilesShow']
class ProfilesShow(ProfilesFormatOne, Username):
    """CLI command that fetches one user's profile and renders it as a
    (headers, values) record."""
    # Help text shown by the CLI for this command.
    HELP_STRING = 'Show details for a specific Profile'
    # NOTE(review): 'COMMMAND' is spelled with a triple M — presumably this
    # matches the attribute name the framework looks up; confirm before renaming.
    LEGACY_COMMMAND_STRING = 'profiles-list'
    VERBOSITY = Verbosity.RECORD
    def get_parser(self, prog_name):
        """Extend the base parser with the username argument from the Username mixin."""
        parser = super(ProfilesShow, self).get_parser(prog_name)
        parser = Username.extend_parser(self, parser)
        return parser
    def take_action(self, parsed_args):
        """Fetch the profile for parsed_args.username and return (headers, values)."""
        parsed_args = self.preprocess_args(parsed_args)
        self.requests_client.setup(API_NAME, SERVICE_VERSION)
        headers = self.render_headers(Profile, parsed_args)
        rec = self.tapis_client.profiles.listByUsername(
            username=parsed_args.username)
        data = []
        for key in headers:
            # Missing keys render as None so every header has a value column.
            val = self.render_value(rec.get(key, None))
            data.append(val)
        return (tuple(headers), tuple(data))
| 621 | 217 | 23 |
f29230d1f98fa0a6b8a41d157909690cafaca5d2 | 1,546 | py | Python | tests/apps/aspect_rendering.py | T4rk1n/dazzler | 69c49422dc19c910445ab265b1d3481041de8f43 | [
"MIT"
] | 15 | 2019-12-19T11:57:30.000Z | 2021-11-15T23:34:41.000Z | tests/apps/aspect_rendering.py | T4rk1n/dazzler | 69c49422dc19c910445ab265b1d3481041de8f43 | [
"MIT"
] | 196 | 2019-09-21T15:10:14.000Z | 2022-03-31T11:07:48.000Z | tests/apps/aspect_rendering.py | jbampton/dazzler | 4018f6cbcb55a9f482cb5c5cbf6a06b063c15e21 | [
"MIT"
] | 7 | 2019-10-30T19:38:15.000Z | 2021-12-01T04:54:16.000Z | from dazzler import Dazzler
from dazzler.components import core
from dazzler.system import Page, BindingContext, Trigger
from tests.components import spec_components as spec
app = Dazzler(__name__)
aspect_types = {
'array': {
'value': [1, 2, 3],
'json': True,
},
'bool': {
'value': True,
},
'number': {
'value': 42,
},
'object': {
'value': {'foo': 'bar'},
'json': True,
},
'string': {
'value': 'foo bar',
},
'enum': {
'value': 'News',
},
'union': {
'value': 1,
},
'array_of': {
'value': [6, 7, 8, 9],
'json': True,
},
'shape': {
'value': {'color': '#000', 'fontSize': 777},
'json': True,
},
}
button_ids = ['set-{}'.format(y) for y in aspect_types]
output_ids = ['out-{}'.format(y) for y in aspect_types]
layout = core.Container([
core.Container([core.Button(x, identity=x) for x in button_ids]),
spec.TestComponent('', identity='spec-output', id='spec-output'),
])
page = Page(
'page',
url='/',
layout=layout
)
app.add_page(page)
for button in button_ids:
page.bind(Trigger(button, 'clicks'))(on_click_render_type)
if __name__ == '__main__':
app.start('-v --debug=1 --port=8155'.split())
| 20.891892 | 69 | 0.556921 | from dazzler import Dazzler
from dazzler.components import core
from dazzler.system import Page, BindingContext, Trigger
from tests.components import spec_components as spec
app = Dazzler(__name__)
aspect_types = {
'array': {
'value': [1, 2, 3],
'json': True,
},
'bool': {
'value': True,
},
'number': {
'value': 42,
},
'object': {
'value': {'foo': 'bar'},
'json': True,
},
'string': {
'value': 'foo bar',
},
'enum': {
'value': 'News',
},
'union': {
'value': 1,
},
'array_of': {
'value': [6, 7, 8, 9],
'json': True,
},
'shape': {
'value': {'color': '#000', 'fontSize': 777},
'json': True,
},
}
button_ids = ['set-{}'.format(y) for y in aspect_types]
output_ids = ['out-{}'.format(y) for y in aspect_types]
layout = core.Container([
core.Container([core.Button(x, identity=x) for x in button_ids]),
spec.TestComponent('', identity='spec-output', id='spec-output'),
])
page = Page(
'page',
url='/',
layout=layout
)
app.add_page(page)
async def on_click_render_type(context: BindingContext):
    """When a `set-<type>` button is clicked, push that type's sample value
    onto the matching `<type>_prop` aspect of the spec output component."""
    type_name = context.trigger.identity.replace('set-', '')
    aspect_name = f'{type_name}_prop'
    await context.set_aspect('spec-output', **{aspect_name: aspect_types[type_name]['value']})
for button in button_ids:
page.bind(Trigger(button, 'clicks'))(on_click_render_type)
if __name__ == '__main__':
app.start('-v --debug=1 --port=8155'.split())
| 218 | 0 | 23 |
5005040f66f1a18691ae929607bcca93f50ed8de | 100 | py | Python | graphene_django/forms/types.py | radekwlsk/graphene-django | b552dcac24364d3ef824f865ba419c74605942b2 | [
"MIT"
] | 2 | 2021-06-14T20:01:22.000Z | 2022-01-07T12:56:53.000Z | graphene_django/forms/types.py | radekwlsk/graphene-django | b552dcac24364d3ef824f865ba419c74605942b2 | [
"MIT"
] | 16 | 2019-01-03T15:21:49.000Z | 2020-12-11T15:11:35.000Z | graphene_django/forms/types.py | radekwlsk/graphene-django | b552dcac24364d3ef824f865ba419c74605942b2 | [
"MIT"
] | 2 | 2021-04-12T18:16:00.000Z | 2021-06-26T05:01:18.000Z | import graphene
from ..types import ErrorType # noqa Import ErrorType for backwards compatability
| 25 | 82 | 0.82 | import graphene
from ..types import ErrorType # noqa Import ErrorType for backwards compatability
| 0 | 0 | 0 |
2b879ac647f07fe391553bcd79a30bc6e3c48f35 | 313 | py | Python | Snippets/segment_access.py | derwind/GlyphsScripts | 37934072b02850b2b84654ed312d75834729f78e | [
"Apache-2.0"
] | null | null | null | Snippets/segment_access.py | derwind/GlyphsScripts | 37934072b02850b2b84654ed312d75834729f78e | [
"Apache-2.0"
] | null | null | null | Snippets/segment_access.py | derwind/GlyphsScripts | 37934072b02850b2b84654ed312d75834729f78e | [
"Apache-2.0"
] | null | null | null | #MenuTitle: Access to segments
# -*- coding: utf-8 -*-
from GlyphsApp.plugins import *
g = Glyphs.font.selectedLayers[0].parent
paths = Glyphs.font.selectedLayers[0].paths
for path in paths:
segments = path.segments
for segment in segments:
print type(segment.points[0]), dir(segment.points[0])
| 26.083333 | 61 | 0.709265 | #MenuTitle: Access to segments
# -*- coding: utf-8 -*-
from GlyphsApp.plugins import *
g = Glyphs.font.selectedLayers[0].parent
paths = Glyphs.font.selectedLayers[0].paths
for path in paths:
segments = path.segments
for segment in segments:
print type(segment.points[0]), dir(segment.points[0])
| 0 | 0 | 0 |
7b50dca36fc41d2437703a4e15155b9083cd3728 | 3,280 | py | Python | Backend/grades_lambda.py | klmahesh/PennGrader | 58cd3ccd6dcd85df0e5438ccf8aad6640033100b | [
"MIT"
] | null | null | null | Backend/grades_lambda.py | klmahesh/PennGrader | 58cd3ccd6dcd85df0e5438ccf8aad6640033100b | [
"MIT"
] | null | null | null | Backend/grades_lambda.py | klmahesh/PennGrader | 58cd3ccd6dcd85df0e5438ccf8aad6640033100b | [
"MIT"
] | null | null | null | import sys
sys.path.append('/opt')
import os
import boto3
import json
import dill
import ast
import base64
import shutil
import time
import pandas as pd
from boto3 import resource
from boto3.dynamodb.conditions import Key, Attr
# Dynamo Config
dynamo_resource = resource('dynamodb')
dynamo = boto3.client('dynamodb')
METADATA_TABLE = 'HomeworksMetadata'
TEST_CASES_TABLE = 'HomeworksTestCases'
GRADEBOOK_TABLE = 'Gradebook'
# Return Codes
SUCCESS = 200
ERROR = 400
# Request Types
STUDENT_REQUEST = 'STUDENT_GRADE'
ALL_STUDENTS_REQUEST = 'ALL_STUDENTS_GRADES'
| 31.238095 | 114 | 0.674085 | import sys
sys.path.append('/opt')
import os
import boto3
import json
import dill
import ast
import base64
import shutil
import time
import pandas as pd
from boto3 import resource
from boto3.dynamodb.conditions import Key, Attr
# Dynamo Config
dynamo_resource = resource('dynamodb')
dynamo = boto3.client('dynamodb')
METADATA_TABLE = 'HomeworksMetadata'
TEST_CASES_TABLE = 'HomeworksTestCases'
GRADEBOOK_TABLE = 'Gradebook'
# Return Codes
SUCCESS = 200
ERROR = 400
# Request Types
STUDENT_REQUEST = 'STUDENT_GRADE'
ALL_STUDENTS_REQUEST = 'ALL_STUDENTS_GRADES'
def lambda_handler(event, context):
    """AWS Lambda entry point: return serialized grade data over HTTP.

    The event body (a Python-literal string) must contain 'homework_id',
    'request_type', and either 'secret_key' (ALL_STUDENTS_REQUEST) or
    'student_id' (STUDENT_REQUEST). Any exception is mapped to a 400
    response whose body is the stringified exception. NOTE(review): an
    unrecognized request_type falls through and returns None — presumably
    unreachable from the client, but worth confirming.
    """
    try:
        body = parse_event(event)
        homework_id = body['homework_id']
        print(homework_id)
        deadline, max_daily_submissions, max_score = get_homework_metadata(homework_id)
        if body['request_type'] == ALL_STUDENTS_REQUEST:
            # Instructor request: requires the course secret key.
            validate_secret_key(body['secret_key'])
            all_grades = get_grades(homework_id)
            response = (all_grades, deadline)
            return build_http_response(SUCCESS,serialize(response))
        elif body['request_type'] == STUDENT_REQUEST:
            # Student request: only that student's own submissions.
            student_id = body['student_id']
            grades = get_grades(homework_id, student_id)
            response = (grades, deadline, max_daily_submissions, max_score)
            return build_http_response(SUCCESS,serialize(response))
    except Exception as exception:
        return build_http_response(ERROR, exception)
def parse_event(event):
    """Extract and parse the request payload from an API Gateway event.

    Returns the literal-eval'd 'body' field. A missing body or a body
    that is not a valid Python literal raises a uniform
    ``Exception('Malformed payload.')`` that the handler maps to HTTP 400.
    """
    try:
        return ast.literal_eval(event['body'])
    except (KeyError, TypeError, ValueError, SyntaxError) as err:
        # Narrowed from a bare `except:`, which would also have swallowed
        # KeyboardInterrupt/SystemExit; chain the cause for debugging.
        raise Exception('Malformed payload.') from err
def validate_secret_key(secret_key):
    """Resolve a course secret key to its course_id.

    Any failure (unknown key, malformed item, DynamoDB error) is reported
    uniformly as an incorrect key so the handler returns a single 400.
    """
    try:
        response = dynamo.get_item(TableName='Classes', Key={'secret_key': {'S': secret_key}})
        return response['Item']['course_id']['S']
    except Exception as err:
        # `except Exception` instead of a bare `except:` (which would also
        # catch KeyboardInterrupt/SystemExit); chain the cause for logs.
        raise Exception('Secret key is incorrect.') from err
def get_homework_metadata(homework_id):
    """Fetch (deadline, max_daily_submissions, total_score) for a homework.

    Raises a uniform 'Homework ID was not found.' error when the homework
    record is missing or malformed.
    """
    try:
        response = dynamo.get_item(TableName=METADATA_TABLE, Key={'homework_id': {'S': homework_id}})
        item = response['Item']
        return (item['deadline']['S'],
                item['max_daily_submissions']['S'],
                item['total_score']['S'])
    except Exception as err:
        # `except Exception` instead of a bare `except:`; chain the cause.
        raise Exception('Homework ID was not found.') from err
def get_grades(homework_id, student_id=None):
    """Scan the gradebook table for all grade rows of a homework.

    When *student_id* is given, only that student's submissions are
    returned (matched by prefix on student_submission_id).
    """
    table = dynamo_resource.Table(GRADEBOOK_TABLE)
    filtering_exp = Key('homework_id').eq(homework_id)
    if student_id is not None:
        filtering_exp = filtering_exp & Attr('student_submission_id').begins_with(student_id)
    response = table.scan(FilterExpression=filtering_exp)
    items = response.get('Items', [])
    # Follow DynamoDB pagination to completion. The previous code called
    # len(response.get('LastEvaluatedKey')), which raises TypeError on the
    # (normal) final page where the key is absent, and fetched at most one
    # extra page even when more remained.
    while 'LastEvaluatedKey' in response:
        response = table.scan(FilterExpression=filtering_exp,
                              ExclusiveStartKey=response['LastEvaluatedKey'])
        items = items + response.get('Items', [])
    return items
def serialize(obj):
    """Recursively dill-pickle *obj* and return the pickle as base64 text."""
    pickled = dill.dumps(obj, recurse=True)
    return base64.b64encode(pickled).decode("utf-8")
def build_http_response(status_code, message):
    """Wrap *message* in the response dict shape API Gateway expects."""
    headers = {'Content-Type': 'application/json'}
    return {'statusCode': status_code, 'body': str(message), 'headers': headers}
| 2,498 | 0 | 182 |
4c1e149556d19e3e9a842ee6ad6ef634ab661f77 | 4,900 | py | Python | src/clusterfuzz/_internal/tests/appengine/libs/crash_access_test.py | mspectorgoogle/clusterfuzz | 44df69cbcb94efc212f27758d45d6ff0f36061e5 | [
"Apache-2.0"
] | 5,023 | 2019-02-07T16:57:56.000Z | 2022-03-31T01:08:05.000Z | src/clusterfuzz/_internal/tests/appengine/libs/crash_access_test.py | mspectorgoogle/clusterfuzz | 44df69cbcb94efc212f27758d45d6ff0f36061e5 | [
"Apache-2.0"
] | 2,303 | 2019-02-07T17:36:36.000Z | 2022-03-31T15:44:38.000Z | src/clusterfuzz/_internal/tests/appengine/libs/crash_access_test.py | mspectorgoogle/clusterfuzz | 44df69cbcb94efc212f27758d45d6ff0f36061e5 | [
"Apache-2.0"
] | 564 | 2019-02-07T17:34:24.000Z | 2022-03-26T09:25:44.000Z | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the crash_access library."""
# pylint: disable=protected-access
import unittest
import mock
from clusterfuzz._internal.tests.test_libs import helpers as test_helpers
from libs import crash_access
from libs import helpers
from libs.query import base
class AddScopeTest(unittest.TestCase):
"""Test add_scope."""
def test_forbidden(self):
"""Test when user is forbidden."""
self.mock.has_access.return_value = False
with self.assertRaises(helpers.EarlyExitException):
crash_access.add_scope(self.query, self.params, 'security_flag',
'job_type', 'fuzzer_name')
def test_default_global_privileged(self):
"""Test the default filter for globally privileged users."""
self.mock.has_access.return_value = True
crash_access.add_scope(self.query, self.params, 'security_flag', 'job_type',
'fuzzer_name')
self.assertTrue(self.params['permissions']['everything'])
self.assertTrue(self.params['permissions']['isPrivileged'])
self.assertEqual([], self.params['permissions']['jobs'])
self.assertFalse([], self.params['permissions']['fuzzers'])
self.query.union.assert_has_calls([])
self.query.filter.assert_has_calls([])
def test_default_domain(self):
"""Test the default filter for domain users."""
self.mock.has_access.side_effect = _has_access
crash_access.add_scope(self.query, self.params, 'security_flag', 'job_type',
'fuzzer_name')
self.assertTrue(self.params['permissions']['everything'])
self.assertFalse(self.params['permissions']['isPrivileged'])
self.assertEqual([], self.params['permissions']['jobs'])
self.assertFalse([], self.params['permissions']['fuzzers'])
self.query.filter.assert_has_calls([])
self.query.union.assert_called_once_with(mock.ANY)
q = self.query.union.call_args[0][0]
q.union.assert_has_calls([])
q.filter.assert_has_calls([mock.call('security_flag', False)])
def test_domain_with_job_and_fuzzer(self):
"""Test domain user with job and fuzzer."""
self.mock.has_access.side_effect = _has_access
self.mock.get_user_job_type.return_value = 'job'
self.mock._allowed_entities_for_user.side_effect = [['job2'], ['fuzzer']]
self.mock.get_permission_names.side_effect = [['perm'], ['perm1']]
crash_access.add_scope(self.query, self.params, 'security_flag', 'job_type',
'fuzzer_name')
self.assertTrue(self.params['permissions']['everything'])
self.assertFalse(self.params['permissions']['isPrivileged'])
self.assertListEqual(['perm', 'job'], self.params['permissions']['jobs'])
self.assertListEqual(['perm1'], self.params['permissions']['fuzzers'])
self.query.union.assert_has_calls([])
self.query.union.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY)
everything_query = self.query.union.call_args[0][0]
job_query = self.query.union.call_args[0][1]
fuzzer_query = self.query.union.call_args[0][2]
everything_query.union.assert_has_calls([])
job_query.union.assert_has_calls([])
fuzzer_query.union.assert_has_calls([])
everything_query.filter.assert_has_calls(
[mock.call('security_flag', False)])
job_query.filter_in.assert_has_calls([
mock.call('job_type', ['job2', 'job']),
])
fuzzer_query.filter_in.assert_has_calls([
mock.call('fuzzer_name', ['fuzzer']),
])
| 37.40458 | 80 | 0.706122 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the crash_access library."""
# pylint: disable=protected-access
import unittest
import mock
from clusterfuzz._internal.tests.test_libs import helpers as test_helpers
from libs import crash_access
from libs import helpers
from libs.query import base
def _has_access(need_privileged_access=False):
  """Stand-in for access.has_access: grant access unless privilege is required."""
  return not need_privileged_access
class AddScopeTest(unittest.TestCase):
  """Test add_scope."""

  def setUp(self):
    """Patch every collaborator of add_scope and build a fresh mock query."""
    Query = base.Query  # pylint: disable=invalid-name
    test_helpers.patch(self, [
        'clusterfuzz._internal.base.external_users._allowed_entities_for_user',
        'libs.crash_access.get_permission_names',
        'libs.access.has_access',
        'libs.access.get_user_job_type',
        'libs.helpers.get_user_email',
        'libs.query.base.Query',
    ])
    self.params = {}
    # Defaults: an unprivileged domain user with no extra jobs or fuzzers.
    self.mock.get_user_job_type.return_value = None
    self.mock.get_user_email.return_value = 'test@test.com'
    self.mock._allowed_entities_for_user.return_value = []
    self.mock.get_permission_names.return_value = []

    def create_query():
      # Each Query() call yields a fresh autospec'd mock so the sub-queries
      # add_scope builds can be asserted on individually.
      q = mock.create_autospec(Query)
      return q

    self.mock.Query.side_effect = create_query
    self.query = base.Query()

  def test_forbidden(self):
    """Test when user is forbidden."""
    self.mock.has_access.return_value = False
    with self.assertRaises(helpers.EarlyExitException):
      crash_access.add_scope(self.query, self.params, 'security_flag',
                             'job_type', 'fuzzer_name')

  def test_default_global_privileged(self):
    """Test the default filter for globally privileged users."""
    self.mock.has_access.return_value = True
    crash_access.add_scope(self.query, self.params, 'security_flag', 'job_type',
                           'fuzzer_name')
    self.assertTrue(self.params['permissions']['everything'])
    self.assertTrue(self.params['permissions']['isPrivileged'])
    self.assertEqual([], self.params['permissions']['jobs'])
    self.assertFalse([], self.params['permissions']['fuzzers'])
    # Privileged users see everything: no extra sub-query, no filter.
    self.query.union.assert_has_calls([])
    self.query.filter.assert_has_calls([])

  def test_default_domain(self):
    """Test the default filter for domain users."""
    self.mock.has_access.side_effect = _has_access
    crash_access.add_scope(self.query, self.params, 'security_flag', 'job_type',
                           'fuzzer_name')
    self.assertTrue(self.params['permissions']['everything'])
    self.assertFalse(self.params['permissions']['isPrivileged'])
    self.assertEqual([], self.params['permissions']['jobs'])
    self.assertFalse([], self.params['permissions']['fuzzers'])
    # Domain users get a unioned sub-query restricted to non-security bugs.
    self.query.filter.assert_has_calls([])
    self.query.union.assert_called_once_with(mock.ANY)
    q = self.query.union.call_args[0][0]
    q.union.assert_has_calls([])
    q.filter.assert_has_calls([mock.call('security_flag', False)])

  def test_domain_with_job_and_fuzzer(self):
    """Test domain user with job and fuzzer."""
    self.mock.has_access.side_effect = _has_access
    self.mock.get_user_job_type.return_value = 'job'
    self.mock._allowed_entities_for_user.side_effect = [['job2'], ['fuzzer']]
    self.mock.get_permission_names.side_effect = [['perm'], ['perm1']]
    crash_access.add_scope(self.query, self.params, 'security_flag', 'job_type',
                           'fuzzer_name')
    self.assertTrue(self.params['permissions']['everything'])
    self.assertFalse(self.params['permissions']['isPrivileged'])
    self.assertListEqual(['perm', 'job'], self.params['permissions']['jobs'])
    self.assertListEqual(['perm1'], self.params['permissions']['fuzzers'])
    # Three unioned scopes: everything (non-security), allowed jobs,
    # allowed fuzzers.
    self.query.union.assert_has_calls([])
    self.query.union.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY)
    everything_query = self.query.union.call_args[0][0]
    job_query = self.query.union.call_args[0][1]
    fuzzer_query = self.query.union.call_args[0][2]
    everything_query.union.assert_has_calls([])
    job_query.union.assert_has_calls([])
    fuzzer_query.union.assert_has_calls([])
    everything_query.filter.assert_has_calls(
        [mock.call('security_flag', False)])
    job_query.filter_in.assert_has_calls([
        mock.call('job_type', ['job2', 'job']),
    ])
    fuzzer_query.filter_in.assert_has_calls([
        mock.call('fuzzer_name', ['fuzzer']),
    ])
| 828 | 0 | 48 |
b79b812df3f82ca1f88577abb43ebbecffe4a810 | 25,526 | py | Python | ics/services/dataset_collection_service.py | aesuli/ics | ae6753f721f88d6f30ad9a3450feedbd9a7e20c4 | [
"BSD-3-Clause"
] | 1 | 2022-03-31T14:32:54.000Z | 2022-03-31T14:32:54.000Z | ics/services/dataset_collection_service.py | aesuli/ics | ae6753f721f88d6f30ad9a3450feedbd9a7e20c4 | [
"BSD-3-Clause"
] | null | null | null | ics/services/dataset_collection_service.py | aesuli/ics | ae6753f721f88d6f30ad9a3450feedbd9a7e20c4 | [
"BSD-3-Clause"
] | null | null | null | import csv
import os
import shutil
from random import randint
from uuid import uuid4
import cherrypy
import numpy as np
from cherrypy.lib.static import serve_file
from ics.classifier.classifier import NO_LABEL, YES_LABEL
from ics.db.sqlalchemydb import SQLAlchemyDB, Job, ClassificationMode, LabelSource
from ics.util.util import get_fully_portable_file_name, bool_to_string
__author__ = 'Andrea Esuli'
MAX_BATCH_SIZE = 1000
CSV_LARGE_FIELD = 1024 * 1024 * 10
QUICK_CLASSIFICATION_BATCH_SIZE = 100
| 43.337861 | 201 | 0.595706 | import csv
import os
import shutil
from random import randint
from uuid import uuid4
import cherrypy
import numpy as np
from cherrypy.lib.static import serve_file
from ics.classifier.classifier import NO_LABEL, YES_LABEL
from ics.db.sqlalchemydb import SQLAlchemyDB, Job, ClassificationMode, LabelSource
from ics.util.util import get_fully_portable_file_name, bool_to_string
__author__ = 'Andrea Esuli'
MAX_BATCH_SIZE = 1000
CSV_LARGE_FIELD = 1024 * 1024 * 10
QUICK_CLASSIFICATION_BATCH_SIZE = 100
class DatasetCollectionService(object):
    def __init__(self, db_connection_string, data_dir):
        """Open the database and ensure the dataset transfer directories exist."""
        self._db_connection_string = db_connection_string
        self._db = SQLAlchemyDB(db_connection_string)
        # Cached CSV exports and uploaded CSVs live under <data_dir>/datasets.
        self._download_dir = os.path.join(data_dir, 'datasets', 'downloads')
        os.makedirs(self._download_dir, exist_ok=True)
        self._upload_dir = os.path.join(data_dir, 'datasets', 'uploads')
        os.makedirs(self._upload_dir, exist_ok=True)
def close(self):
self._db.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
return False
@cherrypy.expose
@cherrypy.tools.json_out()
def info(self, page=None, page_size=50):
result = []
if page is None:
names = self._db.dataset_names()
else:
names = self._db.dataset_names()[int(page) * int(page_size):(int(page) + 1) * int(page_size)]
for name in names:
dataset_info = dict()
dataset_info['name'] = name
dataset_info['description'] = self._db.get_dataset_description(name)
dataset_info['created'] = str(self._db.get_dataset_creation_time(name))
dataset_info['updated'] = str(self._db.get_dataset_last_update_time(name))
dataset_info['size'] = self._db.get_dataset_size(name)
result.append(dataset_info)
return result
@cherrypy.expose
@cherrypy.tools.json_out()
def count(self):
return str(len(list(self._db.dataset_names())))
@cherrypy.expose
@cherrypy.tools.json_out()
def create(self, name):
name = name.strip()
if len(name) == 0:
cherrypy.response.status = 400
return 'Must specify a dataset name'
self._db.create_dataset(name)
return 'Ok'
@cherrypy.expose
@cherrypy.tools.json_out()
def add_document(self, name, document_name, document_content):
if not self._db.dataset_exists(name):
self._db.create_dataset(name)
self._db.create_dataset_documents(name, ((document_name, document_content),))
return 'Ok'
@cherrypy.expose
@cherrypy.tools.json_out()
def delete_document(self, name, document_name):
if not self._db.dataset_exists(name):
cherrypy.response.status = 404
return '%s does not exist' % name
self._db.delete_dataset_document(name, document_name)
return 'Ok'
@cherrypy.expose
@cherrypy.tools.json_out()
def upload(self, **data):
try:
dataset_name = data['name']
except KeyError:
cherrypy.response.status = 400
return 'Must specify a name'
try:
file = data['file']
except KeyError:
cherrypy.response.status = 400
return 'Must upload a file'
if not self._db.dataset_exists(dataset_name):
self._db.create_dataset(dataset_name)
filename = 'dataset %s %s.csv' % (dataset_name, uuid4())
filename = get_fully_portable_file_name(filename)
fullpath = os.path.join(self._upload_dir, filename)
with open(fullpath, 'wb') as outfile:
shutil.copyfileobj(file.file, outfile)
job_id = self._db.create_job(_create_dataset_documents, (self._db_connection_string, dataset_name, fullpath),
description='upload to dataset \'%s\'' % dataset_name)
return [job_id]
@cherrypy.expose
@cherrypy.tools.json_out()
def set_description(self, name, description):
if description is not None:
self._db.set_dataset_description(name, description)
return 'Ok'
@cherrypy.expose
@cherrypy.tools.json_out()
def rename(self, name, new_name):
try:
self._db.rename_dataset(name, new_name)
except KeyError:
cherrypy.response.status = 404
return '%s does not exist' % name
except Exception as e:
cherrypy.response.status = 500
return 'Error (%s)' % str(e)
else:
return 'Ok'
@cherrypy.expose
@cherrypy.tools.json_out()
def delete(self, name):
job_id = self._db.create_job(_delete_dataset,
(self._db_connection_string, name),
description='delete dataset \'%s\'' % name)
return [job_id]
@cherrypy.expose
def download(self, name):
if not self._db.dataset_exists(name):
cherrypy.response.status = 404
return '\'%s\' does not exist' % name
filename = 'dataset %s %s.csv' % (name, str(self._db.get_dataset_last_update_time(name)))
filename = get_fully_portable_file_name(filename)
fullpath = os.path.join(self._download_dir, filename)
if not os.path.isfile(fullpath):
try:
with open(fullpath, 'w', encoding='utf-8', newline='') as file:
writer = csv.writer(file, lineterminator='\n')
for document in self._db.get_dataset_documents(name):
writer.writerow([document.external_id, document.text])
except:
os.unlink(fullpath)
return serve_file(fullpath, "text/csv", "attachment")
@cherrypy.expose
@cherrypy.tools.json_out()
def size(self, name):
if not self._db.dataset_exists(name):
cherrypy.response.status = 404
return '\'%s\' does not exist' % name
return str(self._db.get_dataset_size(name))
@cherrypy.expose
@cherrypy.tools.json_out()
def document_by_name(self, name, document_name):
if not self._db.dataset_exists(name):
cherrypy.response.status = 404
return '\'%s\' does not exist' % name
document = self._db.get_dataset_document_by_name(name, document_name)
if document is not None:
result = dict()
result['external_id'] = document.external_id
result['text'] = document.text
result['created'] = str(document.creation)
return result
else:
cherrypy.response.status = 404
return 'Document with name \'%i\' does not exist in \'%s\'' % (document_name, name)
@cherrypy.expose
@cherrypy.tools.json_out()
def document_by_position(self, name, position):
position = int(position)
if not self._db.dataset_exists(name):
cherrypy.response.status = 404
return '\'%s\' does not exist' % name
document = self._db.get_dataset_document_by_position(name, position)
if document is not None:
result = dict()
result['external_id'] = document.external_id
result['text'] = document.text
result['created'] = str(document.creation)
return result
else:
cherrypy.response.status = 404
return 'Position %i does not exist in \'%s\'' % (position, name)
@cherrypy.expose
@cherrypy.tools.json_out()
def get_documents(self, name, page=None, page_size=50, filter=None):
if not self._db.dataset_exists(name):
cherrypy.response.status = 404
return '%s does not exist' % name
page_size = int(page_size)
if page is None:
offset = 0
else:
offset = int(page) * page_size
limit = page_size
batch = list()
for document in self._db.get_dataset_documents(name, filter, offset, limit):
batch.append({'id': document.external_id, 'pos': document.id, 'creation': str(document.creation),
'text': document.text})
return batch
def _softmax(self, x):
return np.exp(x) / np.sum(np.exp(x))
@cherrypy.expose
@cherrypy.tools.json_out()
def documents_without_labels_count(self, dataset_name, classifier_name):
return str(self._db.get_dataset_documents_without_labels_count(dataset_name, classifier_name))
@cherrypy.expose
@cherrypy.tools.json_out()
def most_uncertain_document_id(self, name, classifier_name, filter=None):
X = list()
doc_ids = list()
for text, id in self._db.get_dataset_random_documents_without_labels(name, classifier_name, filter,
QUICK_CLASSIFICATION_BATCH_SIZE):
X.append(text)
doc_ids.append(id)
if len(X) == 0:
cherrypy.response.status = 400
if len(filter) == 0:
return f'No unlabeled documents in dataset \'{name}\' for classifier \'{classifier_name}\''
else:
return f'No unlabeled documents in dataset \'{name}\' for classifier \'{classifier_name}\' and text filter \'{filter}\''
if len(self._db.get_classifier_labels(classifier_name)) >= 2:
scores = self._db.score(classifier_name, X)
positions_scores = list()
for i, dict_ in enumerate(scores):
probs = self._softmax(list(dict_.values()))
probs.sort()
diff = probs[-1] - probs[-2]
positions_scores.append((i, diff))
positions_scores.sort(key=lambda x: x[1])
return self._db.get_dataset_document_position_by_id(name, doc_ids[positions_scores[0][0]])
else:
random_position = randint(0, len(doc_ids))
return self._db.get_dataset_document_position_by_id(name, doc_ids[random_position])
@cherrypy.expose
@cherrypy.tools.json_out()
def most_certain_document_id(self, name, classifier_name, filter=None):
X = list()
doc_ids = list()
for text, id in self._db.get_dataset_random_documents_without_labels(name, classifier_name, filter,
QUICK_CLASSIFICATION_BATCH_SIZE):
X.append(text)
doc_ids.append(id)
if len(X) == 0:
cherrypy.response.status = 400
if len(filter) == 0:
return f'No unlabeled documents in dataset \'{name}\' for classifier \'{classifier_name}\''
else:
return f'No unlabeled documents in dataset \'{name}\' for classifier \'{classifier_name}\' and text filter \'{filter}\''
if len(self._db.get_classifier_labels(classifier_name)) >= 2:
scores = self._db.score(classifier_name, X)
positions_scores = list()
for i, dict_ in enumerate(scores):
probs = self._softmax(list(dict_.values()))
probs.sort()
diff = probs[-1] - probs[-2]
positions_scores.append((i, diff))
positions_scores.sort(key=lambda x: -x[1])
return self._db.get_dataset_document_position_by_id(name, doc_ids[positions_scores[0][0]])
else:
random_position = randint(0, len(doc_ids))
return self._db.get_dataset_document_position_by_id(name, doc_ids[random_position])
@cherrypy.expose
@cherrypy.tools.json_out()
def random_unlabeled_document_id(self, name, classifier_name, filter=None):
try:
doc_id = self._db.get_dataset_random_documents_without_labels(name, classifier_name, filter, 1)[0][1]
return self._db.get_dataset_document_position_by_id(name, doc_id)
except:
cherrypy.response.status = 400
if len(filter) == 0:
return f'No unlabeled documents in dataset \'{name}\' for classifier \'{classifier_name}\''
else:
return f'No unlabeled documents in dataset \'{name}\' for classifier \'{classifier_name}\' and text filter \'{filter}\''
@cherrypy.expose
@cherrypy.tools.json_out()
def random_document_id(self, name, filter=None):
try:
doc_id = self._db.get_dataset_random_documents(name, filter, 1)[0].id
return self._db.get_dataset_document_position_by_id(name, doc_id)
except:
cherrypy.response.status = 400
if len(filter) == 0:
return f'No documents in dataset \'{name}\''
else:
return f'No documents in dataset \'{name}\' for text filter \'{filter}\''
@cherrypy.expose
@cherrypy.tools.json_out()
def next_document_id(self, name, start_from, filter=None):
try:
doc_id = self._db.get_dataset_next_documents(name, start_from, filter, 1)[0].id
return self._db.get_dataset_document_position_by_id(name, doc_id)
except:
cherrypy.response.status = 400
if len(filter) == 0:
return f'No succeeding documents in dataset \'{name}\' starting from position {start_from}'
else:
return f'No succeeding documents in dataset \'{name}\' for text filter \'{filter}\' starting from position {start_from}'
@cherrypy.expose
@cherrypy.tools.json_out()
def next_unlabeled_document_id(self, name, classifier_name, start_from, filter=None):
try:
doc_id = self._db.get_dataset_next_documents_without_labels(name, classifier_name, start_from, filter, 1)[0].id
return self._db.get_dataset_document_position_by_id(name, doc_id)
except:
cherrypy.response.status = 400
if len(filter) == 0:
return f'No succeeding unlabeled documents in dataset \'{name}\' starting from position {start_from}'
else:
return f'No succeeding unlabeled documents in dataset \'{name}\' for text filter \'{filter}\' starting from position {start_from}'
@cherrypy.expose
@cherrypy.tools.json_out()
def prev_document_id(self, name, start_from, filter=None):
try:
doc_id = self._db.get_dataset_prev_documents(name, start_from, filter, 1)[0].id
return self._db.get_dataset_document_position_by_id(name, doc_id)
except:
cherrypy.response.status = 400
if len(filter) == 0:
return f'No preceeding documents in dataset \'{name}\' starting from position {start_from}'
else:
return f'No preceeding documents in dataset \'{name}\' for text filter \'{filter}\' starting from position {start_from}'
@cherrypy.expose
@cherrypy.tools.json_out()
def prev_unlabeled_document_id(self, name, classifier_name, start_from, filter=None):
try:
doc_id = self._db.get_dataset_prev_documents_without_labels(name, classifier_name, start_from, filter, 1)[0].id
return self._db.get_dataset_document_position_by_id(name, doc_id)
except:
cherrypy.response.status = 400
if len(filter) == 0:
return f'No preceeding unlabeled documents in dataset \'{name}\' starting from position {start_from}'
else:
return f'No preceeding unlabeled documents in dataset \'{name}\' for text filter \'{filter}\' starting from position {start_from}'
@cherrypy.expose
@cherrypy.tools.json_out()
def classify(self, **data):
try:
datasetname = data['name']
except KeyError:
cherrypy.response.status = 400
return 'Must specify a dataset name'
try:
classifiers = data['classifiers']
except KeyError:
try:
classifiers = data['classifiers[]']
except KeyError:
cherrypy.response.status = 400
return 'Must specify a vector of names of classifiers'
classifiers = np.atleast_1d(classifiers).tolist()
last_update_time = self._db.get_most_recent_classifier_update_time(classifiers)
dataset_update_time = self._db.get_dataset_last_update_time(datasetname)
if last_update_time is None or last_update_time < dataset_update_time:
last_update_time = dataset_update_time
filename = 'dataset %s classified %s %s.csv' % (
datasetname, "-".join(classifiers), str(last_update_time))
filename = get_fully_portable_file_name(filename)
fullpath = os.path.join(self._download_dir, filename)
if self._db.classification_exists(fullpath):
cherrypy.response.status = 409
return 'An up-to-date classification is already available.'
job_id = self._db.create_job(_classify,
(self._db_connection_string, datasetname, classifiers, fullpath),
description='classify dataset \'%s\' with %s' % (
datasetname,
', '.join(['\'%s\'' % classifier for classifier in classifiers])))
self._db.create_classification_job(datasetname, classifiers, job_id, fullpath)
return [job_id]
@cherrypy.expose
@cherrypy.tools.json_out()
def classification_info(self, name, page=None, page_size=50):
got_deleted = True
result = None
while got_deleted:
got_deleted = False
result = list()
to_delete = list()
if page is None:
jobs = self._db.get_classification_jobs()
else:
jobs = self._db.get_classification_jobs(name)[
int(page) * int(page_size):(int(page) + 1) * int(page_size)]
for classification_job in jobs:
classification_job_info = dict()
classification_job_info['id'] = classification_job.id
if (classification_job.filename is None or not os.path.exists(
classification_job.filename)) and classification_job.job is None:
to_delete.append(classification_job.id)
got_deleted = True
continue
classification_job_info['dataset'] = name
classification_job_info['classifiers'] = classification_job.classifiers
classification_job_info['creation'] = str(classification_job.creation)
if classification_job.job:
classification_job_info['status'] = classification_job.job.status
classification_job_info['completion'] = str(classification_job.job.completion)
else:
classification_job_info['status'] = Job.status_done
classification_job_info['completion'] = str(os.path.getmtime(classification_job.filename))
result.append(classification_job_info)
for id in to_delete:
self._db.delete_classification_job(id)
return result
@cherrypy.expose
@cherrypy.tools.json_out()
def classification_count(self, name):
return str(len(list(self._db.get_classification_jobs(name))))
@cherrypy.expose
def classification_download(self, id):
filename = self._db.get_classification_job_filename(int(id))
if filename is None or not os.path.exists(filename):
cherrypy.response.status = 404
return "File not found"
return serve_file(filename, "text/csv", "attachment")
@cherrypy.expose
@cherrypy.tools.json_out()
def classification_delete(self, id):
try:
filename = self._db.get_classification_job_filename(id)
os.unlink(filename)
except FileNotFoundError:
pass
self._db.delete_classification_job(id)
return 'Ok'
@cherrypy.expose
@cherrypy.tools.json_out()
def version(self):
import ics
return ics.__version__
def _classify(db_connection_string, datasetname, classifiers, fullpath):
    """Background job body: classify a whole dataset and write a CSV report.

    Writes to `fullpath + '.tmp'` first and renames on success so readers
    never see a half-written file; on any error both the temp file and the
    target are removed and the exception propagates to mark the job failed.
    The CSV has one row per document: id, text, then one column per
    classifier (single-label) or per classifier label (multi-label).
    """
    cherrypy.log('DatasetCollectionService._classify(datasetname="' + datasetname + '", classifiers="' + str(
        classifiers) + '", fullpath="' + fullpath + '")')
    with SQLAlchemyDB(db_connection_string) as db:
        tempfile = fullpath + '.tmp'
        try:
            with open(tempfile, 'w', encoding='utf-8', newline='') as file:
                writer = csv.writer(file, lineterminator='\n')
                header = list()
                header.append('#id')
                header.append('text')
                # Only classifiers that still exist get a column; remember each
                # one's preferred mode for the per-batch loop below.
                classification_modes = dict()
                for classifier in classifiers:
                    if db.classifier_exists(classifier):
                        classification_modes[classifier] = db.get_preferred_classification_mode(classifier)
                        header.append(
                            f'{classifier} = {classification_modes[classifier].value}, ({", ".join(db.get_classifier_labels(classifier))})')
                writer.writerow(header)
                # Page through the dataset in MAX_BATCH_SIZE chunks until a
                # page comes back empty.
                batch_count = 0
                found = True
                while found:
                    found = False
                    X = list()
                    id = list()
                    for document in db.get_dataset_documents(datasetname, offset=batch_count * MAX_BATCH_SIZE,
                                                             limit=MAX_BATCH_SIZE):
                        id.append(document.external_id)
                        X.append(document.text)
                    if len(X) > 0:
                        # Build the batch column-wise, then transpose into rows.
                        cols = list()
                        cols.append(id)
                        cols.append(X)
                        for classifier in classification_modes:
                            classification_mode = classification_modes[classifier]
                            if classification_mode == ClassificationMode.SINGLE_LABEL:
                                # One column: label plus a marker for human vs
                                # machine origin of the label.
                                cols.append([
                                    f'{classifier}:{label}{bool_to_string(gold, LabelSource.HUMAN_LABEL.value, LabelSource.MACHINE_LABEL.value)}'
                                    for label, gold in
                                    db.classify(classifier, X, classification_mode=classification_mode)])
                            elif classification_mode == ClassificationMode.MULTI_LABEL:
                                # One column per label: assigned yes/no plus the
                                # human/machine origin marker.
                                label_lists = zip(*db.classify(classifier, X, classification_mode=classification_mode))
                                for label_list in label_lists:
                                    cols.append(
                                        [
                                            f'{classifier}:{label}:{bool_to_string(assigned, YES_LABEL, NO_LABEL)}{bool_to_string(gold, LabelSource.HUMAN_LABEL.value, LabelSource.MACHINE_LABEL.value)}'
                                            for label, assigned, gold
                                            in label_list])
                        for row in zip(*cols):
                            writer.writerow(row)
                        found = True
                    batch_count += 1
            # Atomically replace any previous export with the fresh one.
            try:
                os.unlink(fullpath)
            except FileNotFoundError:
                pass
            os.rename(tempfile, fullpath)
        except Exception as e:
            # Clean up both files so no partial export survives, then re-raise
            # so the job framework records the failure.
            try:
                os.unlink(tempfile)
            except FileNotFoundError:
                pass
            try:
                os.unlink(fullpath)
            except FileNotFoundError:
                pass
            raise
    return 'done'
def _create_dataset_documents(db_connection_string, dataset_name, filename):
    """Background job body: bulk-load documents from an uploaded CSV file.

    Each CSV row is (document_name, content); rows with fewer than two
    fields, an empty name, or a name starting with '#' are skipped.
    Documents are inserted in batches of MAX_BATCH_SIZE.
    """
    cherrypy.log(
        'DatasetCollectionService._create_dataset_documents(dataset_name="' + dataset_name + '", filename="' + filename + '")')
    with SQLAlchemyDB(db_connection_string) as db:
        if not db.dataset_exists(dataset_name):
            db.create_dataset(dataset_name)
        # Raise the csv field limit so very long document texts parse.
        if csv.field_size_limit() < CSV_LARGE_FIELD:
            csv.field_size_limit(CSV_LARGE_FIELD)
        with open(filename, 'r', encoding='utf-8', errors='ignore') as file:
            reader = csv.reader(file)
            external_ids_and_contents = list()
            for row in reader:
                if len(row) > 1:
                    document_name = row[0].strip()
                    if len(document_name) == 0 or document_name[0] == '#':
                        # Skip unnamed rows and comment rows.
                        continue
                    content = row[1]
                    external_ids_and_contents.append((document_name, content))
                    if len(external_ids_and_contents) >= MAX_BATCH_SIZE:
                        db.create_dataset_documents(dataset_name, external_ids_and_contents)
                        external_ids_and_contents = list()
            # Flush the final partial batch.
            if len(external_ids_and_contents) > 0:
                db.create_dataset_documents(dataset_name, external_ids_and_contents)
    return 'done'
    def _delete_dataset(db_connection_string, name):
        """Delete the named dataset from the database; returns 'done'."""
        cherrypy.log('DatasetCollectionService._delete_dataset(dname="' + name + '")')
        with SQLAlchemyDB(db_connection_string) as db:
            db.delete_dataset(name)
        return 'done'
| 22,546 | 2,381 | 92 |
8c924cabffc4ee5cc8c89951f0d76a62a643767a | 7,828 | py | Python | custom_one_gallery/custom_one_gallery/report/supplier_backlog_report/supplier_backlog_report.py | AGtechnologies/custom_one_gallery | af081a8e8d81101281a54b20117c43a83e486b69 | [
"MIT"
] | null | null | null | custom_one_gallery/custom_one_gallery/report/supplier_backlog_report/supplier_backlog_report.py | AGtechnologies/custom_one_gallery | af081a8e8d81101281a54b20117c43a83e486b69 | [
"MIT"
] | null | null | null | custom_one_gallery/custom_one_gallery/report/supplier_backlog_report/supplier_backlog_report.py | AGtechnologies/custom_one_gallery | af081a8e8d81101281a54b20117c43a83e486b69 | [
"MIT"
] | null | null | null | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from datetime import datetime,timedelta
from dateutil.relativedelta import relativedelta
from frappe.utils import flt, getdate, today
| 35.420814 | 470 | 0.711037 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from datetime import datetime,timedelta
from dateutil.relativedelta import relativedelta
from frappe.utils import flt, getdate, today
def execute(filters=None):
    """Entry point for the Supplier Backlog report.

    Returns (columns, data): one row per item carrying monthly sales
    quantities for the selected range, the current stock balance,
    purchase-order quantities for the three months after the range,
    shortage figures, the scrap-warehouse quantity and the reserved
    quantity.
    """
    if not filters: filters = {}
    condition, months, item_condition, future_months = get_condition(filters)
    columns = get_columns(months, item_condition)
    items = get_item_info(item_condition)
    data = []
    for item in items:
        # Slots for monthly sales totals (past range) and for up to three
        # future months of purchase orders; keys are month offsets.
        lmonths = {0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0}
        fmonths = {0: 0, 1: 0, 2: 0}
        if len(months) < 6:
            # Shrink the sales slots to the actual number of months selected.
            lmonths = {mf: 0 for mf in range(len(months))}
        so_months = 0
        warehouse_bal_quantity = 0
        future_1stmonth = 0
        reserved_quant = 0
        # NOTE(review): these two are never updated below, so the shortage
        # columns effectively compare against zero sales -- confirm whether
        # that is the intended behaviour of the report.
        sales_qty_current = 0.0
        sales_qty_next = 0.0
        scrap_quantity = get_scrap_quantity(condition, item.item_name)
        st_items = get_stock_balance(condition, item.item_name)
        if st_items:
            st_items = st_items[0]
            warehouse_bal_quantity = st_items.actual_qty
            # Bug fix: this previously assigned to a dead local named
            # 'reserved_qty', leaving 'reserved_quant' (the value actually
            # reported and used in short_next) stuck at 0.
            reserved_quant = st_items.reserved_qty
        datalist = [item.name, item.item_name, item.uom_name]
        # Collect per-month sales-order totals for the selected range.
        for lmon in months:
            localcondition = condition + " and so.transaction_date >= '%s' and so.transaction_date <= '%s'" % (lmon.get('start', ''), lmon.get('end', ''))
            so_items_map = get_sales_items(localcondition, item.item_name)
            if so_items_map:
                so_items = so_items_map[0]
                lmonths.update({so_months: so_items.so_qty})
                # NOTE(review): the slot index only advances when a month has
                # sales, so empty months shift later totals left -- confirm.
                so_months += 1
        # Emit monthly sales columns, inserting the stock balance just
        # before the last two monthly columns (matches get_columns layout).
        f = 1
        checker = len(lmonths) - 2
        for i in lmonths:
            iqty = lmonths.get(i, 0)
            if checker:
                datalist.append(iqty)
                if f == checker:
                    datalist.append(warehouse_bal_quantity)
            else:
                # Exactly two months selected: balance goes first, then the
                # remaining months (checker becomes truthy afterwards).
                datalist.append(warehouse_bal_quantity)
                datalist.append(iqty)
                checker = True
            f += 1
        # Collect per-month purchase-order totals for the future months.
        po_months = 0
        for fmon in future_months:
            localcondition = condition + " and po.transaction_date >= '%s' and po.transaction_date <= '%s'" % (fmon.get('start', ''), fmon.get('end', ''))
            po_items_map = get_purchase_items(localcondition, item.item_name)
            if po_items_map:
                po_items = po_items_map[0]
                fmonths.update({po_months: po_items.po_qty})
                po_months += 1
        f = 1
        for i in fmonths:
            iqty = fmonths.get(i, 0)
            datalist.append(iqty)
            if f == 1:
                future_1stmonth = iqty
            f += 1
        short_current = warehouse_bal_quantity - sales_qty_current
        short_next = (sales_qty_next + short_current) - (future_1stmonth + reserved_quant)
        datalist += [short_current, short_next, scrap_quantity, reserved_quant]
        data.append(datalist)
    return columns, data
def get_item_info(item_condition):
    """Fetch (name, item_name, uom_name) rows for items joined to their stock UOM.

    item_condition is a raw SQL fragment ANDed into the WHERE clause;
    when empty/falsy, all items are returned.
    NOTE(review): the fragment is interpolated into the SQL string
    unescaped -- safe only while callers build it from trusted values.
    """
    if item_condition:
        query="select it.name, it.item_name, um.uom_name from `tabItem` it, `tabUOM` um where it.stock_uom=um.name and %s" % item_condition
        #frappe.throw(repr(query))
        return frappe.db.sql(query, as_dict=1)
    return frappe.db.sql("select it.name, it.item_name, um.uom_name as uom_name from `tabItem` it, `tabUOM` um where it.stock_uom=um.name", as_dict=1)
def get_scrap_quantity(condition, item_name):
    """Return the item's bin quantity in the 'Warehouse- Scrap' warehouse, or 0.

    NOTE(review): the incoming 'condition' argument is discarded -- the
    rebinding below replaces it with an item-name filter only. Confirm
    whether callers expect their condition to apply here.
    """
    condition=" and it.item_name ='%s'" %item_name
    query="""select bi.actual_qty
	from `tabBin` bi, `tabWarehouse` wh, `tabItem` it
	where wh.name = bi.warehouse and bi.item_code = it.name and wh.warehouse_name='Warehouse- Scrap' %s""" % (condition)
    sc_items = frappe.db.sql(query, as_dict=1)
    if not sc_items:
        return 0
    #frappe.throw(repr(sc_items))
    return sc_items[0].actual_qty
def get_stock_balance(condition, item_name):
    """Return bin rows (actual_qty, reserved_qty) for the item across warehouses.

    Returns the full result list; callers typically use the first row.
    NOTE(review): returns the int 0 (not an empty list) when no bins match,
    and the incoming 'condition' argument is discarded by the rebinding
    below -- both look accidental but callers rely on truthiness only.
    """
    condition=" and it.item_name ='%s'" %item_name
    query="""select bi.actual_qty, bi.reserved_qty
	from `tabBin` bi, `tabWarehouse` wh, `tabItem` it
	where wh.name = bi.warehouse and bi.item_code = it.name %s""" % (condition)
    sc_items = frappe.db.sql(query, as_dict=1)
    if not sc_items:
        return 0
    #frappe.throw(repr(sc_items))
    return sc_items
def get_sales_items(condition, item_name):
    """Monthly sales-order quantity totals for one item under the given SQL condition."""
    full_condition = condition + " and so_item.item_name ='%s'" % item_name
    query = """select so_item.item_name, so.transaction_date, sum(so_item.qty) as so_qty
	from `tabSales Order` so, `tabSales Order Item` so_item
	where so.name = so_item.parent %s group by MONTH(so.transaction_date)""" % (full_condition,)
    return frappe.db.sql(query, as_dict=1)
def get_purchase_items(condition, item_name):
    """Monthly purchase-order quantity totals for one item under the given SQL condition."""
    full_condition = condition + " and po_item.item_name ='%s'" % item_name
    query = """select po_item.item_name, po.transaction_date, sum(po_item.qty) as po_qty
	from `tabPurchase Order` po, `tabPurchase Order Item` po_item
	where po.name = po_item.parent %s group by MONTH(po.transaction_date)""" % (full_condition,)
    return frappe.db.sql(query, as_dict=1)
def get_columns(months, item_condition):
    """Build the report column definitions.

    One 'dd-dd Mon Sales Qty' column is emitted per month chunk except the
    last two entries of `months` (those are the first two future months
    appended by get_condition), followed by the fixed summary columns.
    `item_condition` is unused but kept for interface compatibility.
    """
    # Removed: an unused `items = get_item_info(item_condition)` call that
    # ran a database query whose result was never read.
    columns = [_("Product ID") + "::100",_("Product Name") + "::200",_("UOM") + "::100"]
    for mon in months[:-2]:
        start = datetime.strptime(mon.get('start'), '%Y-%m-%d')
        end = datetime.strptime(mon.get('end'), '%Y-%m-%d')
        # e.g. "01-31 Jan Sales Qty"
        month_name = start.strftime('%d') + '-' + end.strftime('%d') + ' ' + start.strftime('%b') + ' Sales Qty'
        columns.append(month_name + ":Float:150")
    columns += [
        _("Stock Balance (all local W/Hse)") + ":Float:100",
        _("Sales Order Total qty (Current month)") + ":Float:100",
        _("Sales Order Total qty (Next month)") + ":Float:100",
        _("Future 1st Month PO Qty") + ":Float:100",
        _("Future 2nd Month PO Qty") + ":Float:100",
        _("Future 3rd Month PO Qty") + ":Float:100",
        _("Shortagefor *current month*") + ":Float:100",
        _("Shortage for *Next month*") + ":Float:100",
        _("Scrap W/Hse Qty") + ":Float:100",
        _("Reserved Qty") + ":Float:100"
    ]
    return columns
def get_condition(filters):
    """Turn report filters into SQL fragments and month-chunk lists.

    Returns (conditions, months, item_condition, future_months):
    - conditions: extra SQL (currently always the empty string),
    - months: month chunks of the from/to range PLUS the first two future
      months (see the return statement),
    - item_condition: optional "item_code = ..." SQL fragment,
    - future_months: three month chunks after to_date.
    Raises via frappe.throw() on invalid input.
    """
    conditions = ""
    item_condition=""
    months=[]
    future_months=[]
    # NOTE: this local shadows frappe.utils.today imported at module level.
    today=datetime.now().strftime("%Y-%m-%d")
    to_date=filters.get("to_date")
    # String comparison works because both sides are ISO yyyy-mm-dd.
    # NOTE(review): raises TypeError when "to_date" is absent -- presumably
    # the report form always supplies it; confirm.
    if to_date>today:
        frappe.throw("To Date can not be greater than Current Date.")
    if filters.get("to_date") and filters.get("from_date"):
        to_da=datetime.strptime(to_date,"%Y-%m-%d")
        from_date=filters.get("from_date")
        from_da= datetime.strptime(from_date,"%Y-%m-%d")
        if from_date>to_date:
            frappe.throw("To Date must be greater than From Date")
        end=""
        start=""
        # Walk day by day from from_date towards to_date, emitting one
        # {'start','end'} chunk per calendar month; the inner loop exits
        # when the month number changes.
        while(from_da<to_da):
            start=from_da.strftime("%Y-%m-%d")
            flag=from_da.strftime('%m')
            while(from_da.strftime('%m')==flag):
                flag=from_da.strftime('%m')
                end=from_da.strftime("%Y-%m-%d")
                if flag==to_da.strftime('%m'):
                    end=to_da.strftime("%Y-%m-%d")
                from_da+=timedelta(1)
            months.append({'start':start,'end':end})
            #frappe.throw(repr(months))
        # Same month-chunking for the three months following to_date.
        from_da=to_da
        to_da=to_da+relativedelta(months=3)
        end=""
        while(from_da<to_da):
            start=from_da.strftime("%Y-%m-%d")
            flag=from_da.strftime('%m')
            while(from_da.strftime('%m')==flag):
                flag=from_da.strftime('%m')
                end=from_da.strftime("%Y-%m-%d")
                if flag==to_da.strftime('%m'):
                    end=to_da.strftime("%Y-%m-%d")
                from_da+=timedelta(1)
            future_months.append({'start':start,'end':end})
    else:
        frappe.throw(_("From and To dates are required"))
    if filters.get("item"):
        # NOTE(review): value is interpolated into SQL unescaped.
        item_condition += " item_code = '%s'" % filters["item"]
    if len(months)>4:
        frappe.throw("Difference between Date FROM and TO must not more than 3.")
    # if len(future_months)>3:
    #     frappe.throw("Difference between Date FROM and TO must not more than 3.")
    # Callers receive the first two future months appended to "months".
    return conditions,months+future_months[:2],item_condition,future_months
befe7284b3671cc89b1f93952be62824f00a39e3 | 8,595 | py | Python | graph_partitioning/partitioners/scotch/lib_scotch.py | sbarakat/algorithmshop-graph-partitioning | db575ce585e2de0df4b0d944c24777cabc2146a3 | [
"MIT"
] | 13 | 2017-03-26T13:47:51.000Z | 2021-01-29T14:01:30.000Z | graph_partitioning/partitioners/scotch/lib_scotch.py | sbarakat/algorithmshop-graph-partitioning | db575ce585e2de0df4b0d944c24777cabc2146a3 | [
"MIT"
] | null | null | null | graph_partitioning/partitioners/scotch/lib_scotch.py | sbarakat/algorithmshop-graph-partitioning | db575ce585e2de0df4b0d944c24777cabc2146a3 | [
"MIT"
] | 7 | 2017-03-21T14:01:26.000Z | 2021-07-28T10:26:42.000Z | import ctypes # used for accessing the dynamic library
import graph_partitioning.partitioners.utils as putils # used for some of the utilities functions
| 38.542601 | 259 | 0.665852 | import ctypes # used for accessing the dynamic library
import graph_partitioning.partitioners.utils as putils # used for some of the utilities functions
class LibScotch(putils.CLibInterface):
    """ctypes wrapper around the SCOTCH graph-partitioning shared library.

    Loads the library via CLibInterface, declares argument types for the
    SCOTCH C entry points, and exposes boolean-returning helpers for the
    arch/graph/strategy lifecycle and the mapping calls.  Most helpers
    return True on a SCOTCH return code of 0 and False otherwise.
    """
    def __init__(self, libraryPath = None):
        """Load the SCOTCH shared library (default path when None)."""
        super().__init__(libraryPath=libraryPath)
    def _getDefaultLibPath(self):
        """Default location of the SCOTCH shared library."""
        return putils.defaultSCOTCHLibraryPath()
    def _loadLibraryFunctions(self):
        """Declare SCOTCH C functions and their ctypes argument types."""
        # *****************
        # structures & data
        # *****************
        # These describe the type of object to be created
        # (opaque SCOTCH structs are modelled as fixed-size double arrays).
        self.SCOTCH_Arch = ctypes.c_double*128
        self.SCOTCH_Graph = ctypes.c_double*128
        self.SCOTCH_Strat = ctypes.c_double*128
        # These store the scotch data objects (ie. graph = SCOTCH_Graph())
        self.architecture = None
        self.graph = None
        self.strategy = None
        self.SCOTCH_version = self.clib.SCOTCH_version
        self.SCOTCH_version.argtypes = [ctypes.POINTER(ctypes.c_int), ctypes.POINTER(ctypes.c_int), ctypes.POINTER(ctypes.c_int)]
        # SCOTCH_archAlloc
        self.SCOTCH_archAlloc = self.clib.SCOTCH_archAlloc
        #self.SCOTCH_archAlloc.argtypes = [ None ]
        # SCOTCH_archInit
        self.SCOTCH_archInit = self.clib.SCOTCH_archInit
        self.SCOTCH_archInit.argtypes = [ctypes.POINTER(self.SCOTCH_Arch)]
        # SCOTCH_archExit
        self.SCOTCH_archExit = self.clib.SCOTCH_archExit
        self.SCOTCH_archExit.argtypes = [ctypes.POINTER(self.SCOTCH_Arch)]
        # SCOTCH_archCmplt - builds architecture for partitioning
        self.SCOTCH_archCmplt = self.clib.SCOTCH_archCmplt
        self.SCOTCH_archCmplt.argtypes = [ctypes.POINTER(self.SCOTCH_Arch), ctypes.c_int]
        # SCOTCH_graphAlloc
        self.SCOTCH_graphAlloc = self.clib.SCOTCH_graphAlloc
        #self.SCOTCH_graphAlloc.argtypes = [ None ]
        # SCOTCH_graphInit
        self.SCOTCH_graphInit = self.clib.SCOTCH_graphInit
        self.SCOTCH_graphInit.argtypes = [ctypes.POINTER(self.SCOTCH_Graph)]
        # SCOTCH_graphExit
        self.SCOTCH_graphExit = self.clib.SCOTCH_graphExit
        self.SCOTCH_graphExit.argtypes = [ctypes.POINTER(self.SCOTCH_Graph)]
        # SCOTCH_graphCheck
        self.SCOTCH_graphCheck = self.clib.SCOTCH_graphCheck
        self.SCOTCH_graphCheck.argtypes = [ctypes.POINTER(self.SCOTCH_Graph)]
        # SCOTCH_graphBuild
        self.SCOTCH_graphBuild = self.clib.SCOTCH_graphBuild
        self.SCOTCH_graphBuild.argtypes = [
            ctypes.POINTER(self.SCOTCH_Graph), ctypes.c_int, ctypes.c_int,
            ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p,
            ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p
        ]
        # SCOTCH_stratAlloc
        self.SCOTCH_stratAlloc = self.clib.SCOTCH_stratAlloc
        #self.SCOTCH_stratAlloc.argtypes = [ None ]
        # SCOTCH_stratInit
        self.SCOTCH_stratInit = self.clib.SCOTCH_stratInit
        self.SCOTCH_stratInit.argtypes = [ctypes.POINTER(self.SCOTCH_Strat)]
        self.SCOTCH_stratExit = self.clib.SCOTCH_stratExit
        self.SCOTCH_stratExit.argtypes = [ctypes.POINTER(self.SCOTCH_Strat)]
        self.SCOTCH_stratGraphMap = self.clib.SCOTCH_stratGraphMap
        self.SCOTCH_stratGraphMap.argtypes = [ctypes.POINTER(self.SCOTCH_Strat), ctypes.c_char_p]
        self.SCOTCH_stratGraphMapBuild = self.clib.SCOTCH_stratGraphMapBuild
        self.SCOTCH_stratGraphMapBuild.argtypes = [ctypes.POINTER(self.SCOTCH_Strat), ctypes.c_int, ctypes.c_int, ctypes.c_double]
        # MAPPING Functions
        self.SCOTCH_graphMap = self.clib.SCOTCH_graphMap
        self.SCOTCH_graphMap.argtypes = [ctypes.POINTER(self.SCOTCH_Graph), ctypes.POINTER(self.SCOTCH_Arch), ctypes.POINTER(self.SCOTCH_Strat), ctypes.c_void_p]
        self.SCOTCH_graphMapFixed = self.clib.SCOTCH_graphMapFixed
        self.SCOTCH_graphMapFixed.argtypes = [ctypes.POINTER(self.SCOTCH_Graph), ctypes.POINTER(self.SCOTCH_Arch), ctypes.POINTER(self.SCOTCH_Strat), ctypes.c_void_p]
    def isLoaded(self):
        """True when the shared library handle is available."""
        if self.clib is None:
            return False
        return True
    def version(self):
        """SCOTCH library version as a 'major.relative.patch' string."""
        major_ptr = ctypes.c_int(0)
        relative_ptr = ctypes.c_int(0)
        patch_ptr = ctypes.c_int(0)
        ret = self.SCOTCH_version(major_ptr, relative_ptr, patch_ptr)
        return "{}.{}.{}".format(major_ptr.value, relative_ptr.value, patch_ptr.value)
    def createSCOTCHArch(self):
        """Allocate and initialise self.architecture; True on success."""
        #self.SCOTCH_Arch = self.SCOTCH_archAlloc()
        #print(self.SCOTCH_Arch)
        self.architecture = self.SCOTCH_Arch()
        ret = self.SCOTCH_archInit(self.architecture)
        if(ret == 0):
            return True
        return False
    def deleteSCOTCHStrat(self):
        """Tear down and forget the current strategy object."""
        self.SCOTCH_stratExit(self.strategy)
        del self.strategy
        self.strategy = None
    def deleteSCOTCHArch(self):
        """Tear down and forget the current architecture object."""
        self.SCOTCH_archExit(self.architecture)
        del self.architecture
        self.architecture = None
    def populatePartitionArchitecture(self, numPartitions):
        """Configure the architecture as a complete graph of numPartitions parts."""
        if(self.architecture is None):
            return False
        if(isinstance(numPartitions, int)):
            ret = self.SCOTCH_archCmplt(self.architecture, numPartitions)
            if(ret == 0):
                return True
        return False
    def createSCOTCHGraph(self):
        """Allocate and initialise self.graph; True on success."""
        #self.SCOTCH_Graph = self.SCOTCH_graphAlloc()
        self.graph = self.SCOTCH_Graph()
        ret = self.SCOTCH_graphInit(self.graph)
        if(ret == 0):
            return True
        return False
    def buildSCOTCHGraphFromData(self, scotchData):
        """Populate self.graph from a ScotchGraphArrays-like object.

        Creates the graph first when needed; passes the vertex-label array
        only when scotchData provides one.  True on success.
        """
        #if isinstance(scotchData, scotchio.ScotchGraphArrays) == False:
        #    return False
        if self.graph is None:
            if(self.createSCOTCHGraph() == False):
                return False
        if scotchData._vlbltab is None:
            success = self.SCOTCH_graphBuild(self.graph, scotchData.baseval, scotchData.vertnbr, scotchData._verttab.ctypes, 0, scotchData._velotab.ctypes, 0, scotchData.edgenbr, scotchData._edgetab.ctypes, scotchData._edlotab.ctypes)
        else:
            #print('SCOTCH.py, using vlbltab array')
            success = self.SCOTCH_graphBuild(self.graph, scotchData.baseval, scotchData.vertnbr, scotchData._verttab.ctypes, 0, scotchData._velotab.ctypes, scotchData._vlbltab.ctypes, scotchData.edgenbr, scotchData._edgetab.ctypes, scotchData._edlotab.ctypes)
        if success == 0:
            return True
        return False
    def deleteSCOTCHGraph(self):
        """Tear down and forget the current graph object."""
        # TODO write test for this
        self.SCOTCH_graphExit(self.graph)
        del self.graph
        self.graph = None
    def scotchGraphValid(self):
        """Run SCOTCH's consistency check on the current graph; True when valid."""
        # TODO write test for this
        ret = self.SCOTCH_graphCheck(self.graph)
        if(ret == 0):
            return True
        return False
    def createStrategy(self):
        """Allocate and initialise self.strategy; True on success."""
        self.strategy = self.SCOTCH_Strat()
        ret = self.SCOTCH_stratInit(self.strategy)
        if ret == 0:
            return True
        return False
    def setStrategyGraphMapBuild(self, straval, partitionNbr, kbalval = 0.1):
        """Build a mapping strategy (flags, part count, imbalance ratio)."""
        ret = self.SCOTCH_stratGraphMapBuild(self.strategy, straval, partitionNbr, kbalval)
        if ret == 0:
            return True
        return False
    def setStrategyFlags(self, strategyFlags):
        """Apply a strategy-string to the current strategy; non-str becomes ''."""
        if(isinstance(strategyFlags, str) == False):
            strategyFlags = ''
        # Note: must encode the string as that returns a bytecode equivalent
        success = self.SCOTCH_stratGraphMap(self.strategy, strategyFlags.encode('utf-8'))
        if(success == 0):
            return True
        return False
    def createSCOTCHGraphMapStrategy(self, strategyFlags):
        """Create a fresh strategy and apply the given strategy-string."""
        #self.strategy = self.SCOTCH_stratAlloc()
        self.strategy = self.SCOTCH_Strat()
        ret = self.SCOTCH_stratInit(self.strategy)
        if(ret == 0):
            if(isinstance(strategyFlags, str) == False):
                strategyFlags = ''
            # Note: must encode the string as that returns a bytecode equivalent
            success = self.SCOTCH_stratGraphMap(self.strategy, strategyFlags.encode('utf-8'))
            if(success == 0):
                return True
        return False
    def graphMap(self, parttab):
        """Map the graph onto the architecture; results land in parttab."""
        ret = self.SCOTCH_graphMap(self.graph, self.architecture, self.strategy, parttab.ctypes)
        if ret == 0:
            return True
        return False
    def graphMapFixed(self, parttab):
        """Like graphMap, but entries of parttab may pin vertices to parts."""
        ret = self.SCOTCH_graphMapFixed(self.graph, self.architecture, self.strategy, parttab.ctypes)
        if ret == 0:
            return True
        return False
| 7,888 | 17 | 535 |
839af60c763401f583944e34ff504a964a34c2ce | 1,055 | py | Python | tool/offline_job_info_generator.py | yamanalab/DAMCREM | 8064613b799efee1a4896b1e60488312368183ab | [
"Apache-2.0"
] | null | null | null | tool/offline_job_info_generator.py | yamanalab/DAMCREM | 8064613b799efee1a4896b1e60488312368183ab | [
"Apache-2.0"
] | null | null | null | tool/offline_job_info_generator.py | yamanalab/DAMCREM | 8064613b799efee1a4896b1e60488312368183ab | [
"Apache-2.0"
] | null | null | null | # coding: UTF-8
import sys
import os
import numpy as np
# unit is [us].
if __name__ == "__main__":
    # CLI: <output dir> <mean inter-arrival [us]> <num jobs M> <num trials N>
    argc = len(sys.argv)
    # unit is [us]
    dirname = sys.argv[1]
    mu = int(sys.argv[2])
    M = int(sys.argv[3])
    N = int(sys.argv[4])
    for trial in range(N):
        print(run(dirname, mu, M, trial))
    pass
| 23.444444 | 90 | 0.566825 | # coding: UTF-8
import sys
import os
import numpy as np
# unit is [us].
def generate_dt(mu, M):
    """Draw M exponential inter-arrival times (mean mu) via inverse-transform sampling."""
    uniform_draws = np.random.random(M)
    # 1 - U keeps the argument of log strictly positive (random() is in [0, 1)).
    return -mu * np.log(1 - uniform_draws)
def generate(mu, M):
    """Return M cumulative arrival times [us].

    Inter-arrival gaps are exponential with mean mu; the first arrival is
    offset by 1,000,000 us (a one-second warm-up before the first job).
    """
    dt = generate_dt(mu, M)
    # Bug fix: dtype was np.float, an alias removed in NumPy 1.24
    # (AttributeError on modern NumPy); plain float is equivalent.
    result = np.zeros([M], dtype=float)
    result[0] = dt[0] + 1000000
    for i in range(1, M):
        result[i] = result[i-1] + dt[i]
    return result
def run(dirname, mu, M, trial):
    """Generate one arrival-time trace and write it to a text file.

    Refuses to overwrite an existing file (prints to stderr and exits).
    Returns the path of the file written.
    """
    out_path = os.path.join(dirname, "received_time_M{}_mu{}_{}.txt".format(M, mu, trial))
    arrival_times = generate(mu, M)
    if os.path.exists(out_path):
        print("{} already exists.".format(out_path), file=sys.stderr)
        exit(1)
    with open(out_path, "w") as f:
        # Header line records the generation parameters.
        print("# {}[us], {}, {}".format(mu, M, trial), file=f)
        for t in arrival_times:
            print(t, file=f)
    return out_path
if __name__ == "__main__":
    # CLI: <output dir> <mean inter-arrival [us]> <num jobs M> <num trials N>
    argc = len(sys.argv)
    out_dir = sys.argv[1]
    mean_us = int(sys.argv[2])      # unit is [us]
    job_count = int(sys.argv[3])
    trial_count = int(sys.argv[4])
    for trial_index in range(trial_count):
        print(run(out_dir, mean_us, job_count, trial_index))
| 657 | 0 | 69 |
be70bdf5c7d2fda78c2ae2cf24b3e324863dde56 | 174 | py | Python | encuestas/encuesta/apps.py | davidbmx/encuestas | 3a80a970fecd477e61ea0a51e4b3787226cbea19 | [
"MIT"
] | null | null | null | encuestas/encuesta/apps.py | davidbmx/encuestas | 3a80a970fecd477e61ea0a51e4b3787226cbea19 | [
"MIT"
] | null | null | null | encuestas/encuesta/apps.py | davidbmx/encuestas | 3a80a970fecd477e61ea0a51e4b3787226cbea19 | [
"MIT"
] | null | null | null | #Django
from django.apps import AppConfig
class EncuestasAppConfig(AppConfig):
"""Encuestas app config"""
name = 'encuestas.encuesta'
verbose_name = 'Encuestas' | 21.75 | 36 | 0.729885 | #Django
from django.apps import AppConfig
class EncuestasAppConfig(AppConfig):
    """Encuestas app config"""
    # Dotted path of the Django app this config belongs to.
    name = 'encuestas.encuesta'
    # Human-readable name shown in the Django admin.
    verbose_name = 'Encuestas'
9ed73e407562b79ec431ff9e6ae1ee26f6ba03da | 402 | py | Python | PyMOTW/source/collections/collections_deque_rotate.py | axetang/AxePython | 3b517fa3123ce2e939680ad1ae14f7e602d446a6 | [
"Apache-2.0"
] | 1 | 2019-01-04T05:47:50.000Z | 2019-01-04T05:47:50.000Z | PyMOTW/source/collections/collections_deque_rotate.py | axetang/AxePython | 3b517fa3123ce2e939680ad1ae14f7e602d446a6 | [
"Apache-2.0"
] | 1 | 2020-07-18T03:52:03.000Z | 2020-07-18T04:18:01.000Z | PyMOTW/source/collections/collections_deque_rotate.py | axetang/AxePython | 3b517fa3123ce2e939680ad1ae14f7e602d446a6 | [
"Apache-2.0"
] | 2 | 2021-03-06T04:28:32.000Z | 2021-03-06T04:59:17.000Z | #!/usr/bin/env python3
# encoding: utf-8
#
# Copyright (c) 2008 Doug Hellmann All rights reserved.
#
"""Manipulating the order of items in a deque.
"""
#end_pymotw_header
import collections
# A fresh deque with no rotation applied, for comparison.
d = collections.deque(range(10))
print('Normal :', d)
# rotate(n) with positive n moves items from the right end to the left.
d = collections.deque(range(10))
d.rotate(2)
print('Right rotation:', d)
# Negative n rotates the other way (left rotation).
d = collections.deque(range(10))
d.rotate(-2)
print('Left rotation :', d)
| 18.272727 | 55 | 0.689055 | #!/usr/bin/env python3
# encoding: utf-8
#
# Copyright (c) 2008 Doug Hellmann All rights reserved.
#
"""Manipulating the order of items in a deque.
"""
#end_pymotw_header
import collections
d = collections.deque(range(10))
print('Normal :', d)
d = collections.deque(range(10))
d.rotate(2)
print('Right rotation:', d)
d = collections.deque(range(10))
d.rotate(-2)
print('Left rotation :', d)
| 0 | 0 | 0 |
eec9a49cf9c8c2f5bd04068ea05aa3b970a23638 | 4,887 | py | Python | curve_fitting/curve_fitting.py | tufts-ml/covid19-forecasting | b0e3eed6cc03a981598d8f0b7c6fe882310c710d | [
"MIT"
] | 3 | 2020-04-02T23:38:02.000Z | 2020-04-08T18:57:16.000Z | curve_fitting/curve_fitting.py | tufts-ml/covid19-forecasting | b0e3eed6cc03a981598d8f0b7c6fe882310c710d | [
"MIT"
] | 24 | 2020-04-03T13:58:28.000Z | 2021-04-27T02:12:07.000Z | curve_fitting/curve_fitting.py | tufts-ml/covid19-forecasting | b0e3eed6cc03a981598d8f0b7c6fe882310c710d | [
"MIT"
] | null | null | null | import numpy as np
import scipy
import scipy.optimize
import argparse
import sklearn.metrics
import matplotlib.pyplot as plt
import autograd.scipy
import autograd.numpy as ag_np
import autograd
import pandas as pd
## TODO:
# - Need a mapping from timesteps to dates
################## Functions for fitting data ########################
#####################################################################
############## loss calculation ###############
#####################################################################
FUNCTIONS = {'erf': erf, 'ag_erf': ag_erf}
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--function', default='ag_erf')
parser.add_argument('--fit_type', default='cumulative')
parser.add_argument('--lower_bound', default='0.0')
parser.add_argument('--inputfile', default='input_example_mass_positives.csv')
parser.add_argument('--outputfile', default='output_example_mass_positives.csv')
args = parser.parse_args()
fun = FUNCTIONS[args.function]
fit_type = args.fit_type
lower_bound = float(args.lower_bound)
num_params = 3
seed_list = ag_np.arange(5)
x, dates, y = load_data(args.inputfile, fit_type)
best_loss = ag_np.inf
best_seed = 0
def calc_loss(params):
'''
Default loss is MSE.
'''
yhat = fun(x, *params)
loss = MSE(y, yhat)
return loss
for seed in seed_list:
ag_np.random.seed(seed)
initial_guess = ag_np.random.random(num_params)
result = scipy.optimize.minimize(
calc_loss,
initial_guess,
jac=autograd.grad(calc_loss),
method='l-bfgs-b',
constraints={},
# changing the lower bounds shifts the peak, we can explore
# this for the sake of confidence intervals.
bounds=[(0, ag_np.inf), (0, ag_np.inf), (np.max(y)*lower_bound, ag_np.inf)])
params = result.x
loss = calc_loss(params)
if loss < best_loss:
best_loss = loss
best_seed = seed
ag_np.random.seed(best_seed)
initial_guess = ag_np.random.random(num_params)
result = scipy.optimize.minimize(
calc_loss,
initial_guess,
jac=autograd.grad(calc_loss),
method='l-bfgs-b',
constraints={},
bounds=[(0, ag_np.inf), (0, ag_np.inf), (np.max(y)*lower_bound, ag_np.inf)])
params = result.x
save_results(x, y, fun, params, fit_type, dates, args.outputfile)
| 30.166667 | 112 | 0.565173 | import numpy as np
import scipy
import scipy.optimize
import argparse
import sklearn.metrics
import matplotlib.pyplot as plt
import autograd.scipy
import autograd.numpy as ag_np
import autograd
import pandas as pd
## TODO:
# - Need a mapping from timesteps to dates
################## Functions for fitting data ########################
def erf(t, alpha, beta, p):
    """Scaled, shifted error-function sigmoid: p * (erf(alpha*(t - beta)) + 1) / 2.

    alpha controls steepness, beta the midpoint, and p the plateau value.
    """
    shifted = alpha * (t - beta)
    return 0.5 * p * (scipy.special.erf(shifted) + 1.0)
def ag_erf(t, alpha, beta, p):
    """Autograd-differentiable twin of erf(): same formula via autograd.scipy."""
    return 0.5*p*(autograd.scipy.special.erf(alpha*(t - beta)) + 1.0)
#####################################################################
def load_data(input_filename, fit_type):
    """Read the input CSV and return (timesteps, dates, target series).

    fit_type 'cumulative': target is the running total of the 'rate' column.
    fit_type 'log_rate': target is log(rate + 1e-13); the epsilon guards
    against log(0) on zero-rate days.
    """
    df = pd.read_csv(input_filename)
    if fit_type == 'cumulative':
        y = np.asarray(df['rate']).cumsum()
    elif fit_type == 'log_rate':
        y = np.log(np.array(df['rate']) + 1e-13)
    x = ag_np.arange(y.shape[0], dtype=float)
    dates = df['date']
    return x, dates, y
def extend_forecast(x, dates, max_date):
    """Placeholder: extend the timestep/date axis out to max_date (not implemented)."""
    pass
def calc_rate(y):
    """Per-step increments of a cumulative series (inverse of calc_cumulative).

    The first element is y[0] itself (implicit zero before the series).
    """
    deltas = [y[0]] if y.shape[0] else []
    for idx in range(1, y.shape[0]):
        deltas.append(y[idx] - y[idx - 1])
    return np.array(deltas)
def calc_cumulative(y):
    """Running totals of a rate series (inverse of calc_rate)."""
    totals = []
    for idx in range(y.shape[0]):
        previous = totals[-1] if totals else 0
        totals.append(previous + y[idx])
    return np.array(totals)
def save_results(x, y_true, fun, params, fit_type, dates, outputfile):
    """Plot the fit against the data and write the forecast to a CSV.

    Extends the prediction 30 timesteps past the observed range, shows two
    interactive plots (fitted quantity and derived rate), then writes a CSV
    with columns date (currently the raw timestep index -- see TODO below),
    rate and cumulative.  Returns (x_ext, rate).
    NOTE: blocks on plt.show() and mutates global pyplot state.
    """
    x_ext = ag_np.arange(x.shape[0] + 30) # extend prediction by a month
    y = fun(x_ext, *params)
    if fit_type == 'cumulative':
        # Fitted curve is cumulative; differentiate to recover the rate.
        rate = calc_rate(y)
        rate_true = calc_rate(y_true)
    elif fit_type == 'log_rate':
        # Fitted curve is log(rate); exponentiate to recover the rate.
        rate = ag_np.exp(y)
        rate_true = ag_np.exp(y_true)
    plt.scatter(x, y_true, label='True data')
    plt.plot(x_ext, y, label='Prediction')
    plt.ylabel(fit_type)
    plt.show()
    plt.scatter(x, rate_true, label='True data')
    plt.plot(x_ext, rate, label='Prediction')
    plt.ylabel('rate')
    plt.show()
    # TODO: compute dates using mapping
    # save results to outputfile as csv
    df = pd.DataFrame({'date': x_ext,'rate': rate, 'cumulative': calc_cumulative(rate)})
    df.to_csv(outputfile)
    return x_ext, rate
############## loss calculation ###############
def MSE(y_true, y_hat):
    """Mean squared error between targets and predictions."""
    residuals = y_true - y_hat
    return ag_np.sum(residuals ** 2) / y_true.shape[0]
#####################################################################
# Registry mapping the --function CLI choice to its implementation.
FUNCTIONS = {'erf': erf, 'ag_erf': ag_erf}
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--function', default='ag_erf')
    parser.add_argument('--fit_type', default='cumulative')
    parser.add_argument('--lower_bound', default='0.0')
    parser.add_argument('--inputfile', default='input_example_mass_positives.csv')
    parser.add_argument('--outputfile', default='output_example_mass_positives.csv')
    args = parser.parse_args()
    fun = FUNCTIONS[args.function]
    fit_type = args.fit_type
    lower_bound = float(args.lower_bound)
    # Parameters fitted: alpha (steepness), beta (midpoint), p (plateau).
    num_params = 3
    seed_list = ag_np.arange(5)
    x, dates, y = load_data(args.inputfile, fit_type)
    best_loss = ag_np.inf
    best_seed = 0
    def calc_loss(params):
        '''
        Default loss is MSE.
        '''
        yhat = fun(x, *params)
        loss = MSE(y, yhat)
        return loss
    # Multi-start optimisation: try several random initial guesses and
    # remember which seed produced the lowest loss.
    for seed in seed_list:
        ag_np.random.seed(seed)
        initial_guess = ag_np.random.random(num_params)
        result = scipy.optimize.minimize(
            calc_loss,
            initial_guess,
            jac=autograd.grad(calc_loss),
            method='l-bfgs-b',
            constraints={},
            # changing the lower bounds shifts the peak, we can explore
            # this for the sake of confidence intervals.
            bounds=[(0, ag_np.inf), (0, ag_np.inf), (np.max(y)*lower_bound, ag_np.inf)])
        params = result.x
        loss = calc_loss(params)
        if loss < best_loss:
            best_loss = loss
            best_seed = seed
    # Re-run the optimisation from the best seed to reproduce its fitted
    # parameters (the per-seed results above are not retained).
    ag_np.random.seed(best_seed)
    initial_guess = ag_np.random.random(num_params)
    result = scipy.optimize.minimize(
        calc_loss,
        initial_guess,
        jac=autograd.grad(calc_loss),
        method='l-bfgs-b',
        constraints={},
        bounds=[(0, ag_np.inf), (0, ag_np.inf), (np.max(y)*lower_bound, ag_np.inf)])
    params = result.x
    save_results(x, y, fun, params, fit_type, dates, args.outputfile)
| 1,993 | 0 | 184 |
109ba6a43658288de2594170db4de1ef331a0ed7 | 1,697 | py | Python | docs/source/proposals/np-where-override.py | auderson/numba | 3d67c9850ab56457f418cf40af6245fd9c337705 | [
"BSD-2-Clause"
] | 6,620 | 2015-01-04T08:51:04.000Z | 2022-03-31T12:52:18.000Z | docs/source/proposals/np-where-override.py | auderson/numba | 3d67c9850ab56457f418cf40af6245fd9c337705 | [
"BSD-2-Clause"
] | 6,457 | 2015-01-04T03:18:41.000Z | 2022-03-31T17:38:42.000Z | docs/source/proposals/np-where-override.py | auderson/numba | 3d67c9850ab56457f418cf40af6245fd9c337705 | [
"BSD-2-Clause"
] | 930 | 2015-01-25T02:33:03.000Z | 2022-03-30T14:10:32.000Z | import numpy as np
from numba.core import types
from numba.extending import overload
@overload(np.where)
def where(cond, x, y):
"""
Implement np.where().
"""
# Choose implementation based on argument types.
if isinstance(cond, types.Array):
# Array where() => return an array of the same shape
if all(ty.layout == 'C' for ty in (cond, x, y)):
def where_impl(cond, x, y):
"""
Fast implementation for C-contiguous arrays
"""
shape = cond.shape
if x.shape != shape or y.shape != shape:
raise ValueError("all inputs should have the same shape")
res = np.empty_like(x)
cf = cond.flat
xf = x.flat
yf = y.flat
rf = res.flat
for i in range(cond.size):
rf[i] = xf[i] if cf[i] else yf[i]
return res
else:
def where_impl(cond, x, y):
"""
Generic implementation for other arrays
"""
shape = cond.shape
if x.shape != shape or y.shape != shape:
raise ValueError("all inputs should have the same shape")
res = np.empty_like(x)
for idx, c in np.ndenumerate(cond):
res[idx] = x[idx] if c else y[idx]
return res
else:
def where_impl(cond, x, y):
"""
Scalar where() => return a 0-dim array
"""
scal = x if cond else y
return np.full_like(scal, scal)
return where_impl
| 32.634615 | 77 | 0.475545 | import numpy as np
from numba.core import types
from numba.extending import overload
@overload(np.where)
def where(cond, x, y):
"""
Implement np.where().
"""
# Choose implementation based on argument types.
if isinstance(cond, types.Array):
# Array where() => return an array of the same shape
if all(ty.layout == 'C' for ty in (cond, x, y)):
def where_impl(cond, x, y):
"""
Fast implementation for C-contiguous arrays
"""
shape = cond.shape
if x.shape != shape or y.shape != shape:
raise ValueError("all inputs should have the same shape")
res = np.empty_like(x)
cf = cond.flat
xf = x.flat
yf = y.flat
rf = res.flat
for i in range(cond.size):
rf[i] = xf[i] if cf[i] else yf[i]
return res
else:
def where_impl(cond, x, y):
"""
Generic implementation for other arrays
"""
shape = cond.shape
if x.shape != shape or y.shape != shape:
raise ValueError("all inputs should have the same shape")
res = np.empty_like(x)
for idx, c in np.ndenumerate(cond):
res[idx] = x[idx] if c else y[idx]
return res
else:
def where_impl(cond, x, y):
"""
Scalar where() => return a 0-dim array
"""
scal = x if cond else y
return np.full_like(scal, scal)
return where_impl
| 0 | 0 | 0 |
bf1d5832a330171a8f98c1fe7bacdfe8b23b8baa | 3,059 | py | Python | debug_tools.py | Obs01ete/pytorch-detection | 4af02e232b38fd202bb348e9bbe7373c7eba165b | [
"MIT"
] | 11 | 2018-07-24T09:31:19.000Z | 2021-04-07T06:20:38.000Z | debug_tools.py | Obs01ete/pytorch-detection | 4af02e232b38fd202bb348e9bbe7373c7eba165b | [
"MIT"
] | null | null | null | debug_tools.py | Obs01ete/pytorch-detection | 4af02e232b38fd202bb348e9bbe7373c7eba165b | [
"MIT"
] | 1 | 2019-07-10T05:48:15.000Z | 2019-07-10T05:48:15.000Z | import os
import cv2
import itertools
import numpy as np
def dump_images(
        names, pil_images, annotations, detections, stats,
        labelmap, dir):
    """
    Dumps images with bbox overlays to disk.
    Detections are drawn in green, ground-truth annotations in blue
    (colors are BGR since the images are written with OpenCV).
    :param names: batch of sample names
    :param pil_images: batch of original PIL images
    :param annotations: batch of annotations
    :param detections: batch of detections from NN
    :param stats: batch of debug info from a network. Keeps number of anchors that match particular GT box.
    :param labelmap: names of classes
    :param dir: destination directory to save images
    :return: None
    """
    det_color = (0, 255, 0)
    anno_color = (255, 0, 0)
    if annotations is None: annotations = []
    if detections is None: detections = []
    if stats is None: stats = []
    try:
        for ib, (name, pil_img, anno, detection, stat) in \
                enumerate(itertools.zip_longest(names, pil_images, annotations, detections, stats)):
            img = np.asarray(pil_img).copy()
            img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
            # Boxes are stored normalized; scale to pixel coordinates.
            scale = [img.shape[1], img.shape[0], img.shape[1], img.shape[0]]
            if detection is not None:
                for icls, cls_det in enumerate(detection):
                    for det in cls_det:
                        conf = det[0]
                        if conf > 0.0:
                            bbox = det[1:]
                            bbox_pix = bbox * scale
                            # Renamed from 'type' to avoid shadowing the builtin.
                            class_name = labelmap[icls]
                            cv2.rectangle(
                                img,
                                (int(bbox_pix[0]), int(bbox_pix[1])),
                                (int(bbox_pix[2]), int(bbox_pix[3])),
                                det_color, 1)
                            cv2.putText(
                                img,
                                '{} {:.2f}'.format(class_name, conf),
                                (int(bbox_pix[0]), int(bbox_pix[1])+10),
                                cv2.FONT_HERSHEY_SIMPLEX,
                                0.4,
                                det_color)
            if anno is not None and stat is not None:
                for obj, num_matches in zip(anno, stat):
                    bbox = obj['bbox']
                    bbox_pix = bbox * scale
                    cv2.rectangle(
                        img,
                        (int(bbox_pix[0]), int(bbox_pix[1])),
                        (int(bbox_pix[2]), int(bbox_pix[3])),
                        anno_color, 1)
                    cv2.putText(
                        img,
                        obj['type'] + " M{}".format(num_matches), # M - number of matching anchors
                        (int(bbox_pix[0]), int(bbox_pix[1])+10),
                        cv2.FONT_HERSHEY_SIMPLEX,
                        0.4,
                        anno_color)
            filename = name + '.png'
            cv2.imwrite(os.path.join(dir, filename), img)
    except Exception as e:
        # Best-effort debug helper: previously this swallowed every failure
        # silently ('except: pass'), hiding bugs. Report and keep going.
        print('dump_images: failed to dump batch: {}'.format(e))
| 37.304878 | 107 | 0.459954 | import os
import cv2
import itertools
import numpy as np
def dump_images(
    names, pil_images, annotations, detections, stats,
    labelmap, dir):
    """
    Dumps images with bbox overlays to disk.
    :param names: batch of sample names
    :param pil_images: batch of original PIL images
    :param annotations: batch of annotations
    :param detections: batch of detections from NN
    :param stats: batch of debug info from a network. Keeps number of anchors that match particular GT box.
    :param labelmap: names of classes
    :param dir: destination directory to save images
    :return: None
    """
    det_color = (0, 255, 0)  # BGR green: network detections
    anno_color = (255, 0, 0)  # BGR blue: ground-truth annotations
    if annotations is None: annotations = []
    if detections is None: detections = []
    if stats is None: stats = []
    try:
        for ib, (name, pil_img, anno, detection, stat) in \
            enumerate(itertools.zip_longest(names, pil_images, annotations, detections, stats)):
            # PIL gives RGB; OpenCV drawing/encoding expects BGR.
            img = np.asarray(pil_img).copy()
            img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
            # Scale factors to map [x1, y1, x2, y2] box coords to pixels.
            scale = [img.shape[1], img.shape[0], img.shape[1], img.shape[0]]
            if detection is not None:
                # detection is indexed per class; each entry is [conf, x1, y1, x2, y2].
                for icls, cls_det in enumerate(detection):
                    for det in cls_det:
                        conf = det[0]
                        if conf > 0.0:
                            bbox = det[1:]
                            bbox_pix = bbox * scale
                            # NOTE(review): 'type' shadows the builtin here.
                            type = labelmap[icls]
                            cv2.rectangle(
                                img,
                                (int(bbox_pix[0]), int(bbox_pix[1])),
                                (int(bbox_pix[2]), int(bbox_pix[3])),
                                det_color, 1)
                            cv2.putText(
                                img,
                                '{} {:.2f}'.format(type, conf),
                                (int(bbox_pix[0]), int(bbox_pix[1])+10),
                                cv2.FONT_HERSHEY_SIMPLEX,
                                0.4,
                                det_color)
            if anno is not None and stat is not None:
                # One GT box per annotation object, labeled with its anchor-match count.
                for obj, num_matches in zip(anno, stat):
                    bbox = obj['bbox']
                    bbox_pix = bbox * scale
                    cv2.rectangle(
                        img,
                        (int(bbox_pix[0]), int(bbox_pix[1])),
                        (int(bbox_pix[2]), int(bbox_pix[3])),
                        anno_color, 1)
                    cv2.putText(
                        img,
                        obj['type'] + " M{}".format(num_matches), # M - number of matching anchors
                        (int(bbox_pix[0]), int(bbox_pix[1])+10),
                        cv2.FONT_HERSHEY_SIMPLEX,
                        0.4,
                        anno_color)
            filename = name + '.png'
            cv2.imwrite(os.path.join(dir, filename), img)
            pass
    except Exception as e:
        # NOTE(review): best-effort dump -- any failure silently aborts the
        # whole batch. Consider at least logging 'e'.
        pass
    pass
| 0 | 0 | 0 |
5d1fdef795f2ddcf487065b8229340dd1325c90f | 4,984 | py | Python | playhouse/tests/test_gfk.py | alexlatchford/peewee | f3795767f7b46c8b5335ea0257df2a1d269fc85b | [
"MIT"
] | 1 | 2017-04-27T15:04:48.000Z | 2017-04-27T15:04:48.000Z | playhouse/tests/test_gfk.py | alexlatchford/peewee | f3795767f7b46c8b5335ea0257df2a1d269fc85b | [
"MIT"
] | null | null | null | playhouse/tests/test_gfk.py | alexlatchford/peewee | f3795767f7b46c8b5335ea0257df2a1d269fc85b | [
"MIT"
] | 3 | 2019-02-07T04:16:40.000Z | 2021-05-02T17:07:18.000Z | from peewee import *
from playhouse.gfk import *
from playhouse.tests.base import database_initializer
from playhouse.tests.base import ModelTestCase
db = database_initializer.get_in_memory_database()
| 29.491124 | 90 | 0.57183 | from peewee import *
from playhouse.gfk import *
from playhouse.tests.base import database_initializer
from playhouse.tests.base import ModelTestCase
db = database_initializer.get_in_memory_database()
class BaseModel(Model):
    """Base peewee model bound to the shared in-memory test database."""
    class Meta:
        database = db
    def add_tag(self, tag):
        """Create a Tag whose GFK points at this row, save it, and return it."""
        t = Tag(tag=tag)
        t.object = self
        t.save()
        return t
class Tag(BaseModel):
    """A tag attached to an arbitrary model row via a generic foreign key."""
    tag = CharField()
    # (object_type, object_id) are the backing columns for the GFKField below.
    object_type = CharField(null=True)
    object_id = IntegerField(null=True)
    object = GFKField()
    class Meta:
        # Unique index: one tag string per referenced object.
        indexes = (
            (('tag', 'object_type', 'object_id'), True),
        )
        order_by = ('tag',)
class Appetizer(BaseModel):
    """Taggable fixture model; ``tags`` resolves Tags pointing at a row."""
    name = CharField()
    tags = ReverseGFK(Tag)
class Entree(BaseModel):
    """Taggable fixture model; ``tags`` resolves Tags pointing at a row."""
    name = CharField()
    tags = ReverseGFK(Tag)
class Dessert(BaseModel):
    """Taggable fixture model; ``tags`` resolves Tags pointing at a row."""
    name = CharField()
    tags = ReverseGFK(Tag)
class GFKTestCase(ModelTestCase):
    """Tests for GFKField / ReverseGFK using the food/tag fixture models."""
    requires = [Tag, Appetizer, Entree, Dessert]
    # Fixture table: model -> ((name, (tag, ...)), ...), consumed by create().
    data = {
        Appetizer: (
            ('wings', ('fried', 'spicy')),
            ('mozzarella sticks', ('fried', 'sweet')),
            ('potstickers', ('fried',)),
            ('edamame', ('salty',)),
        ),
        Entree: (
            ('phad thai', ('spicy',)),
            ('fried chicken', ('fried', 'salty')),
            ('tacos', ('fried', 'spicy')),
        ),
        Dessert: (
            ('sundae', ('sweet',)),
            ('churro', ('fried', 'sweet')),
        )
    }
    def create(self):
        """Populate the database from the ``data`` fixture table."""
        for model, foods in self.data.items():
            for name, tags in foods:
                inst = model.create(name=name)
                for tag in tags:
                    inst.add_tag(tag)
    def test_creation(self):
        """Saving a GFK fills object_type/object_id; a tag may reference itself."""
        t = Tag.create(tag='a tag')
        t.object = t
        t.save()
        t_db = Tag.get(Tag.id == t.id)
        self.assertEqual(t_db.object_id, t_db._get_pk_value())
        self.assertEqual(t_db.object_type, 'tag')
        self.assertEqual(t_db.object, t_db)
    def test_querying(self):
        """Tags can be filtered by the object the GFK points at."""
        self.create()
        tacos = Entree.get(Entree.name == 'tacos')
        tags = Tag.select().where(Tag.object == tacos).order_by(Tag.tag)
        self.assertEqual([tag.tag for tag in tags], ['fried', 'spicy'])
    def _test_get_create(self, method):
        """Exercise a get-or-create style callable against the GFK columns."""
        a = Appetizer.create(name='walrus mix')
        tag, created = method(tag='walrus-food', object=a)
        self.assertTrue(created)
        self.assertEqual(tag.object, a)
        tag_db = Tag.get(Tag.id == tag.id)
        self.assertEqual(tag_db.object, a)
        # Same (tag, object) pair again: must fetch, not create.
        tag, created = method(tag='walrus-food', object=a)
        self.assertFalse(created)
        self.assertEqual(Tag.select().count(), 1)
        self.assertEqual(tag, tag_db)
        # New tag string on the same object creates a new row.
        tag2, created = method(tag='walrus-treats', object=a)
        self.assertTrue(created)
        tag2_db = Tag.get(Tag.id == tag2.id)
        self.assertEqual(tag2_db.tag, 'walrus-treats')
        self.assertEqual(tag2_db.object, a)
        # Same tag string on a different object also creates a new row.
        b = Appetizer.create(name='walrus-meal')
        tag3, created = method(tag='walrus-treats', object=b)
        self.assertTrue(created)
        tag3_db = Tag.get(Tag.id == tag3.id)
        self.assertEqual(tag3_db.tag, 'walrus-treats')
        self.assertEqual(tag3_db.object, b)
    def test_get_or_create(self):
        """Run the shared get-or-create scenario with Tag.get_or_create."""
        self._test_get_create(Tag.get_or_create)
    def test_gfk_api(self):
        """ReverseGFK works both per instance and at the class level."""
        self.create()
        # test instance api
        for model, foods in self.data.items():
            for food, tags in foods:
                inst = model.get(model.name == food)
                self.assertEqual([t.tag for t in inst.tags], list(tags))
        # test class api and ``object`` api
        apps_tags = [(t.tag, t.object.name) for t in Appetizer.tags.order_by(Tag.id)]
        data_tags = []
        for food, tags in self.data[Appetizer]:
            for t in tags:
                data_tags.append((t, food))
        self.assertEqual(apps_tags, data_tags)
    def test_missing(self):
        """Unset GFK is None; dangling/unknown references raise on access."""
        t = Tag.create(tag='sour')
        self.assertEqual(t.object, None)
        t.object_type = 'appetizer'
        t.object_id = 1
        # accessing the descriptor will raise a DoesNotExist
        self.assertRaises(Appetizer.DoesNotExist, getattr, t, 'object')
        t.object_type = 'unknown'
        t.object_id = 1
        self.assertRaises(AttributeError, getattr, t, 'object')
    def test_set_reverse(self):
        """ReverseGFK assignment accepts both a query and a plain list."""
        # assign query
        e = Entree.create(name='phad thai')
        s = Tag.create(tag='spicy')
        p = Tag.create(tag='peanuts')
        t = Tag.create(tag='thai')
        b = Tag.create(tag='beverage')
        e.tags = Tag.select().where(Tag.tag != 'beverage')
        self.assertEqual([t.tag for t in e.tags], ['peanuts', 'spicy', 'thai'])
        e = Entree.create(name='panang curry')
        c = Tag.create(tag='coconut')
        e.tags = [p, t, c, s]
        self.assertEqual([t.tag for t in e.tags], ['coconut', 'peanuts', 'spicy', 'thai'])
da3ccc219e0c0291ed1548d14f91f164941b5b52 | 3,566 | py | Python | TFTStatsBot/cogs/champions.py | Bmbus/TFTStatsBot | 65da0c871c39b9b8bdabdefbcddbdff27e85fcea | [
"MIT"
] | null | null | null | TFTStatsBot/cogs/champions.py | Bmbus/TFTStatsBot | 65da0c871c39b9b8bdabdefbcddbdff27e85fcea | [
"MIT"
] | null | null | null | TFTStatsBot/cogs/champions.py | Bmbus/TFTStatsBot | 65da0c871c39b9b8bdabdefbcddbdff27e85fcea | [
"MIT"
] | null | null | null | from discord.ext import commands
import requests
from discord import Embed
from disputils import BotEmbedPaginator
| 45.139241 | 128 | 0.568144 | from discord.ext import commands
import requests
from discord import Embed
from disputils import BotEmbedPaginator
class Champions(commands.Cog):
    # Discord cog exposing TFT champion lookup commands, backed by a static
    # champions.json dump (top-level keys are champion names).
    # Command docstrings below are user-facing help text -- left untouched.
    def __init__(self, bot):
        self.bot = bot
        # NOTE(review): the JSON is re-fetched on every invocation with no
        # timeout on requests.get; consider caching and a timeout.
        self.BASEURL = "https://solomid-resources.s3.amazonaws.com/blitz/tft/data/champions.json"
    @commands.group(name="champ", aliases=["champion", "champs"], invoke_without_command=True)
    async def _champ(self, ctx):
        """Gets all available champions!
        Can only be executed on a server
        """
        row_data = requests.get(self.BASEURL)
        data = row_data.json()
        # The JSON's top-level keys are the champion names.
        _champs = []
        for i in data:
            _champs.append(i)
        champs = ", ".join(_champs)
        embed = Embed(title="All available Champions:", description=f"```{champs}```")
        return await ctx.send(embed=embed)
    @_champ.command(name="info", aliases=["information"])
    @commands.cooldown(1, 6.0, commands.BucketType.user)
    async def _champ_info(self, ctx, *, name:str):
        """Gets information about a Champion!
        This command can only be executed on a server.
        """
        row_data = requests.get(url=self.BASEURL)
        data = row_data.json()
        _title = f"ChampInfo ~ {name}"
        # NOTE(review): data[name] raises KeyError for an unknown champion
        # name (name is also case-sensitive); consider a friendly error reply.
        __items = data[name]['items']
        # Three pages, browsed with a reaction paginator: General / Ability / Stats.
        embeds = [
            # General
            Embed(title=_title, description="__**General**__")
            .add_field(name="Origin:", value=data[name]["origin"][0], inline=False)
            .add_field(name="Class:", value=data[name]["class"][0], inline=False)
            .add_field(name="Cost:", value=data[name]["cost"], inline=False)
            .add_field(name="Items:", value=", ".join(__items), inline=False)
            .set_thumbnail(url=self.get_champ_img(name)),
            # Ability
            Embed(title=_title, description="__**Ability**__")
            .add_field(name="Name:", value=data[name]["ability"]["name"], inline=False)
            .add_field(name="Description:", value=data[name]["ability"]["description"], inline=False)
            .add_field(name="Type:", value=data[name]["ability"]["stats"][0]["type"], inline=False)
            .add_field(name="Value:", value=data[name]["ability"]["stats"][0]["value"], inline=False)
            .set_thumbnail(url=self.get_champ_img(name)),
            # Stats
            Embed(title=_title, description="__**Stats**__")
            .add_field(name="Offense", value=f"**Damage:** {data[name]['stats']['offense']['damage']}\n"
                                             f"**Attack Speed:** {data[name]['stats']['offense']['attackSpeed']}\n"
                                             f"**Damage per second:** {data[name]['stats']['offense']['dps']}\n"
                                             f"**Range:** {data[name]['stats']['offense']['range']}", inline=False)
            .add_field(name="Defense", value=f"**Health:** {data[name]['stats']['defense']['health']}\n"
                                             f"**Armor:** {data[name]['stats']['defense']['armor']}\n"
                                             f"**Magic Resist:** {data[name]['stats']['defense']['magicResist']}", inline=False)
            .set_thumbnail(url=self.get_champ_img(name))
        ]
        paginator = BotEmbedPaginator(ctx, embeds)
        return await paginator.run()
    @staticmethod
    def get_champ_img(name:str):
        """Returns the image of an champion"""
        # Pinned to Data Dragon patch 9.14.1; champion icon URL by name.
        return f"https://ddragon.leagueoflegends.com/cdn/9.14.1/img/champion/{name}.png"
def setup(bot):
    # discord.py extension entry point: register the cog when the
    # extension is loaded.
    bot.add_cog(Champions(bot))
25e8763f549dbe42f23a96b48a41e83507ebf36a | 196 | py | Python | contrib/plugins/example/cranecli_plugin_example/__init__.py | friendliai/crane-public | 6f173ebc676f888f097e1d878b600a91a1637867 | [
"Apache-2.0"
] | 2 | 2022-03-13T16:30:34.000Z | 2022-03-13T17:01:17.000Z | contrib/plugins/example/cranecli_plugin_example/__init__.py | friendliai/crane-public | 6f173ebc676f888f097e1d878b600a91a1637867 | [
"Apache-2.0"
] | 1 | 2022-03-13T16:30:20.000Z | 2022-03-13T16:30:20.000Z | contrib/plugins/example/cranecli_plugin_example/__init__.py | friendliai/crane-public | 6f173ebc676f888f097e1d878b600a91a1637867 | [
"Apache-2.0"
] | null | null | null | import typer
# Typer app exported by this example plugin package.
# Docstrings below double as CLI help text -- kept as-is.
app = typer.Typer()
# The callback names the command group; the body is intentionally empty.
@app.callback('example_plugin')
def check_cmd_group():
    """Example plugin."""
# Example subcommand; the body is intentionally empty.
@app.command("first_command")
def _first_command():
    """Example command."""
| 16.333333 | 31 | 0.683673 | import typer
# Typer app exported by this example plugin package.
# Docstrings below double as CLI help text -- kept as-is.
app = typer.Typer()
# The callback names the command group; the body is intentionally empty.
@app.callback('example_plugin')
def check_cmd_group():
    """Example plugin."""
# Example subcommand; the body is intentionally empty.
@app.command("first_command")
def _first_command():
    """Example command."""
| 0 | 0 | 0 |
8e3971f397a3ef8f4a832739b918a40f05532204 | 8,841 | py | Python | src/state_test.py | mapto/- | 532ec719c44eaad405d1bd7b339e92ecbdbe9021 | [
"MIT"
] | null | null | null | src/state_test.py | mapto/- | 532ec719c44eaad405d1bd7b339e92ecbdbe9021 | [
"MIT"
] | null | null | null | src/state_test.py | mapto/- | 532ec719c44eaad405d1bd7b339e92ecbdbe9021 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# coding: utf-8
import pytest # type: ignore
from state import Board, Piece, GameState, GameMove
import dataclasses, json
def test_2_players_board_init(monkeypatch):
    """Make sure if we have just two players in a 4 corner board for them
    to be at the opposite corners instead of next to each other.
    """
    board = Board.create([1, 3])
    # Redundant asserts
    assert board.players == [1, 3]
    # Defaults asserts
    assert board.pieces_per_player == 4
    assert board.board_sides == 4
    assert board.board_side_length == 14
    assert board.finish_zone_length == 5
    # Consistency asserts
    # player_shift = total ring cells divided evenly among the players.
    assert board.player_shift == board.board_side_length * board.board_sides // len(
        board.players
    )
    assert board.path_zone_length == len(board.players) * board.player_shift
    assert (
        board.end_progress
        == board.player_shift * len(board.players) + board.finish_zone_length + 1
    )
    assert len(board.pieces) == len(board.players) * board.pieces_per_player
    # Explicit asserts
    # Four pieces per player, all at progress 0, grouped by player.
    assert board.pieces == [
        Piece(0, 1, 0),
        Piece(1, 1, 0),
        Piece(2, 1, 0),
        Piece(3, 1, 0),
        Piece(0, 3, 0),
        Piece(1, 3, 0),
        Piece(2, 3, 0),
        Piece(3, 3, 0),
    ]
def test_3_players_6_corner_board_init(monkeypatch):
    """Make sure if we have just 3 players in a 5 corner board for them
    to be at the opposite corners instead of next to each other.
    """
    board = Board.create([0, 2, 3], board_sides=6, board_side_length=9)
    # Redundant asserts
    assert board.players == [0, 2, 3]
    assert board.board_sides == 6
    assert board.board_side_length == 9
    # Defaults asserts
    assert board.finish_zone_length == 5
    assert board.pieces_per_player == 4
    # Consistency asserts
    # player_shift = total ring cells divided evenly among the players.
    assert board.player_shift == board.board_side_length * board.board_sides // len(
        board.players
    )
    assert board.path_zone_length == len(board.players) * board.player_shift
    # end_progress == path_zone_length + finish_zone_length + 1 THAT IS
    # end_progress == (board_sides * board_side_length) + finish_zone_length + 1
    assert (
        board.end_progress
        == board.player_shift * len(board.players) + board.finish_zone_length + 1
    )
    assert len(board.pieces) == len(board.players) * board.pieces_per_player
    # Explicit asserts
    # Four pieces per player, all at progress 0, grouped by player.
    assert board.pieces == [
        Piece(0, 0, 0),
        Piece(1, 0, 0),
        Piece(2, 0, 0),
        Piece(3, 0, 0),
        Piece(0, 2, 0),
        Piece(1, 2, 0),
        Piece(2, 2, 0),
        Piece(3, 2, 0),
        Piece(0, 3, 0),
        Piece(1, 3, 0),
        Piece(2, 3, 0),
        Piece(3, 3, 0),
    ]
| 29.767677 | 86 | 0.651058 | #!/usr/bin/env python3
# coding: utf-8
import pytest # type: ignore
from state import Board, Piece, GameState, GameMove
import dataclasses, json
def test_default_board_init(monkeypatch):
    """Board.create() with no arguments: 4 players with 4 pieces each on a
    4x14 ring plus a finish zone of length 5.
    """
    board = Board.create()
    # Default configuration.
    assert board.players == [0, 1, 2, 3]
    assert board.pieces_per_player == 4
    assert board.board_sides == 4
    assert board.board_side_length == 14
    assert board.finish_zone_length == 5
    # Derived quantities stay consistent with the defaults.
    assert board.player_shift == 14
    expected_end = board.player_shift * len(board.players) + board.finish_zone_length + 1
    assert board.end_progress == expected_end
    assert len(board.pieces) == len(board.players) * board.pieces_per_player
    # Every player starts with pieces numbered 0..3, all at progress 0.
    for player in board.players:
        player_pieces = [p for p in board.pieces if p.player == player]
        assert player_pieces == [Piece(number, player, 0) for number in range(4)]
def test_2_players_board_init(monkeypatch):
    """Two players on a default 4-corner board must sit at opposite
    corners (1 and 3), not next to each other.
    """
    board = Board.create([1, 3])
    assert board.players == [1, 3]
    # Defaults are unchanged by the player selection.
    assert board.pieces_per_player == 4
    assert board.board_sides == 4
    assert board.board_side_length == 14
    assert board.finish_zone_length == 5
    # Derived quantities: ring cells split evenly between the two players.
    ring_length = board.board_side_length * board.board_sides
    assert board.player_shift == ring_length // len(board.players)
    assert board.path_zone_length == len(board.players) * board.player_shift
    expected_end = board.player_shift * len(board.players) + board.finish_zone_length + 1
    assert board.end_progress == expected_end
    assert len(board.pieces) == len(board.players) * board.pieces_per_player
    # Four pieces per player, grouped by player, all at progress 0.
    assert board.pieces == [
        Piece(number, player, 0)
        for player in (1, 3)
        for number in range(4)
    ]
def test_3_players_6_corner_board_init(monkeypatch):
    """Three players on a six-sided board of side length 9 are spread over
    corners (0, 2, 3) instead of being packed next to each other.
    """
    board = Board.create([0, 2, 3], board_sides=6, board_side_length=9)
    assert board.players == [0, 2, 3]
    assert board.board_sides == 6
    assert board.board_side_length == 9
    # Unspecified options keep their defaults.
    assert board.finish_zone_length == 5
    assert board.pieces_per_player == 4
    # Derived quantities: ring cells split evenly among the three players;
    # end_progress == path_zone_length + finish_zone_length + 1.
    ring_length = board.board_side_length * board.board_sides
    assert board.player_shift == ring_length // len(board.players)
    assert board.path_zone_length == len(board.players) * board.player_shift
    expected_end = board.player_shift * len(board.players) + board.finish_zone_length + 1
    assert board.end_progress == expected_end
    assert len(board.pieces) == len(board.players) * board.pieces_per_player
    # Four pieces per player, grouped by player, all at progress 0.
    assert board.pieces == [
        Piece(number, player, 0)
        for player in (0, 2, 3)
        for number in range(4)
    ]
def test_custom_board_init(monkeypatch):
    """Fully customized board: 5 players with 1 piece each, 5 sides of
    length 10, and a finish zone of length 3.
    """
    board = Board.create([0, 1, 2, 3, 4], 1, 5, 10, 3)
    assert board.players == [0, 1, 2, 3, 4]
    assert board.pieces_per_player == 1
    assert board.board_sides == 5
    assert board.board_side_length == 10
    assert board.finish_zone_length == 3
    # Derived quantities stay consistent with the custom configuration.
    ring_length = board.board_side_length * board.board_sides
    assert board.player_shift == ring_length // len(board.players)
    assert board.path_zone_length == len(board.players) * board.player_shift
    expected_end = board.player_shift * len(board.players) + board.finish_zone_length + 1
    assert board.end_progress == expected_end
    assert len(board.pieces) == len(board.players) * board.pieces_per_player
    # One piece (number 0) per player, all at progress 0.
    assert board.pieces == [Piece(0, player, 0) for player in board.players]
def test_negative_create_wrong_players_board(monkeypatch):
    """Invalid player configurations must make Board.create raise."""
    # player index bigger then the board
    # with pytest.raises(Exception):
    #     board = Board.create(players=[6, 1], board_sides=5)
    # board with no players
    with pytest.raises(Exception):
        Board.create([])
    # board with duplicate players
    with pytest.raises(Exception):
        Board.create([1, 1])
    # board with too many players
    with pytest.raises(Exception):
        Board.create([0, 1, 2], board_sides=2)
def test_state_next_player(monkeypatch):
    """Turn order over a sparse player set [0, 1, 3, 5].
    NOTE(review): the next_player() asserts are commented out, so only the
    initial current_player is actually verified -- confirm intent.
    """
    board = Board.create([0, 1, 3, 5])
    state = GameState.create(board)
    assert state.current_player == 0
    # assert state.next_player() == 1
    state.current_player = 1
    # assert state.next_player() == 3
    state.current_player = 3
    # assert state.next_player() == 5
    state.current_player = 5
    # assert state.next_player() == 0
def test_game_state_defaults(monkeypatch):
    """A fresh GameState: move 0, no dice rolled (-1), no winners, and a
    single roll-dice action available for player 0."""
    board = Board.create()
    state = GameState.create(board)
    assert state.board == board
    assert state.number == 0
    assert state.dice == -1
    assert state.winners == []
    assert state.current_player == 0
    assert state.valid_actions == [GameMove.roll_dice(player=0)]
def test_board_to_json(monkeypatch):
    """Board and GameState survive dataclasses.asdict + json.dumps without
    raising. Output is not compared yet (see TODOs below)."""
    board = Board.create()
    board_json = json.dumps(dataclasses.asdict(board))
    # print(board_json) TODO: compare expected output
    state = GameState.create(board)
    state_json = json.dumps(dataclasses.asdict(state))
    # print(state_json) TODO: compare expected output
def test_board_relative_position():
    """relative_position() maps a piece's progress to its board cell,
    offset per player; positions beyond the path zone raise.
    """
    board = Board.create()
    # Same progress (20), different players -> different absolute cells.
    for player, expected_cell in ((0, 20), (1, 34), (2, 48), (3, 6)):
        piece = Piece(number=0, player=player, progress=20)
        assert board.relative_position(piece=piece) == expected_cell
    # Progress 61 lies outside the path zone and must be rejected.
    with pytest.raises(Exception):
        board.relative_position(piece=Piece(number=0, player=0, progress=61))
def test_board_is_on_start():
    """is_on_start() holds only for a piece still at progress 0."""
    board = Board.create()
    for progress, expected in ((0, True), (1, False), (2, False)):
        piece = Piece(number=0, player=0, progress=progress)
        assert bool(board.is_on_start(piece=piece)) is expected
def test_board_is_on_path():
    """is_on_path() holds for progress inside the path zone (1..path end),
    not for the start (0) or the finish area (61)."""
    board = Board.create()
    for progress, expected in ((0, False), (1, True), (10, True), (61, False)):
        piece = Piece(number=0, player=0, progress=progress)
        assert bool(board.is_on_path(piece=piece)) is expected
def test_board_is_on_finish():
    """is_on_finish() holds inside the finish zone (e.g. 61) but not on the
    path (56) or on the final target cell (62)."""
    board = Board.create()
    for progress, expected in ((56, False), (61, True), (62, False)):
        piece = Piece(number=0, player=0, progress=progress)
        assert bool(board.is_on_finish(piece=piece)) is expected
def test_board_is_on_target():
    """is_on_target() holds only for the exact target progress (62)."""
    board = Board.create()
    for progress, expected in ((61, False), (62, True), (66, False)):
        piece = Piece(number=0, player=0, progress=progress)
        assert bool(board.is_on_target(piece=piece)) is expected
| 5,855 | 0 | 253 |
c2b3c5c10be5ab1bf07ac062a8e5b7ccb3107bd2 | 697 | py | Python | hcipy/atmosphere/__init__.py | kian1377/hcipy | f398e82797b3adbc263e9a35d9389ba7b62342f2 | [
"MIT"
] | 55 | 2018-06-29T01:13:26.000Z | 2022-03-13T09:18:06.000Z | hcipy/atmosphere/__init__.py | kian1377/hcipy | f398e82797b3adbc263e9a35d9389ba7b62342f2 | [
"MIT"
] | 121 | 2018-06-12T05:01:05.000Z | 2022-02-10T20:11:13.000Z | hcipy/atmosphere/__init__.py | kian1377/hcipy | f398e82797b3adbc263e9a35d9389ba7b62342f2 | [
"MIT"
] | 21 | 2018-07-09T11:01:29.000Z | 2022-03-15T02:47:24.000Z | __all__ = [
'MultiLayerAtmosphere',
'AtmosphericLayer',
'phase_covariance_von_karman',
'phase_structure_function_von_karman',
'power_spectral_density_von_karman',
'Cn_squared_from_fried_parameter',
'fried_parameter_from_Cn_squared',
'seeing_to_fried_parameter',
'fried_parameter_to_seeing',
'FiniteAtmosphericLayer',
'InfiniteAtmosphericLayer',
'ModalAdaptiveOpticsLayer',
'make_standard_atmospheric_layers',
'make_las_campanas_atmospheric_layers'
]
from .atmospheric_model import *
from .finite_atmospheric_layer import *
from .infinite_atmospheric_layer import *
from .modal_adaptive_optics_layer import *
from .standard_atmosphere import *
| 30.304348 | 42 | 0.790531 | __all__ = [
'MultiLayerAtmosphere',
'AtmosphericLayer',
'phase_covariance_von_karman',
'phase_structure_function_von_karman',
'power_spectral_density_von_karman',
'Cn_squared_from_fried_parameter',
'fried_parameter_from_Cn_squared',
'seeing_to_fried_parameter',
'fried_parameter_to_seeing',
'FiniteAtmosphericLayer',
'InfiniteAtmosphericLayer',
'ModalAdaptiveOpticsLayer',
'make_standard_atmospheric_layers',
'make_las_campanas_atmospheric_layers'
]
from .atmospheric_model import *
from .finite_atmospheric_layer import *
from .infinite_atmospheric_layer import *
from .modal_adaptive_optics_layer import *
from .standard_atmosphere import *
| 0 | 0 | 0 |
640d992c68c2edd7b063e2365b93402278063ebb | 2,794 | py | Python | trait_documenter/module_trait_documenter.py | enthought/trait-documenter | b1da37986008d2558a0e9b13b13c7a75e7b15c7a | [
"BSD-3-Clause"
] | null | null | null | trait_documenter/module_trait_documenter.py | enthought/trait-documenter | b1da37986008d2558a0e9b13b13c7a75e7b15c7a | [
"BSD-3-Clause"
] | 18 | 2015-07-21T17:35:25.000Z | 2021-06-15T07:15:40.000Z | trait_documenter/module_trait_documenter.py | enthought/trait-documenter | b1da37986008d2558a0e9b13b13c7a75e7b15c7a | [
"BSD-3-Clause"
] | null | null | null | # ---------------------------------------------------------------------------
#
# Copyright (c) 2014, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in /LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
#
# ---------------------------------------------------------------------------
from __future__ import unicode_literals
from sphinx.ext.autodoc import (
ModuleLevelDocumenter, ModuleDocumenter, annotation_option, SUPPRESS)
from .util import get_trait_definition, DefinitionError
class ModuleTraitDocumenter(ModuleLevelDocumenter):
    """ Specialised Documenter subclass for module level traits.
    The class defines a new documenter that recovers the trait definition
    signature of class level traits.
    """
    # Reuse the stock 'data' directive for module-level attributes.
    objtype = 'data'
    member_order = 40
    option_spec = dict(ModuleLevelDocumenter.option_spec)
    option_spec["annotation"] = annotation_option
    # must be higher than other data documenters
    # NOTE(review): the value is negative -- confirm it really outranks the
    # stock data documenter as the comment above claims.
    priority = -5
    @classmethod
    def can_document_member(cls, member, membername, isattr, parent):
        """ Check that the documented member is a trait instance.
        """
        # A trait handle exposes as_ctrait(); only attributes of modules apply.
        return (
            isattr and
            hasattr(member, 'as_ctrait') and
            isinstance(parent, ModuleDocumenter))
    def document_members(self, all_members=False):
        """ Trait attributes have no members """
    def add_directive_header(self, sig):
        """ Add the sphinx directives.
        Add the 'attribute' directive with the annotation option
        set to the trait definition.
        """
        ModuleLevelDocumenter.add_directive_header(self, sig)
        # Older Sphinx releases lack get_sourcename(); fall back to the
        # generic '<autodoc>' tag.
        if hasattr(self, 'get_sourcename'):
            sourcename = self.get_sourcename()
        else:
            sourcename = '<autodoc>'
        if not self.options.annotation:
            try:
                definition = get_trait_definition(
                    self.parent, self.object_name)
            except DefinitionError as error:
                # The trait definition could not be recovered: warn and emit
                # no annotation at all.
                self.directive.warn(error.args[0])
                return
            self.add_line(
                ' :annotation: = {0}'.format(definition), sourcename)
        elif self.options.annotation is SUPPRESS:
            pass
        else:
            self.add_line(
                ' :annotation: %s' % self.options.annotation, sourcename)
| 34.925 | 80 | 0.6267 | # ---------------------------------------------------------------------------
#
# Copyright (c) 2014, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in /LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
#
# ---------------------------------------------------------------------------
from __future__ import unicode_literals
from sphinx.ext.autodoc import (
ModuleLevelDocumenter, ModuleDocumenter, annotation_option, SUPPRESS)
from .util import get_trait_definition, DefinitionError
class ModuleTraitDocumenter(ModuleLevelDocumenter):
    """ Specialised Documenter subclass for module level traits.
    The class defines a new documenter that recovers the trait definition
    signature of class level traits.
    """
    # Reuse the stock 'data' directive for module-level attributes.
    objtype = 'data'
    member_order = 40
    option_spec = dict(ModuleLevelDocumenter.option_spec)
    option_spec["annotation"] = annotation_option
    # must be higher than other data documenters
    # NOTE(review): the value is negative -- confirm it really outranks the
    # stock data documenter as the comment above claims.
    priority = -5
    @classmethod
    def can_document_member(cls, member, membername, isattr, parent):
        """ Check that the documented member is a trait instance.
        """
        # A trait handle exposes as_ctrait(); only attributes of modules apply.
        return (
            isattr and
            hasattr(member, 'as_ctrait') and
            isinstance(parent, ModuleDocumenter))
    def document_members(self, all_members=False):
        """ Trait attributes have no members """
    def add_content(self, more_content, no_docstring=False):
        """Add directive content, never reading a docstring off the trait."""
        # Never try to get a docstring from the trait object.
        ModuleLevelDocumenter.add_content(self, more_content, no_docstring=True)
    def add_directive_header(self, sig):
        """ Add the sphinx directives.
        Add the 'attribute' directive with the annotation option
        set to the trait definition.
        """
        ModuleLevelDocumenter.add_directive_header(self, sig)
        # Older Sphinx releases lack get_sourcename(); fall back to the
        # generic '<autodoc>' tag.
        if hasattr(self, 'get_sourcename'):
            sourcename = self.get_sourcename()
        else:
            sourcename = '<autodoc>'
        if not self.options.annotation:
            try:
                definition = get_trait_definition(
                    self.parent, self.object_name)
            except DefinitionError as error:
                # The trait definition could not be recovered: warn and emit
                # no annotation at all.
                self.directive.warn(error.args[0])
                return
            self.add_line(
                ' :annotation: = {0}'.format(definition), sourcename)
        elif self.options.annotation is SUPPRESS:
            pass
        else:
            self.add_line(
                ' :annotation: %s' % self.options.annotation, sourcename)
| 178 | 0 | 27 |
c86e3d3957b37e243483942d1307c1790c27f327 | 1,478 | py | Python | photos/views.py | TracyOgutu/PersonalGallery | e1c856b2ae97beaa9a0d863bf5577566f0561648 | [
"Unlicense"
] | null | null | null | photos/views.py | TracyOgutu/PersonalGallery | e1c856b2ae97beaa9a0d863bf5577566f0561648 | [
"Unlicense"
] | 4 | 2020-06-06T00:18:34.000Z | 2021-09-08T01:32:07.000Z | photos/views.py | TracyOgutu/PersonalGallery | e1c856b2ae97beaa9a0d863bf5577566f0561648 | [
"Unlicense"
] | null | null | null | from django.shortcuts import render
from django.http import HttpResponse
from .models import Image
# Create your views here.
| 36.95 | 111 | 0.716509 | from django.shortcuts import render
from django.http import HttpResponse
from .models import Image
# Create your views here.
def welcome(request):
    """Landing page: render the gallery with every stored photo."""
    photos = Image.display_photos()
    return render(request, 'welcome.html',{"photos":photos})
def singlephoto(request,photoid):
    """Render the detail page for a single photo.

    Raises Http404 when no Image with the given id exists.
    """
    # Local import keeps the fix self-contained; Http404 was not imported
    # at module level.
    from django.http import Http404
    try:
        singlephoto=Image.objects.get(id=photoid)
    except Image.DoesNotExist:
        # Bug fix: the original caught a bare 'DoesNotExist' and raised a
        # bare 'Http404' -- neither name is defined in this module, so a
        # missing photo produced a NameError instead of a 404 page.
        raise Http404()
    return render(request,'photodetail.html',{"photo":singlephoto})
def search_category(request):
    """Render photos matching the 'photocategory' query parameter, or a
    prompt message when the parameter is absent or empty."""
    if 'photocategory' in request.GET and request.GET["photocategory"]:
        search_term = request.GET.get("photocategory")
        searched_categories = Image.search_by_category(search_term)
        message = f"{search_term}"
        return render(request, 'searchcategory.html',{"message":message,"categoryphotos": searched_categories})
    else:
        message = "You haven't searched for any category"
        return render(request, 'searchcategory.html',{"message":message})
def search_location(request):
    """Render photos matching the 'photolocation' query parameter, or a
    prompt message when the parameter is absent or empty."""
    if 'photolocation' in request.GET and request.GET["photolocation"]:
        search_term=request.GET.get("photolocation")
        searched_locations=Image.filter_by_location(search_term)
        message=f"{search_term}"
        return render(request,'searchlocation.html',{"message":message,"locationphotos":searched_locations})
    else:
        message ="You haven't searched for any location"
        return render(request,'searchlocation.html',{"message":message})
443055790eec7eadaf8911fdb0558e270c1e2398 | 813 | py | Python | src/dictionaries/frequency/wordfreq/wordfreq-en.py | henge-tech/henge | 33d958cf4e170fe27c92fd6dd426558d81ba46cb | [
"MIT"
] | 2 | 2016-08-13T03:14:37.000Z | 2016-08-21T14:09:13.000Z | src/dictionaries/frequency/wordfreq/wordfreq-en.py | koseki/wordcircle | 17472c450b89fc780765dcb8228b27eb60dd6ea5 | [
"MIT"
] | 9 | 2017-09-18T08:37:47.000Z | 2022-02-26T03:35:15.000Z | src/dictionaries/frequency/wordfreq/wordfreq-en.py | koseki/wordcircle | 17472c450b89fc780765dcb8228b27eb60dd6ea5 | [
"MIT"
] | null | null | null | #! /usr/bin/env python
from wordfreq import word_frequency, iter_wordlist
import regex
iter = iter_wordlist('en', 'large')
re_nonlatin = regex.compile('[^-_\p{Latin}\d\.\']')
re_alphabet = regex.compile('[a-z]', regex.IGNORECASE)
re_underscore = regex.compile('_')
last_freq = -1
position = 0
current_line = 0
for word in iter:
current_line += 1
# skip non english words, emoji, etc.
if re_nonlatin.search(word):
continue
# skip '123.45', 'ŭ', etc.
if not re_alphabet.search(word):
continue
# skip 'x_x', 'r_e_t_w_e_e_t', etc.
if re_underscore.search(word):
continue
freq = word_frequency(word, 'en', 'large')
if freq != last_freq:
last_freq = freq
position = current_line
print("%d\t%s\t%f" % (position, word, freq * 1e6))
| 22.583333 | 54 | 0.635916 | #! /usr/bin/env python
# Emit an English frequency list as "rank<TAB>word<TAB>freq-per-million".
# Words that share the same frequency share the rank (position) of the
# first word encountered at that frequency.
from wordfreq import word_frequency, iter_wordlist
import regex
# NOTE(review): ``iter`` shadows the builtin of the same name.
iter = iter_wordlist('en', 'large')
# matches any character outside latin letters, digits, '-', '_', '.', "'"
re_nonlatin = regex.compile('[^-_\p{Latin}\d\.\']')
re_alphabet = regex.compile('[a-z]', regex.IGNORECASE)
re_underscore = regex.compile('_')
last_freq = -1      # frequency of the previous emitted word (-1 = sentinel)
position = 0        # rank shared by all words of the current frequency
current_line = 0    # 1-based index of the word in raw iteration order
for word in iter:
    current_line += 1
    # skip non english words, emoji, etc.
    if re_nonlatin.search(word):
        continue
    # skip '123.45', 'ŭ', etc.
    if not re_alphabet.search(word):
        continue
    # skip 'x_x', 'r_e_t_w_e_e_t', etc.
    if re_underscore.search(word):
        continue
    freq = word_frequency(word, 'en', 'large')
    if freq != last_freq:
        # new frequency bucket: the shared rank jumps to this position
        last_freq = freq
        position = current_line
    # frequency is scaled to occurrences per million words
    print("%d\t%s\t%f" % (position, word, freq * 1e6))
| 0 | 0 | 0 |
f751044e28cb4bad581409c61f6d9e220bf09d27 | 3,184 | py | Python | setup.py | unicef/etools-permissions | 7a6da87c9829290af3cea458314e60dd6d1239fd | [
"Apache-2.0"
] | null | null | null | setup.py | unicef/etools-permissions | 7a6da87c9829290af3cea458314e60dd6d1239fd | [
"Apache-2.0"
] | null | null | null | setup.py | unicef/etools-permissions | 7a6da87c9829290af3cea458314e60dd6d1239fd | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import ast
import codecs
import os.path
import re
import subprocess
import sys
from codecs import open
from distutils import log
from distutils.errors import DistutilsError
from setuptools import find_packages, setup
from setuptools.command.install import install
from setuptools.command.sdist import sdist as BaseSDistCommand
ROOT = os.path.realpath(os.path.dirname(__file__))
init = os.path.join(ROOT, 'src', 'etools_permissions', '__init__.py')
_version_re = re.compile(r'__version__\s+=\s+(.*)')
_name_re = re.compile(r'NAME\s+=\s+(.*)')
sys.path.insert(0, os.path.join(ROOT, 'src'))
with open(init, 'rb') as f:
content = f.read().decode('utf-8')
VERSION = str(ast.literal_eval(_version_re.search(content).group(1)))
NAME = str(ast.literal_eval(_name_re.search(content).group(1)))
class VerifyTagVersion(install):
"""Verify that the git tag matches version"""
setup(name=NAME,
version=VERSION,
url='https://github.com/unicef/etools-permissions',
author='UNICEF',
author_email='dev@unicef.org',
license="Apache 2 License",
description='Django package that handles permissions',
long_description=codecs.open('README.rst').read(),
package_dir={'': 'src'},
packages=find_packages(where='src'),
include_package_data=True,
install_requires=read('install.pip'),
extras_require={
'test': read('install.pip', 'testing.pip'),
},
platforms=['any'],
classifiers=[
'Environment :: Web Environment',
'Programming Language :: Python :: 3.6',
'Framework :: Django',
'Intended Audience :: Developers'],
scripts=[],
cmdclass={
'sdist': SDistCommand,
"verify": VerifyTagVersion,
}
)
| 31.524752 | 139 | 0.617148 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import ast
import codecs
import os.path
import re
import subprocess
import sys
from codecs import open
from distutils import log
from distutils.errors import DistutilsError
from setuptools import find_packages, setup
from setuptools.command.install import install
from setuptools.command.sdist import sdist as BaseSDistCommand
# Locate the package __init__ and scrape __version__ / NAME from it with
# regex + literal_eval, so setup.py never has to import the package.
ROOT = os.path.realpath(os.path.dirname(__file__))
init = os.path.join(ROOT, 'src', 'etools_permissions', '__init__.py')
_version_re = re.compile(r'__version__\s+=\s+(.*)')
_name_re = re.compile(r'NAME\s+=\s+(.*)')
sys.path.insert(0, os.path.join(ROOT, 'src'))
with open(init, 'rb') as f:
    content = f.read().decode('utf-8')
# literal_eval safely strips the quotes around the assigned literal
VERSION = str(ast.literal_eval(_version_re.search(content).group(1)))
NAME = str(ast.literal_eval(_name_re.search(content).group(1)))
def read(*files):
    '''Concatenate requirement files from src/requirements, dropping
    pip option lines (those starting with "-").'''
    lines = []
    for name in files:
        path = os.path.join(ROOT, 'src', 'requirements', name)
        lines.extend(codecs.open(path, 'r').readlines())
    return "\n".join(line for line in lines if not line.startswith('-'))
def check(cmd, filename):
    '''Verify a requirements file matches what pipenv currently locks.

    Runs *cmd* (a ``pipenv lock`` invocation), extracts the bare package
    names from its output and from ``src/requirements/<filename>``, and
    raises DistutilsError when the two sets differ.
    '''
    out = subprocess.run(cmd, stdout=subprocess.PIPE)
    f = os.path.join('src', 'requirements', filename)
    reqs = codecs.open(os.path.join(ROOT, f), 'r').readlines()
    # Split each line on the first version-comparison operator to get the
    # bare package name.  Bug fix: the previous pattern "(==|>=|<=>|<|)"
    # ended in an EMPTY alternative, which on Python >= 3.7 matches at
    # every position, so re.split reduced every name to '' and the
    # comparison below could never fail.
    splitter = re.compile(r"==|>=|<=|<|>")
    existing = {splitter.split(name[:-1])[0] for name in reqs}
    declared = {splitter.split(name)[0]
                for name in out.stdout.decode('utf8').split("\n")
                if name and not name.startswith('-')}
    if existing != declared:
        # include the offending file and the command for easier debugging
        # (the old template had no placeholders, so its .format() args
        # were silently discarded, and "requiremets" was a typo)
        msg = ("Requirements file {} not updated.\n"
               "Run 'make requirements'\n"
               "(checked against: {})").format(f, ' '.join(cmd))
        raise DistutilsError(msg)
class SDistCommand(BaseSDistCommand):
    '''sdist that first verifies the requirement files are in sync
    with the Pipfile lock before building the distribution.'''
    def run(self):
        checks = {
            'install.pip': ['pipenv', 'lock', '--requirements'],
            'testing.pip': ['pipenv', 'lock', '-d', '--requirements'],
        }
        for target, command in checks.items():
            check(command, target)
        super().run()
class VerifyTagVersion(install):
    '''Abort ``verify`` when the CI git tag (CIRCLE_TAG) does not match
    the package VERSION.'''
    def run(self):
        tag = os.getenv("CIRCLE_TAG")
        if tag != VERSION:
            sys.exit(
                "Git tag: {} does not match the version of this app: {}".format(
                    tag, VERSION))
# Package metadata; install/test requirements are read from
# src/requirements/*.pip so they stay in sync with the Pipfile
# (enforced at sdist time by SDistCommand).
setup(name=NAME,
      version=VERSION,
      url='https://github.com/unicef/etools-permissions',
      author='UNICEF',
      author_email='dev@unicef.org',
      license="Apache 2 License",
      description='Django package that handles permissions',
      long_description=codecs.open('README.rst').read(),
      package_dir={'': 'src'},
      packages=find_packages(where='src'),
      include_package_data=True,
      install_requires=read('install.pip'),
      extras_require={
          'test': read('install.pip', 'testing.pip'),
      },
      platforms=['any'],
      classifiers=[
          'Environment :: Web Environment',
          'Programming Language :: Python :: 3.6',
          'Framework :: Django',
          'Intended Audience :: Developers'],
      scripts=[],
      cmdclass={
          'sdist': SDistCommand,
          "verify": VerifyTagVersion,
      }
      )
| 1,227 | 16 | 123 |
a29321a5dfee1bcf168d56d8a42fcfb8f629e1cb | 1,123 | py | Python | Python3/Books/Douson/chapter12/rotate_sprite.py | neon1ks/Study | 5d40171cf3bf5e8d3a95539e91f5afec54d1daf3 | [
"MIT"
] | null | null | null | Python3/Books/Douson/chapter12/rotate_sprite.py | neon1ks/Study | 5d40171cf3bf5e8d3a95539e91f5afec54d1daf3 | [
"MIT"
] | null | null | null | Python3/Books/Douson/chapter12/rotate_sprite.py | neon1ks/Study | 5d40171cf3bf5e8d3a95539e91f5afec54d1daf3 | [
"MIT"
] | null | null | null | # Rotate Sprite
# Demonstrates rotating a sprite
from livewires import games
games.init(screen_width = 640, screen_height = 480, fps = 50)
class Ship(games.Sprite):
""" A rotating ship. """
def update(self):
""" Rotate based on keys pressed. """
if games.keyboard.is_pressed(games.K_RIGHT):
self.angle += 1
if games.keyboard.is_pressed(games.K_LEFT):
self.angle -= 1
if games.keyboard.is_pressed(games.K_1):
self.angle = 0
if games.keyboard.is_pressed(games.K_2):
self.angle = 90
if games.keyboard.is_pressed(games.K_3):
self.angle = 180
if games.keyboard.is_pressed(games.K_4):
self.angle = 270
main()
| 28.794872 | 70 | 0.606411 | # Rotate Sprite
# Demonstrates rotating a sprite
from livewires import games
games.init(screen_width = 640, screen_height = 480, fps = 50)
class Ship(games.Sprite):
    """ A rotating ship. """
    def update(self):
        """ Rotate based on keys pressed. """
        pressed = games.keyboard.is_pressed
        # arrow keys nudge the heading one degree per frame
        if pressed(games.K_RIGHT):
            self.angle += 1
        if pressed(games.K_LEFT):
            self.angle -= 1
        # number keys snap to an absolute heading; when several are held,
        # later keys win, matching the original check order
        for key, heading in ((games.K_1, 0), (games.K_2, 90),
                             (games.K_3, 180), (games.K_4, 270)):
            if pressed(key):
                self.angle = heading
def main():
    # opaque star-field backdrop stretched over the whole screen
    nebula_image = games.load_image("nebula.jpg", transparent = False)
    games.screen.background = nebula_image
    ship_image = games.load_image("ship.bmp")
    # start the ship centred on the 640x480 screen set up by games.init
    the_ship = Ship(image = ship_image,
                    x = games.screen.width/2,
                    y = games.screen.height/2)
    games.screen.add(the_ship)
    games.screen.mainloop()
main()
| 346 | 0 | 24 |
0e56b0b9402261570a2145b88b4a3fc791c04b2d | 106 | py | Python | Code/data/__init__.py | SimpleLonely/DataIntegration | 7222c5fde66608a8fa68ae398c5f8116fe3776f3 | [
"MIT"
] | null | null | null | Code/data/__init__.py | SimpleLonely/DataIntegration | 7222c5fde66608a8fa68ae398c5f8116fe3776f3 | [
"MIT"
] | 2 | 2021-03-31T19:52:58.000Z | 2021-12-13T20:43:37.000Z | Code/data/__init__.py | SimpleLonely/DataIntegration | 7222c5fde66608a8fa68ae398c5f8116fe3776f3 | [
"MIT"
] | 1 | 2020-07-04T12:26:27.000Z | 2020-07-04T12:26:27.000Z | __all__ = ['get_bond_yields','get_company','get_fund','get_manager','get_stock_holders','return_rate_dao'] | 106 | 106 | 0.792453 | __all__ = ['get_bond_yields','get_company','get_fund','get_manager','get_stock_holders','return_rate_dao'] | 0 | 0 | 0 |
c5d9bcd78d1ba9621a55491bdce0f549cc198d5e | 964 | py | Python | examples/07_compare_extthiem3d_grfsteady.py | JarnoHerr/AnaFlow | a7c56cdadf90d652f80bc1e1d38d3687d0365a63 | [
"MIT"
] | null | null | null | examples/07_compare_extthiem3d_grfsteady.py | JarnoHerr/AnaFlow | a7c56cdadf90d652f80bc1e1d38d3687d0365a63 | [
"MIT"
] | null | null | null | examples/07_compare_extthiem3d_grfsteady.py | JarnoHerr/AnaFlow | a7c56cdadf90d652f80bc1e1d38d3687d0365a63 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import numpy as np
from matplotlib import pyplot as plt
from anaflow import ext_thiem_3d, ext_grf_steady
from anaflow.tools.coarse_graining import K_CG
rad = np.geomspace(0.05, 4) # radius from the pumping well in [0, 4]
r_ref = 2.0 # reference radius
var = 0.5 # variance of the log-transmissivity
len_scale = 10.0 # correlation length of the log-transmissivity
KG = 1e-4 # the geometric mean of the transmissivity
anis = 0.7 # aniso ratio
rate = -1e-4 # pumping rate
head1 = ext_thiem_3d(rad, r_ref, KG, var, len_scale, anis, 1, rate)
head2 = ext_grf_steady(rad, r_ref, K_CG, rate=rate, cond_gmean=KG, var=var, len_scale=len_scale, anis=anis)
plt.plot(rad, head1, label="Ext Thiem 3D")
plt.plot(rad, head2, label="grf(K_CG)", linestyle="--")
plt.xlabel("r in [m]")
plt.ylabel("h in [m]")
plt.legend()
plt.tight_layout()
plt.show()
| 35.703704 | 107 | 0.645228 | # -*- coding: utf-8 -*-
import numpy as np
from matplotlib import pyplot as plt
from anaflow import ext_thiem_3d, ext_grf_steady
from anaflow.tools.coarse_graining import K_CG
# Compare the extended Thiem 3D head solution with the general radial
# flow (GRF) steady-state solver driven by the coarse-graining
# conductivity K_CG -- the two curves should coincide.
rad = np.geomspace(0.05, 4)  # radius from the pumping well in [0, 4]
r_ref = 2.0  # reference radius
var = 0.5  # variance of the log-transmissivity
len_scale = 10.0  # correlation length of the log-transmissivity
KG = 1e-4  # the geometric mean of the transmissivity
anis = 0.7  # aniso ratio
rate = -1e-4  # pumping rate
# NOTE(review): the positional ``1`` is presumably the aquifer thickness
# (lat_ext) -- confirm against the anaflow ext_thiem_3d signature.
head1 = ext_thiem_3d(rad, r_ref, KG, var, len_scale, anis, 1, rate)
head2 = ext_grf_steady(rad, r_ref, K_CG, rate=rate, cond_gmean=KG, var=var, len_scale=len_scale, anis=anis)
plt.plot(rad, head1, label="Ext Thiem 3D")
plt.plot(rad, head2, label="grf(K_CG)", linestyle="--")
plt.xlabel("r in [m]")
plt.ylabel("h in [m]")
plt.legend()
plt.tight_layout()
plt.show()
| 0 | 0 | 0 |
25307647b0c510b3e974b4291d18d6741f2f4c90 | 5,900 | py | Python | source/analysis/analysis_controller.py | JoZimmer/ParOptBeam | 50d15d8d822a2718f2932807e06c4a7e02f866a3 | [
"BSD-3-Clause"
] | 1 | 2021-04-09T14:08:20.000Z | 2021-04-09T14:08:20.000Z | source/analysis/analysis_controller.py | JoZimmer/ParOptBeam | 50d15d8d822a2718f2932807e06c4a7e02f866a3 | [
"BSD-3-Clause"
] | 2 | 2021-04-28T15:05:01.000Z | 2021-11-10T15:12:56.000Z | source/analysis/analysis_controller.py | JoZimmer/ParOptBeam | 50d15d8d822a2718f2932807e06c4a7e02f866a3 | [
"BSD-3-Clause"
] | 2 | 2021-02-01T08:49:45.000Z | 2021-08-10T02:07:36.000Z | from os.path import join, isdir
from os import makedirs
from matplotlib.backends.backend_pdf import PdfPages
from source.model.structure_model import StraightBeam
from source.auxiliary.validate_and_assign_defaults import validate_and_assign_defaults
from source.auxiliary.other_utilities import get_adjusted_path_string
from source.auxiliary import global_definitions as GD
class AnalysisController(object):
"""
Dervied class for the dynamic analysis of a given structure model
"""
POSSIBLE_ANALYSES = ['eigenvalue_analysis',
'dynamic_analysis',
'static_analysis']
# using these as default or fallback settings
DEFAULT_SETTINGS = {
"global_output_folder": "some/path",
"model_properties": {},
"report_options": {},
"runs": [],
"skin_model_parameters": {}}
| 44.360902 | 119 | 0.589322 | from os.path import join, isdir
from os import makedirs
from matplotlib.backends.backend_pdf import PdfPages
from source.model.structure_model import StraightBeam
from source.auxiliary.validate_and_assign_defaults import validate_and_assign_defaults
from source.auxiliary.other_utilities import get_adjusted_path_string
from source.auxiliary import global_definitions as GD
class AnalysisController(object):
    """
    Sets up and runs the analyses (eigenvalue, dynamic, static) requested
    in the parameters for a given StraightBeam structure model, and
    collects their results, plots and reports.
    """
    POSSIBLE_ANALYSES = ['eigenvalue_analysis',
                         'dynamic_analysis',
                         'static_analysis']
    # using these as default or fallback settings
    DEFAULT_SETTINGS = {
        "global_output_folder": "some/path",
        "model_properties": {},
        "report_options": {},
        "runs": [],
        "skin_model_parameters": {}}
    def __init__(self, model, parameters):
        # only StraightBeam models are supported
        if not (isinstance(model, StraightBeam)):
            err_msg = "The proivded model is of type \"" + \
                      str(type(model)) + "\"\n"
            err_msg += "Has to be of type \"<class \'StraigthBeam\'>\""
            raise Exception(err_msg)
        self.model = model
        # validating and assign model parameters
        validate_and_assign_defaults(
            AnalysisController.DEFAULT_SETTINGS, parameters)
        self.parameters = parameters
        # "some/path" is the DEFAULT_SETTINGS placeholder: fall back to a
        # folder named after the model
        if get_adjusted_path_string(self.parameters['global_output_folder']) == get_adjusted_path_string("some/path"):
            self.global_output_folder = join("output", self.model.name)
        else:
            self.global_output_folder = join(
                "output", get_adjusted_path_string(self.parameters['global_output_folder']))
        # make sure that the absolute path to the desired output folder exists
        if not isdir(self.global_output_folder):
            makedirs(self.global_output_folder)
        print(self.global_output_folder +
              ' set as absolute folder path in AnalysisController')
        # all plots are appended to a single PDF report when requested
        if self.parameters['report_options']['combine_plots_into_pdf']:
            file_name = 'analyses_results_report.pdf'
            self.report_pdf = PdfPages(
                join(self.global_output_folder, file_name))
        else:
            self.report_pdf = None
        self.display_plots = self.parameters['report_options']['display_plots_on_screen']
        self.skin_model_params = None
        if self.parameters['report_options']['use_skin_model']:
            self.skin_model_params = {"geometry": self.parameters["skin_model_parameters"]["geometry"],
                                      "length": self.model.parameters["lx"],
                                      "record_animation": self.parameters["skin_model_parameters"]["record_animation"],
                                      "visualize_line_structure": self.parameters["skin_model_parameters"][
                                          "visualize_line_structure"],
                                      "beam_direction": self.parameters["skin_model_parameters"]["beam_direction"],
                                      "scaling_vector": self.parameters["skin_model_parameters"]["scaling_vector"],
                                      "num_of_dofs_per_node": GD.DOFS_PER_NODE[self.model.domain_size],
                                      "eigenmode_scaling_factor": self.parameters["skin_model_parameters"][
                                          "eigenmode_scaling_factor"],
                                      "dynamic_scaling_factor": self.parameters["skin_model_parameters"][
                                          "dynamic_scaling_factor"],
                                      "dofs_input": {}}
        # instantiate one analysis object per requested run; the imports
        # are deferred so unused analysis modules are never loaded
        self.analyses = []
        for analysis_param in parameters['runs']:
            if analysis_param['type'] == 'eigenvalue_analysis':
                from source.analysis.eigenvalue_analysis import EigenvalueAnalysis
                self.analyses.append(EigenvalueAnalysis(
                    self.model, analysis_param))
                pass
            elif analysis_param['type'] == 'dynamic_analysis':
                # if analysis_param['settings']['run_in_modal_coordinates']:
                #     from source.analysis.dynamic_analysis import DynamicAnalysis
                #     self.analyses.append(DynamicAnalysis(
                #         self.model, analysis_param))
                # else:
                from source.analysis.dynamic_analysis import DynamicAnalysis
                self.analyses.append(DynamicAnalysis(
                    self.model, analysis_param))
            elif analysis_param['type'] == 'static_analysis':
                from source.analysis.static_analysis import StaticAnalysis
                self.analyses.append(StaticAnalysis(
                    self.model, analysis_param))
            else:
                err_msg = "The analysis type \"" + \
                          analysis_param['type']
                err_msg += "\" is not available \n"
                err_msg += "Choose one of: \""
                err_msg += '\", \"'.join(
                    AnalysisController.POSSIBLE_ANALYSES) + '\"'
                raise Exception(err_msg)
    def solve(self):
        # run every configured analysis in order
        for analysis in self.analyses:
            analysis.solve()
    def postprocess(self):
        # optionally persist/plot the model properties themselves
        if self.parameters['model_properties']['write']:
            self.model.write_properties(self.global_output_folder)
        if self.parameters['model_properties']['plot']:
            self.model.plot_properties(self.report_pdf, self.display_plots)
        for analysis in self.analyses:
            analysis.postprocess(self.global_output_folder, self.report_pdf,
                                 self.display_plots, self.skin_model_params)
        # report_pdf is None when plots are not combined into a PDF; the
        # broad except keeps this teardown best-effort
        try:
            self.report_pdf.close()
        except:
            pass
| 4,935 | 0 | 81 |
51531174e63cb89f2b96f409e0af0903be0a85f6 | 2,339 | py | Python | trodesnetwork-0.0.9/trodesnetwork-0.0.9/trodesnetwork/trodes/digital.py | JohnLauFoo/clc_packages_Yu | 259f01d9b5c02154ce258734d519ae8995cd0991 | [
"MIT"
] | 1 | 2021-11-13T17:21:44.000Z | 2021-11-13T17:21:44.000Z | trodesnetwork-0.0.9/trodesnetwork-0.0.9/trodesnetwork/trodes/digital.py | JohnLauFoo/clc_packages_Yu | 259f01d9b5c02154ce258734d519ae8995cd0991 | [
"MIT"
] | null | null | null | trodesnetwork-0.0.9/trodesnetwork-0.0.9/trodesnetwork/trodes/digital.py | JohnLauFoo/clc_packages_Yu | 259f01d9b5c02154ce258734d519ae8995cd0991 | [
"MIT"
] | null | null | null | import trodesnetwork.socket as socket
import trodesnetwork.trodes as trodes
import threading
'''
Use this class to subscribe to analog sources
Requires input of a channel map object. The channel map is just
a JSON-like dictionary of the XML HardwareConfiguration node
in the `.trodesconfig` file.
Requires a server_address to connect to the server.
It can be used like this:
subscriber = trodes.DigitalClient(
server_address=self.network_address,
channel_map=config.channel_map,
channel_name='ECU_Din8')
'''
'''
Subscriber wraps subscription in a thread and callback
Callback can be used to call a Qt signal
''' | 33.414286 | 98 | 0.690894 | import trodesnetwork.socket as socket
import trodesnetwork.trodes as trodes
import threading
'''
Use this class to subscribe to analog sources
Requires input of a channel map object. The channel map is just
a JSON-like dictionary of the XML HardwareConfiguration node
in the `.trodesconfig` file.
Requires a server_address to connect to the server.
It can be used like this:
subscriber = trodes.DigitalClient(
server_address=self.network_address,
channel_map=config.channel_map,
channel_name='ECU_Din8')
'''
class DigitalClient():
    '''Resolve a named digital channel via the hardware channel map and
    stream (timestamp, bit) samples for it.'''
    def __init__(self, server_address, channel_map, channel_name):
        self.channel_map = trodes.HardwareChannelMap(channel_map)
        dev_idx, chan_idx = self.channel_map.find_channel(channel_name)
        flat_index = self.channel_map.calculate_digital_index(dev_idx, chan_idx)
        self.raw_client = RawDigitalClient(server_address=server_address, index=flat_index)
    def receive(self):
        '''Block until the next sample arrives; returns (timestamp, bit).'''
        return self.raw_client.receive()
class RawDigitalClient():
    '''Subscribe to the 'source.digital' stream and extract a single
    bit, addressed by its flat channel index, from each record.'''
    def __init__(self, server_address, index):
        self.index = index
        # position of the bit inside the packed digitalData byte array
        self.byte_id, self.bit_id = divmod(index, 8)
        self.subscriber = socket.SourceSubscriber('source.digital', server_address=server_address)
    def receive(self):
        '''Block for the next record; returns (localTimestamp, bit 0/1).'''
        rec = self.subscriber.receive()
        packed = rec['digitalData'][0]
        bit = (packed[self.byte_id] >> self.bit_id) & 1
        return rec['localTimestamp'], bit
'''
Subscriber wraps subscription in a thread and callback
Callback can be used to call a Qt signal
'''
class DigitalSubscriber():
    '''Background subscription: receives digital samples on a daemon
    thread and forwards each one to *callback*.'''
    def __init__(self, server_address, channel_map, channel_name, callback):
        # daemon thread dies with the interpreter, so no explicit
        # shutdown/join is provided
        self.thread = DigitalSubscriber.DigitalSubscriberThread(
            server_address, channel_map, channel_name, callback)
        self.thread.start()
    class DigitalSubscriberThread(threading.Thread):
        def __init__(self, server_address, channel_map, channel_name, callback):
            super().__init__(daemon=True)
            self.subscriber = DigitalClient(server_address, channel_map, channel_name)
            self.callback = callback
        def run(self):
            # endless receive loop: every sample is handed to the callback
            while True:
                res = self.subscriber.receive()
                self.callback(res)
29f49e0b71b6c2f444d13c431f5ac81f00bcac2b | 2,087 | py | Python | tests/resources/responsemock.py | merretbuurman/esgf-pid | df511387904ad215cd84ef29ef0c902ce6cec826 | [
"Apache-2.0"
] | null | null | null | tests/resources/responsemock.py | merretbuurman/esgf-pid | df511387904ad215cd84ef29ef0c902ce6cec826 | [
"Apache-2.0"
] | 7 | 2017-02-22T15:24:54.000Z | 2021-05-06T22:43:15.000Z | tests/resources/responsemock.py | merretbuurman/esgf-pid | df511387904ad215cd84ef29ef0c902ce6cec826 | [
"Apache-2.0"
] | 5 | 2016-08-23T08:52:00.000Z | 2020-03-25T21:28:31.000Z |
class MockRequest(object):
'''
This is a mocked Request object containing only an url,
as this is the only attribute accessed during the tests.
There is a default value for it, but it can also be passed.
'''
class MockSolrResponse(object):
'''
This is a mocked Response object (can be used to replace
a response from any call to "requests.get" or
"request.put" or "request.delete", ...).
It contains a request, a status code and some JSON content.
For all of these, there is default values, but they can also
be passed.
Some standard cases are available, e.g. or "handle not found",
which has a specific combination of HTTP status code, handle
response code and content.
'''
| 35.982759 | 129 | 0.604217 |
class MockRequest(object):
    '''
    Minimal stand-in for a Request object: only the ``url`` attribute is
    ever accessed during the tests. It defaults to 'http://foo.foo'
    when no url is supplied.
    '''
    def __init__(self, url=None):
        self.url = url if url is not None else 'http://foo.foo'
class MockSolrResponse(object):
    '''
    This is a mocked Response object (can be used to replace
    a response from any call to "requests.get" or
    "request.put" or "request.delete", ...).
    It contains a request, a status code and some JSON content.
    For all of these, there are default values, but they can also
    be passed.
    Some standard cases are available, e.g. "handle not found",
    which has a specific combination of HTTP status code, handle
    response code and content.
    '''
    def __init__(self, status_code=None, content=None, request=None, success=False, notfound=False, empty=False):
        '''
        :param status_code: explicit HTTP status code (overrides presets).
        :param content: explicit body; the sentinel string 'NONE' yields
            a response whose content is None.
        :param request: request object to attach; defaults to MockRequest().
        :param success: preset: 200 with a non-empty facet payload.
        :param notfound: preset: 404 with an empty body.
        :param empty: kept for interface compatibility; currently unused.
        '''
        self.content = None
        self.status_code = None
        self.request = None
        # Some predefined cases:
        if success:
            self.status_code = 200
            self.content = '{"responseHeader":{}, "response":{}, "facet_counts": {"facet_fields": {"bla": ["blub",1,"miau",4]}}}'
        elif notfound:
            self.status_code = 404
            self.content = ''
        # User-defined overrides predefined cases:
        if content is not None:
            self.content = content
        if status_code is not None:
            self.status_code = status_code
        if request is not None:
            self.request = request
        # Defaults (they do not override):
        if self.content is None:
            self.content = '{"responseHeader":{}, "response":{}, "facet_counts": {}}'
        if self.status_code is None:
            self.status_code = 200
        if self.request is None:
            self.request = MockRequest()
        # Special case: content should be None.
        # Bug fix: compare by equality -- ``is 'NONE'`` tested object
        # identity, relied on CPython string interning, and raises a
        # SyntaxWarning on modern interpreters.
        if self.content == 'NONE':
            self.content = None
38776c3e628fb4fa238fe4f7201f3af52af17c74 | 17,389 | py | Python | qiskit/transpiler/passes/mapping/legacy_swap.py | biplab37/qiskit-aakash | e10b204887606f1f75bdfde182bb0c6d0a322c68 | [
"Apache-2.0"
] | 22 | 2019-08-15T04:39:15.000Z | 2022-03-06T05:17:04.000Z | qiskit/transpiler/passes/mapping/legacy_swap.py | biplab37/qiskit-aakash | e10b204887606f1f75bdfde182bb0c6d0a322c68 | [
"Apache-2.0"
] | 2 | 2020-10-26T07:12:12.000Z | 2021-12-09T16:22:51.000Z | qiskit/transpiler/passes/mapping/legacy_swap.py | biplab37/qiskit-aakash | e10b204887606f1f75bdfde182bb0c6d0a322c68 | [
"Apache-2.0"
] | 9 | 2019-09-05T05:33:00.000Z | 2021-10-09T16:04:53.000Z | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=invalid-name
"""
A pass implementing the legacy swapper.
Based on Sergey Bravyi's algorithm.
"""
import sys
import numpy as np
from qiskit.transpiler.basepasses import TransformationPass
from qiskit.dagcircuit import DAGCircuit
from qiskit.transpiler.exceptions import TranspilerError
from qiskit.circuit import QuantumRegister
from qiskit.extensions.standard import SwapGate
class LegacySwap(TransformationPass):
"""
Maps a DAGCircuit onto a `coupling_map` adding swap gates.
"""
    def __init__(self,
                 coupling_map,
                 initial_layout=None,
                 trials=20,
                 seed=None):
        """
        Maps a DAGCircuit onto a `coupling_map` using swap gates.
        Args:
            coupling_map (CouplingMap): directed graph representing the
                coupling map of the target device.
            initial_layout (Layout): initial layout of qubits in mapping;
                when None, the pass falls back to the property set's
                "layout" or a trivial default at run time.
            trials (int): the number of attempts the randomized algorithm makes.
            seed (int): initial seed; when None, a fresh random seed is
                drawn in layer_permutation.
        """
        super().__init__()
        self.coupling_map = coupling_map
        self.initial_layout = initial_layout
        self.trials = trials
        self.seed = seed
    def run(self, dag):
        """Map a DAGCircuit onto a CouplingGraph using swap gates.
        Args:
            dag (DAGCircuit): input DAG circuit
        Returns:
            DAGCircuit: object containing a circuit equivalent to
            circuit_graph that respects couplings in coupling_map, and
            a layout dict mapping qubits of circuit_graph into qubits
            of coupling_map. The layout may differ from the initial_layout
            if the first layer of gates cannot be executed on the
            initial_layout.
        Raises:
            TranspilerError: if there was any error during the mapping or with the
                parameters.
        """
        if dag.width() > self.coupling_map.size():
            raise TranspilerError("Not enough qubits in CouplingGraph")
        # Schedule the input circuit
        layerlist = list(dag.layers())
        # fall back to a layout computed by an earlier pass, if available
        if self.initial_layout is None and self.property_set["layout"]:
            self.initial_layout = self.property_set["layout"]
        if self.initial_layout is not None:
            # update initial_layout from a user given dict{(regname,idx): (regname,idx)}
            # to an expected dict{(reg,idx): (reg,idx)}
            virtual_qubits = self.initial_layout.get_virtual_bits()
            self.initial_layout = {(v.register.name, v.index): ('q', self.initial_layout[v]) for v
                                   in virtual_qubits}
            device_register = QuantumRegister(self.coupling_map.size(), 'q')
            initial_layout = {dag.qregs[k[0]][k[1]]: device_register[v[1]]
                              for k, v in self.initial_layout.items()}
            # Check the input layout
            circ_qubits = dag.qubits()
            coup_qubits = [(QuantumRegister(self.coupling_map.size(), 'q'), wire) for wire in
                           self.coupling_map.physical_qubits]
            qubit_subset = []
            for k, v in initial_layout.items():
                qubit_subset.append(v)
                if k not in circ_qubits:
                    raise TranspilerError("initial_layout qubit %s[%d] not in input "
                                          "DAGCircuit" % (k[0].name, k[1]))
                if v not in coup_qubits:
                    raise TranspilerError("initial_layout qubit %s[%d] not in input "
                                          "CouplingGraph" % (v[0].name, v[1]))
        else:
            # Supply a default layout: the first dag.width() physical qubits
            qubit_subset = [QuantumRegister(self.coupling_map.size(), 'q')[wire] for wire in
                            self.coupling_map.physical_qubits]
            qubit_subset = qubit_subset[0:dag.width()]
            initial_layout = {a: b for a, b in zip(dag.qubits(), qubit_subset)}
        # Find swap circuit to precede each layer of the input circuit
        layout = initial_layout.copy()
        # Construct an empty DAGCircuit with one qreg "q"
        # and the same set of cregs as the input circuit
        dagcircuit_output = DAGCircuit()
        dagcircuit_output.name = dag.name
        dagcircuit_output.add_qreg(QuantumRegister(self.coupling_map.size(), "q"))
        for creg in dag.cregs.values():
            dagcircuit_output.add_creg(creg)
        # Make a trivial wire mapping between the subcircuits
        # returned by swap_mapper_layer_update and the circuit
        # we are building
        identity_wire_map = {}
        q = QuantumRegister(self.coupling_map.size(), 'q')
        for j in range(self.coupling_map.size()):
            identity_wire_map[q[j]] = q[j]
        for creg in dag.cregs.values():
            for j in range(creg.size):
                identity_wire_map[creg[j]] = creg[j]
        first_layer = True # True until first layer is output
        # Iterate over layers
        for i, layer in enumerate(layerlist):
            # Attempt to find a permutation for this layer
            success_flag, best_circ, best_d, best_layout, trivial_flag \
                = self.layer_permutation(layer["partition"], layout, qubit_subset)
            # If this fails, try one gate at a time in this layer
            if not success_flag:
                serial_layerlist = list(layer["graph"].serial_layers())
                # Go through each gate in the layer
                for j, serial_layer in enumerate(serial_layerlist):
                    success_flag, best_circ, best_d, best_layout, trivial_flag \
                        = self.layer_permutation(serial_layer["partition"], layout, qubit_subset)
                    # Give up if we fail again
                    if not success_flag:
                        raise TranspilerError("swap_mapper failed: " +
                                              "layer %d, sublayer %d" % (i, j))
                    # If this layer is only single-qubit gates,
                    # and we have yet to see multi-qubit gates,
                    # continue to the next inner iteration
                    if trivial_flag and first_layer:
                        continue
                    # Update the record of qubit positions for each inner iteration
                    layout = best_layout
                    # Update the QASM
                    dagcircuit_output.compose_back(
                        self.swap_mapper_layer_update(j,
                                                      first_layer,
                                                      best_layout,
                                                      best_d,
                                                      best_circ,
                                                      serial_layerlist),
                        identity_wire_map)
                    # Update initial layout
                    if first_layer:
                        initial_layout = layout
                        first_layer = False
            else:
                # Update the record of qubit positions for each iteration
                layout = best_layout
                # Update the QASM
                dagcircuit_output.compose_back(
                    self.swap_mapper_layer_update(i,
                                                  first_layer,
                                                  best_layout,
                                                  best_d,
                                                  best_circ,
                                                  layerlist),
                    identity_wire_map)
                # Update initial layout
                if first_layer:
                    initial_layout = layout
                    first_layer = False
        # If first_layer is still set, the circuit only has single-qubit gates
        # so we can use the initial layout to output the entire circuit
        if first_layer:
            layout = initial_layout
            for i, layer in enumerate(layerlist):
                dagcircuit_output.compose_back(layer["graph"], layout)
        return dagcircuit_output
    def layer_permutation(self, layer_partition, layout, qubit_subset):
        """Find a swap circuit that implements a permutation for this layer.

        The goal is to swap qubits such that qubits in the same two-qubit
        gates are adjacent. Based on Sergey Bravyi's randomized algorithm.

        Args:
            layer_partition: list of (qu)bit lists; each qubit is a tuple
                (qreg, index). Sub-lists of length 2 are two-qubit gates.
            layout: dict mapping qubits in the circuit to qubits in the
                coupling graph; represents the current positions of the data.
            qubit_subset: the subset of qubits in the coupling graph that
                we have chosen to map into.

        ``self.coupling_map`` is the CouplingGraph and ``self.trials`` is the
        number of attempts the randomized algorithm makes.

        Returns: success_flag, best_circ, best_d, best_layout, trivial_flag

        If success_flag is True, then best_circ contains a DAGCircuit with
        the swap circuit, best_d contains the depth of the swap circuit, and
        best_layout contains the new positions of the data qubits after the
        swap circuit has been applied. The trivial_flag is set if the layer
        has no multi-qubit gates.
        """
        # Lazily pick a seed so the RandomState stream is reproducible
        # across calls once chosen.
        if self.seed is None:
            self.seed = np.random.randint(0, np.iinfo(np.int32).max)
        rng = np.random.RandomState(self.seed)
        # Inverse of `layout`: physical qubit -> virtual qubit.
        rev_layout = {b: a for a, b in layout.items()}
        # Collect the two-qubit gates whose endpoints must become adjacent.
        gates = []
        for layer in layer_partition:
            if len(layer) > 2:
                raise TranspilerError("Layer contains >2 qubit gates")
            if len(layer) == 2:
                gates.append(tuple(layer))
        # Can we already apply the gates? Each adjacent pair contributes
        # distance 1, so total distance == number of gates means no swaps
        # are needed for this layer.
        dist = sum(
            [self.coupling_map.distance(layout[g[0]].index, layout[g[1]].index) for g in gates])
        if dist == len(gates):
            circ = DAGCircuit()
            circ.add_qreg(QuantumRegister(self.coupling_map.size(), "q"))
            # trivial_flag (last element) is False only when there were no
            # two-qubit gates at all.
            return True, circ, 0, layout, bool(gates)
        # Begin loop over trials of randomized algorithm
        n = self.coupling_map.size()
        best_d = sys.maxsize  # initialize best depth
        best_circ = None  # initialize best swap circuit
        best_layout = None  # initialize best final layout
        QR = QuantumRegister(self.coupling_map.size(), "q")
        for _ in range(self.trials):
            trial_layout = layout.copy()
            rev_trial_layout = rev_layout.copy()
            # SWAP circuit constructed this trial
            trial_circ = DAGCircuit()
            trial_circ.add_qreg(QR)
            # Compute Sergey's randomized distance: a symmetric cost matrix
            # xi[i][j] = scale * distance(i, j)**2 where `scale` carries a
            # random perturbation, so different trials explore different
            # greedy swap choices.
            xi = {}
            for i in self.coupling_map.physical_qubits:
                xi[(QR, i)] = {}
            for i in self.coupling_map.physical_qubits:
                i = (QR, i)
                for j in self.coupling_map.physical_qubits:
                    j = (QR, j)
                    scale = 1 + rng.normal(0, 1 / n)
                    xi[i][j] = scale * self.coupling_map.distance(i[1], j[1]) ** 2
                    xi[j][i] = xi[i][j]
            # Loop over depths d up to a max depth of 2n+1
            d = 1
            # Circuit for this swap slice
            circ = DAGCircuit()
            circ.add_qreg(QR)
            # Identity wire-map for composing the circuits
            identity_wire_map = {QR[j]: QR[j] for j in range(n)}
            while d < 2 * n + 1:
                # Set of available qubits
                qubit_set = set(qubit_subset)
                # While there are still qubits available
                while qubit_set:
                    # Compute the objective function
                    min_cost = sum([xi[trial_layout[g[0]]][trial_layout[g[1]]]
                                    for g in gates])
                    # Try to decrease objective function
                    progress_made = False
                    # Loop over edges of coupling graph
                    for e in self.coupling_map.get_edges():
                        e = [QR[edge] for edge in e]
                        # Are the qubits available?
                        if e[0] in qubit_set and e[1] in qubit_set:
                            # Try this edge to reduce the cost
                            new_layout = trial_layout.copy()
                            new_layout[rev_trial_layout[e[0]]] = e[1]
                            new_layout[rev_trial_layout[e[1]]] = e[0]
                            rev_new_layout = rev_trial_layout.copy()
                            rev_new_layout[e[0]] = rev_trial_layout[e[1]]
                            rev_new_layout[e[1]] = rev_trial_layout[e[0]]
                            # Compute the objective function
                            new_cost = sum([xi[new_layout[g[0]]][new_layout[g[1]]]
                                            for g in gates])
                            # Record progress if we succeed
                            if new_cost < min_cost:
                                progress_made = True
                                min_cost = new_cost
                                opt_layout = new_layout
                                rev_opt_layout = rev_new_layout
                                opt_edge = e
                    # Were there any good choices? Greedily commit the best
                    # swap found and retire both endpoints for this slice.
                    if progress_made:
                        qubit_set.remove(opt_edge[0])
                        qubit_set.remove(opt_edge[1])
                        trial_layout = opt_layout
                        rev_trial_layout = rev_opt_layout
                        circ.apply_operation_back(
                            SwapGate(),
                            [opt_edge[0], opt_edge[1]],
                            [])
                    else:
                        break
                # We have either run out of qubits or failed to improve
                # Compute the coupling graph distance_qubits
                dist = sum([self.coupling_map.distance(trial_layout[g[0]].index,
                                                       trial_layout[g[1]].index) for g in gates])
                # If all gates can be applied now, we are finished
                # Otherwise we need to consider a deeper swap circuit
                if dist == len(gates):
                    trial_circ.compose_back(circ, identity_wire_map)
                    break
                # Increment the depth
                d += 1
            # Either we have succeeded at some depth d < dmax or failed
            dist = sum([self.coupling_map.distance(trial_layout[g[0]].index,
                                                   trial_layout[g[1]].index) for g in gates])
            if dist == len(gates):
                if d < best_d:
                    best_circ = trial_circ
                    best_layout = trial_layout
                best_d = min(best_d, d)
        if best_circ is None:
            return False, None, None, None, False
        return True, best_circ, best_d, best_layout, False
def swap_mapper_layer_update(self, i, first_layer, best_layout, best_d,
best_circ, layer_list):
"""Update the QASM string for an iteration of swap_mapper.
i = layer number
first_layer = True if this is the first layer with multi-qubit gates
best_layout = layout returned from swap algorithm
best_d = depth returned from swap algorithm
best_circ = swap circuit returned from swap algorithm
layer_list = list of circuit objects for each layer
Return DAGCircuit object to append to the output DAGCircuit.
"""
layout = best_layout
dagcircuit_output = DAGCircuit()
QR = QuantumRegister(self.coupling_map.size(), 'q')
dagcircuit_output.add_qreg(QR)
# Identity wire-map for composing the circuits
identity_wire_map = {QR[j]: QR[j] for j in range(self.coupling_map.size())}
# If this is the first layer with multi-qubit gates,
# output all layers up to this point and ignore any
# swap gates. Set the initial layout.
if first_layer:
# Output all layers up to this point
for j in range(i + 1):
dagcircuit_output.compose_back(layer_list[j]["graph"], layout)
# Otherwise, we output the current layer and the associated swap gates.
else:
# Output any swaps
if best_d > 0:
dagcircuit_output.compose_back(best_circ, identity_wire_map)
# Output this layer
dagcircuit_output.compose_back(layer_list[i]["graph"], layout)
return dagcircuit_output
| 43.690955 | 98 | 0.546782 | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=invalid-name
"""
A pass implementing the legacy swapper.
Based on Sergey Bravyi's algorithm.
"""
import sys
import numpy as np
from qiskit.transpiler.basepasses import TransformationPass
from qiskit.dagcircuit import DAGCircuit
from qiskit.transpiler.exceptions import TranspilerError
from qiskit.circuit import QuantumRegister
from qiskit.extensions.standard import SwapGate
class LegacySwap(TransformationPass):
"""
Maps a DAGCircuit onto a `coupling_map` adding swap gates.
"""
def __init__(self,
coupling_map,
initial_layout=None,
trials=20,
seed=None):
"""
Maps a DAGCircuit onto a `coupling_map` using swap gates.
Args:
coupling_map (CouplingMap): Directed graph represented a coupling map.
initial_layout (Layout): initial layout of qubits in mapping
trials (int): the number of attempts the randomized algorithm makes.
seed (int): initial seed.
"""
super().__init__()
self.coupling_map = coupling_map
self.initial_layout = initial_layout
self.trials = trials
self.seed = seed
def run(self, dag):
"""Map a DAGCircuit onto a CouplingGraph using swap gates.
Args:
dag (DAGCircuit): input DAG circuit
Returns:
DAGCircuit: object containing a circuit equivalent to
circuit_graph that respects couplings in coupling_map, and
a layout dict mapping qubits of circuit_graph into qubits
of coupling_map. The layout may differ from the initial_layout
if the first layer of gates cannot be executed on the
initial_layout.
Raises:
TranspilerError: if there was any error during the mapping or with the
parameters.
"""
if dag.width() > self.coupling_map.size():
raise TranspilerError("Not enough qubits in CouplingGraph")
# Schedule the input circuit
layerlist = list(dag.layers())
if self.initial_layout is None and self.property_set["layout"]:
self.initial_layout = self.property_set["layout"]
if self.initial_layout is not None:
# update initial_layout from a user given dict{(regname,idx): (regname,idx)}
# to an expected dict{(reg,idx): (reg,idx)}
virtual_qubits = self.initial_layout.get_virtual_bits()
self.initial_layout = {(v.register.name, v.index): ('q', self.initial_layout[v]) for v
in virtual_qubits}
device_register = QuantumRegister(self.coupling_map.size(), 'q')
initial_layout = {dag.qregs[k[0]][k[1]]: device_register[v[1]]
for k, v in self.initial_layout.items()}
# Check the input layout
circ_qubits = dag.qubits()
coup_qubits = [(QuantumRegister(self.coupling_map.size(), 'q'), wire) for wire in
self.coupling_map.physical_qubits]
qubit_subset = []
for k, v in initial_layout.items():
qubit_subset.append(v)
if k not in circ_qubits:
raise TranspilerError("initial_layout qubit %s[%d] not in input "
"DAGCircuit" % (k[0].name, k[1]))
if v not in coup_qubits:
raise TranspilerError("initial_layout qubit %s[%d] not in input "
"CouplingGraph" % (v[0].name, v[1]))
else:
# Supply a default layout
qubit_subset = [QuantumRegister(self.coupling_map.size(), 'q')[wire] for wire in
self.coupling_map.physical_qubits]
qubit_subset = qubit_subset[0:dag.width()]
initial_layout = {a: b for a, b in zip(dag.qubits(), qubit_subset)}
# Find swap circuit to preceed to each layer of input circuit
layout = initial_layout.copy()
# Construct an empty DAGCircuit with one qreg "q"
# and the same set of cregs as the input circuit
dagcircuit_output = DAGCircuit()
dagcircuit_output.name = dag.name
dagcircuit_output.add_qreg(QuantumRegister(self.coupling_map.size(), "q"))
for creg in dag.cregs.values():
dagcircuit_output.add_creg(creg)
# Make a trivial wire mapping between the subcircuits
# returned by swap_mapper_layer_update and the circuit
# we are building
identity_wire_map = {}
q = QuantumRegister(self.coupling_map.size(), 'q')
for j in range(self.coupling_map.size()):
identity_wire_map[q[j]] = q[j]
for creg in dag.cregs.values():
for j in range(creg.size):
identity_wire_map[creg[j]] = creg[j]
first_layer = True # True until first layer is output
# Iterate over layers
for i, layer in enumerate(layerlist):
# Attempt to find a permutation for this layer
success_flag, best_circ, best_d, best_layout, trivial_flag \
= self.layer_permutation(layer["partition"], layout, qubit_subset)
# If this fails, try one gate at a time in this layer
if not success_flag:
serial_layerlist = list(layer["graph"].serial_layers())
# Go through each gate in the layer
for j, serial_layer in enumerate(serial_layerlist):
success_flag, best_circ, best_d, best_layout, trivial_flag \
= self.layer_permutation(serial_layer["partition"], layout, qubit_subset)
# Give up if we fail again
if not success_flag:
raise TranspilerError("swap_mapper failed: " +
"layer %d, sublayer %d" % (i, j))
# If this layer is only single-qubit gates,
# and we have yet to see multi-qubit gates,
# continue to the next inner iteration
if trivial_flag and first_layer:
continue
# Update the record of qubit positions for each inner iteration
layout = best_layout
# Update the QASM
dagcircuit_output.compose_back(
self.swap_mapper_layer_update(j,
first_layer,
best_layout,
best_d,
best_circ,
serial_layerlist),
identity_wire_map)
# Update initial layout
if first_layer:
initial_layout = layout
first_layer = False
else:
# Update the record of qubit positions for each iteration
layout = best_layout
# Update the QASM
dagcircuit_output.compose_back(
self.swap_mapper_layer_update(i,
first_layer,
best_layout,
best_d,
best_circ,
layerlist),
identity_wire_map)
# Update initial layout
if first_layer:
initial_layout = layout
first_layer = False
# If first_layer is still set, the circuit only has single-qubit gates
# so we can use the initial layout to output the entire circuit
if first_layer:
layout = initial_layout
for i, layer in enumerate(layerlist):
dagcircuit_output.compose_back(layer["graph"], layout)
return dagcircuit_output
def layer_permutation(self, layer_partition, layout, qubit_subset):
"""Find a swap circuit that implements a permutation for this layer.
The goal is to swap qubits such that qubits in the same two-qubit gates
are adjacent.
Based on Sergey Bravyi's algorithm.
The layer_partition is a list of (qu)bit lists and each qubit is a
tuple (qreg, index).
The layout is a dict mapping qubits in the circuit to qubits in the
coupling graph and represents the current positions of the data.
The qubit_subset is the subset of qubits in the coupling graph that
we have chosen to map into.
The coupling is a CouplingGraph.
TRIALS is the number of attempts the randomized algorithm makes.
Returns: success_flag, best_circ, best_d, best_layout, trivial_flag
If success_flag is True, then best_circ contains a DAGCircuit with
the swap circuit, best_d contains the depth of the swap circuit, and
best_layout contains the new positions of the data qubits after the
swap circuit has been applied. The trivial_flag is set if the layer
has no multi-qubit gates.
"""
if self.seed is None:
self.seed = np.random.randint(0, np.iinfo(np.int32).max)
rng = np.random.RandomState(self.seed)
rev_layout = {b: a for a, b in layout.items()}
gates = []
for layer in layer_partition:
if len(layer) > 2:
raise TranspilerError("Layer contains >2 qubit gates")
if len(layer) == 2:
gates.append(tuple(layer))
# Can we already apply the gates?
dist = sum(
[self.coupling_map.distance(layout[g[0]].index, layout[g[1]].index) for g in gates])
if dist == len(gates):
circ = DAGCircuit()
circ.add_qreg(QuantumRegister(self.coupling_map.size(), "q"))
return True, circ, 0, layout, bool(gates)
# Begin loop over trials of randomized algorithm
n = self.coupling_map.size()
best_d = sys.maxsize # initialize best depth
best_circ = None # initialize best swap circuit
best_layout = None # initialize best final layout
QR = QuantumRegister(self.coupling_map.size(), "q")
for _ in range(self.trials):
trial_layout = layout.copy()
rev_trial_layout = rev_layout.copy()
# SWAP circuit constructed this trial
trial_circ = DAGCircuit()
trial_circ.add_qreg(QR)
# Compute Sergey's randomized distance
xi = {}
for i in self.coupling_map.physical_qubits:
xi[(QR, i)] = {}
for i in self.coupling_map.physical_qubits:
i = (QR, i)
for j in self.coupling_map.physical_qubits:
j = (QR, j)
scale = 1 + rng.normal(0, 1 / n)
xi[i][j] = scale * self.coupling_map.distance(i[1], j[1]) ** 2
xi[j][i] = xi[i][j]
# Loop over depths d up to a max depth of 2n+1
d = 1
# Circuit for this swap slice
circ = DAGCircuit()
circ.add_qreg(QR)
# Identity wire-map for composing the circuits
identity_wire_map = {QR[j]: QR[j] for j in range(n)}
while d < 2 * n + 1:
# Set of available qubits
qubit_set = set(qubit_subset)
# While there are still qubits available
while qubit_set:
# Compute the objective function
min_cost = sum([xi[trial_layout[g[0]]][trial_layout[g[1]]]
for g in gates])
# Try to decrease objective function
progress_made = False
# Loop over edges of coupling graph
for e in self.coupling_map.get_edges():
e = [QR[edge] for edge in e]
# Are the qubits available?
if e[0] in qubit_set and e[1] in qubit_set:
# Try this edge to reduce the cost
new_layout = trial_layout.copy()
new_layout[rev_trial_layout[e[0]]] = e[1]
new_layout[rev_trial_layout[e[1]]] = e[0]
rev_new_layout = rev_trial_layout.copy()
rev_new_layout[e[0]] = rev_trial_layout[e[1]]
rev_new_layout[e[1]] = rev_trial_layout[e[0]]
# Compute the objective function
new_cost = sum([xi[new_layout[g[0]]][new_layout[g[1]]]
for g in gates])
# Record progress if we succeed
if new_cost < min_cost:
progress_made = True
min_cost = new_cost
opt_layout = new_layout
rev_opt_layout = rev_new_layout
opt_edge = e
# Were there any good choices?
if progress_made:
qubit_set.remove(opt_edge[0])
qubit_set.remove(opt_edge[1])
trial_layout = opt_layout
rev_trial_layout = rev_opt_layout
circ.apply_operation_back(
SwapGate(),
[opt_edge[0], opt_edge[1]],
[])
else:
break
# We have either run out of qubits or failed to improve
# Compute the coupling graph distance_qubits
dist = sum([self.coupling_map.distance(trial_layout[g[0]].index,
trial_layout[g[1]].index) for g in gates])
# If all gates can be applied now, we are finished
# Otherwise we need to consider a deeper swap circuit
if dist == len(gates):
trial_circ.compose_back(circ, identity_wire_map)
break
# Increment the depth
d += 1
# Either we have succeeded at some depth d < dmax or failed
dist = sum([self.coupling_map.distance(trial_layout[g[0]].index,
trial_layout[g[1]].index) for g in gates])
if dist == len(gates):
if d < best_d:
best_circ = trial_circ
best_layout = trial_layout
best_d = min(best_d, d)
if best_circ is None:
return False, None, None, None, False
return True, best_circ, best_d, best_layout, False
def swap_mapper_layer_update(self, i, first_layer, best_layout, best_d,
best_circ, layer_list):
"""Update the QASM string for an iteration of swap_mapper.
i = layer number
first_layer = True if this is the first layer with multi-qubit gates
best_layout = layout returned from swap algorithm
best_d = depth returned from swap algorithm
best_circ = swap circuit returned from swap algorithm
layer_list = list of circuit objects for each layer
Return DAGCircuit object to append to the output DAGCircuit.
"""
layout = best_layout
dagcircuit_output = DAGCircuit()
QR = QuantumRegister(self.coupling_map.size(), 'q')
dagcircuit_output.add_qreg(QR)
# Identity wire-map for composing the circuits
identity_wire_map = {QR[j]: QR[j] for j in range(self.coupling_map.size())}
# If this is the first layer with multi-qubit gates,
# output all layers up to this point and ignore any
# swap gates. Set the initial layout.
if first_layer:
# Output all layers up to this point
for j in range(i + 1):
dagcircuit_output.compose_back(layer_list[j]["graph"], layout)
# Otherwise, we output the current layer and the associated swap gates.
else:
# Output any swaps
if best_d > 0:
dagcircuit_output.compose_back(best_circ, identity_wire_map)
# Output this layer
dagcircuit_output.compose_back(layer_list[i]["graph"], layout)
return dagcircuit_output
| 0 | 0 | 0 |
cc0e6a9f0ec7d82d3157a57807c5e477b2c5ecae | 616 | py | Python | src/unicon/plugins/ironware/settings.py | TestingBytes/unicon.plugins | 0600956d805deb4fd790aa3ef591c5d659e85de1 | [
"Apache-2.0"
] | 18 | 2019-11-23T23:14:53.000Z | 2022-01-10T01:17:08.000Z | src/unicon/plugins/ironware/settings.py | TestingBytes/unicon.plugins | 0600956d805deb4fd790aa3ef591c5d659e85de1 | [
"Apache-2.0"
] | 12 | 2020-11-09T20:39:25.000Z | 2022-03-22T12:46:59.000Z | src/unicon/plugins/ironware/settings.py | TestingBytes/unicon.plugins | 0600956d805deb4fd790aa3ef591c5d659e85de1 | [
"Apache-2.0"
] | 32 | 2020-02-12T15:42:22.000Z | 2022-03-15T16:42:10.000Z | """
Module:
unicon.plugins.ironware.settings
Author:
James Di Trapani <james@ditrapani.com.au> - https://github.com/jamesditrapani
Description:
Define/Override Generic Settings specific to the Ironware NOS
"""
from unicon.plugins.generic.settings import GenericSettings
__author__ = "James Di Trapani <james@ditrapani.com.au>"
| 24.64 | 81 | 0.717532 | """
Module:
unicon.plugins.ironware.settings
Author:
James Di Trapani <james@ditrapani.com.au> - https://github.com/jamesditrapani
Description:
Define/Override Generic Settings specific to the Ironware NOS
"""
from unicon.plugins.generic.settings import GenericSettings
__author__ = "James Di Trapani <james@ditrapani.com.au>"
class IronWareSettings(GenericSettings):
    """Ironware-specific connection settings, overriding generic defaults."""

    def __init__(self):
        """Inherit the generic settings, then apply Ironware overrides."""
        super().__init__()
        self.CONNECTION_TIMEOUT = 300  # seconds (5 minutes)
        self.HA_INIT_EXEC_COMMANDS = ['terminal length 0']
        self.HA_INIT_CONFIG_COMMANDS = []
| 203 | 19 | 50 |
bcfabb13a4472146d128e09dc0769e2bdd86cab1 | 1,354 | py | Python | exemples/get-leaf-location.py | RenzoF/pycarwings2 | 3fd51b4211fa39bc40b0121f7648fbcb79cab2bc | [
"Apache-2.0"
] | 1 | 2020-10-28T13:59:34.000Z | 2020-10-28T13:59:34.000Z | exemples/get-leaf-location.py | gym22/pycarwings2 | d4dfe9cd198a1207ae18ec17c8345c7e3a545a39 | [
"Apache-2.0"
] | null | null | null | exemples/get-leaf-location.py | gym22/pycarwings2 | d4dfe9cd198a1207ae18ec17c8345c7e3a545a39 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
#import sys
# sys.path.append('/home/ruben/leaf/pycarwings2/pycarwings2')
import pycarwings2
import time
from ConfigParser import SafeConfigParser
import logging
import sys
import pprint
logging.basicConfig(stream=sys.stdout, level=logging.ERROR)
parser = SafeConfigParser()
candidates = ['config.ini', 'my_config.ini']
found = parser.read(candidates)
username = parser.get('get-leaf-info', 'username')
password = parser.get('get-leaf-info', 'password')
logging.debug("login = %s , password = %s" % (username, password))
print "Prepare Session"
s = pycarwings2.Session(username, password, "NE")
print "Login..."
l = s.get_leaf()
print "request_location"
result_key = l.request_location()
while True:
location_status = l.get_status_from_location(result_key)
if location_status is None:
print "Waiting for response (sleep 10)"
time.sleep(10)
else:
lat = location_status.latitude
lon = location_status.longitude
print("lat: {} long: {}".format(lat, lon))
# OpenStreetMap url, ctrl click in terminal to open browser,
# for example, my parking lot ;)
# http://www.openstreetmap.org/search?query=52.37309+4.89217#map=19/52.37310/4.89220
z = 19 # zoom level, lower is bigger area
print("http://www.openstreetmap.org/search?query={}%20{}#map={}/{}/{}".format(lat,lon,z,lat,lon))
break | 28.208333 | 101 | 0.717134 | #!/usr/bin/python
#import sys
# sys.path.append('/home/ruben/leaf/pycarwings2/pycarwings2')
import pycarwings2
import time
from ConfigParser import SafeConfigParser
import logging
import sys
import pprint
logging.basicConfig(stream=sys.stdout, level=logging.ERROR)
parser = SafeConfigParser()
candidates = ['config.ini', 'my_config.ini']
found = parser.read(candidates)
username = parser.get('get-leaf-info', 'username')
password = parser.get('get-leaf-info', 'password')
logging.debug("login = %s , password = %s" % (username, password))
print "Prepare Session"
s = pycarwings2.Session(username, password, "NE")
print "Login..."
l = s.get_leaf()
print "request_location"
result_key = l.request_location()
while True:
location_status = l.get_status_from_location(result_key)
if location_status is None:
print "Waiting for response (sleep 10)"
time.sleep(10)
else:
lat = location_status.latitude
lon = location_status.longitude
print("lat: {} long: {}".format(lat, lon))
# OpenStreetMap url, ctrl click in terminal to open browser,
# for example, my parking lot ;)
# http://www.openstreetmap.org/search?query=52.37309+4.89217#map=19/52.37310/4.89220
z = 19 # zoom level, lower is bigger area
print("http://www.openstreetmap.org/search?query={}%20{}#map={}/{}/{}".format(lat,lon,z,lat,lon))
break | 0 | 0 | 0 |
6a203b222c70a6343746df7be86eaefb49d9a5b6 | 2,622 | py | Python | pypy/translator/oosupport/database.py | camillobruni/pygirl | ddbd442d53061d6ff4af831c1eab153bcc771b5a | [
"MIT"
] | 12 | 2016-01-06T07:10:28.000Z | 2021-05-13T23:02:02.000Z | pypy/translator/oosupport/database.py | woodrow/pyoac | b5dc59e6a38e7912db47f26fb23ffa4764a3c0e7 | [
"MIT"
] | null | null | null | pypy/translator/oosupport/database.py | woodrow/pyoac | b5dc59e6a38e7912db47f26fb23ffa4764a3c0e7 | [
"MIT"
] | 2 | 2016-07-29T07:09:50.000Z | 2016-10-16T08:50:26.000Z | from pypy.translator.oosupport.constant import is_primitive
from pypy.rpython.ootypesystem import ootype
| 33.615385 | 75 | 0.685736 | from pypy.translator.oosupport.constant import is_primitive
from pypy.rpython.ootypesystem import ootype
class Database(object):
    """Tracks the nodes pending code generation for an OO backend.

    Holds the worklist of nodes still to render, the set already rendered,
    and the backend's constant generator / type system (both supplied by
    the `genoo` generator object).
    """
    def __init__(self, genoo):
        self.genoo = genoo
        self.cts = genoo.TypeSystem(self)
        self._pending_nodes = set()       # nodes waiting to be rendered
        self._rendered_nodes = set()      # nodes already rendered
        self._unique_counter = 0          # counter backing unique()
        self.constant_generator = genoo.ConstantGenerator(self)
        self.locked = False # new pending nodes are not allowed here
    # ____________________________________________________________
    # Miscellaneous
    def unique(self):
        """ Every time it is called, returns a unique integer. Used in
        various places. """
        self._unique_counter+=1
        return self._unique_counter-1
    def class_name(self, OOINSTANCE):
        """ Returns the backend class name of the type corresponding
        to OOINSTANCE"""
        raise NotImplementedError
    # ____________________________________________________________
    # Generation phases
    def gen_constants(self, ilasm):
        """ Renders the constants uncovered during the graph walk"""
        # Lock the worklist: constant generation must not discover new nodes.
        self.locked = True # new pending nodes are not allowed here
        self.constant_generator.gen_constants(ilasm)
        self.locked = False
    # ____________________________________________________________
    # Generation phases
    def record_delegate(self, OOTYPE):
        """ Returns a backend-specific type for a delegate class...
        details currently undefined. """
        raise NotImplementedError
    # ____________________________________________________________
    # Node creation
    #
    # Creates nodes for various kinds of things.
    def pending_class(self, INSTANCE):
        """ Returns a Node representing the ootype.Instance provided """
        raise NotImplementedError
    def pending_function(self, graph):
        """ Returns a Node representing the graph, which is being used as
        a static function """
        raise NotImplementedError
    # ____________________________________________________________
    # Basic Worklist Manipulation
    def pending_node(self, node):
        """ Adds a node to the worklist, so long as it is not already there
        and has not already been rendered. """
        assert not self.locked # sanity check
        if node in self._pending_nodes or node in self._rendered_nodes:
            return
        self._pending_nodes.add(node)
        node.dependencies()
    def len_pending(self):
        """ Returns the number of nodes still awaiting rendering. """
        return len(self._pending_nodes)
    def pop(self):
        """ Removes and returns an arbitrary pending node. """
        return self._pending_nodes.pop()
87528d07a78c77bbc785544cc66be4a15f2c6f6b | 521 | py | Python | files/main.py | Jelaque/Topics-on-database | fd9ab659203dbd205b9e255d920b4ebc1cacd2a5 | [
"MIT"
] | null | null | null | files/main.py | Jelaque/Topics-on-database | fd9ab659203dbd205b9e255d920b4ebc1cacd2a5 | [
"MIT"
] | null | null | null | files/main.py | Jelaque/Topics-on-database | fd9ab659203dbd205b9e255d920b4ebc1cacd2a5 | [
"MIT"
] | null | null | null | from ml100k import recommenderMl100k
import time as tm
from distances import recommender
s = recommender(0)
s.readMovies()
'''
s = recommender(0,k=3,metric='manhattan')
s.readBooks()
#print(s.jaccard(s.data['Stephen'],s.data['Amy']))
print(s.ProjectedRanting('Patrick C','Scarface'))
'''
'''
r = recommenderMl100k(0,metric='cosine')
r.loadMovieLens('../datasets/ml-100k/')
#print(r.cosine(r.data['278833"'],r.data['278858"']))
#print(r.jaccard(r.data['278804'],r.data['211']))
print(r.computeNearestNeighbor("100"))
''' | 26.05 | 53 | 0.706334 | from ml100k import recommenderMl100k
import time as tm
from distances import recommender
s = recommender(0)
s.readMovies()
'''
s = recommender(0,k=3,metric='manhattan')
s.readBooks()
#print(s.jaccard(s.data['Stephen'],s.data['Amy']))
print(s.ProjectedRanting('Patrick C','Scarface'))
'''
'''
r = recommenderMl100k(0,metric='cosine')
r.loadMovieLens('../datasets/ml-100k/')
#print(r.cosine(r.data['278833"'],r.data['278858"']))
#print(r.jaccard(r.data['278804'],r.data['211']))
print(r.computeNearestNeighbor("100"))
''' | 0 | 0 | 0 |
facc083e9f0d0807df4902b88bbc45dae0e14c0e | 13,750 | py | Python | notebooks/scripts/graphs.py | mtsnel006/covid19za | 5db79ecb616041ff7980230d5995d90d6dbc86f5 | [
"MIT"
] | 266 | 2020-03-13T13:39:38.000Z | 2022-03-18T06:51:57.000Z | notebooks/scripts/graphs.py | mtsnel006/covid19za | 5db79ecb616041ff7980230d5995d90d6dbc86f5 | [
"MIT"
] | 287 | 2020-03-13T12:22:50.000Z | 2022-02-22T16:06:24.000Z | notebooks/scripts/graphs.py | mtsnel006/covid19za | 5db79ecb616041ff7980230d5995d90d6dbc86f5 | [
"MIT"
] | 263 | 2020-03-13T11:44:05.000Z | 2022-03-27T15:11:52.000Z | import os
import seaborn as sns
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime
from textwrap import wrap
### NOTE: `conda install basemap`
import conda
conda_file_dir = conda.__file__
conda_dir = conda_file_dir.split('lib')[0]
proj_lib = os.path.join(os.path.join(conda_dir, 'share'), 'proj')
os.environ["PROJ_LIB"] = proj_lib
from mpl_toolkits.basemap import Basemap
from matplotlib import ticker
def vertical_bar_chart(df, x, y, label, sort, figsize=(13, 9), ascending=True):
    """Draw a vertical bar chart with percentage/count annotations on each bar.

    Args:
        df: source dataframe.
        x: column plotted on the x-axis (categories).
        y: column plotted on the y-axis (counts).
        label: legend label for the bars.
        sort: column used to order the bars.
        figsize: matplotlib figure size.
        ascending: sort direction, smallest to biggest when True.

    Returns:
        None. Draws on the current matplotlib figure.
    """
    sns.set(style="whitegrid")
    fig, ax = plt.subplots(figsize=figsize)
    ax = sns.barplot(x=x, y=y, data=df.sort_values(sort, ascending=ascending),
                     label=label, color="b", palette=["#0088c0"])
    total = df[y].sum()
    # Annotate each bar with its share of the total and the raw count.
    for p in ax.patches:
        ax.annotate(str(format(p.get_height()/total * 100, '.2f')) + '%' + ' (' + str(int(p.get_height())) + ')',
                    (p.get_x() + p.get_width() / 2., p.get_height()),
                    ha = 'center', va = 'center',
                    xytext = (0, 10), textcoords = 'offset points')
    # BUG FIX: the percentage labels were computed from the tick positions
    # *before* an extra tick was appended, so set_yticklabels received a
    # list that no longer matched the ticks. Set the ticks first, then
    # derive exactly one label per final tick.
    plt.yticks(list(plt.yticks()[0]) + [10])
    y_labels = ['{:,.0f}'.format(tick / total * 100) + '%' for tick in ax.get_yticks()]
    ax.set_yticklabels(y_labels)
    plt.xlabel('')
    plt.ylabel('')
    sns.despine(left=True, bottom=True)
def horizontal_bar_chart(df, x, y, label, figsize=(16, 16)):
    """Draw a horizontal bar chart annotated with percentage and count.

    Args:
        df: source dataframe; the first column is assumed to hold category
            names and the second the counts (accessed positionally below)
            — TODO confirm against callers.
        x: column plotted on the x-axis (counts).
        y: column plotted on the y-axis (categories).
        label: legend label for the bars.
        figsize: matplotlib figure size.

    Returns:
        None. Draws on the current matplotlib figure.
    """
    sns.set(style="whitegrid")
    fig, ax = plt.subplots(figsize=figsize)
    ax = sns.barplot(x=x, y=y, data=df,
                     label=label, color="b", palette=["#0088c0"])
    total = df.values[:, 1].sum()
    # Annotate each bar with its share of the total and the raw count.
    for i, v in enumerate(df.values[:, 1]):
        ax.text(v + 0.1, i + .25, str(format(v / total * 100, '.2f')) + '% (' + str(v) + ')')
    # Wrap long category names so they fit beside the axis.
    labels = [ '\n'.join(wrap(l, 20)) for l in df.values[:, 0]]
    ax.set_yticklabels(labels)
    # BUG FIX: x-axis labels were derived from the ticks *before* an extra
    # tick was appended, leaving the label list misaligned with the tick
    # positions. Set the ticks first, then build one label per final tick.
    plt.xticks(list(plt.xticks()[0]) + [10])
    x_labels = ['{:,.0f}'.format(tick / total * 100) + '%' for tick in ax.get_xticks()]
    ax.set_xticklabels(x_labels)
    plt.ylabel('')
    plt.xlabel('')
    sns.despine(left=True, bottom=True)
def line_graph(df, column, figsize=(12, 8)):
    """Plot daily counts of ``column`` alongside their cumulative total.

    Args:
        df: source dataframe.
        column: column whose per-value frequencies are plotted over time.
        figsize: matplotlib figure size.

    Returns:
        None. Draws on the current matplotlib figure.
    """
    fig, axis = plt.subplots(figsize=figsize)
    # Frequency per value of `column`, ordered by the value itself.
    daily = df[column].value_counts().reset_index().sort_values(by='index')
    daily['Cumulative Frequency'] = daily[column].cumsum()
    daily.plot(x='index', y=column, style='o-', ax=axis, label='Daily Infection')
    daily.plot(x='index', y='Cumulative Frequency', style='ro-', ax=axis)
    plt.xticks(rotation=90)
    plt.xlabel('')
def general_line_graph(df, x, y, figsize=(12, 8)):
    """Plot column ``y`` against column ``x`` as a marked line.

    Args:
        df: source dataframe.
        x: column plotted on the x-axis.
        y: column plotted on the y-axis.
        figsize: matplotlib figure size.

    Returns:
        None. Draws on the current matplotlib figure.
    """
    figure, axis = plt.subplots(figsize=figsize)
    df.plot(x=x, y=y, style='o-', ax=axis, label='Daily Tests')
    plt.xticks(rotation=90)
    plt.xlabel('')
def pie_chart(df, column):
    """Draw a pie chart of the value counts of ``column``.

    Args:
        df: source dataframe.
        column: column whose distinct values become the pie wedges.

    Returns:
        None. Shows the figure.
    """
    X = df[column].value_counts()
    colors = ['#0088C0', '#82DAFF']
    # BUG FIX: `explode` was hard-coded to exactly two wedges, so any
    # column with a number of distinct values other than two crashed.
    # Build one explode entry (and a cycled color) per actual wedge.
    n_wedges = len(X)
    plt.pie(X.values, labels=X.index,
            colors=[colors[i % len(colors)] for i in range(n_wedges)],
            startangle=90,
            explode=[0] * n_wedges,
            textprops={'fontsize': 14},
            autopct = '%1.2f%%')
    plt.axis('equal')
    plt.show()
def flat_globe(travel, colors):
    """Draw every travel route on a flat (Gall projection) world map.

    Args:
        travel: dataframe with Source, Source_Lat/Lon and Dest_Lat/Lon columns.
        colors: one colour per unique source country, index-aligned with
            ``travel.Source.unique()``.
    Returns:
        None
    """
    plt.figure(figsize=(30, 30))
    world = Basemap(projection='gall')
    world.fillcontinents(color="#61993b", lake_color="#008ECC")
    world.drawmapboundary(fill_color="#5D9BFF")
    world.drawcountries(color='#585858', linewidth=1)
    world.drawstates(linewidth=0.2)
    world.drawcoastlines(linewidth=1)
    sources = list(travel.Source.unique())
    for country in sources:
        # One great-circle line (20 interpolation points) per unique route.
        for _, route in travel[travel.Source == country].drop_duplicates().iterrows():
            xs, ys = world.gcpoints(
                route["Source_Lat"], route["Source_Lon"],
                route["Dest_Lat"], route["Dest_Lon"], 20)
            plt.plot(xs, ys, color=colors[sources.index(country)], linewidth=0.8)
    plt.show()
def globe(travel, colors):
    """Draw every travel route on an orthographic ("globe") projection.

    Args:
        travel: dataframe with Source, Source_Lat/Lon and Dest_Lat/Lon columns.
        colors: one colour per unique source country, index-aligned with
            ``travel.Source.unique()``.
    Returns:
        None
    """
    plt.figure(figsize=(16, 16))
    world = Basemap(projection='ortho', lat_0=0, lon_0=0)
    world.drawmapboundary(fill_color='#5D9BFF')
    world.fillcontinents(color='#0D9C29', lake_color='#008ECC')
    world.drawcountries(color='#585858', linewidth=1)
    world.drawcoastlines()
    sources = list(travel.Source.unique())
    for country in sources:
        # One great-circle line (20 interpolation points) per unique route.
        for _, route in travel[travel.Source == country].drop_duplicates().iterrows():
            xs, ys = world.gcpoints(
                route["Source_Lat"], route["Source_Lon"],
                route["Dest_Lat"], route["Dest_Lon"], 20)
            plt.plot(xs, ys, color=colors[sources.index(country)], linewidth=0.8)
    plt.show()
def plot_covid19za_grouwth(df, provinces, min_cases=100, ls='-', figsize=(12, 8)):
    """
    This shows covid19za growth since the first case was reported
    from each province

    Args:
        df: case-level dataframe with 'date', 'province' and 'country' columns.
        provinces: provinces to plot, one cumulative curve each.
        min_cases: currently unused.  # NOTE(review): dead parameter — confirm before removing.
        ls: line style for the province curves.
        figsize: figure size to make chart small or big.
    Returns:
        None
    """
    fig, ax = plt.subplots(figsize=figsize)
    df = (df.set_index('date'))
    # Dates come in day-first form in the source data.
    df.index = pd.to_datetime(df.index, dayfirst=True)
    for province in provinces:
        # One row per case, so counting any column ('country') gives cases per day.
        df1 = df.loc[(df.province == province)].groupby(['date']).agg({'country': ['count']})
        df1.columns = ['new cases']
        df1['cummulative'] = df1['new cases'].cumsum()
        # reset_index() makes the x-axis "days since first case" instead of dates.
        (df1.reset_index()['cummulative']
            .plot(label=province, ls=ls))
    x = np.linspace(0, plt.xlim()[1])
    # NOTE(review): this reference line is linear (x + 1.33); a true 33% daily
    # growth curve would be exponential — confirm the intended reference.
    plt.plot(x,x+(1.33), ls='--', color='k', label='33% daily growth')
    plt.title('Data up to {}'.format(df.index.max().strftime('%B %d, %Y')))
    plt.xlabel('Days from first confirmed case')
    plt.ylabel('Confirmed cases')
    ax.get_yaxis().set_major_formatter(ticker.ScalarFormatter())
    ax.set_xticks(range(0,int(plt.xlim()[1])+1))
    plt.legend(bbox_to_anchor=(1.0, 1.0))
    sns.despine()
    plt.annotate('Based on Coronavirus COVID-19 (2019-nCoV) Data Repository for South Africa \
                 [Hosted by DSFSI group at University of Pretoria]',
                 (0.1, 0.01), xycoords='figure fraction', fontsize=10)
def flat_mutipath_globe(df_travel, path_route, colors, all_starting_countries):
    """
    This is flat structure for multistop.

    Each entry of ``path_route`` is ``(stops, destination)``: ``stops`` is the
    tuple of countries travelled through (its first element may be the fused
    string ``'USA;Mexico'``) and ``destination`` is the final stop.  Segments
    are drawn in travel order, coloured by the route's first country.

    Args:
        df_travel: dataframe with 'country_or_province_travelled', 'latitude'
            and 'longitude' columns (one row per place).
        path_route: iterable of ``(stops, destination)`` route descriptions.
        colors: one colour per entry of *all_starting_countries*.
        all_starting_countries: lookup list mapping a starting country to its
            colour index.
    Returns:
        None
    """
    def locate(place):
        # Rows of df_travel matching a place name (provides latitude/longitude).
        return df_travel[df_travel.country_or_province_travelled == place]

    def draw(basemap, color, src, dst, linewidth):
        # One great-circle segment with 20 interpolation points.
        xs, ys = basemap.gcpoints(src["latitude"], src["longitude"],
                                  dst["latitude"], dst["longitude"], 20)
        plt.plot(xs, ys, color=color, linewidth=linewidth)

    plt.figure(figsize=(30, 30))
    m = Basemap(projection='gall')
    m.fillcontinents(color="#61993b", lake_color="#008ECC")
    m.drawmapboundary(fill_color="#5D9BFF")
    m.drawcountries(color='#585858', linewidth=1)
    m.drawstates(linewidth=0.2)
    m.drawcoastlines(linewidth=1)
    for path_rout in path_route:
        stops, destination = path_rout[0], path_rout[1]
        if stops[0] == 'USA;Mexico':
            # Fused double start: split into two separate waypoints.
            first, second = stops[0].split(';')
            color = colors[all_starting_countries.index(first)]
            waypoints = [locate(first), locate(second),
                         locate(stops[1]), locate(destination)]
            widths = [3, 3, 3]
        elif len(stops) == 2:
            # Names may carry a leading article ('the '); strip it to match
            # df_travel; 'LP' is the legacy code for Limpopo ('LIM').
            origin = stops[0].replace('the ', '')
            color = colors[all_starting_countries.index(origin)]
            waypoints = [locate(origin),
                         locate(stops[1].replace('the ', '')),
                         locate(destination.replace('LP', 'LIM'))]
            widths = [3, 3]
        elif len(stops) == 3:
            color = colors[all_starting_countries.index(stops[0])]
            waypoints = [locate(stops[0]), locate(stops[1]),
                         locate(stops[2]), locate(destination)]
            # Intermediate legs are drawn thinner; only the final leg is bold.
            widths = [0.8, 0.8, 3]
        elif len(stops) == 4:
            color = colors[all_starting_countries.index(stops[0])]
            waypoints = [locate(stop) for stop in stops] + [locate(destination)]
            widths = [3, 3, 3, 3]
        else:
            # Route shapes other than the four handled above are skipped,
            # matching the original fall-through behaviour.
            continue
        for src, dst, lw in zip(waypoints, waypoints[1:], widths):
            draw(m, color, src, dst, lw)
    plt.show()
| 47.250859 | 188 | 0.627564 | import os
import seaborn as sns
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime
from textwrap import wrap
### NOTE: `conda install basemap`
import conda
# Basemap needs the PROJ_LIB environment variable to locate proj4's data
# files; derive it from the conda installation prefix (the directory above
# 'lib') before importing Basemap, otherwise the import can fail.
conda_file_dir = conda.__file__
conda_dir = conda_file_dir.split('lib')[0]
proj_lib = os.path.join(os.path.join(conda_dir, 'share'), 'proj')
os.environ["PROJ_LIB"] = proj_lib
from mpl_toolkits.basemap import Basemap
from matplotlib import ticker
def vertical_bar_chart(df, x, y, label, sort, figsize=(13, 9), ascending=True):
    """
    This customize vertical bar chart from seaborn(sns as aliased above)
    Args:
        df: dataframe
        x: x-axis column
        y: y-axis column
        label: string to label the graph
        figsize: figure size to make chart small or big
        ascending: ascending order from smallest to biggest
        sort: which column to sort by
    Returns:
        None
    """
    sns.set(style="whitegrid")
    fig, ax = plt.subplots(figsize=figsize)
    #sns.set_color_codes(sns.color_palette(["#0088c0"]))
    # Text on the top of each barplot
    ax = sns.barplot(x=x, y=y, data=df.sort_values(sort, ascending=ascending),
                     label=label, color="b", palette=["#0088c0"])
    total = df[y].sum()
    # Annotate each bar with "percent% (count)" just above the bar top.
    for p in ax.patches:
        ax.annotate(str(format(p.get_height()/total * 100, '.2f')) + '%' + ' (' + str(int(p.get_height())) + ')',
                    (p.get_x() + p.get_width() / 2., p.get_height()),
                    ha = 'center', va = 'center',
                    xytext = (0, 10), textcoords = 'offset points')
    # NOTE(review): the comprehension variable `x` shadows the `x` parameter
    # (harmless here since the parameter is no longer used, but confusing).
    y_value=['{:,.0f}'.format(x/total * 100) + '%' for x in ax.get_yticks()]
    # NOTE(review): labels were computed from the ticks *before* a tick at 10
    # is appended below, so ticks and labels can be misaligned — confirm.
    plt.yticks(list(plt.yticks()[0]) + [10])
    ax.set_yticklabels(y_value)
    plt.xlabel('')
    plt.ylabel('')
    sns.despine(left=True, bottom=True)
def horizontal_bar_chart(df, x, y, label, figsize=(16, 16)):
    """
    This customize horizontal bar chart from seaborn(sns as aliased above)
    Args:
        df: dataframe
        x: x-axis column
        y: y-axis column
        label: string to label the graph
        figsize: figure size to make chart small or big
    Returns:
        None
    """
    sns.set(style="whitegrid")
    fig, ax = plt.subplots(figsize=figsize)
    ax = sns.barplot(x=x, y=y, data=df,
                     label=label, color="b", palette=["#0088c0"])
    # Positional access: assumes column 0 holds the category labels and
    # column 1 holds the counts — TODO confirm against callers.
    total = df.values[:, 1].sum()
    # Write "percent% (count)" just past the end of each horizontal bar.
    for i, v in enumerate(df.values[:, 1]):
        ax.text(v + 0.1, i + .25, str(format(v / total * 100, '.2f')) + '% (' + str(v) + ')')
    # Wrap long category names onto multiple lines (20 chars per line).
    labels = [ '\n'.join(wrap(l, 20)) for l in df.values[:, 0]]
    ax.set_yticklabels(labels)
    # NOTE(review): the comprehension variable `x` shadows the `x` parameter,
    # and labels are computed before a tick at 10 is appended — confirm the
    # tick/label alignment is as intended.
    x_value=['{:,.0f}'.format(x/total * 100) + '%' for x in ax.get_xticks()]
    plt.xticks(list(plt.xticks()[0]) + [10])
    ax.set_xticklabels(x_value)
    plt.ylabel('')
    plt.xlabel('')
    sns.despine(left=True, bottom=True)
def line_graph(df, column, figsize=(12, 8)):
    """
    This customize line chart from matplotlib(plt as aliased above)
    Plots the daily counts of ``column`` plus their cumulative total.
    Args:
        df: dataframe
        column: column whose per-value counts are charted
        figsize: figure size to make chart small or big
    Returns:
        None
    """
    fig, ax = plt.subplots(figsize=figsize)
    # value_counts() yields one row per unique value; sort back into value order.
    line_data = df[column].value_counts().reset_index().sort_values(by='index')
    line_data['Cumulative Frequency'] = line_data[column].cumsum()
    line_data.plot(x='index', y=column, style='o-', ax=ax, label='Daily Infection')
    line_data.plot(x='index', y='Cumulative Frequency', style='ro-', ax=ax)
    plt.xticks(rotation=90)
    plt.xlabel('')
def general_line_graph(df, x, y, figsize=(12, 8)):
    """
    This customize line chart from matplotlib(plt as aliased above)
    Args:
        df: dataframe
        x: x-axis column
        y: y-axis column
        figsize: figure size to make chart small or big
    Returns:
        None
    """
    fig, ax = plt.subplots(figsize=figsize)
    # Series label is hard-coded; this chart is intended for daily test counts.
    df.plot(x=x, y=y, style='o-', ax=ax, label='Daily Tests')
    plt.xticks(rotation=90)
    plt.xlabel('')
def pie_chart(df, column):
    """
    This customize pie chart from matplotlib(plt as aliased above)
    Args:
        df: dataframe
        column: column whose value counts become the pie slices
    Returns:
        None
    """
    X = df[column].value_counts()
    # Two-colour palette; `explode` has exactly two entries, so the column is
    # presumably binary — TODO confirm against callers.
    colors = ['#0088C0', '#82DAFF']
    plt.pie(X.values, labels=X.index, colors=colors,
            startangle=90,
            explode = (0, 0),
            textprops={'fontsize': 14},
            autopct = '%1.2f%%')
    plt.axis('equal')  # keep the pie circular
    plt.show()
def flat_globe(travel, colors):
    """
    This customize map chart from Basemap(plt as aliased above)
    Draws every travel route on a flat (Gall projection) world map.
    Args:
        travel: dataframe with Source, Source_Lat/Lon and Dest_Lat/Lon columns
        colors: one colour per unique source country, index-aligned with
            ``travel.Source.unique()``
    Returns:
        None
    """
    plt.figure(figsize = (30,30))
    m = Basemap(projection='gall')
    m.fillcontinents(color="#61993b",lake_color="#008ECC")
    m.drawmapboundary(fill_color="#5D9BFF")
    m.drawcountries(color='#585858',linewidth = 1)
    m.drawstates(linewidth = 0.2)
    m.drawcoastlines(linewidth=1)
    countries = list(travel.Source.unique())
    for item in countries:
        # One great-circle line (20 interpolation points) per unique route.
        for index, row in travel[travel.Source == item].drop_duplicates().iterrows():
            x2, y2 = m.gcpoints( row["Source_Lat"], row["Source_Lon"], row["Dest_Lat"], row["Dest_Lon"], 20)
            plt.plot(x2,y2,color=colors[countries.index(item)],linewidth=0.8)
    plt.show()
def globe(travel, colors):
    """
    This customize map chart from Basemap(plt as aliased above)
    Draws every travel route on an orthographic ("globe") projection.
    Args:
        travel: dataframe with Source, Source_Lat/Lon and Dest_Lat/Lon columns
        colors: one colour per unique source country, index-aligned with
            ``travel.Source.unique()``
    Returns:
        None
    """
    plt.figure(figsize=(16,16))
    m = Basemap(projection='ortho', lat_0=0, lon_0=0)
    m.drawmapboundary(fill_color='#5D9BFF')
    m.fillcontinents(color='#0D9C29',lake_color='#008ECC')
    m.drawcountries(color='#585858',linewidth=1)
    m.drawcoastlines()
    countries = list(travel.Source.unique())
    for item in countries:
        # One great-circle line (20 interpolation points) per unique route.
        for index, row in travel[travel.Source == item].drop_duplicates().iterrows():
            x2, y2 = m.gcpoints( row["Source_Lat"], row["Source_Lon"], row["Dest_Lat"], row["Dest_Lon"], 20)
            plt.plot(x2,y2,color=colors[countries.index(item)],linewidth=0.8)
    plt.show()
def plot_covid19za_grouwth(df, provinces, min_cases=100, ls='-', figsize=(12, 8)):
    """
    This shows covid19za growth since the first case was reported
    from each province

    Args:
        df: case-level dataframe with 'date', 'province' and 'country' columns.
        provinces: provinces to plot, one cumulative curve each.
        min_cases: currently unused.  # NOTE(review): dead parameter — confirm before removing.
        ls: line style for the province curves.
        figsize: figure size to make chart small or big.
    Returns:
        None
    """
    fig, ax = plt.subplots(figsize=figsize)
    df = (df.set_index('date'))
    # Dates come in day-first form in the source data.
    df.index = pd.to_datetime(df.index, dayfirst=True)
    for province in provinces:
        # One row per case, so counting any column ('country') gives cases per day.
        df1 = df.loc[(df.province == province)].groupby(['date']).agg({'country': ['count']})
        df1.columns = ['new cases']
        df1['cummulative'] = df1['new cases'].cumsum()
        # reset_index() makes the x-axis "days since first case" instead of dates.
        (df1.reset_index()['cummulative']
            .plot(label=province, ls=ls))
    x = np.linspace(0, plt.xlim()[1])
    # NOTE(review): this reference line is linear (x + 1.33); a true 33% daily
    # growth curve would be exponential — confirm the intended reference.
    plt.plot(x,x+(1.33), ls='--', color='k', label='33% daily growth')
    plt.title('Data up to {}'.format(df.index.max().strftime('%B %d, %Y')))
    plt.xlabel('Days from first confirmed case')
    plt.ylabel('Confirmed cases')
    ax.get_yaxis().set_major_formatter(ticker.ScalarFormatter())
    ax.set_xticks(range(0,int(plt.xlim()[1])+1))
    plt.legend(bbox_to_anchor=(1.0, 1.0))
    sns.despine()
    plt.annotate('Based on Coronavirus COVID-19 (2019-nCoV) Data Repository for South Africa \
                 [Hosted by DSFSI group at University of Pretoria]',
                 (0.1, 0.01), xycoords='figure fraction', fontsize=10)
def flat_mutipath_globe(df_travel, path_route, colors, all_starting_countries):
    """
    This is flat structure for multistop

    Each entry of ``path_route`` is ``(stops, destination)`` where ``stops``
    lists the countries travelled through; segments are drawn in travel order
    and coloured by the route's first country (looked up via
    ``all_starting_countries``).
    """
    plt.figure(figsize = (30,30))
    m = Basemap(projection='gall')
    m.fillcontinents(color="#61993b",lake_color="#008ECC")
    m.drawmapboundary(fill_color="#5D9BFF")
    m.drawcountries(color='#585858',linewidth = 1)
    m.drawstates(linewidth = 0.2)
    m.drawcoastlines(linewidth=1)
    for path_rout in path_route:
        # Special case: the fused 'USA;Mexico' start is split into two stops.
        if path_rout[0][0] == 'USA;Mexico':
            point_a = df_travel[df_travel.country_or_province_travelled == path_rout[0][0].split(';')[0]]
            point_b = df_travel[df_travel.country_or_province_travelled == path_rout[0][0].split(';')[1]]
            point_c = df_travel[df_travel.country_or_province_travelled == path_rout[0][1]]
            point_d = df_travel[df_travel.country_or_province_travelled == path_rout[1]]
            x2, y2 = m.gcpoints(point_a["latitude"],point_a["longitude"],point_b["latitude"],point_b["longitude"], 20)
            plt.plot(x2,y2,color = colors[all_starting_countries.index(path_rout[0][0].split(';')[0])],linewidth=3)
            # m.scatter(point_a["latitude"],point_a["longitude"], marker='^',color="#EC7063", s=500,zorder=5)
            # plt.text(point_a["latitude"],point_a["longitude"]+10000,path_rout[0][0].split(';')[0].replace('the ', ''),fontsize=20,fontweight='bold',ha='center',va='bottom',color="black")
            x2, y2 = m.gcpoints(point_b["latitude"],point_b["longitude"],point_c["latitude"],point_c["longitude"], 20)
            plt.plot(x2,y2,color = colors[all_starting_countries.index(path_rout[0][0].split(';')[0])],linewidth=3)
            x2, y2 = m.gcpoints(point_c["latitude"],point_c["longitude"],point_d["latitude"],point_d["longitude"], 20)
            plt.plot(x2,y2,color = colors[all_starting_countries.index(path_rout[0][0].split(';')[0])],linewidth=3)
        # Two stops: strip a leading article ('the ') so the names match
        # df_travel; 'LP' is the legacy code for Limpopo ('LIM').
        elif len(path_rout[0]) == 2:
            point_a = df_travel[df_travel.country_or_province_travelled == path_rout[0][0].replace('the ', '')]
            point_b = df_travel[df_travel.country_or_province_travelled == path_rout[0][1].replace('the ', '')]
            point_c = df_travel[df_travel.country_or_province_travelled == path_rout[1].replace('LP', 'LIM')]
            x2, y2 = m.gcpoints(point_a["latitude"],point_a["longitude"],point_b["latitude"],point_b["longitude"], 20)
            plt.plot(x2,y2,color = colors[all_starting_countries.index(path_rout[0][0].replace('the ', ''))],linewidth=3)
            # m.scatter(x2, y2, marker='^',color="#EC7063", s=500,zorder=5)
            # plt.text(x2,y2,path_rout[0][0].replace('the ', ''),fontsize=20,fontweight='bold',ha='center',va='bottom',color="black")
            x2, y2 = m.gcpoints(point_b["latitude"],point_b["longitude"],point_c["latitude"],point_c["longitude"], 20)
            plt.plot(x2,y2,color = colors[all_starting_countries.index(path_rout[0][0].replace('the ', ''))],linewidth=3)
        # Three stops: intermediate legs are drawn thinner (0.8), final leg bold.
        elif len(path_rout[0]) == 3:
            point_a = df_travel[df_travel.country_or_province_travelled == path_rout[0][0]]
            point_b = df_travel[df_travel.country_or_province_travelled == path_rout[0][1]]
            point_c = df_travel[df_travel.country_or_province_travelled == path_rout[0][2]]
            point_d = df_travel[df_travel.country_or_province_travelled == path_rout[1]]
            x2, y2 = m.gcpoints(point_a["latitude"],point_a["longitude"],point_b["latitude"],point_b["longitude"], 20)
            plt.plot(x2,y2,color = colors[all_starting_countries.index(path_rout[0][0])],linewidth=0.8)
            # m.scatter(x2, y2, marker='^',color="#EC7063", s=500,zorder=5)
            # plt.text(x2,y2,path_rout[0][0],fontsize=20,fontweight='bold',ha='center',va='bottom',color="black")
            x2, y2 = m.gcpoints(point_b["latitude"],point_b["longitude"],point_c["latitude"],point_c["longitude"], 20)
            plt.plot(x2,y2,color = colors[all_starting_countries.index(path_rout[0][0])],linewidth=0.8)
            x2, y2 = m.gcpoints(point_c["latitude"],point_c["longitude"],point_d["latitude"],point_d["longitude"], 20)
            plt.plot(x2,y2,color = colors[all_starting_countries.index(path_rout[0][0])],linewidth=3)
        # Four stops: all legs drawn bold.
        elif len(path_rout[0]) == 4:
            point_a = df_travel[df_travel.country_or_province_travelled == path_rout[0][0]]
            point_b = df_travel[df_travel.country_or_province_travelled == path_rout[0][1]]
            point_c = df_travel[df_travel.country_or_province_travelled == path_rout[0][2]]
            point_d = df_travel[df_travel.country_or_province_travelled == path_rout[0][3]]
            point_e = df_travel[df_travel.country_or_province_travelled == path_rout[1]]
            x2, y2 = m.gcpoints(point_a["latitude"],point_a["longitude"],point_b["latitude"],point_b["longitude"], 20)
            plt.plot(x2,y2,color = colors[all_starting_countries.index(path_rout[0][0])],linewidth=3)
            # m.scatter(x2, y2, marker='^',color="#EC7063", s=500,zorder=5)
            # plt.text(x2,y2,path_rout[0][0],fontsize=20,fontweight='bold',ha='center',va='bottom',color="black")
            x2, y2 = m.gcpoints(point_b["latitude"],point_b["longitude"],point_c["latitude"],point_c["longitude"], 20)
            plt.plot(x2,y2,color = colors[all_starting_countries.index(path_rout[0][0])],linewidth=3)
            x2, y2 = m.gcpoints(point_c["latitude"],point_c["longitude"],point_d["latitude"],point_d["longitude"], 20)
            plt.plot(x2,y2,color = colors[all_starting_countries.index(path_rout[0][0])],linewidth=3)
            x2, y2 = m.gcpoints(point_d["latitude"],point_d["longitude"],point_e["latitude"],point_e["longitude"], 20)
            plt.plot(x2,y2,color = colors[all_starting_countries.index(path_rout[0][0])],linewidth=3)
    plt.show()
| 0 | 0 | 0 |
05cf4149c5c572c18ab5076c38f9371a9b8b39cc | 855 | py | Python | lesson-04/classwork6.py | weibak/lessons | 414f030650427d7167c2e58ecb9f858c2e5edb40 | [
"BSD-3-Clause"
] | null | null | null | lesson-04/classwork6.py | weibak/lessons | 414f030650427d7167c2e58ecb9f858c2e5edb40 | [
"BSD-3-Clause"
] | null | null | null | lesson-04/classwork6.py | weibak/lessons | 414f030650427d7167c2e58ecb9f858c2e5edb40 | [
"BSD-3-Clause"
] | null | null | null | """
Вывести в порядке возрастания все простые числа, расположенные между n и m (включая сами числа n и m),
а также количество x этих чисел.
"""
start_number = int(input("Enter start number:"))
end_number = int(input("Enter end number:"))
# Generate elements from start_number to end_number (including)
my_count = 0
for element in range(start_number, end_number + 1):
# Check if this element is the prime number
is_prime = True
for divider in range(2, element):
# If we've found any divider the remainder of which is zero
# So current element is not the prime number
if divider > 1 and element % divider == 0:
is_prime = False
break
# Current element is the prime number
if is_prime:
print(element)
my_count += 1
print("Total count of prime numbers")
print(my_count)
| 29.482759 | 102 | 0.676023 | """
Вывести в порядке возрастания все простые числа, расположенные между n и m (включая сами числа n и m),
а также количество x этих чисел.
"""
start_number = int(input("Enter start number:"))
end_number = int(input("Enter end number:"))
# Generate elements from start_number to end_number (including)
my_count = 0
for element in range(start_number, end_number + 1):
    # Check if this element is the prime number
    # NOTE(review): values below 2 (0, 1, negatives) leave the inner loop
    # empty, so is_prime stays True and they are wrongly reported as primes.
    is_prime = True
    for divider in range(2, element):
        # If we've found any divider the remainder of which is zero
        # So current element is not the prime number
        # (the `divider > 1` test is redundant: the range starts at 2)
        if divider > 1 and element % divider == 0:
            is_prime = False
            break
    # Current element is the prime number
    if is_prime:
        print(element)
        my_count += 1
print("Total count of prime numbers")
print(my_count)
| 0 | 0 | 0 |
537aa9bfd787ebe3ac95f20a3cad16a9e5582a19 | 1,131 | py | Python | test2.py | Atul-Anand-Jha/Email_Automation_Python | 2bd558ef4d58d2ad9e1807b227872db655dfa0bd | [
"MIT"
] | 2 | 2020-11-07T13:50:33.000Z | 2020-11-09T04:34:52.000Z | test2.py | Atul-Anand-Jha/Email-Automation-Python | 2bd558ef4d58d2ad9e1807b227872db655dfa0bd | [
"MIT"
] | null | null | null | test2.py | Atul-Anand-Jha/Email-Automation-Python | 2bd558ef4d58d2ad9e1807b227872db655dfa0bd | [
"MIT"
] | null | null | null | import pandas as pd
import smtplib
import imghdr
from email.message import EmailMessage
SenderAddress = "XYZ@gmail.com"
# SECURITY NOTE(review): the password is hard-coded; load it from an
# environment variable or a secrets store instead of committing it.
password = "ndXX@XX3$#XXX"
e = pd.read_excel("email.xlsx")
emails = e['Emails'].values
names = e["Names"].values
file = "banner.jpg"
print(f"The receiver's mail ids are : \n\n{emails}")
# Read the attachment once up front; it is identical for every recipient.
with open(file, 'rb') as f:
    file_data = f.read()
    file_type = imghdr.what(f.name)
    file_name = f.name
with smtplib.SMTP("smtp.gmail.com", 587, timeout=15) as server:
    server.starttls()
    server.login(SenderAddress, password)
    for email, name in zip(emails, names):
        # Fix: build a fresh message per recipient.  Re-assigning msg['To']
        # on a shared EmailMessage *adds* another To header each iteration
        # (leaking earlier recipients), and add_attachment kept stacking a
        # duplicate attachment on every pass.
        msg = EmailMessage()
        msg['Subject'] = "Hello world - dynamic"
        msg['From'] = SenderAddress
        msg['To'] = email
        body = f"Hello {name};\n\n\nThis is an email from python"
        msg.set_content(body)
        msg.add_attachment(file_data, maintype='image', subtype=file_type, filename=file_name)
        server.send_message(msg)
    # No explicit server.quit(): the with-statement closes the connection.
import smtplib
import imghdr
from email.message import EmailMessage
SenderAddress = "XYZ@gmail.com"
password = "ndXX@XX3$#XXX"
e = pd.read_excel("email.xlsx")
emails = e['Emails'].values
names = e["Names"].values
file = "banner.jpg"
msg = EmailMessage()
msg['Subject'] = "Hello world - dynamic"
msg['From'] = SenderAddress
print(f"The receiver's mail ids are : \n\n{emails}")
with smtplib.SMTP("smtp.gmail.com", 587, timeout=15) as server:
server.starttls()
server.login(SenderAddress, password)
# msg = f"Hello {this is an email form python"
# subject = "Hello world"
# body = "Subject: {}\n\n{}".format(subject,msg)
with open(file, 'rb') as f:
file_data = f.read()
file_type = imghdr.what(f.name)
file_name = f.name
for email,name in zip(emails,names):
msg['To'] = email
body = f"Hello {name};\n\n\nThis is an email from python"
# msg = "Subject: {}\n\n{}".format(subject,body)
msg.set_content(body)
msg.add_attachment(file_data, maintype='image', subtype=file_type, filename=file_name)
server.send_message(msg)
# server.sendmail(SenderAddress, email, msg)
server.quit()
| 0 | 0 | 0 |
aa61305c266822997c121859d13f0a5ce52bfaef | 737 | py | Python | ocrd_models/ocrd_page_user_methods/set_points.py | hnesk/core | 5a79220bc31572410e705d13ca178cf284cdc9fb | [
"Apache-2.0"
] | 91 | 2018-05-23T12:52:11.000Z | 2022-03-19T20:43:49.000Z | ocrd_models/ocrd_page_user_methods/set_points.py | hnesk/core | 5a79220bc31572410e705d13ca178cf284cdc9fb | [
"Apache-2.0"
] | 636 | 2018-04-23T15:57:31.000Z | 2022-03-31T11:46:11.000Z | ocrd_models/ocrd_page_user_methods/set_points.py | hnesk/core | 5a79220bc31572410e705d13ca178cf284cdc9fb | [
"Apache-2.0"
] | 25 | 2018-05-22T11:53:09.000Z | 2021-07-20T13:07:43.000Z | def set_points(self, points):
"""
Set coordinate polygon by given string.
Moreover, invalidate the parent's ``pc:AlternativeImage``s
(because they will have been cropped with a bbox
of the previous polygon).
"""
if hasattr(self, 'parent_object_'):
parent = self.parent_object_
if hasattr(parent, 'invalidate_AlternativeImage'):
# RegionType, TextLineType, WordType, GlyphType:
parent.invalidate_AlternativeImage()
elif hasattr(parent, 'parent_object_') and hasattr(parent.parent_object_, 'invalidate_AlternativeImage'):
# BorderType:
parent.parent_object_.invalidate_AlternativeImage(feature_selector='cropped')
self.points = points
| 43.352941 | 113 | 0.693351 | def set_points(self, points):
"""
Set coordinate polygon by given string.
Moreover, invalidate the parent's ``pc:AlternativeImage``s
(because they will have been cropped with a bbox
of the previous polygon).
"""
if hasattr(self, 'parent_object_'):
parent = self.parent_object_
if hasattr(parent, 'invalidate_AlternativeImage'):
# RegionType, TextLineType, WordType, GlyphType:
parent.invalidate_AlternativeImage()
elif hasattr(parent, 'parent_object_') and hasattr(parent.parent_object_, 'invalidate_AlternativeImage'):
# BorderType:
parent.parent_object_.invalidate_AlternativeImage(feature_selector='cropped')
self.points = points
| 0 | 0 | 0 |
01053b6bf07baa6c1e179859a8d3d680039a21b1 | 3,455 | py | Python | utils.py | JiekaiJia/pettingzoo_comunication | 1e85d5edb87ac867385649616e030284c0b6910f | [
"MIT"
] | 1 | 2021-11-14T13:16:16.000Z | 2021-11-14T13:16:16.000Z | utils.py | JiekaiJia/pettingzoo_comunication | 1e85d5edb87ac867385649616e030284c0b6910f | [
"MIT"
] | null | null | null | utils.py | JiekaiJia/pettingzoo_comunication | 1e85d5edb87ac867385649616e030284c0b6910f | [
"MIT"
] | null | null | null | """This module provides functions that make sure environment to be compatible with RLlib. If Rllib is not used, please
directly use the wrapper in comm_channel.py."""
import numpy as np
from pettingzoo.utils.conversions import to_parallel_wrapper
from pettingzoo.utils.wrappers import AssertOutOfBoundsWrapper, OrderEnforcingWrapper
from ray.rllib.env import PettingZooEnv
from ray.rllib.env.wrappers.pettingzoo_env import ParallelPettingZooEnv
from supersuit import pad_action_space_v0, pad_observations_v0
from comm_channel import ParallelCommWrapper, CommWrapper
def main_comm_env(base_env, comm_dict):
    """Wrap the communication channel into Pettingzoo main environment, and padding the environment.

    Args:
        base_env: a pettingzoo module exposing ``raw_env``.
        comm_dict: communication config (``comm_bits`` plus per-agent receivers).
    Returns:
        A factory that builds the fully wrapped AEC environment.
    """
    # Fix: the inner factory was missing, so `return comm_env` raised NameError.
    def comm_env(**kwargs):
        raw_env = base_env.raw_env(**kwargs)
        # Set all agents to silent: messages travel through CommWrapper,
        # not the environment's own speech actions.
        for agent in raw_env.world.agents:
            agent.silent = True
        env = AssertOutOfBoundsWrapper(raw_env)
        env = OrderEnforcingWrapper(env)
        env = CommWrapper(env, comm_dict)
        # Pad spaces to a common shape, then adapt to RLlib's env API.
        env = pad_observations_v0(env)
        env = pad_action_space_v0(env)
        env = _PettingZooEnv(env)
        return env
    return comm_env
def main_env(base_env):
    """Padding the environment.

    Returns a factory that pads *base_env* and adapts it to RLlib's env API.
    """
    # Fix: the inner factory was missing, so `return env` raised NameError.
    def env(**kwargs):
        env = base_env.env(**kwargs)
        env = pad_observations_v0(env)
        env = pad_action_space_v0(env)
        env = _PettingZooEnv(env)
        return env
    return env
def parallel_comm_env(base_env, comm_dict):
    """Wrap the communication channel into Pettingzoo parallel environment, and padding the environment.

    Args:
        base_env: a pettingzoo module exposing ``raw_env``.
        comm_dict: communication config (``comm_bits`` plus per-agent receivers).
    Returns:
        A factory that builds the fully wrapped parallel environment.
    """
    # Fix: the inner factory was missing, so `return comm_env` raised NameError.
    def comm_env(**kwargs):
        raw_env = base_env.raw_env(**kwargs)
        # Set all agents to silent: messages travel through the comm channel.
        for agent in raw_env.world.agents:
            agent.silent = True
        env = AssertOutOfBoundsWrapper(raw_env)
        env = OrderEnforcingWrapper(env)
        env = to_parallel_wrapper(env)
        env = ParallelCommWrapper(env, comm_dict)
        env = pad_observations_v0(env)
        env = pad_action_space_v0(env)
        env = _ParallelPettingZooEnv(env)
        return env
    return comm_env
def parallel_env(base_env):
    """Padding the parallel environment.

    Returns a factory that pads *base_env* and adapts it to RLlib's env API.
    """
    # Fix: the inner factory was missing, so `return env` raised NameError.
    def env(**kwargs):
        env = base_env.parallel_env(**kwargs)
        env = pad_observations_v0(env)
        env = pad_action_space_v0(env)
        env = _ParallelPettingZooEnv(env)
        return env
    return env
| 33.221154 | 118 | 0.661939 | """This module provides functions that make sure environment to be compatible with RLlib. If Rllib is not used, please
directly use the wrapper in comm_channel.py."""
import numpy as np
from pettingzoo.utils.conversions import to_parallel_wrapper
from pettingzoo.utils.wrappers import AssertOutOfBoundsWrapper, OrderEnforcingWrapper
from ray.rllib.env import PettingZooEnv
from ray.rllib.env.wrappers.pettingzoo_env import ParallelPettingZooEnv
from supersuit import pad_action_space_v0, pad_observations_v0
from comm_channel import ParallelCommWrapper, CommWrapper
def main_comm_env(base_env, comm_dict):
    """Wrap the communication channel into Pettingzoo main environment, and padding the environment.

    Args:
        base_env: a pettingzoo module exposing ``raw_env``.
        comm_dict: communication config (``comm_bits`` plus per-agent receivers).
    Returns:
        A factory that builds the fully wrapped AEC environment.
    """
    def comm_env(**kwargs):
        raw_env = base_env.raw_env(**kwargs)
        # Set all agents to silent
        # (messages travel through CommWrapper, not the env's speech actions).
        for agent in raw_env.world.agents:
            agent.silent = True
        env = AssertOutOfBoundsWrapper(raw_env)
        env = OrderEnforcingWrapper(env)
        env = CommWrapper(env, comm_dict)
        # Pad spaces to a common shape, then adapt to RLlib's env API.
        env = pad_observations_v0(env)
        env = pad_action_space_v0(env)
        env = _PettingZooEnv(env)
        return env
    return comm_env
def main_env(base_env):
    """Padding the environment.

    Returns a factory that pads *base_env* and adapts it to RLlib's env API.
    """
    def env(**kwargs):
        env = base_env.env(**kwargs)
        # Pad spaces to a common shape, then adapt to RLlib's env API.
        env = pad_observations_v0(env)
        env = pad_action_space_v0(env)
        env = _PettingZooEnv(env)
        return env
    return env
def parallel_comm_env(base_env, comm_dict):
    """Wrap the communication channel into Pettingzoo parallel environment, and padding the environment.

    Args:
        base_env: a pettingzoo module exposing ``raw_env``.
        comm_dict: communication config (``comm_bits`` plus per-agent receivers).
    Returns:
        A factory that builds the fully wrapped parallel environment.
    """
    def comm_env(**kwargs):
        raw_env = base_env.raw_env(**kwargs)
        # Set all agents to silent
        # (messages travel through the comm channel wrapper instead).
        for agent in raw_env.world.agents:
            agent.silent = True
        env = AssertOutOfBoundsWrapper(raw_env)
        env = OrderEnforcingWrapper(env)
        # Convert AEC API to the parallel API before adding the comm channel.
        env = to_parallel_wrapper(env)
        env = ParallelCommWrapper(env, comm_dict)
        env = pad_observations_v0(env)
        env = pad_action_space_v0(env)
        env = _ParallelPettingZooEnv(env)
        return env
    return comm_env
def parallel_env(base_env):
    """Padding the parallel environment.

    Returns a factory that pads *base_env* and adapts it to RLlib's env API.
    """
    def env(**kwargs):
        env = base_env.parallel_env(**kwargs)
        # Pad spaces to a common shape, then adapt to RLlib's env API.
        env = pad_observations_v0(env)
        env = pad_action_space_v0(env)
        env = _ParallelPettingZooEnv(env)
        return env
    return env
class _PettingZooEnv(PettingZooEnv):
    """RLlib adapter that coerces logit/one-hot actions to discrete indices."""
    def __init__(self, env):
        super().__init__(env)
    def step(self, action_dict):
        # Ensure the input actions are discrete number.
        for k, v in action_dict.items():
            if isinstance(v, (np.int64, np.int32, np.int16, np.int8, int)):
                pass
            elif not v:
                # Falsy action (e.g. None) is forwarded unchanged.
                # NOTE(review): `not v` raises ValueError for multi-element
                # numpy arrays, so action vectors are presumably plain
                # sequences here — confirm.
                pass
            else:
                # A vector of scores: take the argmax as the discrete action.
                action_dict[k] = np.argmax(v)
        return super().step(action_dict)
class _ParallelPettingZooEnv(ParallelPettingZooEnv):
    """Parallel-API counterpart of ``_PettingZooEnv``."""
    def __init__(self, env):
        super().__init__(env)
    def step(self, action_dict):
        # Ensure the input actions are discrete number.
        for k, v in action_dict.items():
            if isinstance(v, (np.int64, np.int32, np.int16, np.int8, int)):
                pass
            else:
                # NOTE(review): unlike _PettingZooEnv.step there is no guard
                # for falsy/None actions here — confirm the asymmetry is
                # intentional.
                action_dict[k] = np.argmax(v)
        return super().step(action_dict)
def init_comm_dict(env):
    """Build an empty communication config for *env*: zero comm bits and an
    empty receiver list for every possible agent."""
    receivers = {}
    for agent in env.possible_agents:
        receivers[agent] = []
    return {'comm_bits': 0, 'receivers': receivers}
| 2,053 | 46 | 279 |
5efadc3c4ba7eb75a402097a914dbfc40578375a | 1,417 | py | Python | src/lektorium/repo/interface.py | sphericalpm/lektorium | 9b3b72c03495f269494a6a83bf102d79b1f5eeb3 | [
"MIT"
] | 18 | 2019-07-16T06:10:05.000Z | 2021-11-27T12:57:47.000Z | src/lektorium/repo/interface.py | sphericalpm/lektorium | 9b3b72c03495f269494a6a83bf102d79b1f5eeb3 | [
"MIT"
] | 34 | 2019-07-15T17:21:38.000Z | 2021-02-09T14:27:39.000Z | src/lektorium/repo/interface.py | sphericalpm/lektorium | 9b3b72c03495f269494a6a83bf102d79b1f5eeb3 | [
"MIT"
] | 6 | 2019-07-16T09:16:46.000Z | 2019-10-16T08:48:50.000Z | import abc
import random
import string
from typing import Generator, Iterable, Mapping, Optional, Tuple
| 20.242857 | 74 | 0.654199 | import abc
import random
import string
from typing import Generator, Iterable, Mapping, Optional, Tuple
class ExceptionBase(Exception):
    """Root of the repository exception hierarchy."""
    pass
class DuplicateEditSession(ExceptionBase):
    """An edit session for the requested site already exists."""
    pass
class InvalidSessionState(ExceptionBase):
    """The session is not in a state that allows the requested operation."""
    pass
class SessionNotFound(ExceptionBase):
    """No session exists for the given session id."""
    pass
class Repo(metaclass=abc.ABCMeta):
    """Abstract interface of a site repository managing edit sessions."""
    # Fallback (name, email) pair used when no custodian is supplied.
    DEFAULT_USER = ('User Interface Py', 'user@interface.py')
    def generate_session_id(self) -> str:
        """Return a fresh 8-letter id not already present in ``self.sessions``.

        ``random.sample`` draws without replacement, so ids always consist of
        eight *distinct* lowercase letters.
        """
        session_id = None
        while not session_id or session_id in self.sessions:
            session_id = ''.join(random.sample(string.ascii_lowercase, 8))
        return session_id
    @property
    @abc.abstractmethod
    def sites(self) -> Iterable:
        """All known sites."""
        pass
    @property
    @abc.abstractmethod
    def sessions(self) -> Mapping:
        """Mapping of session id to session."""
        pass
    @property
    @abc.abstractmethod
    def parked_sessions(self) -> Generator:
        """Sessions that are currently parked (suspended)."""
        pass
    @abc.abstractmethod
    def create_session(
        self,
        site_id: str,
        custodian: Optional[Tuple[str, str]] = None,
    ) -> str:
        """Create an edit session for *site_id* and return its session id."""
        pass
    @abc.abstractmethod
    def destroy_session(self, session_id: str) -> None:
        """Discard the session identified by *session_id*."""
        pass
    @abc.abstractmethod
    def park_session(self, session_id: str) -> None:
        """Suspend the session identified by *session_id*."""
        pass
    @abc.abstractmethod
    def unpark_session(self, session_id: str) -> None:
        """Resume a previously parked session."""
        pass
    @abc.abstractmethod
    async def init_sessions(self):
        """Asynchronously initialise the session collection."""
        pass
| 538 | 655 | 115 |
0820dda78f060e137ddbdd471c8564150d4f60cc | 1,844 | py | Python | devtest/roles/__init__.py | pycopia/devtest | 9ec93045ba4bab5b20ce99dc61cebd5b5a234d01 | [
"Apache-2.0"
] | null | null | null | devtest/roles/__init__.py | pycopia/devtest | 9ec93045ba4bab5b20ce99dc61cebd5b5a234d01 | [
"Apache-2.0"
] | null | null | null | devtest/roles/__init__.py | pycopia/devtest | 9ec93045ba4bab5b20ce99dc61cebd5b5a234d01 | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Implementations of abstract role interfaces. Test cases can get objects from
here via the testbed attribute. The `get_role` method queries the implementation
field of an equipment, that should point to something in here. But it could be
in another package.
"""
import abc
from .. import importlib
from .. import config
class BaseRole(metaclass=abc.ABCMeta):
"""Base, abstract, role for equipment role controllers."""
class SoftwareRole(metaclass=abc.ABCMeta):
"""Base, abstract, role for software objects.
Usually, this is an emulator of some kind."""
def get_role(classpath):
"""Get a role implementation by its path name."""
return importlib.get_class(classpath, __name__)
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab:fileencoding=utf-8
| 27.939394 | 80 | 0.70282 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Implementations of abstract role interfaces. Test cases can get objects from
here via the testbed attribute. The `get_role` method queries the implementation
field of an equipment, that should point to something in here. But it could be
in another package.
"""
import abc
from .. import importlib
from .. import config
class BaseRole(metaclass=abc.ABCMeta):
    """Base, abstract, role for equipment role controllers."""
    def __init__(self, equipment):
        # Pick the config section named after this equipment's role;
        # fall back to an empty ConfigDict when no such section exists.
        cf = config.get_config()
        self.config = cf.roles.get(equipment["role"], config.ConfigDict())
        self._equipment = equipment
        self.initialize()
    def initialize(self):
        # Hook invoked at the end of __init__; subclasses override as needed.
        pass
    def finalize(self):
        # Optional tear-down hook for subclasses.
        pass
    def close(self):
        # Optional resource-release hook for subclasses.
        pass
class SoftwareRole(metaclass=abc.ABCMeta):
    """Base, abstract, role for software objects.
    Usually, this is an emulator of some kind."""
    def __init__(self, software):
        self._software = software
        self.initialize()
    def initialize(self):
        # Hook invoked at the end of __init__; subclasses override as needed.
        pass
    def finalize(self):
        # Optional tear-down hook for subclasses.
        pass
    def close(self):
        # Optional resource-release hook for subclasses.
        pass
def get_role(classpath):
    """Get a role implementation by its path name.

    :param classpath: dotted path of the role class to resolve
    :return: the class object, resolved relative to this package
    """
    # NOTE: `importlib` is the package's own wrapper (imported from ..),
    # not the stdlib module of the same name.
    return importlib.get_class(classpath, __name__)
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab:fileencoding=utf-8
| 311 | 0 | 214 |
4112cc494680d71604be0aece6b9d93ed3587371 | 555 | py | Python | tests/unitary/LiquidityGaugeV3/test_checkpoint.py | AqualisDAO/curve-dao-contracts | beec73a068da8ed01c0f710939dc5adb776d565b | [
"MIT"
] | 217 | 2020-06-24T14:01:21.000Z | 2022-03-29T08:35:24.000Z | tests/unitary/LiquidityGaugeV3/test_checkpoint.py | AqualisDAO/curve-dao-contracts | beec73a068da8ed01c0f710939dc5adb776d565b | [
"MIT"
] | 25 | 2020-06-24T09:39:02.000Z | 2022-03-22T17:03:00.000Z | tests/unitary/LiquidityGaugeV3/test_checkpoint.py | AqualisDAO/curve-dao-contracts | beec73a068da8ed01c0f710939dc5adb776d565b | [
"MIT"
] | 110 | 2020-07-10T22:45:49.000Z | 2022-03-29T02:51:08.000Z | import brownie
YEAR = 86400 * 365
| 29.210526 | 68 | 0.727928 | import brownie
YEAR = 86400 * 365
def test_user_checkpoint(accounts, gauge_v3):
    # An account may checkpoint itself without reverting.
    gauge_v3.user_checkpoint(accounts[1], {"from": accounts[1]})
def test_user_checkpoint_new_period(accounts, chain, gauge_v3):
    # Checkpointing still succeeds after more than a full year has
    # elapsed, i.e. across a period boundary.
    gauge_v3.user_checkpoint(accounts[1], {"from": accounts[1]})
    chain.sleep(int(YEAR * 1.1))
    gauge_v3.user_checkpoint(accounts[1], {"from": accounts[1]})
def test_user_checkpoint_wrong_account(accounts, gauge_v3):
    # Checkpointing a different user must revert with "dev: unauthorized".
    with brownie.reverts("dev: unauthorized"):
        gauge_v3.user_checkpoint(accounts[2], {"from": accounts[1]})
| 448 | 0 | 69 |
19ecc76fe435519b4c3484ec1cf271408e0a23af | 85 | py | Python | src/Paterns/run.py | seyedalirahimi/tehran-stocks | e22950f0534ad4962c9a2f00560675a1d8c8d94d | [
"MIT"
] | null | null | null | src/Paterns/run.py | seyedalirahimi/tehran-stocks | e22950f0534ad4962c9a2f00560675a1d8c8d94d | [
"MIT"
] | null | null | null | src/Paterns/run.py | seyedalirahimi/tehran-stocks | e22950f0534ad4962c9a2f00560675a1d8c8d94d | [
"MIT"
] | null | null | null | from ta.momentum import rsi
if __name__ == "__main__":
_rsi14 = rsi(Closes, 14)
| 17 | 28 | 0.682353 | from ta.momentum import rsi
if __name__ == "__main__":
_rsi14 = rsi(Closes, 14)
| 0 | 0 | 0 |
6aecb23c654ce6f5077ef0135d4d267781e730af | 50 | py | Python | python/testData/refactoring/move/importAs/after/src/b.py | jnthn/intellij-community | 8fa7c8a3ace62400c838e0d5926a7be106aa8557 | [
"Apache-2.0"
] | 2 | 2019-04-28T07:48:50.000Z | 2020-12-11T14:18:08.000Z | python/testData/refactoring/move/importAs/after/src/b.py | Cyril-lamirand/intellij-community | 60ab6c61b82fc761dd68363eca7d9d69663cfa39 | [
"Apache-2.0"
] | 173 | 2018-07-05T13:59:39.000Z | 2018-08-09T01:12:03.000Z | python/testData/refactoring/move/importAs/after/src/b.py | Cyril-lamirand/intellij-community | 60ab6c61b82fc761dd68363eca7d9d69663cfa39 | [
"Apache-2.0"
] | 2 | 2020-03-15T08:57:37.000Z | 2020-04-07T04:48:14.000Z | import lib1 as iks
| 10 | 19 | 0.62 | import lib1 as iks
def f(x):
return iks.I(x) | 8 | 0 | 23 |
6a17f2469eaf20aab41dc48c7e885593c2f915e8 | 2,322 | py | Python | setup.py | pberkes/enaml | cbcbee929e3117dfe56c0b06dc2385acc832b0e8 | [
"BSD-3-Clause-Clear"
] | null | null | null | setup.py | pberkes/enaml | cbcbee929e3117dfe56c0b06dc2385acc832b0e8 | [
"BSD-3-Clause-Clear"
] | null | null | null | setup.py | pberkes/enaml | cbcbee929e3117dfe56c0b06dc2385acc832b0e8 | [
"BSD-3-Clause-Clear"
] | null | null | null | #------------------------------------------------------------------------------
# Copyright (c) 2013, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#------------------------------------------------------------------------------
import sys
from setuptools import setup, find_packages, Extension
# C++ accelerator extensions bundled with enaml.
ext_modules = [
    Extension(
        'enaml.weakmethod',
        ['enaml/src/weakmethod.cpp'],
        language='c++',
    ),
    Extension(
        'enaml.callableref',
        ['enaml/src/callableref.cpp'],
        language='c++',
    ),
    Extension(
        'enaml.signaling',
        ['enaml/src/signaling.cpp'],
        language='c++',
    ),
    Extension(
        'enaml.core.funchelper',
        ['enaml/src/funchelper.cpp'],
        language='c++',
    ),
    Extension(
        'enaml.colorext',
        ['enaml/src/colorext.cpp'],
        language='c++',
    ),
    Extension(
        'enaml.fontext',
        ['enaml/src/fontext.cpp'],
        language='c++',
    ),
    Extension(
        'enaml.core.dynamicscope',
        ['enaml/src/dynamicscope.cpp'],
        language='c++',
    ),
    Extension(
        'enaml.core.alias',
        ['enaml/src/alias.cpp'],
        language='c++',
    )
]
# Windows-only helper; links against the user32/gdi32 system libraries.
if sys.platform == 'win32':
    ext_modules.append(
        Extension(
            'enaml.winutil',
            ['enaml/src/winutil.cpp'],
            libraries=['user32', 'gdi32'],
            language='c++'
        )
    )
setup(
    name='enaml',
    version='0.8.8',
    author='The Nucleic Development Team',
    author_email='sccolbert@gmail.com',
    url='https://github.com/nucleic/enaml',
    description='Declarative DSL for building rich user interfaces in Python',
    long_description=open('README.md').read(),
    requires=['atom', 'PyQt', 'ply', 'casuarius'],
    # NOTE(review): 'distribute' is the long-merged setuptools fork; modern
    # environments provide setuptools directly — confirm before changing.
    install_requires=['distribute'],
    packages=find_packages(),
    package_data={
        'enaml.applib': ['*.enaml'],
        'enaml.stdlib': ['*.enaml'],
        'enaml.qt.docking': [
            'dock_images/*.png',
            'dock_images/*.py',
            'enaml_dock_resources.qrc'
        ],
    },
    entry_points={'console_scripts': ['enaml-run = enaml.runner:main']},
    ext_modules=ext_modules,
)
| 25.8 | 79 | 0.518949 | #------------------------------------------------------------------------------
# Copyright (c) 2013, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#------------------------------------------------------------------------------
import sys
from setuptools import setup, find_packages, Extension
ext_modules = [
Extension(
'enaml.weakmethod',
['enaml/src/weakmethod.cpp'],
language='c++',
),
Extension(
'enaml.callableref',
['enaml/src/callableref.cpp'],
language='c++',
),
Extension(
'enaml.signaling',
['enaml/src/signaling.cpp'],
language='c++',
),
Extension(
'enaml.core.funchelper',
['enaml/src/funchelper.cpp'],
language='c++',
),
Extension(
'enaml.colorext',
['enaml/src/colorext.cpp'],
language='c++',
),
Extension(
'enaml.fontext',
['enaml/src/fontext.cpp'],
language='c++',
),
Extension(
'enaml.core.dynamicscope',
['enaml/src/dynamicscope.cpp'],
language='c++',
),
Extension(
'enaml.core.alias',
['enaml/src/alias.cpp'],
language='c++',
)
]
if sys.platform == 'win32':
ext_modules.append(
Extension(
'enaml.winutil',
['enaml/src/winutil.cpp'],
libraries=['user32', 'gdi32'],
language='c++'
)
)
setup(
name='enaml',
version='0.8.8',
author='The Nucleic Development Team',
author_email='sccolbert@gmail.com',
url='https://github.com/nucleic/enaml',
description='Declarative DSL for building rich user interfaces in Python',
long_description=open('README.md').read(),
requires=['atom', 'PyQt', 'ply', 'casuarius'],
install_requires=['distribute'],
packages=find_packages(),
package_data={
'enaml.applib': ['*.enaml'],
'enaml.stdlib': ['*.enaml'],
'enaml.qt.docking': [
'dock_images/*.png',
'dock_images/*.py',
'enaml_dock_resources.qrc'
],
},
entry_points={'console_scripts': ['enaml-run = enaml.runner:main']},
ext_modules=ext_modules,
)
| 0 | 0 | 0 |
9f11efd5d577c1b8e6eeefaf3cd91e56b5e55c1d | 566 | py | Python | python_program/myProject/ProjectTest/TestCase/test_information.py | luei1987kg/July_Learn | 3ac6eab5d4442f9e4c2a254e0933382a52921b99 | [
"MIT"
] | null | null | null | python_program/myProject/ProjectTest/TestCase/test_information.py | luei1987kg/July_Learn | 3ac6eab5d4442f9e4c2a254e0933382a52921b99 | [
"MIT"
] | null | null | null | python_program/myProject/ProjectTest/TestCase/test_information.py | luei1987kg/July_Learn | 3ac6eab5d4442f9e4c2a254e0933382a52921b99 | [
"MIT"
] | null | null | null | __author__='administrator'
# -*- coding:utf-8 -*-
import unittest
import time
# if __name__=="__main__":
# unittest.main()
# tester=Test()
# tester.setUp()
# tester.test01()
# tester.test02()
# tester.test03()
# tester.tearDown() | 20.962963 | 31 | 0.558304 | __author__='administrator'
# -*- coding:utf-8 -*-
import unittest
import time
class Test(unittest.TestCase):
    # NOTE(review): Python 2 print-statement syntax — this module cannot run
    # under Python 3 as written.
    def setUp(self):
        # Runs before every test method.
        print "start!"
    def tearDown(self):
        # Runs after every test method; the sleep just spaces out the output.
        time.sleep(1)
        print"end!"
    # unittest executes test methods in alphabetical order of their names,
    # so test02 runs before test03 regardless of definition order below.
    def test01(self):
        print"执行测试用例01"
    def test03(self):
        print"执行测试用例03"
    def test02(self):
        print "执行测试用例02"
# if __name__=="__main__":
# unittest.main()
# tester=Test()
# tester.setUp()
# tester.test01()
# tester.test02()
# tester.test03()
# tester.tearDown() | 161 | 9 | 162 |
6f50dd888966d3ffd28e4e4c221644a077b33442 | 15,416 | py | Python | lenstronomy/ImSim/Numerics/convolution.py | lucateo/lenstronomy | 3ab6cfd4adea2222f02d3f0f1a9cb5390c533aab | [
"MIT"
] | 107 | 2017-08-25T20:03:51.000Z | 2022-03-30T19:52:21.000Z | lenstronomy/ImSim/Numerics/convolution.py | pierrefleury/lenstronomy | 5973f9b45761bab434bb273a1882ca3b45f5264b | [
"MIT"
] | 235 | 2017-06-07T13:30:53.000Z | 2022-03-28T12:44:04.000Z | lenstronomy/ImSim/Numerics/convolution.py | pierrefleury/lenstronomy | 5973f9b45761bab434bb273a1882ca3b45f5264b | [
"MIT"
] | 68 | 2018-02-01T15:47:20.000Z | 2022-03-27T12:44:32.000Z | from scipy import fftpack, ndimage, signal
import numpy as np
import threading
#from scipy._lib._version import NumpyVersion
_rfft_mt_safe = True # (NumpyVersion(np.__version__) >= '1.9.0.dev-e24486e')
_rfft_lock = threading.Lock()
import lenstronomy.Util.kernel_util as kernel_util
import lenstronomy.Util.util as util
import lenstronomy.Util.image_util as image_util
from lenstronomy.Util.package_util import exporter
export, __all__ = exporter()
@export
class PixelKernelConvolution(object):
    """
    class to compute convolutions for a given pixelized kernel (fft, grid)
    """
    def __init__(self, kernel, convolution_type='fft_static'):
        """
        :param kernel: 2d array, convolution kernel
        :param convolution_type: string, 'fft', 'grid', 'fft_static' mode of 2d convolution
        """
        self._kernel = kernel
        if convolution_type not in ['fft', 'grid', 'fft_static']:
            raise ValueError('convolution_type %s not supported!' % convolution_type)
        self._type = convolution_type
        # 'fft_static' caches the kernel FFT on first use (see _static_fft)
        self._pre_computed = False
    def pixel_kernel(self, num_pix=None):
        """
        access pixelated kernel

        :param num_pix: size of returned kernel (odd number per axis). If None, return the original kernel.
        :return: pixel kernel centered
        """
        if num_pix is not None:
            return kernel_util.cut_psf(self._kernel, num_pix)
        return self._kernel
    def copy_transpose(self):
        """
        :return: copy of the class with kernel set to the transpose of original one
        """
        return PixelKernelConvolution(self._kernel.T, convolution_type=self._type)
    def convolution2d(self, image):
        """
        2d convolution of *image* with the stored kernel.

        :param image: 2d array (image) to be convolved
        :return: fft convolution
        """
        if self._type == 'fft':
            image_conv = signal.fftconvolve(image, self._kernel, mode='same')
        elif self._type == 'fft_static':
            image_conv = self._static_fft(image, mode='same')
        elif self._type == 'grid':
            image_conv = signal.convolve2d(image, self._kernel, mode='same')
        else:
            raise ValueError('convolution_type %s not supported!' % self._type)
        return image_conv
    def _static_fft(self, image, mode='same'):
        """
        scipy fft convolution with saved static fft kernel

        NOTE(review): the cached FFT shapes are computed from the *first*
        image passed in and reused for all later calls, so subsequent images
        must have the same shape as the first one. Relies on a module-level
        helper ``_centered``.

        :param image: 2d numpy array to be convolved
        :return:
        """
        in1 = image
        in1 = np.asarray(in1)
        if self._pre_computed is False:
            self._s1, self._s2, self._complex_result, self._shape, self._fshape, self._fslice, self._sp2 = self._static_pre_compute(image)
            self._pre_computed = True
        s1, s2, complex_result, shape, fshape, fslice, sp2 = self._s1, self._s2, self._complex_result, self._shape, self._fshape, self._fslice, self._sp2
        #if in1.ndim == in2.ndim == 0:  # scalar inputs
        #    return in1 * in2
        #elif not in1.ndim == in2.ndim:
        #    raise ValueError("in1 and in2 should have the same dimensionality")
        #elif in1.size == 0 or in2.size == 0:  # empty arrays
        #    return np.array([])
        # Check that input sizes are compatible with 'valid' mode
        #if _inputs_swap_needed(mode, s1, s2):
            # Convolution is commutative; order doesn't have any effect on output
            # only applicable for 'valid' mode
        #    in1, s1, in2, s2 = in2, s2, in1, s1
        # Pre-1.9 NumPy FFT routines are not threadsafe.  For older NumPys, make
        # sure we only call rfftn/irfftn from one thread at a time.
        if not complex_result and (_rfft_mt_safe or _rfft_lock.acquire(False)):
            try:
                sp1 = np.fft.rfftn(in1, fshape)
                ret = (np.fft.irfftn(sp1 * sp2, fshape)[fslice].copy())
            finally:
                if not _rfft_mt_safe:
                    _rfft_lock.release()
        else:
            # If we're here, it's either because we need a complex result, or we
            # failed to acquire _rfft_lock (meaning rfftn isn't threadsafe and
            # is already in use by another thread).  In either case, use the
            # (threadsafe but slower) SciPy complex-FFT routines instead.
            sp1 = fftpack.fftn(in1, fshape)
            ret = fftpack.ifftn(sp1 * sp2)[fslice].copy()
        if not complex_result:
            ret = ret.real
        if mode == "full":
            return ret
        elif mode == "same":
            return _centered(ret, s1)
        elif mode == "valid":
            return _centered(ret, s1 - s2 + 1)
        else:
            raise ValueError("Acceptable mode flags are 'valid',"
                             " 'same', or 'full'.")
    def _static_pre_compute(self, image):
        """
        pre-compute Fourier transformed kernel and shape quantities to speed up convolution

        :param image: 2d numpy array
        :return:
        """
        in1 = image
        in2 = self._kernel
        s1 = np.array(in1.shape)
        s2 = np.array(in2.shape)
        complex_result = (np.issubdtype(in1.dtype, np.complexfloating) or
                          np.issubdtype(in2.dtype, np.complexfloating))
        # full linear-convolution output size
        shape = s1 + s2 - 1
        # Check that input sizes are compatible with 'valid' mode
        #if _inputs_swap_needed(mode, s1, s2):
            # Convolution is commutative; order doesn't have any effect on output
            # only applicable for 'valid' mode
        #    in1, s1, in2, s2 = in2, s2, in1, s1
        # Speed up FFT by padding to optimal size for FFTPACK
        fshape = [fftpack.helper.next_fast_len(int(d)) for d in shape]
        fslice = tuple([slice(0, int(sz)) for sz in shape])
        # Pre-1.9 NumPy FFT routines are not threadsafe.  For older NumPys, make
        # sure we only call rfftn/irfftn from one thread at a time.
        if not complex_result and (_rfft_mt_safe or _rfft_lock.acquire(False)):
            try:
                sp2 = np.fft.rfftn(in2, fshape)
            finally:
                if not _rfft_mt_safe:
                    _rfft_lock.release()
        else:
            # If we're here, it's either because we need a complex result, or we
            # failed to acquire _rfft_lock (meaning rfftn isn't threadsafe and
            # is already in use by another thread).  In either case, use the
            # (threadsafe but slower) SciPy complex-FFT routines instead.
            sp2 = fftpack.fftn(in2, fshape)
        return s1, s2, complex_result, shape, fshape, fslice, sp2
    def re_size_convolve(self, image_low_res, image_high_res=None):
        """
        :param image_low_res: image on the regular pixel grid
        :param image_high_res: supersampled image/model to be convolved on a regular pixel grid (unused here)
        :return: convolved and re-sized image
        """
        return self.convolution2d(image_low_res)
@export
class SubgridKernelConvolution(object):
    """
    class to compute the convolution on a supersampled grid with partial convolution computed on the regular grid
    """
    def __init__(self, kernel_supersampled, supersampling_factor, supersampling_kernel_size=None, convolution_type='fft_static'):
        """
        :param kernel_supersampled: kernel in supersampled pixels
        :param supersampling_factor: supersampling factor relative to the image pixel grid
        :param supersampling_kernel_size: number of pixels (in units of the image pixels) that are convolved with the
        supersampled kernel
        """
        n_high = len(kernel_supersampled)
        self._supersampling_factor = supersampling_factor
        numPix = int(n_high / self._supersampling_factor)
        #if self._supersampling_factor % 2 == 0:
        #    self._kernel = kernel_util.averaging_even_kernel(kernel_supersampled, self._supersampling_factor)
        #else:
        #    self._kernel = util.averaging(kernel_supersampled, numGrid=n_high, numPix=numPix)
        if supersampling_kernel_size is None:
            # convolve everything with the supersampled kernel; the low-res
            # branch is disabled (dummy 3x3 zero kernel is never applied)
            kernel_low_res, kernel_high_res = np.zeros((3, 3)), kernel_supersampled
            self._low_res_convolution = False
        else:
            # split: inner part convolved at high resolution, outskirts at low resolution
            kernel_low_res, kernel_high_res = kernel_util.split_kernel(kernel_supersampled, supersampling_kernel_size,
                                                                       self._supersampling_factor)
            self._low_res_convolution = True
        self._low_res_conv = PixelKernelConvolution(kernel_low_res, convolution_type=convolution_type)
        self._high_res_conv = PixelKernelConvolution(kernel_high_res, convolution_type=convolution_type)
    def convolution2d(self, image):
        """
        :param image: 2d array (high resoluton image) to be convolved and re-sized
        :return: convolved image
        """
        image_high_res_conv = self._high_res_conv.convolution2d(image)
        image_resized_conv = image_util.re_size(image_high_res_conv, self._supersampling_factor)
        if self._low_res_convolution is True:
            # add the low-resolution contribution of the kernel outskirts
            image_resized = image_util.re_size(image, self._supersampling_factor)
            image_resized_conv += self._low_res_conv.convolution2d(image_resized)
        return image_resized_conv
    def re_size_convolve(self, image_low_res, image_high_res):
        """
        :param image_low_res: image on the regular pixel grid
        :param image_high_res: supersampled image/model to be convolved on a regular pixel grid
        :return: convolved and re-sized image
        """
        image_high_res_conv = self._high_res_conv.convolution2d(image_high_res)
        image_resized_conv = image_util.re_size(image_high_res_conv, self._supersampling_factor)
        if self._low_res_convolution is True:
            image_resized_conv += self._low_res_conv.convolution2d(image_low_res)
        return image_resized_conv
@export
class MultiGaussianConvolution(object):
    """
    class to perform a convolution consisting of multiple 2d Gaussians
    This is aimed to lead to a speed-up without significant loss of accuracy do to the simplified convolution kernel
    relative to a pixelized kernel.
    """
    def __init__(self, sigma_list, fraction_list, pixel_scale, supersampling_factor=1, supersampling_convolution=False,
                 truncation=2):
        """
        :param sigma_list: list of std value of Gaussian kernel
        :param fraction_list: fraction of flux to be convolved with each Gaussian kernel
        :param pixel_scale: scale of pixel width (to convert sigmas into units of pixels)
        :param supersampling_factor: int, supersampling factor relative to the image pixel grid
        :param supersampling_convolution: bool, if True the convolution is performed on the supersampled grid
        :param truncation: float. Truncate the filter at this many standard deviations.
            Default is 2.0.
        """
        self._num_gaussians = len(sigma_list)
        # sigmas in units of (possibly supersampled) pixels
        self._sigmas_scaled = np.array(sigma_list) / pixel_scale
        if supersampling_convolution is True:
            self._sigmas_scaled *= supersampling_factor
        # normalize fractions so they sum to unity
        self._fraction_list = fraction_list / np.sum(fraction_list)
        assert len(self._sigmas_scaled) == len(self._fraction_list)
        self._truncation = truncation
        self._pixel_scale = pixel_scale
        self._supersampling_factor = supersampling_factor
        self._supersampling_convolution = supersampling_convolution
    def convolution2d(self, image):
        """
        2d convolution

        :param image: 2d numpy array, image to be convolved
        :return: convolved image, 2d numpy array
        """
        # weighted sum of Gaussian-blurred images; starting from zeros also
        # behaves sanely for an empty Gaussian list (original returned None).
        # ndimage.gaussian_filter is used directly: the ndimage.filters alias
        # is deprecated and removed in SciPy >= 1.15.
        image_conv = np.zeros_like(image, dtype=float)
        for sigma, fraction in zip(self._sigmas_scaled, self._fraction_list):
            image_conv += ndimage.gaussian_filter(image, sigma, mode='nearest',
                                                  truncate=self._truncation) * fraction
        return image_conv
    def re_size_convolve(self, image_low_res, image_high_res):
        """
        :param image_low_res: image on the regular pixel grid
        :param image_high_res: supersampled image/model to be convolved on a regular pixel grid
        :return: convolved and re-sized image
        """
        if self._supersampling_convolution is True:
            # convolve at high resolution, then bin down to the image grid
            image_high_res_conv = self.convolution2d(image_high_res)
            image_resized_conv = image_util.re_size(image_high_res_conv, self._supersampling_factor)
        else:
            image_resized_conv = self.convolution2d(image_low_res)
        return image_resized_conv
    def pixel_kernel(self, num_pix):
        """
        computes a pixelized kernel from the MGE parameters

        :param num_pix: int, size of kernel (odd number per axis)
        :return: pixel kernel centered
        """
        from lenstronomy.LightModel.Profiles.gaussian import MultiGaussian
        mg = MultiGaussian()
        x, y = util.make_grid(numPix=num_pix, deltapix=self._pixel_scale)
        kernel = mg.function(x, y, amp=self._fraction_list, sigma=self._sigmas_scaled)
        kernel = util.array2image(kernel)
        # normalize so the kernel sums to one
        return kernel / np.sum(kernel)
@export
class FWHMGaussianConvolution(object):
    """
    uses a two-dimensional Gaussian function with same FWHM of given kernel as approximation
    """
    def __init__(self, kernel, truncation=4):
        """
        :param kernel: 2d kernel
        :param truncation: sigma scaling of kernel truncation
        """
        # match the Gaussian width to the FWHM of the supplied kernel
        fwhm = kernel_util.fwhm_kernel(kernel)
        self._sigma = util.fwhm2sigma(fwhm)
        self._truncation = truncation
    def convolution2d(self, image):
        """
        2d convolution

        :param image: 2d numpy array, image to be convolved
        :return: convolved image, 2d numpy array
        """
        # ndimage.gaussian_filter is used directly: the ndimage.filters alias
        # is deprecated and removed in SciPy >= 1.15.
        image_conv = ndimage.gaussian_filter(image, self._sigma, mode='nearest',
                                             truncate=self._truncation)
        return image_conv
@export
class MGEConvolution(object):
    """
    approximates a 2d kernel with an azimuthal Multi-Gaussian expansion
    """
    def __init__(self, kernel, pixel_scale, order=1):
        """
        :param kernel: 2d convolution kernel (centered, odd axis number)
        :param pixel_scale: scale of pixel width (units of the kernel grid)
        :param order: order of Multi-Gaussian Expansion
        """
        #kernel_util.fwhm_kernel(kernel)
        amps, sigmas, norm = kernel_util.mge_kernel(kernel, order=order)
        # make instance of MultiGaussian convolution kernel with normalized amplitudes
        self._mge_conv = MultiGaussianConvolution(sigma_list=sigmas*pixel_scale, fraction_list=np.array(amps) / np.sum(amps),
                                                  pixel_scale=pixel_scale, truncation=4)
        self._kernel = kernel
        # store difference between MGE approximation and real kernel
    def convolution2d(self, image):
        """
        2d convolution with the MGE approximation of the kernel

        :param image: 2d numpy array, image to be convolved
        :return: convolved image, 2d numpy array
        """
        return self._mge_conv.convolution2d(image)
    def kernel_difference(self):
        """
        :return: difference between true kernel and MGE approximation
        """
        kernel_mge = self._mge_conv.pixel_kernel(num_pix=len(self._kernel))
        return self._kernel - kernel_mge
| 41.219251 | 153 | 0.644006 | from scipy import fftpack, ndimage, signal
import numpy as np
import threading
#from scipy._lib._version import NumpyVersion
_rfft_mt_safe = True # (NumpyVersion(np.__version__) >= '1.9.0.dev-e24486e')
_rfft_lock = threading.Lock()
import lenstronomy.Util.kernel_util as kernel_util
import lenstronomy.Util.util as util
import lenstronomy.Util.image_util as image_util
from lenstronomy.Util.package_util import exporter
export, __all__ = exporter()
def _centered(arr, newshape):
# Return the center newshape portion of the array.
newshape = np.asarray(newshape)
currshape = np.array(arr.shape)
startind = (currshape - newshape) // 2
endind = startind + newshape
myslice = [slice(startind[k], endind[k]) for k in range(len(endind))]
return arr[tuple(myslice)]
@export
class PixelKernelConvolution(object):
"""
class to compute convolutions for a given pixelized kernel (fft, grid)
"""
def __init__(self, kernel, convolution_type='fft_static'):
"""
:param kernel: 2d array, convolution kernel
:param convolution_type: string, 'fft', 'grid', 'fft_static' mode of 2d convolution
"""
self._kernel = kernel
if convolution_type not in ['fft', 'grid', 'fft_static']:
raise ValueError('convolution_type %s not supported!' % convolution_type)
self._type = convolution_type
self._pre_computed = False
def pixel_kernel(self, num_pix=None):
"""
access pixelated kernel
:param num_pix: size of returned kernel (odd number per axis). If None, return the original kernel.
:return: pixel kernel centered
"""
if num_pix is not None:
return kernel_util.cut_psf(self._kernel, num_pix)
return self._kernel
def copy_transpose(self):
"""
:return: copy of the class with kernel set to the transpose of original one
"""
return PixelKernelConvolution(self._kernel.T, convolution_type=self._type)
def convolution2d(self, image):
"""
:param image: 2d array (image) to be convolved
:return: fft convolution
"""
if self._type == 'fft':
image_conv = signal.fftconvolve(image, self._kernel, mode='same')
elif self._type == 'fft_static':
image_conv = self._static_fft(image, mode='same')
elif self._type == 'grid':
image_conv = signal.convolve2d(image, self._kernel, mode='same')
else:
raise ValueError('convolution_type %s not supported!' % self._type)
return image_conv
def _static_fft(self, image, mode='same'):
"""
scipy fft convolution with saved static fft kernel
:param image: 2d numpy array to be convolved
:return:
"""
in1 = image
in1 = np.asarray(in1)
if self._pre_computed is False:
self._s1, self._s2, self._complex_result, self._shape, self._fshape, self._fslice, self._sp2 = self._static_pre_compute(image)
self._pre_computed = True
s1, s2, complex_result, shape, fshape, fslice, sp2 = self._s1, self._s2, self._complex_result, self._shape, self._fshape, self._fslice, self._sp2
#if in1.ndim == in2.ndim == 0: # scalar inputs
# return in1 * in2
#elif not in1.ndim == in2.ndim:
# raise ValueError("in1 and in2 should have the same dimensionality")
#elif in1.size == 0 or in2.size == 0: # empty arrays
# return np.array([])
# Check that input sizes are compatible with 'valid' mode
#if _inputs_swap_needed(mode, s1, s2):
# Convolution is commutative; order doesn't have any effect on output
# only applicable for 'valid' mode
# in1, s1, in2, s2 = in2, s2, in1, s1
# Pre-1.9 NumPy FFT routines are not threadsafe. For older NumPys, make
# sure we only call rfftn/irfftn from one thread at a time.
if not complex_result and (_rfft_mt_safe or _rfft_lock.acquire(False)):
try:
sp1 = np.fft.rfftn(in1, fshape)
ret = (np.fft.irfftn(sp1 * sp2, fshape)[fslice].copy())
finally:
if not _rfft_mt_safe:
_rfft_lock.release()
else:
# If we're here, it's either because we need a complex result, or we
# failed to acquire _rfft_lock (meaning rfftn isn't threadsafe and
# is already in use by another thread). In either case, use the
# (threadsafe but slower) SciPy complex-FFT routines instead.
sp1 = fftpack.fftn(in1, fshape)
ret = fftpack.ifftn(sp1 * sp2)[fslice].copy()
if not complex_result:
ret = ret.real
if mode == "full":
return ret
elif mode == "same":
return _centered(ret, s1)
elif mode == "valid":
return _centered(ret, s1 - s2 + 1)
else:
raise ValueError("Acceptable mode flags are 'valid',"
" 'same', or 'full'.")
def _static_pre_compute(self, image):
"""
pre-compute Fourier transformed kernel and shape quantities to speed up convolution
:param image: 2d numpy array
:return:
"""
in1 = image
in2 = self._kernel
s1 = np.array(in1.shape)
s2 = np.array(in2.shape)
complex_result = (np.issubdtype(in1.dtype, np.complexfloating) or
np.issubdtype(in2.dtype, np.complexfloating))
shape = s1 + s2 - 1
# Check that input sizes are compatible with 'valid' mode
# if _inputs_swap_needed(mode, s1, s2):
# Convolution is commutative; order doesn't have any effect on output
# only applicable for 'valid' mode
# in1, s1, in2, s2 = in2, s2, in1, s1
# Speed up FFT by padding to optimal size for FFTPACK
fshape = [fftpack.helper.next_fast_len(int(d)) for d in shape]
fslice = tuple([slice(0, int(sz)) for sz in shape])
# Pre-1.9 NumPy FFT routines are not threadsafe. For older NumPys, make
# sure we only call rfftn/irfftn from one thread at a time.
if not complex_result and (_rfft_mt_safe or _rfft_lock.acquire(False)):
try:
sp2 = np.fft.rfftn(in2, fshape)
finally:
if not _rfft_mt_safe:
_rfft_lock.release()
else:
# If we're here, it's either because we need a complex result, or we
# failed to acquire _rfft_lock (meaning rfftn isn't threadsafe and
# is already in use by another thread). In either case, use the
# (threadsafe but slower) SciPy complex-FFT routines instead.
sp2 = fftpack.fftn(in2, fshape)
return s1, s2, complex_result, shape, fshape, fslice, sp2
def re_size_convolve(self, image_low_res, image_high_res=None):
"""
:param image_high_res: supersampled image/model to be convolved on a regular pixel grid
:return: convolved and re-sized image
"""
return self.convolution2d(image_low_res)
@export
class SubgridKernelConvolution(object):
    """
    class to compute the convolution on a supersampled grid with partial convolution computed on the regular grid
    """
    def __init__(self, kernel_supersampled, supersampling_factor, supersampling_kernel_size=None, convolution_type='fft_static'):
        """
        :param kernel_supersampled: kernel in supersampled pixels
        :param supersampling_factor: supersampling factor relative to the image pixel grid
        :param supersampling_kernel_size: number of pixels (in units of the image pixels) that are convolved with the
            supersampled kernel; if None, the full kernel is convolved on the supersampled grid
        :param convolution_type: convolution backend identifier passed through to PixelKernelConvolution
        """
        self._supersampling_factor = supersampling_factor
        if supersampling_kernel_size is None:
            # no kernel split: everything is convolved with the supersampled kernel;
            # the 3x3 zero kernel for the low-res part is never applied
            kernel_low_res, kernel_high_res = np.zeros((3, 3)), kernel_supersampled
            self._low_res_convolution = False
        else:
            # split the kernel into a supersampled core and a low-resolution remainder
            kernel_low_res, kernel_high_res = kernel_util.split_kernel(kernel_supersampled, supersampling_kernel_size,
                                                                       self._supersampling_factor)
            self._low_res_convolution = True
        self._low_res_conv = PixelKernelConvolution(kernel_low_res, convolution_type=convolution_type)
        self._high_res_conv = PixelKernelConvolution(kernel_high_res, convolution_type=convolution_type)

    def convolution2d(self, image):
        """
        Convolve a high-resolution image and re-size it to the regular pixel grid.

        :param image: 2d array (high resolution image) to be convolved and re-sized
        :return: convolved image on the regular (low-resolution) pixel grid
        """
        image_high_res_conv = self._high_res_conv.convolution2d(image)
        image_resized_conv = image_util.re_size(image_high_res_conv, self._supersampling_factor)
        if self._low_res_convolution is True:
            # add the remainder convolution computed on the low-resolution grid
            image_resized = image_util.re_size(image, self._supersampling_factor)
            image_resized_conv += self._low_res_conv.convolution2d(image_resized)
        return image_resized_conv

    def re_size_convolve(self, image_low_res, image_high_res):
        """
        :param image_low_res: image on the regular pixel grid (used for the low-res part of the split convolution)
        :param image_high_res: supersampled image/model to be convolved on a regular pixel grid
        :return: convolved and re-sized image
        """
        image_high_res_conv = self._high_res_conv.convolution2d(image_high_res)
        image_resized_conv = image_util.re_size(image_high_res_conv, self._supersampling_factor)
        if self._low_res_convolution is True:
            image_resized_conv += self._low_res_conv.convolution2d(image_low_res)
        return image_resized_conv
@export
class MultiGaussianConvolution(object):
    """
    class to perform a convolution consisting of multiple 2d Gaussians
    This is aimed to lead to a speed-up without significant loss of accuracy due to the simplified convolution kernel
    relative to a pixelized kernel.
    """

    def __init__(self, sigma_list, fraction_list, pixel_scale, supersampling_factor=1, supersampling_convolution=False,
                 truncation=2):
        """
        :param sigma_list: list of std value of Gaussian kernel
        :param fraction_list: fraction of flux to be convolved with each Gaussian kernel
            (normalized internally to sum to 1)
        :param pixel_scale: scale of pixel width (to convert sigmas into units of pixels)
        :param supersampling_factor: supersampling factor of the high-resolution grid
        :param supersampling_convolution: bool, if True the convolution is performed on the supersampled grid
        :param truncation: float. Truncate the filter at this many standard deviations.
            Default is 2.
        """
        self._num_gaussians = len(sigma_list)
        self._sigmas_scaled = np.array(sigma_list) / pixel_scale
        if supersampling_convolution is True:
            # sigmas are expressed in units of the (smaller) supersampled pixels
            self._sigmas_scaled *= supersampling_factor
        self._fraction_list = fraction_list / np.sum(fraction_list)
        assert len(self._sigmas_scaled) == len(self._fraction_list)
        self._truncation = truncation
        self._pixel_scale = pixel_scale
        self._supersampling_factor = supersampling_factor
        self._supersampling_convolution = supersampling_convolution

    def convolution2d(self, image):
        """
        2d convolution

        :param image: 2d numpy array, image to be convolved
        :return: convolved image, 2d numpy array
        """
        # weighted sum of Gaussian blurs; ndimage.gaussian_filter is the modern
        # namespace (ndimage.filters is deprecated since SciPy 1.8)
        image_conv = None
        for i in range(self._num_gaussians):
            blurred = ndimage.gaussian_filter(image, self._sigmas_scaled[i], mode='nearest',
                                              truncate=self._truncation) * self._fraction_list[i]
            image_conv = blurred if image_conv is None else image_conv + blurred
        return image_conv

    def re_size_convolve(self, image_low_res, image_high_res):
        """
        :param image_low_res: image on the regular pixel grid (used when no supersampled convolution is requested)
        :param image_high_res: supersampled image/model to be convolved on a regular pixel grid
        :return: convolved and re-sized image
        """
        if self._supersampling_convolution is True:
            image_high_res_conv = self.convolution2d(image_high_res)
            image_resized_conv = image_util.re_size(image_high_res_conv, self._supersampling_factor)
        else:
            image_resized_conv = self.convolution2d(image_low_res)
        return image_resized_conv

    def pixel_kernel(self, num_pix):
        """
        computes a pixelized kernel from the MGE parameters

        :param num_pix: int, size of kernel (odd number per axis)
        :return: pixel kernel centered, normalized to unit sum
        """
        from lenstronomy.LightModel.Profiles.gaussian import MultiGaussian
        mg = MultiGaussian()
        x, y = util.make_grid(numPix=num_pix, deltapix=self._pixel_scale)
        kernel = mg.function(x, y, amp=self._fraction_list, sigma=self._sigmas_scaled)
        kernel = util.array2image(kernel)
        return kernel / np.sum(kernel)
@export
class FWHMGaussianConvolution(object):
    """
    uses a two-dimensional Gaussian function with same FWHM of given kernel as approximation
    """
    def __init__(self, kernel, truncation=4):
        """
        :param kernel: 2d kernel
        :param truncation: sigma scaling of kernel truncation
        """
        # approximate the pixelized kernel by a single Gaussian with identical FWHM
        fwhm = kernel_util.fwhm_kernel(kernel)
        self._sigma = util.fwhm2sigma(fwhm)
        self._truncation = truncation

    def convolution2d(self, image):
        """
        2d convolution

        :param image: 2d numpy array, image to be convolved
        :return: convolved image, 2d numpy array
        """
        # ndimage.gaussian_filter is the modern namespace
        # (ndimage.filters is deprecated since SciPy 1.8)
        image_conv = ndimage.gaussian_filter(image, self._sigma, mode='nearest', truncate=self._truncation)
        return image_conv
@export
class MGEConvolution(object):
    """
    Approximates a 2d convolution kernel with an azimuthal Multi-Gaussian Expansion (MGE).
    """
    def __init__(self, kernel, pixel_scale, order=1):
        """
        :param kernel: 2d convolution kernel (centered, odd axis number)
        :param pixel_scale: scale of pixel width
        :param order: order of Multi-Gaussian Expansion
        """
        amplitudes, gaussian_sigmas, norm = kernel_util.mge_kernel(kernel, order=order)
        weight_fractions = np.array(amplitudes) / np.sum(amplitudes)
        # delegate the actual convolution to a MultiGaussianConvolution instance
        self._mge_conv = MultiGaussianConvolution(sigma_list=gaussian_sigmas * pixel_scale,
                                                  fraction_list=weight_fractions,
                                                  pixel_scale=pixel_scale, truncation=4)
        # keep the original kernel so the approximation error can be quantified
        self._kernel = kernel

    def convolution2d(self, image):
        """
        Convolve an image with the MGE approximation of the kernel.

        :param image: 2d numpy array to be convolved
        :return: convolved 2d numpy array
        """
        return self._mge_conv.convolution2d(image)

    def kernel_difference(self):
        """
        :return: difference between true kernel and MGE approximation
        """
        approx_kernel = self._mge_conv.pixel_kernel(num_pix=len(self._kernel))
        return self._kernel - approx_kernel
| 316 | 0 | 23 |
9b6f47641a5011280fdcbe941a421e6f089aa809 | 490 | py | Python | 2_3_plistlib.py | gregneagle/mtc2013_python | 210aa9f216a143f1723d1f9b04dfc79c545f4df6 | [
"Apache-2.0"
] | 4 | 2015-05-23T16:05:45.000Z | 2017-09-17T17:12:56.000Z | 2_3_plistlib.py | gregneagle/mtc2013_python | 210aa9f216a143f1723d1f9b04dfc79c545f4df6 | [
"Apache-2.0"
] | null | null | null | 2_3_plistlib.py | gregneagle/mtc2013_python | 210aa9f216a143f1723d1f9b04dfc79c545f4df6 | [
"Apache-2.0"
] | null | null | null | import plistlib
# Demo: reading values out of macOS property-list (plist) files.
# NOTE(review): Python 2 syntax (print statements) and the legacy
# plistlib.readPlist API, which was removed in Python 3.9 (use plistlib.load).
filename = "/Applications/Safari.app/Contents/Info.plist"
info = plistlib.readPlist(filename)
# value is read and discarded -- demonstrates key access only
info["CFBundleGetInfoString"]
version = info["CFBundleShortVersionString"]
print version
# CFBundleURLTypes is a list of dicts; drill down to the first URL scheme
print info["CFBundleURLTypes"]
print info["CFBundleURLTypes"][0]
print info["CFBundleURLTypes"][0]["CFBundleURLSchemes"]
print info["CFBundleURLTypes"][0]["CFBundleURLSchemes"][0]
# system-wide loginwindow preferences, read the same way
filename = "/Library/Preferences/com.apple.loginwindow.plist"
plistinfo = plistlib.readPlist(filename)
| 24.5 | 61 | 0.789796 | import plistlib
filename = "/Applications/Safari.app/Contents/Info.plist"
info = plistlib.readPlist(filename)
info["CFBundleGetInfoString"]
version = info["CFBundleShortVersionString"]
print version
print info["CFBundleURLTypes"]
print info["CFBundleURLTypes"][0]
print info["CFBundleURLTypes"][0]["CFBundleURLSchemes"]
print info["CFBundleURLTypes"][0]["CFBundleURLSchemes"][0]
filename = "/Library/Preferences/com.apple.loginwindow.plist"
plistinfo = plistlib.readPlist(filename)
| 0 | 0 | 0 |
64bf82724827db0750f356eef07f3af56df5720b | 668 | py | Python | kpi/unical_accounts/migrations/0008_alter_user_codice_fiscale_alter_user_email.py | UniversitaDellaCalabria/kpiManagement | d045a464298e17f50e005b89ba3b71e53d57f368 | [
"Apache-2.0"
] | null | null | null | kpi/unical_accounts/migrations/0008_alter_user_codice_fiscale_alter_user_email.py | UniversitaDellaCalabria/kpiManagement | d045a464298e17f50e005b89ba3b71e53d57f368 | [
"Apache-2.0"
] | null | null | null | kpi/unical_accounts/migrations/0008_alter_user_codice_fiscale_alter_user_email.py | UniversitaDellaCalabria/kpiManagement | d045a464298e17f50e005b89ba3b71e53d57f368 | [
"Apache-2.0"
] | 1 | 2022-03-28T10:48:38.000Z | 2022-03-28T10:48:38.000Z | # Generated by Django 4.0 on 2022-03-28 09:59
from django.db import migrations, models
| 25.692308 | 75 | 0.579341 | # Generated by Django 4.0 on 2022-03-28 09:59
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make User.codice_fiscale and User.email unique (with verbose names).

    Auto-generated by Django 4.0; do not hand-edit -- create a new migration
    for further schema changes.
    """

    dependencies = [
        ('unical_accounts', '0007_user_created_by'),
    ]
    operations = [
        migrations.AlterField(
            model_name='user',
            name='codice_fiscale',
            field=models.CharField(
                max_length=16, unique=True, verbose_name='Codice Fiscale'),
        ),
        migrations.AlterField(
            model_name='user',
            name='email',
            field=models.EmailField(
                max_length=254, unique=True, verbose_name='email address'),
        ),
    ]
| 0 | 556 | 23 |
738d2e0e79482160026dbee0a2e0a9bb2f953f8a | 6,255 | py | Python | src/questions/views.py | saadmk11/yourquery | 5bc64f91846908803becb4e0cb6fece417bbe49a | [
"MIT"
] | null | null | null | src/questions/views.py | saadmk11/yourquery | 5bc64f91846908803becb4e0cb6fece417bbe49a | [
"MIT"
] | null | null | null | src/questions/views.py | saadmk11/yourquery | 5bc64f91846908803becb4e0cb6fece417bbe49a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.db.models import Q
from django.http import Http404
from django.shortcuts import render, get_object_or_404, redirect
from .forms import QuestionForm, AnswerForm
from .models import Category, Question, Answer, SendNotification
# Create your views here.
@login_required()
@login_required()
@login_required()
@login_required()
@login_required()
@login_required()
| 32.748691 | 72 | 0.639488 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.db.models import Q
from django.http import Http404
from django.shortcuts import render, get_object_or_404, redirect
from .forms import QuestionForm, AnswerForm
from .models import Category, Question, Answer, SendNotification
# Create your views here.
def question_list(request):
    """List all questions, with optional text search (?q=) and pagination."""
    queryset = Question.objects.all()
    query = request.GET.get("q")
    if query:
        # match against the question text or its category name
        queryset = queryset.filter(
            Q(qus__icontains=query)|
            Q(category__name__icontains=query)
        ).distinct()
    paginator = Paginator(queryset, 12)  # 12 questions per page
    page = request.GET.get("page")
    try:
        query_list = paginator.page(page)
    except PageNotAnInteger:
        # non-numeric page parameter -> first page
        query_list = paginator.page(1)
    except EmptyPage:
        # out-of-range page parameter -> last page
        query_list = paginator.page(paginator.num_pages)
    context = { "query_list": query_list }
    return render(request, "questions/question_list.html", context)
def question_detail(request, slug=None):
    """Show a question with its answers; authenticated users may post an answer."""
    question = get_object_or_404(Question, slug=slug)
    answers_list = Answer.objects.filter(question=question)
    # default context for anonymous visitors (no answer form)
    context = { "question": question,
                "answers_list": answers_list,
              }
    if request.user.is_authenticated:
        form = AnswerForm(request.POST or None)
        if form.is_valid():
            answer = form.save(commit=False)
            answer.user = request.user
            answer.question = question
            answer.save()
            messages.success(request, 'Answer was Posted.')
            # re-bind an empty form so the submitted text is not re-displayed
            form = AnswerForm()
        context = { "question": question,
                    "form": form,
                    "answers_list": answers_list,
                  }
    return render(request, "questions/question_detail.html", context)
@login_required()
def question_ask(request):
    """Create a new question owned by the current user (GET renders the form)."""
    form = QuestionForm(request.POST or None)
    if form.is_valid():
        question = form.save(commit=False)
        question.user = request.user  # stamp ownership before saving
        question.save()
        messages.success(request, 'Question was Posted.')
        return redirect(question.get_absolute_url())
    context = { "form": form,
                "title": "Ask Question"
              }
    return render(request, "questions/ask.html", context)
@login_required()
def question_update(request, slug=None):
    """Edit an existing question; only its author may update it (404 otherwise)."""
    instance = get_object_or_404(Question, slug=slug)
    if instance.user != request.user:
        raise Http404
    else:
        form = QuestionForm(request.POST or None, instance=instance)
        if form.is_valid():
            question = form.save(commit=False)
            question.user = request.user
            question.save()
            messages.success(request, 'Question was Updated.')
            return redirect(question.get_absolute_url())
        context = { "form": form,
                    "title": "Edit Question"
                  }
        return render(request, "questions/ask.html", context)
@login_required()
def question_delete(request, slug=None):
    """Delete a question; only its author may delete it (404 otherwise)."""
    question = get_object_or_404(Question, slug=slug)
    # NOTE(review): this is_authenticated check is redundant under @login_required
    if not request.user.is_authenticated:
        raise Http404
    else:
        if question.user != request.user:
            raise Http404
        else:
            question.delete()
            messages.error(request, 'Question was Deleted.')
            return redirect(request.user.get_absolute_url())
@login_required()
def answer_update(request, slug=None, pk=None):
    """Edit an answer on a question; only the answer's author may update it."""
    question = get_object_or_404(Question, slug=slug)
    instance = get_object_or_404(Answer, pk=pk)
    if instance.user != request.user:
        raise Http404
    else:
        form = AnswerForm(request.POST or None, instance=instance)
        if form.is_valid():
            answer = form.save(commit=False)
            answer.user = request.user
            answer.question = question  # keep the answer attached to its question
            answer.save()
            messages.success(request, 'Answer was Updated.')
            return redirect(question.get_absolute_url())
        context = { "form": form,
                    "title": "Update Answer"
                  }
        return render(request, "questions/answer.html", context)
@login_required()
def answer_delete(request, slug=None, pk=None):
    """Delete an answer; only its author may delete it (404 otherwise)."""
    question = get_object_or_404(Question, slug=slug)
    answer = get_object_or_404(Answer, pk=pk)
    # NOTE(review): this is_authenticated check is redundant under @login_required
    if not request.user.is_authenticated:
        raise Http404
    else:
        if answer.user != request.user:
            raise Http404
        else:
            answer.delete()
            messages.error(request, 'Answer was Deleted.')
            return redirect(question.get_absolute_url())
def category_list(request):
    """Render the overview page listing every question category."""
    all_categories = Category.objects.all()
    return render(request, "questions/category_list.html", {"categories": all_categories})
def category(request, slug=None):
    """List the questions in one category, with optional text search and pagination."""
    category = get_object_or_404(Category, slug=slug)
    queryset = category.question_set.all()
    query = request.GET.get("q")
    if query:
        queryset = queryset.filter(
            Q(qus__icontains=query)
        ).distinct()
    paginator = Paginator(queryset, 12)  # 12 questions per page
    page = request.GET.get("page")
    try:
        query_list = paginator.page(page)
    except PageNotAnInteger:
        # non-numeric page parameter -> first page
        query_list = paginator.page(1)
    except EmptyPage:
        # out-of-range page parameter -> last page
        query_list = paginator.page(paginator.num_pages)
    context = { "query_list": query_list,
                "category": category
              }
    return render(request, "questions/category.html", context)
@login_required()
def notification(request):
    """Show the current user's notifications, marking them all as viewed."""
    user = request.user
    notification = SendNotification.objects.filter(user=user)
    # opening the page marks every notification as read
    notification.update(viewed=True)
    paginator = Paginator(notification, 12)  # 12 notifications per page
    page = request.GET.get("page")
    try:
        query_list = paginator.page(page)
    except PageNotAnInteger:
        query_list = paginator.page(1)
    except EmptyPage:
        query_list = paginator.page(paginator.num_pages)
    context = { "query_list": query_list }
    return render(request, "questions/notification.html", context)
| 5,412 | 0 | 225 |
4e31b05eb0d20a8423ef63d1655c310369f5df37 | 261 | py | Python | wotpy/protocols/http/__init__.py | JKRhb/wot-py | 3eaa780189b686c82b7dbdea404fd8077bd3c9f9 | [
"MIT"
] | 24 | 2019-02-15T09:00:27.000Z | 2021-12-23T05:45:03.000Z | wotpy/protocols/http/__init__.py | JKRhb/wot-py | 3eaa780189b686c82b7dbdea404fd8077bd3c9f9 | [
"MIT"
] | 20 | 2020-03-17T09:41:51.000Z | 2021-07-14T12:29:02.000Z | wotpy/protocols/http/__init__.py | JKRhb/wot-py | 3eaa780189b686c82b7dbdea404fd8077bd3c9f9 | [
"MIT"
] | 5 | 2019-10-10T13:38:20.000Z | 2021-12-22T14:22:26.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
HTTP Protocol Binding implementation.
.. autosummary::
:toctree: _http
wotpy.protocols.http.handlers
wotpy.protocols.http.client
wotpy.protocols.http.enums
wotpy.protocols.http.server
"""
| 17.4 | 37 | 0.681992 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
HTTP Protocol Binding implementation.
.. autosummary::
:toctree: _http
wotpy.protocols.http.handlers
wotpy.protocols.http.client
wotpy.protocols.http.enums
wotpy.protocols.http.server
"""
| 0 | 0 | 0 |
d2c0f445720e982d0abf551cb916c5f8646b23fd | 3,588 | py | Python | QCPU_Setup/DWave-library/dist-packages/dwave_networkx2/algorithms/independent_set.py | cogrpar/qcpuWARE | 9b8233e830f8cfacbef787781b2279e42f26fec5 | [
"Apache-2.0"
] | 1 | 2022-02-01T14:40:05.000Z | 2022-02-01T14:40:05.000Z | QCPU_Setup/DWave-library/dist-packages/dwave_networkx2/algorithms/independent_set.py | cogrpar/qcpuWARE | 9b8233e830f8cfacbef787781b2279e42f26fec5 | [
"Apache-2.0"
] | null | null | null | QCPU_Setup/DWave-library/dist-packages/dwave_networkx2/algorithms/independent_set.py | cogrpar/qcpuWARE | 9b8233e830f8cfacbef787781b2279e42f26fec5 | [
"Apache-2.0"
] | 1 | 2022-02-01T14:40:31.000Z | 2022-02-01T14:40:31.000Z | from dwave_networkx.utils import binary_quadratic_model_sampler
__all__ = ["maximum_independent_set", "is_independent_set"]
@binary_quadratic_model_sampler(1)
def maximum_independent_set(G, sampler=None, **sampler_args):
    """Returns an approximate maximum independent set.

    Defines a QUBO whose ground states correspond to a maximum
    independent set and draws samples from it with the given sampler.

    An independent set is a set of nodes such that the subgraph
    of G induced by these nodes contains no edges. A maximum
    independent set is an independent set of largest possible size.

    Parameters
    ----------
    G : NetworkX graph

    sampler
        A binary quadratic model sampler. A sampler is a process that
        samples from low energy states in models defined by an Ising
        equation or a Quadratic Unconstrained Binary Optimization
        Problem (QUBO). A sampler is expected to have a 'sample_qubo'
        and 'sample_ising' method. A sampler is expected to return an
        iterable of samples, in order of increasing energy. If no
        sampler is provided, one must be provided using the
        `set_default_sampler` function.

    sampler_args
        Additional keyword parameters are passed to the sampler.

    Returns
    -------
    indep_nodes : list
        List of nodes that the form a maximum independent set, as
        determined by the given sampler.

    Examples
    --------
    >>> G = nx.path_graph(5)
    >>> dnx.maximum_independent_set(G, sampler)
    [0, 2, 4]

    Notes
    -----
    Samplers by their nature may not return the optimal solution. This
    function does not attempt to confirm the quality of the returned
    sample.

    https://en.wikipedia.org/wiki/Independent_set_(graph_theory)
    https://en.wikipedia.org/wiki/Quadratic_unconstrained_binary_optimization

    References
    ----------
    .. [AL] Lucas, A. (2014). Ising formulations of many NP problems.
       Frontiers in Physics, Volume 2, Article 5.
    """
    # QUBO encoding: each node n gets a boolean variable v_n (1 = "in the set").
    # Linear bias -1 on the diagonal rewards putting a node in the set;
    # quadratic term +2 on every edge penalizes selecting both endpoints,
    # so low-energy states are large independent sets.
    Q = dict()
    for node in G:
        Q[(node, node)] = -1
    for edge in G.edges:
        Q[edge] = 2

    # draw low-energy samples; the first sample has the lowest energy
    response = sampler.sample_qubo(Q, **sampler_args)
    best_sample = next(iter(response))

    # spin-up (positive) variables are exactly the selected nodes
    return [node for node in best_sample if best_sample[node] > 0]
def is_independent_set(G, indep_nodes):
    """Determines whether the given nodes form an independent set.

    An independent set is a set of nodes such that the subgraph
    of G induced by these nodes contains no edges.

    Parameters
    ----------
    G : NetworkX graph

    indep_nodes : list
       List of nodes that the form a maximum independent set, as
       determined by the given sampler.

    Returns
    -------
    is_independent : bool
        True if indep_nodes form an independent set.
    """
    induced_edges = G.subgraph(indep_nodes).edges
    return len(list(induced_edges)) == 0
| 33.53271 | 94 | 0.686734 | from dwave_networkx.utils import binary_quadratic_model_sampler
__all__ = ["maximum_independent_set", "is_independent_set"]
@binary_quadratic_model_sampler(1)
def maximum_independent_set(G, sampler=None, **sampler_args):
"""Returns an approximate maximum independent set.
Defines a QUBO with ground states corresponding to a
maximum independent set and uses the sampler to sample from
it.
An independent set is a set of nodes such that the subgraph
of G induced by these nodes contains no edges. A maximum
independent set is an independent set of largest possible size.
Parameters
----------
G : NetworkX graph
sampler
A binary quadratic model sampler. A sampler is a process that
samples from low energy states in models defined by an Ising
equation or a Quadratic Unconstrained Binary Optimization
Problem (QUBO). A sampler is expected to have a 'sample_qubo'
and 'sample_ising' method. A sampler is expected to return an
iterable of samples, in order of increasing energy. If no
sampler is provided, one must be provided using the
`set_default_sampler` function.
sampler_args
Additional keyword parameters are passed to the sampler.
Returns
-------
indep_nodes : list
List of nodes that the form a maximum independent set, as
determined by the given sampler.
Examples
--------
>>> G = nx.path_graph(5)
>>> dnx.maximum_independent_set(G, sampler)
[0, 2, 4]
Notes
-----
Samplers by their nature may not return the optimal solution. This
function does not attempt to confirm the quality of the returned
sample.
https://en.wikipedia.org/wiki/Independent_set_(graph_theory)
https://en.wikipedia.org/wiki/Quadratic_unconstrained_binary_optimization
References
----------
.. [AL] Lucas, A. (2014). Ising formulations of many NP problems.
Frontiers in Physics, Volume 2, Article 5.
"""
# We assume that the sampler can handle an unstructured QUBO problem, so let's set one up.
# Let us define the largest independent set to be S.
# For each node n in the graph, we assign a boolean variable v_n, where v_n = 1 when n
# is in S and v_n = 0 otherwise.
# We call the matrix defining our QUBO problem Q.
# On the diagnonal, we assign the linear bias for each node to be -1. This means that each
# node is biased towards being in S
# On the off diagnonal, we assign the off-diagonal terms of Q to be 2. Thus, if both
# nodes are in S, the overall energy is increased by 2.
Q = {(node, node): -1 for node in G}
Q.update({edge: 2 for edge in G.edges})
# use the sampler to find low energy states
response = sampler.sample_qubo(Q, **sampler_args)
# we want the lowest energy sample
sample = next(iter(response))
# nodes that are spin up or true are exactly the ones in S.
return [node for node in sample if sample[node] > 0]
def is_independent_set(G, indep_nodes):
"""Determines whether the given nodes form an independent set.
An independent set is a set of nodes such that the subgraph
of G induced by these nodes contains no edges.
Parameters
----------
G : NetworkX graph
indep_nodes : list
List of nodes that the form a maximum independent set, as
determined by the given sampler.
Returns
-------
is_independent : bool
True if indep_nodes form an independent set.
"""
return not bool(G.subgraph(indep_nodes).edges)
| 0 | 0 | 0 |
a33ac7c4f39b26557ddcab1ab4337752858138d3 | 2,628 | py | Python | 2016/launcher.py | bartmanus/advent_of_code | 8c5a2d639302c95e49e15d011db2df844bc4e010 | [
"Unlicense"
] | null | null | null | 2016/launcher.py | bartmanus/advent_of_code | 8c5a2d639302c95e49e15d011db2df844bc4e010 | [
"Unlicense"
] | null | null | null | 2016/launcher.py | bartmanus/advent_of_code | 8c5a2d639302c95e49e15d011db2df844bc4e010 | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python3
"""
Launcher for AoC 2016 puzzles.
Handles puzzle selection and puzzle input.
"""
import day_1_no_time_for_a_taxicab as d1
import day_2_bathroom_security as d2
if __name__ == '__main__':
AVAILABLE_PUZZLES = {1: run_taxicab, 2:run_keypad}
print('Welcome to inifinity! Try an available solution to AoC 2016 puzzles in', \
list(AVAILABLE_PUZZLES.keys()), 'or enter EOF to quit!')
while True:
puzzle = None
try:
puzzle = int(input('Please select a puzzle: '))
if puzzle not in AVAILABLE_PUZZLES:
print('That puzzle\'s solution is not available! Try one of', \
list(AVAILABLE_PUZZLES.keys()))
puzzle = None
else:
AVAILABLE_PUZZLES[puzzle]()
except ValueError:
print('Please input an integer!')
except EOFError:
print('\nThanks for playing, happy holidays!')
break
| 38.086957 | 94 | 0.603501 | #!/usr/bin/env python3
"""
Launcher for AoC 2016 puzzles.
Handles puzzle selection and puzzle input.
"""
import day_1_no_time_for_a_taxicab as d1
import day_2_bathroom_security as d2
def run_taxicab():
    """Prompt for day-1 taxicab instructions, validate them, and print both results."""
    while True:
        instructions = input('Instructions in <dir><steps>[, <dir><steps>]* format, please: ')
        try:
            assert instructions != None and len(instructions) > 1
            for instruction in instructions.split(', '):
                # each token must be R or L followed by an integer step count
                assert instruction[0] in ['R', 'L']
                int(instruction[1:])  # raises ValueError on a bad step count
            break
        except AssertionError:
            print('Invalid direction detected, please check your input!')
        except ValueError:
            print('Invalid step format detected, please check your input!')
    distance_total, distance_crossing = d1.taxicab(instructions)
    print('Taxicab distance to final destination is {}.'.format(distance_total))
    print('Taxicab distance to first path crossing is {}.'.format(distance_crossing))
def run_keypad():
    """Prompt for day-2 keypad movement lines and print the code for each keypad."""
    instructions = []
    while len(instructions) == 0:
        print('''Please input 3x3 keypad movement instructions. End input with by feeding
an empty line. For each code digit input one line in [UDLR]+ format. Movement
starts in the middle at digit 5.''')
        while True:
            instructions.append(input())
            if instructions[-1] == '':
                instructions.pop()  # drop the terminating empty line
                if len(instructions) > 0:
                    break
        if d2.valid_input(instructions):
            break
        else:
            # reject the batch and start over
            print('Invalid instructions, please retry!')
            print(str(instructions))
            instructions.clear()
    for keypad in d2.KEYPADS:
        code = d2.keypad(instructions, pad=keypad)
        print('Keypad code to {} is {}.'.format(keypad, code))
if __name__ == '__main__':
    # Interactive menu: map puzzle number -> runner, loop until EOF (Ctrl-D).
    AVAILABLE_PUZZLES = {1: run_taxicab, 2:run_keypad}
    print('Welcome to inifinity! Try an available solution to AoC 2016 puzzles in', \
        list(AVAILABLE_PUZZLES.keys()), 'or enter EOF to quit!')
    while True:
        puzzle = None
        try:
            puzzle = int(input('Please select a puzzle: '))
            if puzzle not in AVAILABLE_PUZZLES:
                print('That puzzle\'s solution is not available! Try one of', \
                    list(AVAILABLE_PUZZLES.keys()))
                puzzle = None
            else:
                AVAILABLE_PUZZLES[puzzle]()
        except ValueError:
            print('Please input an integer!')
        except EOFError:
            # end of input ends the session gracefully
            print('\nThanks for playing, happy holidays!')
            break
| 1,596 | 0 | 46 |
38de5d32c8943c365cbee6da4102c8d00d4c821a | 198 | py | Python | finder/forms.py | rc4594/Dbms | 57a160fd4339a884b1ce4ef75fe8489f6ff30fa2 | [
"MIT"
] | null | null | null | finder/forms.py | rc4594/Dbms | 57a160fd4339a884b1ce4ef75fe8489f6ff30fa2 | [
"MIT"
] | null | null | null | finder/forms.py | rc4594/Dbms | 57a160fd4339a884b1ce4ef75fe8489f6ff30fa2 | [
"MIT"
] | null | null | null | from django import forms
from models import Student
| 24.75 | 73 | 0.712121 | from django import forms
from models import Student
class StudentForm(forms.ModelForm) :
class Meta:
model = Student
fields = ['name','RollNo','hostel','status','Genre1','Genre2','Genre3']
| 0 | 120 | 23 |
4973eb261d4ff7581ed865328f3333ba54885730 | 790 | py | Python | node_manager/models.py | Jennypies/catnet | 8c715e1ad638c9843e116b3c3926163b7dde1618 | [
"MIT"
] | null | null | null | node_manager/models.py | Jennypies/catnet | 8c715e1ad638c9843e116b3c3926163b7dde1618 | [
"MIT"
] | null | null | null | node_manager/models.py | Jennypies/catnet | 8c715e1ad638c9843e116b3c3926163b7dde1618 | [
"MIT"
] | null | null | null | from django.db import models
from django.contrib.auth.models import User
| 32.916667 | 83 | 0.696203 | from django.db import models
from django.contrib.auth.models import User
class Node(models.Model):
    """A monitored network node with users to contact about it."""
    name = models.CharField(max_length=200)
    # not editable in the admin; presumably set when the node checks in -- TODO confirm
    last_contact = models.DateTimeField('Last contact', null=True, editable=False)
    contacts = models.ManyToManyField(User)
    email_users = models.BooleanField(default=True)
    def __str__(self):
        return self.name
class Photo(models.Model):
    """An image attached to a Node, stored under a date-based upload path."""
    node = models.ForeignKey(Node, on_delete=models.CASCADE)
    pub_date = models.DateTimeField('date published', auto_now_add=True)
    photo = models.ImageField(upload_to='photos/%Y/%m/%d')
    # see MEDIA ROOT for more info
    def __str__(self):
        return f"{self.node} {self.pub_date}"
    # a format string containing its related node and pub date
| 136 | 527 | 48 |
6438cabcc2ee27593e57c71efd247356f05e9634 | 3,980 | py | Python | App.py | tyasvdspree/assignmentNetworking | b61a517c40c449298f173f492c50f24947785944 | [
"MIT"
] | 1 | 2020-10-05T14:54:07.000Z | 2020-10-05T14:54:07.000Z | App.py | tyasvdspree/assignmentNetworking | b61a517c40c449298f173f492c50f24947785944 | [
"MIT"
] | null | null | null | App.py | tyasvdspree/assignmentNetworking | b61a517c40c449298f173f492c50f24947785944 | [
"MIT"
] | 1 | 2020-10-05T15:16:18.000Z | 2020-10-05T15:16:18.000Z | from _thread import *
import threading
import socket
import json
# team: PWA
# member: 0870508 Tyas van de Spree
# member: 0966770 Maarten de Goede
# class: DINF2
BYTE_SIZE = 1024
TEAMNAME = "PWA" # programmers with attitude
CLASSNAME = "DINF2"
TEAMMATESTUDENTNR = ''
STUDENTNR = input("Please provide your student number")
if STUDENTNR == "0870508" or STUDENTNR == "":
if STUDENTNR == "":
STUDENTNR = "0870508"
TEAMMATESTUDENTNR = '0966770'
elif STUDENTNR == '0966770':
TEAMMATESTUDENTNR = '0870508'
SERVERIP = '145.24.238.191'
MYIP = socket.gethostbyname(socket.gethostbyname("localhost"))
peerIp = input("Please provide the ip of the peer client you wish to connect with. If left blank will run as both clients")
if peerIp == '':
peerIp = MYIP
print_lock = threading.Lock()
# create a peerListenerSocket object
serverSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
peerConnectionSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
messageReceived = False
if __name__ == '__main__':
Main()
from _thread import *
import threading
import socket
import json
# team: PWA
# member: 0870508 Tyas van de Spree
# member: 0966770 Maarten de Goede
# class: DINF2
BYTE_SIZE = 1024  # receive buffer size for all sockets
TEAMNAME = "PWA"  # programmers with attitude
CLASSNAME = "DINF2"
TEAMMATESTUDENTNR = ''
# Interactive setup: determine which team member runs this instance and who the peer is.
STUDENTNR = input("Please provide your student number")
if STUDENTNR == "0870508" or STUDENTNR == "":
    if STUDENTNR == "":
        STUDENTNR = "0870508"
    TEAMMATESTUDENTNR = '0966770'
elif STUDENTNR == '0966770':
    TEAMMATESTUDENTNR = '0870508'
SERVERIP = '145.24.238.191'
# NOTE(review): gethostbyname is applied twice -- presumably meant
# socket.gethostbyname(socket.gethostname()); verify on the target machine.
MYIP = socket.gethostbyname(socket.gethostbyname("localhost"))
peerIp = input("Please provide the ip of the peer client you wish to connect with. If left blank will run as both clients")
if peerIp == '':
    peerIp = MYIP
print_lock = threading.Lock()
# create a peerListenerSocket object
serverSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
peerConnectionSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
messageReceived = False
class Message(object):
    """Wire-format message exchanged with the assignment server and peer.

    Instances are serialized by dumping __dict__ to JSON, so the attribute
    names form the protocol and must not be renamed.
    """
    def __init__(self, studentnr, classname, clientid, teamname, ip=MYIP, secret=None, status=None):
        self.studentnr = studentnr
        self.classname = classname
        self.clientid = clientid
        self.teamname = teamname
        self.ip = ip
        self.secret = secret
        self.status = status
    def setSecrect(self, secret):
        # NOTE(review): method name is misspelled ("Secrect") but kept for compatibility
        self.secret = secret
    def setStatus(self, status):
        self.status = status
    def getStudentnr(self):
        return self.studentnr
    def getClassname(self):
        return self.classname
    def getClientid(self):
        return self.clientid
    def getTeamname(self):
        return self.teamname
    def getIp(self):
        return self.ip
    def getSecrect(self):
        # NOTE(review): misspelled name kept for compatibility
        return self.secret
    def getStatus(self):
        return self.status
def Server(connection):
    """Handle one inbound peer connection.

    Reads a single JSON payload from the peer, forwards it (re-stamped with
    the second client's identity) to the central server, then closes the
    peer connection and signals completion to Main().

    :param connection: connected socket accepted by peerSocketHandeler()
    """
    # Fix: this flag is polled by Main()'s completion loop.  Without the
    # ``global`` declaration, the assignment at the bottom created a new
    # local variable and Main() waited forever.
    global messageReceived
    while True:
        data = connection.recv(BYTE_SIZE)
        print(data)
        if not data:
            # Fix: the peer closed the connection without sending anything;
            # recv() would keep returning b'' forever, so stop instead of
            # spinning on an empty read.
            break
        serverSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        serverSocket.connect((SERVERIP, 5001))
        answer = serverSocket.recv(BYTE_SIZE)  # server greeting/banner
        print(answer)
        data = json.loads(data)
        message = Message(**data)
        # Re-stamp the relayed message as coming from the second client.
        message.studentnr = '0966770'
        message.clientid = 2
        serverSocket.send(bytes(json.dumps(message.__dict__), 'utf8'))
        answer = serverSocket.recv(BYTE_SIZE)
        print(answer)
        serverSocket.close()
        break
    # NOTE(review): this lock was acquired by peerSocketHandeler() in a
    # different thread; releasing a threading.Lock from another thread is
    # legal, though unusual.
    print_lock.release()
    connection.close()
    messageReceived = True
def peerSocketHandeler(socket):
    """Accept loop for the peer listener socket.

    For every accepted connection, acquire the shared print lock and hand
    the connection to Server() on a new thread (Server releases the lock).

    :param socket: the bound, listening socket created in Main().
        NOTE(review): this parameter shadows the imported ``socket`` module
        inside the function body — consider renaming it.
    """
    while True:
        # establish connection with client
        client, addr = socket.accept()
        # lock acquired by client
        print_lock.acquire()
        print('Connected to :', addr[0], ':', addr[1])
        # Start a new thread and return its identifier
        start_new_thread(Server, (client,))
def Main():
    """Entry point: run as listener (client 2) or initiator (client 1).

    Student '0966770' listens for a peer connection on port 12345; any
    other student number registers with the central server first, then
    forwards the server's reply to the peer.  Blocks until the Server()
    handler reports the exchange as finished via ``messageReceived``.
    """
    import time  # local import: only needed for the completion poll below
    if STUDENTNR == '0966770':
        # create a peerListenerSocket object
        peerListenerSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # bind the port and IP to the peerListenerSocket
        peerListenerSocket.bind(('', 12345))
        # Listen for incoming connections
        peerListenerSocket.listen(5)
        start_new_thread(peerSocketHandeler, (peerListenerSocket,))
    else:
        serverSocket.connect((SERVERIP, 5001))
        print(serverSocket.recv(BYTE_SIZE))  # server greeting/banner
        message = Message(STUDENTNR, CLASSNAME, 1, TEAMNAME)
        serverSocket.send(bytes(json.dumps(message.__dict__), 'utf8'))
        answer = serverSocket.recv(BYTE_SIZE)
        print(answer)
        answer = json.loads(answer)
        message = Message(**answer)
        peerConnectionSocket.connect((peerIp, 12345))
        peerConnectionSocket.send(bytes(json.dumps(message.__dict__), 'utf8'))
    # Fix: the original ``while not messageReceived: pass`` busy-spun on a
    # full CPU core; sleep briefly between checks instead.  (messageReceived
    # is only ever set by Server() — see the global-declaration fix there.)
    while not messageReceived:
        time.sleep(0.05)
if __name__ == '__main__':
Main()
| 2,562 | 1 | 361 |
7a6bc09f03fc1366993dfe34eea65ffbcef063a0 | 1,203 | py | Python | src/astro/dataframe/__init__.py | jlaneve/astro | 4528162c7582f3860d1d21de7af954f20c9f9a6a | [
"Apache-2.0"
] | null | null | null | src/astro/dataframe/__init__.py | jlaneve/astro | 4528162c7582f3860d1d21de7af954f20c9f9a6a | [
"Apache-2.0"
] | null | null | null | src/astro/dataframe/__init__.py | jlaneve/astro | 4528162c7582f3860d1d21de7af954f20c9f9a6a | [
"Apache-2.0"
] | null | null | null | from typing import Callable, Optional
from airflow.decorators.base import task_decorator_factory
from astro.sql.operators.sql_dataframe import SqlDataframeOperator
def dataframe(
python_callable: Optional[Callable] = None,
multiple_outputs: Optional[bool] = None,
conn_id: str = "",
database: Optional[str] = None,
schema: Optional[str] = None,
warehouse: Optional[str] = None,
task_id: Optional[str] = None,
identifiers_as_lower: Optional[bool] = True,
):
"""
This function allows a user to run python functions in Airflow but with the huge benefit that SQL files
will automatically be turned into dataframes and resulting dataframes can automatically used in astro.sql functions
"""
param_map = {
"conn_id": conn_id,
"database": database,
"schema": schema,
"warehouse": warehouse,
"identifiers_as_lower": identifiers_as_lower,
}
if task_id:
param_map["task_id"] = task_id
return task_decorator_factory(
python_callable=python_callable,
multiple_outputs=multiple_outputs,
decorated_operator_class=SqlDataframeOperator, # type: ignore
**param_map,
)
| 32.513514 | 119 | 0.697423 | from typing import Callable, Optional
from airflow.decorators.base import task_decorator_factory
from astro.sql.operators.sql_dataframe import SqlDataframeOperator
def dataframe(
    python_callable: Optional[Callable] = None,
    multiple_outputs: Optional[bool] = None,
    conn_id: str = "",
    database: Optional[str] = None,
    schema: Optional[str] = None,
    warehouse: Optional[str] = None,
    task_id: Optional[str] = None,
    identifiers_as_lower: Optional[bool] = True,
):
    """
    Task decorator for running Python functions in Airflow: SQL sources are
    delivered to the wrapped function as dataframes, and dataframes it
    returns can be consumed directly by astro.sql functions.
    """
    # Collect the operator keyword arguments; task_id is only forwarded
    # when the caller actually supplied one.
    operator_kwargs = dict(
        conn_id=conn_id,
        database=database,
        schema=schema,
        warehouse=warehouse,
        identifiers_as_lower=identifiers_as_lower,
    )
    if task_id:
        operator_kwargs["task_id"] = task_id
    return task_decorator_factory(
        python_callable=python_callable,
        multiple_outputs=multiple_outputs,
        decorated_operator_class=SqlDataframeOperator,  # type: ignore
        **operator_kwargs,
    )
| 0 | 0 | 0 |
1dbf4b7b9733bf48989a9616acea576abb284c79 | 421 | py | Python | core/urls.py | AmoleR/otis-web | afcb1f595675bd1478e231b9de2579d02234a076 | [
"MIT"
] | null | null | null | core/urls.py | AmoleR/otis-web | afcb1f595675bd1478e231b9de2579d02234a076 | [
"MIT"
] | null | null | null | core/urls.py | AmoleR/otis-web | afcb1f595675bd1478e231b9de2579d02234a076 | [
"MIT"
] | null | null | null | from django.urls import path
from . import views
# URL routes for this app; view callables live in the sibling views module.
urlpatterns = [
    # classroom view
    path(r'classroom/', views.classroom, name='classroom'),
    # unit-group listing (class-based view)
    path(r'synopsis/', views.UnitGroupListView.as_view(), name='synopsis'),
    # per-unit resources, keyed by the unit's primary key
    path(r'unit/problems/<int:pk>/', views.unit_problems, name='view-problems'),
    path(r'unit/tex/<int:pk>/', views.unit_tex, name='view-tex'),
    path(r'unit/solutions/<int:pk>/', views.unit_solutions, name='view-solutions'),
]
| 35.083333 | 80 | 0.714964 | from django.urls import path
from . import views
urlpatterns = [
path(r'classroom/', views.classroom, name='classroom'),
path(r'synopsis/', views.UnitGroupListView.as_view(), name='synopsis'),
path(r'unit/problems/<int:pk>/', views.unit_problems, name='view-problems'),
path(r'unit/tex/<int:pk>/', views.unit_tex, name='view-tex'),
path(r'unit/solutions/<int:pk>/', views.unit_solutions, name='view-solutions'),
]
| 0 | 0 | 0 |
d795ac9c432f6e0f1f8a67d8417295173eebd7aa | 5,901 | py | Python | src/hannoy/index.py | marijnl/AquilaDB | ff837f135715619e1d09e94f94b3d25b12a8c5db | [
"Apache-2.0"
] | 2 | 2020-04-30T19:47:07.000Z | 2020-05-03T16:58:34.000Z | src/hannoy/index.py | marijnl/AquilaDB | ff837f135715619e1d09e94f94b3d25b12a8c5db | [
"Apache-2.0"
] | null | null | null | src/hannoy/index.py | marijnl/AquilaDB | ff837f135715619e1d09e94f94b3d25b12a8c5db | [
"Apache-2.0"
] | null | null | null | import numpy as np
from annoy import AnnoyIndex
import yaml
import os
import threading
import queue
import time
model_location = '/data/model_ha' | 35.335329 | 102 | 0.534147 | import numpy as np
from annoy import AnnoyIndex
import yaml
import os
import threading
import queue
import time
model_location = '/data/model_ha'
class Annoy:
    """Background-threaded wrapper around an Annoy nearest-neighbour index.

    Documents queued via addVectors() are folded into the index by a daemon
    worker thread (process()) every ``process_timeout_sec`` seconds.  A copy
    of every inserted vector (``index_disk``) is kept so the index can be
    rebuilt from ``model_location``.npy on restart.  Index reads/writes are
    serialized with ``self._lock``.
    """
    def __init__(self):
        # to keep the thread & queue running
        self.process_flag = True
        self.q_maxsize = 10100
        self.process_thread = None
        self._lock = threading.Lock()
        self.process_timeout_sec = 5 # seconds
        # this is to keep track of all vectors inserted
        # for saving into disk and retrieve later
        self.index_disk = None
        try:
            with open('DB_config.yml', 'r') as stream:
                DB_config = yaml.safe_load(stream)
            # NOTE(review): os.getenv returns a *string* when the variable is
            # set, while the YAML fallback may be numeric — confirm downstream
            # consumers normalise the type.
            self.dim = os.getenv('FIXED_VEC_DIMENSION', DB_config['annoy']['init']['vd'])
            self.sim_metric = os.getenv('ANNOY_SIM_METRIC', DB_config['annoy']['init']['smetric'])
            self.n_trees = os.getenv('ANNOY_NTREES', DB_config['annoy']['init']['ntrees'])
            self.modelLoaded = self.loadModelFromDisk()
        except Exception as e:
            print('Error initializing Annoy: ', e)
        # spawn process thread
        self.spawn()
    def __del__(self):
        """Ask the worker loop to stop and wait for it to exit."""
        self.process_flag = False
        if self.process_thread:
            self.process_thread.join()
    def spawn (self):
        """Create the document queue and start the daemon indexing thread."""
        # create pipeline to add documents
        self.pipeline = queue.Queue(maxsize=self.q_maxsize)
        # create process thread
        self.process_thread = threading.Thread(target=self.process, args=(), daemon=True)
        # start process thread
        self.process_thread.start()
        # return self.pipeline
    def initAnnoy(self):
        """Build (and persist) an empty index if none was loaded from disk.

        Returns the current ``modelLoaded`` state.
        """
        # only do if no index loaded from disk
        if not self.modelLoaded:
            print('Annoy init index')
            self.a_index = AnnoyIndex(self.dim, self.sim_metric)
            # Lock index read / write until it is built
            with self._lock:
                # build index
                build_ = self.a_index.build(self.n_trees)
            if build_:
                self.modelLoaded = self.saveModelToDisk()
        return self.modelLoaded
    def addVectors(self, documents):
        """Queue documents for asynchronous indexing.

        Returns (True, ids) immediately; actual insertion happens on the
        worker thread in process().
        """
        ids = []
        # add vectors
        for document in documents:
            # add document to queue
            self.pipeline.put_nowait(document)
            ids.append(document._id)
        return True, ids
    def process(self):
        """Worker loop: periodically drain the queue and rebuild the index.

        Annoy indexes are immutable once built, so each batch unbuilds,
        inserts, and rebuilds under the lock, then persists to disk.
        NOTE(review): assumes ``self.a_index`` exists — i.e. that
        loadModelFromDisk() or initAnnoy() ran first; confirm callers.
        """
        while (self.process_flag):
            # print(list(self.pipeline.queue))
            # set a timeout till next vector indexing
            time.sleep(self.process_timeout_sec)
            # check if queue is not empty
            if self.pipeline.qsize() > 0:
                # Lock index read / write until it is built
                with self._lock:
                    # unbuild index first
                    self.a_index.unbuild()
                    # fetch all currently available documents from queue
                    while not self.pipeline.empty():
                        # extract document & contents
                        document = self.pipeline.get_nowait()
                        _id = document._id
                        vec = document.vector
                        vector_e = vec.e
                        # resize vectors
                        vector_e_l = len(vector_e)
                        # check if the vector length is below dimension limit
                        # then pad vector with 0 by dimension
                        if vector_e_l < self.dim:
                            vector_e.extend([0]*(self.dim-vector_e_l))
                        # make sure vector length doesn't exceed dimension limit
                        vector_e = vector_e[:self.dim]
                        # add vector to index
                        self.a_index.add_item(int(_id), vector_e)
                        # keep a copy for disk storage (vector + id as last column)
                        list_ = vector_e
                        list_.append(int(_id))
                        # append to disk proxy
                        if self.index_disk is None:
                            self.index_disk = np.array([list_], dtype=float)
                        else:
                            self.index_disk = np.append(self.index_disk, [list_], axis=0)
                    # build vector
                    build_ = self.a_index.build(self.n_trees)
                # write to disk
                if build_:
                    self.modelLoaded = self.saveModelToDisk()
    def deleteVectors(self, ids):
        """Not implemented: currently a no-op that just echoes the ids back."""
        return True, ids
    def getNearest(self, matrix, k):
        """Return (True, ids, dists): the k nearest neighbours per query vector."""
        ids = []
        dists = []
        # Lock index read / write until nearest neighbor search
        with self._lock:
            for vec_data in matrix:
                _id, _dist = self.a_index.get_nns_by_vector(vec_data, k, include_distances=True)
                ids.append(_id)
                dists.append(_dist)
        return True, ids, dists
    def loadModelFromDisk(self):
        """Rebuild the index from ``model_location``.npy; True on success."""
        try:
            # prepare new index
            self.a_index = AnnoyIndex(self.dim, self.sim_metric)
            # read index
            self.index_disk = np.load(model_location+'.npy')
            # build Annoy Index (last column of each row is the item id)
            for vec_ in self.index_disk.tolist():
                self.a_index.add_item(int(vec_[-1]), vec_[0:-1])
            # build index
            build_ = self.a_index.build(self.n_trees)
            print('Annoy index loading success')
            return True
        except Exception as e:
            print('Annoy index loading failed')
            return False
    def saveModelToDisk(self):
        """Persist the raw vector table to ``model_location``.npy; True on success."""
        try:
            # write index
            np.save(model_location, self.index_disk)
            print('Annoy index writing success')
            return True
        # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit;
        # should be ``except Exception``.
        except:
            print('Annoy index writing failed')
            return False
4bb8af7cfebda4f5b9abf228a7db10c33ad3ff2e | 52 | py | Python | salt/returners/__init__.py | skrobul/salt | ef7fb71082cce7a9783e00b9c65062fefae09263 | [
"Apache-2.0"
] | 111 | 2015-01-16T02:48:12.000Z | 2022-02-08T10:24:56.000Z | salt/returners/__init__.py | skrobul/salt | ef7fb71082cce7a9783e00b9c65062fefae09263 | [
"Apache-2.0"
] | 60 | 2015-01-06T12:28:44.000Z | 2020-12-01T21:30:38.000Z | salt/returners/__init__.py | skrobul/salt | ef7fb71082cce7a9783e00b9c65062fefae09263 | [
"Apache-2.0"
] | 163 | 2015-01-06T09:40:31.000Z | 2022-02-03T11:41:23.000Z | # -*- coding: utf-8 -*-
'''
Returners Directory
'''
| 10.4 | 23 | 0.538462 | # -*- coding: utf-8 -*-
'''
Returners Directory
'''
| 0 | 0 | 0 |
eddc16387b276719fe8d1b5a9e83d853a078ea81 | 5,702 | py | Python | app.py | a-tanman/vigil-hotline | 21b73e76c2c3de77f9c93cb11ae47295a064dabd | [
"Apache-2.0"
] | null | null | null | app.py | a-tanman/vigil-hotline | 21b73e76c2c3de77f9c93cb11ae47295a064dabd | [
"Apache-2.0"
] | null | null | null | app.py | a-tanman/vigil-hotline | 21b73e76c2c3de77f9c93cb11ae47295a064dabd | [
"Apache-2.0"
] | null | null | null | #----------------------------------------------------------------------------#
# Imports
#----------------------------------------------------------------------------#
from flask import Flask, render_template, request, jsonify, redirect, url_for
import random
from datetime import datetime
from flask_cors import CORS
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
import math
# from flask.ext.sqlalchemy import SQLAlchemy
import logging
from logging import Formatter, FileHandler
from forms import *
import os
from Aaron_Lib import *
import io
# Imports the Google Cloud client library
from google.cloud import speech
from google.cloud.speech import enums
from google.cloud.speech import types
# Instantiates a client
client = speech.SpeechClient()
#----------------------------------------------------------------------------#
# App Config.
#----------------------------------------------------------------------------#
app = Flask(__name__)
CORS(app)
app.config.from_object('config')
#db = SQLAlchemy(app)
# Automatically tear down SQLAlchemy.
'''
@app.teardown_request
def shutdown_session(exception=None):
db_session.remove()
'''
# Login required decorator.
'''
def login_required(test):
@wraps(test)
def wrap(*args, **kwargs):
if 'logged_in' in session:
return test(*args, **kwargs)
else:
flash('You need to login first.')
return redirect(url_for('login'))
return wrap
'''
# Create list of calls
calls = [
{
'time': str(datetime.now().strftime('%Y-%m-%d %H:%M:%S')),
'text': 'Help!',
'sentiment': 6,
'confidence': 8
}
]
#----------------------------------------------------------------------------#
# Controllers.
#----------------------------------------------------------------------------#
@app.route('/')
@app.route('/about')
@app.route('/login')
@app.route('/register')
@app.route('/forgot')
@app.route('/recorder')
@app.route('/recorder_mobile')
# Error handlers.
@app.errorhandler(500)
@app.errorhandler(404)
if not app.debug:
file_handler = FileHandler('error.log')
file_handler.setFormatter(
Formatter('%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]')
)
app.logger.setLevel(logging.INFO)
file_handler.setLevel(logging.INFO)
app.logger.addHandler(file_handler)
app.logger.info('errors')
# List for request from client
@app.route('/api/newcall', methods = ['POST'])
#----------------------------------------------------------------------------#
# Launch.
#----------------------------------------------------------------------------#
# Default port:
# if __name__ == '__main__':
# app.run()
# Or specify port manually:
if __name__ == '__main__':
port = int(os.environ.get('PORT', 3000))
app.run(host='0.0.0.0', port=port)
| 27.023697 | 98 | 0.590845 | #----------------------------------------------------------------------------#
# Imports
#----------------------------------------------------------------------------#
from flask import Flask, render_template, request, jsonify, redirect, url_for
import random
from datetime import datetime
from flask_cors import CORS
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
import math
# from flask.ext.sqlalchemy import SQLAlchemy
import logging
from logging import Formatter, FileHandler
from forms import *
import os
from Aaron_Lib import *
import io
# Imports the Google Cloud client library
from google.cloud import speech
from google.cloud.speech import enums
from google.cloud.speech import types
# Instantiates a client
client = speech.SpeechClient()
#----------------------------------------------------------------------------#
# App Config.
#----------------------------------------------------------------------------#
app = Flask(__name__)
CORS(app)
app.config.from_object('config')
#db = SQLAlchemy(app)
# Automatically tear down SQLAlchemy.
'''
@app.teardown_request
def shutdown_session(exception=None):
db_session.remove()
'''
# Login required decorator.
'''
def login_required(test):
@wraps(test)
def wrap(*args, **kwargs):
if 'logged_in' in session:
return test(*args, **kwargs)
else:
flash('You need to login first.')
return redirect(url_for('login'))
return wrap
'''
# Create list of calls
calls = [
{
'time': str(datetime.now().strftime('%Y-%m-%d %H:%M:%S')),
'text': 'Help!',
'sentiment': 6,
'confidence': 8
}
]
#----------------------------------------------------------------------------#
# Controllers.
#----------------------------------------------------------------------------#
@app.route('/')
def home():
    """Dashboard: render the received calls, highest sentiment score first."""
    calls.sort(key = lambda x: x['sentiment'], reverse = True)
    return render_template('pages/placeholder.home.html', calls_data = calls)
@app.route('/about')
def about():
return render_template('pages/placeholder.about.html')
@app.route('/login')
def login():
form = LoginForm(request.form)
return render_template('forms/login.html', form=form)
@app.route('/register')
def register():
form = RegisterForm(request.form)
return render_template('forms/register.html', form=form)
@app.route('/forgot')
def forgot():
form = ForgotForm(request.form)
return render_template('forms/forgot.html', form=form)
@app.route('/recorder')
def recorder():
    """Serve the in-browser audio recorder page (desktop variant)."""
    # NOTE(review): reuses ForgotForm — presumably only for the template's
    # form plumbing; confirm a dedicated form class isn't needed.
    form = ForgotForm(request.form)
    return render_template("Recorderjs-master/examples/example_simple_exportwav.html", form=form)
@app.route('/recorder_mobile')
def recorder_m():
form = ForgotForm(request.form)
return render_template("Recorderjs-master/examples/example_simple_exportwav3.html", form=form)
# Error handlers.
@app.errorhandler(500)
def internal_error(error):
#db_session.rollback()
return render_template('errors/500.html'), 500
@app.errorhandler(404)
def not_found_error(error):
return render_template('errors/404.html'), 404
if not app.debug:
file_handler = FileHandler('error.log')
file_handler.setFormatter(
Formatter('%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]')
)
app.logger.setLevel(logging.INFO)
file_handler.setLevel(logging.INFO)
app.logger.addHandler(file_handler)
app.logger.info('errors')
# List for request from client
@app.route('/api/newcall', methods = ['POST'])
def create_call():
    """Accept an uploaded audio clip, transcribe and score it, record the call.

    Expects a multipart POST with the audio under the 'audio_data' field.
    Sends an alert email when the sentiment score crosses the threshold,
    then returns the stored call record as JSON with HTTP 201.
    """
    # if not request.json:
    #     abort(400)
    # return redirect(url_for('login'))
    blob = request.files['audio_data'].read()
    # blob.save(os.path.join())
    text = transcribe(blob)
    print(text.transcript)
    print(text.confidence)
    # Score the transcript's negativity with VADER, scaled to an integer.
    analyzer = SentimentIntensityAnalyzer()
    vs = analyzer.polarity_scores(text.transcript)
    print("{:-<65} {}".format(text.transcript, str(vs)))
    senti = round(18*vs['neg'])
    # Set the score to exactly 5 if any distress keyword appears.
    # NOTE(review): this can *lower* a higher VADER-based score, and the
    # match is case-sensitive ('Help' would not match) — confirm intent.
    wordlist = ['help', 'bad', 'hit', 'sad', 'lonely', 'trouble', 'pain', 'hurting']
    for word in text.transcript.split():
        if word in wordlist:
            senti = 5
    # Email an alert for urgent calls.
    # NOTE(review): recipient address and dashboard URL are hard-coded.
    if senti >= 3:
        Send_Email(["aaron.limzy@uq.net.au"] ,
            [],
            "Hello. A call has been received which may be urgent.",
            "<br><br>The transcript of this call is: {}\
            <br><br>To check it out, go to the user dashboard at https://3463690e.ngrok.io/\
            <br><br>Thanks,<br>Aaron".format(text.transcript),
            [])
    call = {
        'id': str(datetime.now().strftime('%Y-%m-%d %H:%M:%S')),
        'text': text.transcript,
        'sentiment': senti,
        'confidence': round(10*text.confidence)
    }
    calls.append(call)
    return jsonify({'call': call}), 201
def transcribe(blob):
    """Transcribe raw audio bytes with Google Cloud Speech-to-Text.

    :param blob: raw audio bytes (treated as LINEAR16-encoded, en-US speech)
    :return: the top alternative of the last recognition result (exposes
        ``.transcript`` and ``.confidence``, as used by create_call())
    :raises ValueError: if the API returned no recognition results.
        (Fix: previously the post-loop ``return result.alternatives[0]``
        raised an unhelpful NameError when ``response.results`` was empty.)
    """
    client = speech.SpeechClient()
    audio = types.RecognitionAudio(content=blob)
    config = types.RecognitionConfig(
        encoding=enums.RecognitionConfig.AudioEncoding.LINEAR16,
        language_code='en-US')
    # Detects speech in the audio file
    response = client.recognize(config, audio)
    last_result = None
    for result in response.results:
        print(result)
        print('Transcript: {}'.format(result.alternatives[0].transcript))
        last_result = result
    if last_result is None:
        raise ValueError('Speech API returned no recognition results')
    # Preserve the original behaviour: return the *last* result's top alternative.
    return last_result.alternatives[0]
#----------------------------------------------------------------------------#
# Launch.
#----------------------------------------------------------------------------#
# Default port:
# if __name__ == '__main__':
# app.run()
# Or specify port manually:
if __name__ == '__main__':
port = int(os.environ.get('PORT', 3000))
app.run(host='0.0.0.0', port=port)
| 2,591 | 0 | 243 |
1231aef1d097e4a6200ffabfe2739b14b2d58dc5 | 1,396 | py | Python | spark_auto_mapper/automappers/automapper_base.py | icanbwell/SparkAutoMapper | bfd5da72f3b55ec48860935228c1ecf6d7c1a2e4 | [
"Apache-2.0"
] | 2 | 2021-12-27T10:41:59.000Z | 2022-02-24T00:19:40.000Z | spark_auto_mapper/automappers/automapper_base.py | icanbwell/SparkAutoMapper | bfd5da72f3b55ec48860935228c1ecf6d7c1a2e4 | [
"Apache-2.0"
] | 5 | 2020-10-22T01:19:11.000Z | 2021-03-18T16:04:23.000Z | spark_auto_mapper/automappers/automapper_base.py | icanbwell/SparkAutoMapper | bfd5da72f3b55ec48860935228c1ecf6d7c1a2e4 | [
"Apache-2.0"
] | 3 | 2020-12-17T21:23:46.000Z | 2021-07-29T18:08:31.000Z | from typing import List, Dict, Optional
from pyspark.sql import DataFrame, Column
from spark_auto_mapper.automappers.check_schema_result import CheckSchemaResult
class AutoMapperBase:
"""
Abstract Base class for AutoMappers
"""
def transform_with_data_frame(
self, df: DataFrame, source_df: Optional[DataFrame], keys: List[str]
) -> DataFrame:
"""
Internal function called by base class to transform the data frame
:param df: destination data frame
:param source_df: source data frame
:param keys: key columns
:return data frame after the transform
"""
# implement in subclasses
raise NotImplementedError
def get_column_specs(self, source_df: Optional[DataFrame]) -> Dict[str, Column]:
"""
Gets column specs (Spark expressions)
:param source_df: source data frame
:return: dictionary of column name, column expression
"""
raise NotImplementedError
def check_schema(
self, parent_column: Optional[str], source_df: Optional[DataFrame]
) -> Optional[CheckSchemaResult]:
"""
Checks the schema
:param parent_column: parent column
:param source_df: source data frame
:return: result of checking schema
"""
return None
| 25.851852 | 84 | 0.649713 | from typing import List, Dict, Optional
from pyspark.sql import DataFrame, Column
from spark_auto_mapper.automappers.check_schema_result import CheckSchemaResult
class AutoMapperBase:
"""
Abstract Base class for AutoMappers
"""
def __init__(self) -> None:
pass
def transform_with_data_frame(
self, df: DataFrame, source_df: Optional[DataFrame], keys: List[str]
) -> DataFrame:
"""
Internal function called by base class to transform the data frame
:param df: destination data frame
:param source_df: source data frame
:param keys: key columns
:return data frame after the transform
"""
# implement in subclasses
raise NotImplementedError
def get_column_specs(self, source_df: Optional[DataFrame]) -> Dict[str, Column]:
"""
Gets column specs (Spark expressions)
:param source_df: source data frame
:return: dictionary of column name, column expression
"""
raise NotImplementedError
def check_schema(
self, parent_column: Optional[str], source_df: Optional[DataFrame]
) -> Optional[CheckSchemaResult]:
"""
Checks the schema
:param parent_column: parent column
:param source_df: source data frame
:return: result of checking schema
"""
return None
| 19 | 0 | 27 |
f61d11acc6629c2b63f97227a5134b008acfe309 | 511 | py | Python | sphinx/source/docs/user_guide/examples/interaction_tab_panes.py | kevin1kevin1k/bokeh | 9f34b5b710e2748ec803c12918ec1706098a3477 | [
"BSD-3-Clause"
] | 12 | 2020-07-20T14:58:31.000Z | 2021-09-04T22:15:14.000Z | sphinx/source/docs/user_guide/examples/interaction_tab_panes.py | kevin1kevin1k/bokeh | 9f34b5b710e2748ec803c12918ec1706098a3477 | [
"BSD-3-Clause"
] | 1 | 2020-09-05T02:46:20.000Z | 2020-09-05T02:46:20.000Z | sphinx/source/docs/user_guide/examples/interaction_tab_panes.py | kevin1kevin1k/bokeh | 9f34b5b710e2748ec803c12918ec1706098a3477 | [
"BSD-3-Clause"
] | 3 | 2019-03-27T23:27:05.000Z | 2020-08-05T19:03:19.000Z | from bokeh.models import Panel, Tabs
from bokeh.io import output_file, show
from bokeh.plotting import figure
output_file("slider.html")
p1 = figure(plot_width=300, plot_height=300)
p1.circle([1, 2, 3, 4, 5], [6, 7, 2, 4, 5], size=20, color="navy", alpha=0.5)
tab1 = Panel(child=p1, title="circle")
p2 = figure(plot_width=300, plot_height=300)
p2.line([1, 2, 3, 4, 5], [6, 7, 2, 4, 5], line_width=3, color="navy", alpha=0.5)
tab2 = Panel(child=p2, title="line")
tabs = Tabs(tabs=[ tab1, tab2 ])
show(tabs)
| 28.388889 | 80 | 0.675147 | from bokeh.models import Panel, Tabs
from bokeh.io import output_file, show
from bokeh.plotting import figure
output_file("slider.html")
p1 = figure(plot_width=300, plot_height=300)
p1.circle([1, 2, 3, 4, 5], [6, 7, 2, 4, 5], size=20, color="navy", alpha=0.5)
tab1 = Panel(child=p1, title="circle")
p2 = figure(plot_width=300, plot_height=300)
p2.line([1, 2, 3, 4, 5], [6, 7, 2, 4, 5], line_width=3, color="navy", alpha=0.5)
tab2 = Panel(child=p2, title="line")
tabs = Tabs(tabs=[ tab1, tab2 ])
show(tabs)
| 0 | 0 | 0 |
a8b1d0d3f419b87fe666dbebbfa8c7d0b7bd9341 | 576 | py | Python | DSA/string/letterCombinations.py | lance-lh/Data-Structures-and-Algorithms | c432654edaeb752536e826e88bcce3ed2ab000fb | [
"MIT"
] | 1 | 2019-03-27T13:00:28.000Z | 2019-03-27T13:00:28.000Z | DSA/string/letterCombinations.py | lance-lh/Data-Structures-and-Algorithms | c432654edaeb752536e826e88bcce3ed2ab000fb | [
"MIT"
] | null | null | null | DSA/string/letterCombinations.py | lance-lh/Data-Structures-and-Algorithms | c432654edaeb752536e826e88bcce3ed2ab000fb | [
"MIT"
] | null | null | null | # @return a list of strings, [s1, s2]
# test
digits = "23"
print(Solution().letterCombinations(digits)) | 25.043478 | 100 | 0.447917 | class Solution:
# @return a list of strings, [s1, s2]
def letterCombinations(self, digits):
from functools import reduce
if digits == '':
return []
mapping = {
'2': 'abc',
'3': 'def',
'4': 'ghi',
'5': 'jkl',
'6': 'mno',
'7': 'pqrs',
'8': 'tuv',
'9': 'wxyz'
}
return reduce(lambda acc, digit: [x + y for x in acc for y in mapping[digit]], digits, [''])
# test
digits = "23"
print(Solution().letterCombinations(digits)) | 426 | -6 | 48 |
014b2ff73bbee2c8fd077762e2cf0325f04ca8e3 | 1,556 | py | Python | velocyto/segment_match.py | subercui/velocyto.py | 87269d36f9e99650953dd4b1b9c4f505453b6515 | [
"BSD-2-Clause"
] | 119 | 2017-11-06T15:36:51.000Z | 2022-03-29T20:11:28.000Z | velocyto/segment_match.py | subercui/velocyto.py | 87269d36f9e99650953dd4b1b9c4f505453b6515 | [
"BSD-2-Clause"
] | 303 | 2017-10-20T22:48:11.000Z | 2022-03-26T19:17:36.000Z | velocyto/segment_match.py | subercui/velocyto.py | 87269d36f9e99650953dd4b1b9c4f505453b6515 | [
"BSD-2-Clause"
] | 74 | 2017-10-20T21:31:42.000Z | 2022-02-20T09:29:22.000Z | from typing import *
import velocyto as vcy
| 35.363636 | 140 | 0.596401 | from typing import *
import velocyto as vcy
class SegmentMatch:
    """A match between one read segment and a genomic feature.

    ``segment`` is a (start, end) position tuple, ``feature`` the matched
    ``vcy.Feature``, and ``is_spliced`` records whether the alignment had a
    reference skip (BAM CREF_SKIP) at this segment.
    """
    __slots__ = ["segment", "feature", "is_spliced"]
    def __init__(self, segment: Tuple[int, int], feature: vcy.Feature, is_spliced: bool=False) -> None:
        self.segment = segment
        self.feature = feature
        self.is_spliced = is_spliced # this is really BAM_CREF_SKIP
    @property
    def maps_to_intron(self) -> bool:
        """True if the matched feature is an intron (kind code 105, ord('i'))."""
        return self.feature.kind == 105 # ord("i")
    @property
    def maps_to_exon(self) -> bool:
        """True if the matched feature is an exon (kind code 101, ord('e'))."""
        return self.feature.kind == 101 # ord("e")
    @property
    def skip_makes_sense(self) -> bool:
        """If the SKIP in the segment matches some extremity of the feature and therefore can be interpreted as a splice event
        """
        if not self.is_spliced:
            return True # NOTE: maybe here I should raise an error because the property is not supposed to be called
        else:
            if abs(self.feature.start - self.segment[0]) <= vcy.SPLIC_INACUR or abs(self.feature.end - self.segment[1]) <= vcy.SPLIC_INACUR:
                return True
            else:
                return False
    def __repr__(self) -> str:
        """Human-readable summary: match kind, segment span and feature."""
        txt = "<SegmentMatch "
        if self.maps_to_intron:
            txt += 'intron '
        if self.maps_to_exon:
            txt += 'exon '
        if self.is_spliced:
            txt += "spliced"
        txt += f"\nSegmentPosition:{self.segment[0]}-{self.segment[1]} ({self.segment[1]-self.segment[0]+1}bp)"
        txt += f"\n{self.feature}\n>"
        return txt
| 715 | 773 | 23 |
3f8c4f5fff8a542b17ea599da32d09636a18443d | 5,767 | py | Python | plastron/pcdm.py | peichman-umd/plastron | 8453b1dc598eaf60e50a4614444a2c713b96190a | [
"Apache-2.0"
] | 3 | 2019-06-12T08:07:52.000Z | 2019-09-13T18:16:30.000Z | plastron/pcdm.py | peichman-umd/plastron | 8453b1dc598eaf60e50a4614444a2c713b96190a | [
"Apache-2.0"
] | 14 | 2018-05-11T15:17:40.000Z | 2022-03-11T23:27:50.000Z | plastron/pcdm.py | peichman-umd/plastron | 8453b1dc598eaf60e50a4614444a2c713b96190a | [
"Apache-2.0"
] | 5 | 2018-04-13T20:58:30.000Z | 2020-03-25T12:59:34.000Z | from plastron import ldp, ore, rdf
from plastron.namespaces import dcterms, dcmitype, ebucore, fabio, pcdm, pcdmuse, premis
from plastron.files import LocalFileSource, RepositoryFileSource
from PIL import Image
# alias the rdflib Namespace
ns = pcdm
@rdf.object_property('members', pcdm.hasMember)
@rdf.object_property('member_of', pcdm.memberOf)
@rdf.object_property('files', pcdm.hasFile)
@rdf.object_property('related', pcdm.hasRelatedObject)
@rdf.object_property('related_of', pcdm.relatedObjectOf)
@rdf.data_property('title', dcterms.title)
@rdf.rdf_class(pcdm.Object)
# recursively create an object and components and that don't yet exist
@rdf.object_property('file_of', pcdm.fileOf)
@rdf.data_property('mimetype', ebucore.hasMimeType)
@rdf.data_property('filename', ebucore.filename)
@rdf.data_property('size', premis.hasSize)
@rdf.data_property('width', ebucore.width)
@rdf.data_property('height', ebucore.height)
@rdf.object_property('dcmitype', dcterms.type)
@rdf.data_property('title', dcterms.title)
@rdf.rdf_class(pcdm.File)
@rdf.rdf_class(pcdmuse.PreservationMasterFile)
@rdf.rdf_class(pcdmuse.IntermediateFile)
@rdf.rdf_class(pcdmuse.ServiceFile)
@rdf.rdf_class(pcdmuse.ExtractedText)
@rdf.rdf_class(pcdm.Collection)
@rdf.data_property('number', fabio.hasSequenceIdentifier)
@rdf.rdf_class(fabio.Page)
class Page(Object):
"""One page of an item-level resource"""
pass
FILE_CLASS_FOR = {
'.tif': PreservationMasterFile,
'.jpg': IntermediateFile,
'.txt': ExtractedText,
'.xml': ExtractedText,
}
| 31.342391 | 120 | 0.648344 | from plastron import ldp, ore, rdf
from plastron.namespaces import dcterms, dcmitype, ebucore, fabio, pcdm, pcdmuse, premis
from plastron.files import LocalFileSource, RepositoryFileSource
from PIL import Image
# alias the rdflib Namespace
ns = pcdm
@rdf.object_property('members', pcdm.hasMember)
@rdf.object_property('member_of', pcdm.memberOf)
@rdf.object_property('files', pcdm.hasFile)
@rdf.object_property('related', pcdm.hasRelatedObject)
@rdf.object_property('related_of', pcdm.relatedObjectOf)
@rdf.data_property('title', dcterms.title)
@rdf.rdf_class(pcdm.Object)
class Object(ore.Aggregation):
    """A PCDM Object: an ORE aggregation with members, files and related objects."""
    def add_member(self, obj):
        """Link obj as a member of this object and set the inverse memberOf link."""
        self.members.append(obj)
        obj.member_of.append(self)
    def add_file(self, obj):
        """Link obj as a file of this object and set the inverse fileOf link."""
        self.files.append(obj)
        obj.file_of.append(self)
    def add_related(self, obj):
        """Link obj as a related object and set the inverse link."""
        self.related.append(obj)
        obj.related_of.append(self)
    def gather_files(self, repository):
        """Yield a loaded File for every file of every proxied member of this object."""
        for proxy in self.load_proxies(repository):
            page = Object.from_repository(repository, proxy.proxy_for[0])
            for file_uri in page.files:
                file = File.from_repository(repository, file_uri)
                graph = repository.get_graph(file_uri)
                file.read(graph)
                yield file
    # recursively create an object and components and that don't yet exist
    def create(self, repository, container_path=None, slug=None, headers=None, recursive=True, **kwargs):
        """Create this object in the repository; with recursive=True, also
        create its members, files and related objects."""
        super().create(
            repository=repository,
            container_path=container_path,
            slug=slug,
            headers=headers,
            recursive=recursive,
            **kwargs
        )
        if recursive:
            repository.create_members(self)
            repository.create_files(self)
            repository.create_related(self)
    def get_new_member(self, rootname, number):
        """Return a new Page member titled 'Page <number>' (rootname is unused here)."""
        return Page(title=f'Page {number}', number=number)
@rdf.object_property('file_of', pcdm.fileOf)
@rdf.data_property('mimetype', ebucore.hasMimeType)
@rdf.data_property('filename', ebucore.filename)
@rdf.data_property('size', premis.hasSize)
@rdf.data_property('width', ebucore.width)
@rdf.data_property('height', ebucore.height)
@rdf.object_property('dcmitype', dcterms.type)
@rdf.data_property('title', dcterms.title)
@rdf.rdf_class(pcdm.File)
class File(ldp.NonRdfSource):
    """A PCDM File: a binary (non-RDF) resource with technical metadata."""
    @classmethod
    def from_repository(cls, repo, uri, include_server_managed=True):
        """Build a File for an existing repository resource, backed by a
        RepositoryFileSource for its binary content."""
        obj = super().from_repository(repo, uri, include_server_managed)
        obj.source = RepositoryFileSource(repo, uri)
        return obj
    @classmethod
    def from_source(cls, source=None, **kwargs):
        """Build a File from a local/stream source; mimetype comes from the source."""
        obj = super().from_source(source=source, **kwargs)
        obj.mimetype = source.mimetype()
        return obj
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # for image files
        # TODO: move these to a subclass or mix-in?
        self.width = None
        self.height = None
    # upload a binary resource
    def create(self, repository, container_path=None, slug=None, headers=None, **kwargs):
        """Upload the binary to the repository.

        Returns True when the binary was uploaded (or loading is disabled),
        False when it already exists.  Skipped entirely when the repository
        has load_binaries turned off.
        """
        if not repository.load_binaries:
            self.logger.info(f'Skipping loading for binary {self.source.filename}')
            return True
        elif self.created:
            return False
        elif self.exists_in_repo(repository):
            self.created = True
            return False
        self.logger.info(f'Loading {self.source.filename}')
        if headers is None:
            headers = {}
        # Content headers describe the binary; Digest lets the server verify it.
        headers.update({
            'Content-Type': self.source.mimetype(),
            'Digest': self.source.digest(),
            'Content-Disposition': f'attachment; filename="{self.source.filename}"'
        })
        with self.source as stream:
            super().create(repository, container_path=container_path, slug=slug, headers=headers, data=stream, **kwargs)
        self.created = True
        return True
    def update(self, repository, recursive=True):
        """Update the resource's metadata; for images, first try to fill in
        width/height from the binary via PIL (best effort — unreadable
        images are logged and skipped)."""
        if not repository.load_binaries:
            self.logger.info(f'Skipping update for binary {self.source.filename}')
            return True
        # if this is an image file, see if we can get dimensions
        if self.source.mimetype().startswith('image/'):
            if self.width is None or self.height is None:
                # use PIL
                try:
                    with self.source as stream:
                        with Image.open(stream) as img:
                            self.width = img.width
                            self.height = img.height
                except IOError as e:
                    # NOTE(review): logger.warn is deprecated in favour of logger.warning
                    self.logger.warn(f'Cannot read image file: {e}')
        return super().update(repository, recursive=recursive)
@rdf.rdf_class(pcdmuse.PreservationMasterFile)
class PreservationMasterFile(File):
    """File typed as pcdmuse:PreservationMasterFile (archival master)."""
    pass
@rdf.rdf_class(pcdmuse.IntermediateFile)
class IntermediateFile(File):
    """File typed as pcdmuse:IntermediateFile (derivative working copy)."""
    pass
@rdf.rdf_class(pcdmuse.ServiceFile)
class ServiceFile(File):
    """File typed as pcdmuse:ServiceFile (access/delivery copy)."""
    pass
@rdf.rdf_class(pcdmuse.ExtractedText)
class ExtractedText(File):
    """File typed as pcdmuse:ExtractedText (OCR or other extracted text)."""
    pass
@rdf.rdf_class(pcdm.Collection)
class Collection(Object):
    """Object typed as pcdm:Collection (a collection-level aggregation)."""
    pass
@rdf.data_property('number', fabio.hasSequenceIdentifier)
@rdf.rdf_class(fabio.Page)
class Page(Object):
    """One page of an item-level resource"""
    pass
# Maps a filename extension to the specialized File subclass used for it;
# get_file_object() falls back to the generic File for any other extension.
FILE_CLASS_FOR = {
    '.tif': PreservationMasterFile,
    '.jpg': IntermediateFile,
    '.txt': ExtractedText,
    '.xml': ExtractedText,
}
def get_file_object(path, source=None):
    """Create a File object of the class matching *path*'s extension.

    Args:
        path: filename or path whose final extension (per FILE_CLASS_FOR)
            selects the File subclass; unknown or missing extensions get
            the generic File class.
        source: optional binary source; defaults to a LocalFileSource
            reading from *path*.

    Returns:
        An instance of the selected File subclass built from *source*.
    """
    # str.rfind returns -1 when there is no '.', which previously made
    # "extension" the *last character* of the path; treat that case as
    # "no extension" explicitly.
    dot = path.rfind('.')
    extension = path[dot:] if dot != -1 else ''
    cls = FILE_CLASS_FOR.get(extension, File)
    if source is None:
        source = LocalFileSource(path)
    return cls.from_source(source)
| 3,577 | 295 | 337 |
b386075e1018254114f56a24107ff27fd3c4acf4 | 761 | py | Python | intents/config.py | markdeutel/IntentFuzzer | e4d80251d57bd4d10dc7d214818fd54e0e7d3574 | [
"BSD-3-Clause"
] | 5 | 2018-12-02T14:04:22.000Z | 2021-04-08T10:46:58.000Z | intents/config.py | markdeutel/IntentFuzzer | e4d80251d57bd4d10dc7d214818fd54e0e7d3574 | [
"BSD-3-Clause"
] | null | null | null | intents/config.py | markdeutel/IntentFuzzer | e4d80251d57bd4d10dc7d214818fd54e0e7d3574 | [
"BSD-3-Clause"
] | 2 | 2018-12-30T09:35:57.000Z | 2019-07-15T15:17:11.000Z | from os import path
import json
| 47.5625 | 115 | 0.659658 | from os import path
import json
class Config:
    """Fuzzer settings loaded from the ``config.json`` next to this module."""

    def __init__(self):
        module_dir = path.abspath(path.dirname(__file__))
        config_file = module_dir + "/config.json"
        with open(config_file, 'r') as handle:
            settings = json.load(handle)
        # path-valued settings may contain "~", so expand them
        self.dataStorePath = path.expanduser(settings.get("dataStore", module_dir))
        self.outputPath = path.expanduser(settings.get("outputFolder", module_dir))
        self.androidSDK = path.expanduser(settings.get("androidSDK", "~/Android/SDK"))
        self.intentTimeout = settings.get("intentTimeout", 2)
        self.numIter = settings.get("numberIterations", 1)
        self.packageNames = settings.get("packageNames", [])
| 683 | -8 | 54 |
923893bcf3fdc9e6ae96e21578684f8468825b24 | 5,591 | py | Python | gpustats/sampler.py | dukestats/gpustats | 570fdeb4d1da204b1e56717ba29db07a08be8629 | [
"BSD-3-Clause"
] | 23 | 2015-02-01T23:46:52.000Z | 2021-01-13T18:07:47.000Z | gpustats/sampler.py | dukestats/gpustats | 570fdeb4d1da204b1e56717ba29db07a08be8629 | [
"BSD-3-Clause"
] | null | null | null | gpustats/sampler.py | dukestats/gpustats | 570fdeb4d1da204b1e56717ba29db07a08be8629 | [
"BSD-3-Clause"
] | 6 | 2015-06-18T10:23:59.000Z | 2020-05-05T22:32:40.000Z | import numpy as np
import gpustats.kernels as kernels
import gpustats.codegen as codegen
import gpustats.util as util
import pycuda.driver as drv
from pycuda.gpuarray import GPUArray, to_gpu
from pycuda.gpuarray import empty as gpu_empty
from pycuda.curandom import rand as curand
# reload(kernels)
# reload(codegen)
cu_module = codegen.get_full_cuda_module()
def sample_discrete(densities, logged=False,
                    return_gpuarray=False):
    """
    Takes a categorical sample from the unnormalized univariate
    densities defined in the rows of 'densities'
    Parameters
    ---------
    densities : ndarray or gpuarray (n, k)
    logged: boolean indicating whether densities is on the
    log scale ...
    Returns
    -------
    indices : ndarray or gpuarray (if return_gpuarray=True)
    of length n and dtype = int32
    """
    from gpustats.util import info
    n, k = densities.shape
    # prep data
    # Fortran-ordered GPU data is transposed into the row-major layout the
    # kernel presumably expects -- confirm against the kernel source
    if isinstance(densities, GPUArray):
        if densities.flags.f_contiguous:
            gpu_densities = util.transpose(densities)
        else:
            gpu_densities = densities
    else:
        densities = util.prep_ndarray(densities)
        gpu_densities = to_gpu(densities)
    # get gpu function
    cu_func = cu_module.get_function('sample_discrete')
    # setup GPU data
    # one host-generated float32 uniform draw per row drives that row's sample
    gpu_random = to_gpu(np.asarray(np.random.rand(n), dtype=np.float32))
    gpu_dest = gpu_empty(n, dtype=np.int32)
    dims = np.array([n,k,logged],dtype=np.int32)
    # use narrower x blocks on devices with a small threads-per-block limit
    if info.max_block_threads<1024:
        x_block_dim = 16
    else:
        x_block_dim = 32
    y_block_dim = 16
    # setup GPU call
    block_design = (x_block_dim, y_block_dim, 1)
    grid_design = (int(n/y_block_dim) + 1, 1)
    # 4 bytes per float: (x+1)*y workspace plus 2*y scratch -- this sizing
    # must match the kernel's shared-memory layout (see kernels module)
    shared_mem = 4 * ( (x_block_dim+1)*y_block_dim +
                       2 * y_block_dim )
    cu_func(gpu_densities, gpu_random, gpu_dest,
            dims[0], dims[1], dims[2],
            block=block_design, grid=grid_design, shared=shared_mem)
    # random draws are no longer needed once the kernel has run
    gpu_random.gpudata.free()
    if return_gpuarray:
        return gpu_dest
    else:
        res = gpu_dest.get()
        gpu_dest.gpudata.free()
        return res
## deprecated -- superseded by sample_discrete() above
def sample_discrete_old(in_densities, logged=False, pad=False,
                        return_gpuarray=False):
    """
    Takes a categorical sample from the unnormalized univariate
    densities defined in the rows of 'densities'
    Parameters
    ---------
    densities : ndarray or gpuarray (n, k)
    logged: boolean indicating whether densities is on the
    log scale ...
    Returns
    -------
    indices : ndarray or gpuarray (if return_gpuarray=True)
    of length n and dtype = int32
    """
    # optionally pad the data out to a multiple of 16 (see
    # util.pad_data_mult16); the fill value differs by scale -- presumably
    # chosen so the kernel ignores the padding; confirm
    if pad:
        if logged:
            densities = util.pad_data_mult16(in_densities, fill=1)
        else:
            densities = util.pad_data_mult16(in_densities, fill=0)
    else:
        densities = in_densities
    n, k = densities.shape
    # separate kernels handle log-scale and linear-scale densities
    if logged:
        cu_func = cu_module.get_function('sample_discrete_logged_old')
    else:
        cu_func = cu_module.get_function('sample_discrete_old')
    if isinstance(densities, GPUArray):
        if densities.flags.f_contiguous:
            gpu_densities = util.transpose(densities)
        else:
            gpu_densities = densities
    else:
        densities = util.prep_ndarray(densities)
        gpu_densities = to_gpu(densities)
    # setup GPU data
    #gpu_random = curand(n)
    gpu_random = to_gpu(np.asarray(np.random.rand(n), dtype=np.float32))
    #gpu_dest = to_gpu(np.zeros(n, dtype=np.float32))
    gpu_dest = gpu_empty(n, dtype=np.float32)
    # stride is bumped to an odd value -- likely to avoid shared-memory bank
    # conflicts; TODO confirm against the kernel
    stride = gpu_densities.shape[1]
    if stride % 2 == 0:
        stride += 1
    dims = np.array([n,k, gpu_densities.shape[1], stride],dtype=np.int32)
    # optimize design ...
    grid_design, block_design = _tune_sfm(n, stride, cu_func.num_regs)
    shared_mem = 4 * (block_design[0] * stride +
                      1 * block_design[0])
    cu_func(gpu_densities, gpu_random, gpu_dest,
            dims[0], dims[1], dims[2], dims[3],
            block=block_design, grid=grid_design, shared=shared_mem)
    gpu_random.gpudata.free()
    if return_gpuarray:
        return gpu_dest
    else:
        res = gpu_dest.get()
        gpu_dest.gpudata.free()
        return res
def _tune_sfm(n, stride, func_regs):
    """
    Outputs the 'optimal' block and grid configuration
    for the sample discrete kernel.

    Returns a ((nblocks, 1), (xdim, ydim, 1)) grid/block pair sized so the
    shared-memory, register, and thread-count budgets are respected.
    """
    from gpustats.util import info
    #info = DeviceInfo()
    comp_cap = info.compute_cap
    # leave headroom below the hardware limits
    max_smem = info.shared_mem * 0.8
    max_threads = int(info.max_block_threads * 0.5)
    max_regs = 0.9 * info.max_registers
    # We want smallest dim possible in x dimension while
    # still reading mem correctly
    if comp_cap[0] == 1:
        xdim = 16
    else:
        xdim = 32

    # BUGFIX: this helper was missing, so the while-loop below raised
    # NameError; a (xdim, ydim) configuration is usable when shared memory
    # (4 bytes per float), registers, and threads-per-block all fit.
    def sfm_config_ok(xdim, ydim, stride, func_regs, max_regs, max_smem, max_threads):
        ok = 4*(xdim*stride + 1*xdim) < max_smem and func_regs*ydim*xdim < max_regs
        return ok and xdim*ydim <= max_threads

    # grow ydim until the configuration no longer fits, then back off by one
    ydim = 2
    while sfm_config_ok(xdim, ydim, stride, func_regs, max_regs, max_smem, max_threads):
        ydim += 1
    ydim -= 1
    nblocks = int(n/xdim) + 1
    return (nblocks,1), (xdim,ydim,1)
if __name__ == '__main__':
    # smoke test: draw categorical samples from n identical 5-way
    # distributions (given on the log scale, shifted by -200) and compare
    # the empirical mean label against the analytic mean
    n = 100
    k = 5
    dens = np.log(np.abs(np.random.randn(k))) - 200
    densities = [dens.copy() for _ in range(n)]
    dens = np.exp(dens + 200)
    densities = np.asarray(densities)
    labels = sample_discrete(densities, logged=True)
    mu = np.dot(dens / dens.sum(), np.arange(k))
    # NOTE: Python 2 print statement -- this module targets Python 2
    print mu, labels.mean()
| 27.541872 | 88 | 0.640315 | import numpy as np
import gpustats.kernels as kernels
import gpustats.codegen as codegen
import gpustats.util as util
import pycuda.driver as drv
from pycuda.gpuarray import GPUArray, to_gpu
from pycuda.gpuarray import empty as gpu_empty
from pycuda.curandom import rand as curand
# reload(kernels)
# reload(codegen)
cu_module = codegen.get_full_cuda_module()
def sample_discrete(densities, logged=False,
return_gpuarray=False):
"""
Takes a categorical sample from the unnormalized univariate
densities defined in the rows of 'densities'
Parameters
---------
densities : ndarray or gpuarray (n, k)
logged: boolean indicating whether densities is on the
log scale ...
Returns
-------
indices : ndarray or gpuarray (if return_gpuarray=True)
of length n and dtype = int32
"""
from gpustats.util import info
n, k = densities.shape
# prep data
if isinstance(densities, GPUArray):
if densities.flags.f_contiguous:
gpu_densities = util.transpose(densities)
else:
gpu_densities = densities
else:
densities = util.prep_ndarray(densities)
gpu_densities = to_gpu(densities)
# get gpu function
cu_func = cu_module.get_function('sample_discrete')
# setup GPU data
gpu_random = to_gpu(np.asarray(np.random.rand(n), dtype=np.float32))
gpu_dest = gpu_empty(n, dtype=np.int32)
dims = np.array([n,k,logged],dtype=np.int32)
if info.max_block_threads<1024:
x_block_dim = 16
else:
x_block_dim = 32
y_block_dim = 16
# setup GPU call
block_design = (x_block_dim, y_block_dim, 1)
grid_design = (int(n/y_block_dim) + 1, 1)
shared_mem = 4 * ( (x_block_dim+1)*y_block_dim +
2 * y_block_dim )
cu_func(gpu_densities, gpu_random, gpu_dest,
dims[0], dims[1], dims[2],
block=block_design, grid=grid_design, shared=shared_mem)
gpu_random.gpudata.free()
if return_gpuarray:
return gpu_dest
else:
res = gpu_dest.get()
gpu_dest.gpudata.free()
return res
## depreciated
def sample_discrete_old(in_densities, logged=False, pad=False,
return_gpuarray=False):
"""
Takes a categorical sample from the unnormalized univariate
densities defined in the rows of 'densities'
Parameters
---------
densities : ndarray or gpuarray (n, k)
logged: boolean indicating whether densities is on the
log scale ...
Returns
-------
indices : ndarray or gpuarray (if return_gpuarray=True)
of length n and dtype = int32
"""
if pad:
if logged:
densities = util.pad_data_mult16(in_densities, fill=1)
else:
densities = util.pad_data_mult16(in_densities, fill=0)
else:
densities = in_densities
n, k = densities.shape
if logged:
cu_func = cu_module.get_function('sample_discrete_logged_old')
else:
cu_func = cu_module.get_function('sample_discrete_old')
if isinstance(densities, GPUArray):
if densities.flags.f_contiguous:
gpu_densities = util.transpose(densities)
else:
gpu_densities = densities
else:
densities = util.prep_ndarray(densities)
gpu_densities = to_gpu(densities)
# setup GPU data
#gpu_random = curand(n)
gpu_random = to_gpu(np.asarray(np.random.rand(n), dtype=np.float32))
#gpu_dest = to_gpu(np.zeros(n, dtype=np.float32))
gpu_dest = gpu_empty(n, dtype=np.float32)
stride = gpu_densities.shape[1]
if stride % 2 == 0:
stride += 1
dims = np.array([n,k, gpu_densities.shape[1], stride],dtype=np.int32)
# optimize design ...
grid_design, block_design = _tune_sfm(n, stride, cu_func.num_regs)
shared_mem = 4 * (block_design[0] * stride +
1 * block_design[0])
cu_func(gpu_densities, gpu_random, gpu_dest,
dims[0], dims[1], dims[2], dims[3],
block=block_design, grid=grid_design, shared=shared_mem)
gpu_random.gpudata.free()
if return_gpuarray:
return gpu_dest
else:
res = gpu_dest.get()
gpu_dest.gpudata.free()
return res
def _tune_sfm(n, stride, func_regs):
"""
Outputs the 'opimal' block and grid configuration
for the sample discrete kernel.
"""
from gpustats.util import info
#info = DeviceInfo()
comp_cap = info.compute_cap
max_smem = info.shared_mem * 0.8
max_threads = int(info.max_block_threads * 0.5)
max_regs = 0.9 * info.max_registers
# We want smallest dim possible in x dimsension while
# still reading mem correctly
if comp_cap[0] == 1:
xdim = 16
else:
xdim = 32
def sfm_config_ok(xdim, ydim, stride, func_regs, max_regs, max_smem, max_threads):
ok = 4*(xdim*stride + 1*xdim) < max_smem and func_regs*ydim*xdim < max_regs
return ok and xdim*ydim <= max_threads
ydim = 2
while sfm_config_ok(xdim, ydim, stride, func_regs, max_regs, max_smem, max_threads):
ydim += 1
ydim -= 1
nblocks = int(n/xdim) + 1
return (nblocks,1), (xdim,ydim,1)
if __name__ == '__main__':
n = 100
k = 5
dens = np.log(np.abs(np.random.randn(k))) - 200
densities = [dens.copy() for _ in range(n)]
dens = np.exp(dens + 200)
densities = np.asarray(densities)
labels = sample_discrete(densities, logged=True)
mu = np.dot(dens / dens.sum(), np.arange(k))
print mu, labels.mean()
| 192 | 0 | 27 |
93ddc216aa03e77588852f7a7e577c8e48d8a891 | 5,544 | py | Python | wandb/sdk/data_types/helper_types/classes.py | soumik12345/client | 31e4c2b143e6c219ea005fe4477e294f383f6888 | [
"MIT"
] | null | null | null | wandb/sdk/data_types/helper_types/classes.py | soumik12345/client | 31e4c2b143e6c219ea005fe4477e294f383f6888 | [
"MIT"
] | null | null | null | wandb/sdk/data_types/helper_types/classes.py | soumik12345/client | 31e4c2b143e6c219ea005fe4477e294f383f6888 | [
"MIT"
] | null | null | null | import os
from typing import Any, Dict, Optional, Sequence, Type, TYPE_CHECKING, Union
from .. import _dtypes
from ..base_types.media import Media
if TYPE_CHECKING: # pragma: no cover
from wandb.apis.public import Artifact as PublicArtifact
from ...wandb_artifacts import Artifact as LocalArtifact
from ...wandb_run import Run as LocalRun
_dtypes.TypeRegistry.add(_ClassesIdType)
| 34.222222 | 119 | 0.592352 | import os
from typing import Any, Dict, Optional, Sequence, Type, TYPE_CHECKING, Union
from .. import _dtypes
from ..base_types.media import Media
if TYPE_CHECKING: # pragma: no cover
from wandb.apis.public import Artifact as PublicArtifact
from ...wandb_artifacts import Artifact as LocalArtifact
from ...wandb_run import Run as LocalRun
class Classes(Media):
    """Media type holding class-label metadata for artifact visualization."""

    _log_type = "classes"
    _class_set: Sequence[dict]

    def __init__(self, class_set: Sequence[dict]) -> None:
        """Validate and store the class set.

        Args:
            class_set (list): dicts of the form {"id": int|str, "name": str}
        """
        super().__init__()
        assert all("id" in entry and "name" in entry for entry in class_set)
        self._class_set = class_set

    @classmethod
    def from_json(
        cls: Type["Classes"],
        json_obj: dict,
        source_artifact: Optional["PublicArtifact"],
    ) -> "Classes":
        # the serialized form keeps the class list under "class_set"
        return cls(json_obj.get("class_set"))  # type: ignore

    def to_json(
        self, run_or_artifact: Optional[Union["LocalRun", "LocalArtifact"]]
    ) -> dict:
        # _ClassesIdType must be able to serialize without an artifact in
        # play, so only delegate to Media when one is actually supplied.
        serialized = {} if run_or_artifact is None else super().to_json(run_or_artifact)
        serialized["_type"] = Classes._log_type
        serialized["class_set"] = self._class_set
        return serialized

    def get_type(self) -> "_ClassesIdType":
        return _ClassesIdType(self)

    def __eq__(self, other: object) -> bool:
        return self._class_set == other._class_set if isinstance(other, Classes) else False

    def __ne__(self, other: object) -> bool:
        return not self.__eq__(other)
class _ClassesIdType(_dtypes.Type):
    """Type for a value drawn from a Classes id set; valid values are the
    union of ConstTypes built from the class ids."""
    name = "classesId"
    legacy_names = ["wandb.Classes_id"]
    types = [Classes]
    def __init__(
        self,
        classes_obj: Optional[Classes] = None,
        valid_ids: Optional["_dtypes.UnionType"] = None,
    ):
        # normalize valid_ids to a UnionType of ConstTypes
        if valid_ids is None:
            valid_ids = _dtypes.UnionType()
        elif isinstance(valid_ids, list):
            valid_ids = _dtypes.UnionType(
                [_dtypes.ConstType(item) for item in valid_ids]
            )
        elif isinstance(valid_ids, _dtypes.UnionType):
            valid_ids = valid_ids
        else:
            raise TypeError("valid_ids must be None, list, or UnionType")
        # with no Classes given, synthesize one from the ids; with a Classes
        # given, derive valid_ids from its class set (overriding the argument)
        if classes_obj is None:
            classes_obj = Classes(
                [
                    {"id": _id.params["val"], "name": str(_id.params["val"])}
                    for _id in valid_ids.params["allowed_types"]
                ]
            )
        elif not isinstance(classes_obj, Classes):
            # NOTE(review): message says "valid_ids" but this branch
            # validates classes_obj -- likely a copy/paste slip
            raise TypeError("valid_ids must be None, or instance of Classes")
        else:
            valid_ids = _dtypes.UnionType(
                [
                    _dtypes.ConstType(class_obj["id"])
                    for class_obj in classes_obj._class_set
                ]
            )
        self.wb_classes_obj_ref = classes_obj
        self.params.update({"valid_ids": valid_ids})
    def assign(self, py_obj: Optional[Any] = None) -> "_dtypes.Type":
        """Assign a concrete python value by wrapping it as a ConstType."""
        return self.assign_type(_dtypes.ConstType(py_obj))
    def assign_type(self, wb_type: "_dtypes.Type") -> "_dtypes.Type":
        """Accept *wb_type* (returning self) iff it fits the valid-ids union."""
        valid_ids = self.params["valid_ids"].assign_type(wb_type)
        if not isinstance(valid_ids, _dtypes.InvalidType):
            return self
        return _dtypes.InvalidType()
    @classmethod
    def from_obj(cls, py_obj: Optional[Any] = None) -> "_dtypes.Type":
        return cls(py_obj)
    def to_json(self, artifact: Optional["LocalArtifact"] = None) -> Dict[str, Any]:
        """Serialize; with an artifact, store the Classes as a media file
        reference, otherwise inline its JSON form."""
        cl_dict = super().to_json(artifact)
        # TODO (tss): Refactor this block with the similar one in wandb.Image.
        # This is a bit of a smell that the classes object does not follow
        # the same file-pattern as other media types.
        if artifact is not None:
            class_name = os.path.join("media", "cls")
            classes_entry = artifact.add(self.wb_classes_obj_ref, class_name)
            cl_dict["params"]["classes_obj"] = {
                "type": "classes-file",
                "path": classes_entry.path,
                "digest": classes_entry.digest, # is this needed really?
            }
        else:
            cl_dict["params"]["classes_obj"] = self.wb_classes_obj_ref.to_json(artifact)
        return cl_dict
    @classmethod
    def from_json(
        cls,
        json_dict: Dict[str, Any],
        artifact: Optional["PublicArtifact"] = None,
    ) -> "_dtypes.Type":
        """Deserialize; file-referenced class sets require *artifact* to
        resolve the path, inline ones are decoded directly."""
        classes_obj = None
        if (
            json_dict.get("params", {}).get("classes_obj", {}).get("type")
            == "classes-file"
        ):
            if artifact is not None:
                classes_obj = artifact.get(
                    json_dict.get("params", {}).get("classes_obj", {}).get("path")
                )
            else:
                raise RuntimeError("Expected artifact to be non-null.")
        else:
            classes_obj = Classes.from_json(
                json_dict["params"]["classes_obj"], artifact
            )
        return cls(classes_obj)
# register the custom type so the dtype system can resolve it by name
_dtypes.TypeRegistry.add(_ClassesIdType)
| 4,140 | 958 | 46 |
08c4b55684bc43747f4a9875f98c55d0ce244fb7 | 6,018 | py | Python | test/rest/clienttests.py | geoco84/comodit-client | 4cf47e60a6739ed8b88ce8b955ed57375c4d400d | [
"MIT"
] | 1 | 2015-01-20T17:24:34.000Z | 2015-01-20T17:24:34.000Z | test/rest/clienttests.py | geoco84/comodit-client | 4cf47e60a6739ed8b88ce8b955ed57375c4d400d | [
"MIT"
] | null | null | null | test/rest/clienttests.py | geoco84/comodit-client | 4cf47e60a6739ed8b88ce8b955ed57375c4d400d | [
"MIT"
] | 24 | 2016-09-07T15:28:00.000Z | 2021-12-08T16:03:16.000Z | import unittest, json
from comodit_client.rest.client import HttpClient
from comodit_client.rest.exceptions import ApiException
from test.mock.urllib_mocks import RequestWithMethodMock, RequestResult
# Create tests
# Delete tests
# Read tests
# Update tests
# Helpers
if __name__ == '__main__':
unittest.main()
| 29.5 | 113 | 0.623463 | import unittest, json
from comodit_client.rest.client import HttpClient
from comodit_client.rest.exceptions import ApiException
from test.mock.urllib_mocks import RequestWithMethodMock, RequestResult
class ClientTest(unittest.TestCase):
    """Unit tests for HttpClient's create/delete/read/update methods, with
    request construction and urlopen replaced by mocks."""
    def setUp(self):
        self._url = "url"
        self._params = ""
        self._api = "http://localhost/api"
        self._user = "user"
        self._pass = "pass"
        self._token = None
        self._headers = None
        self._urlopen_result = None
        self._client = HttpClient(self._api, self._user, self._pass, self._token)
        # Mock some Client methods
        self._client._new_request = self._new_request
        self._client._new_request_with_data = self._new_request_with_data
    def tearDown(self):
        pass
    # Create tests
    def test_create_success(self):
        # Mock _urlopen
        self._client._urlopen = self._urlopen_success
        data = {"test":"value"}
        self._urlopen_result = json.dumps(data)
        result = self._client.create(self._url, item = data)
        self.assertEqual(data, result, "Wrong result returned")
    def test_create_success_w_params(self):
        # Mock _urlopen
        self._client._urlopen = self._urlopen_success
        data = {"test":"value"}
        self._urlopen_result = json.dumps(data)
        # NOTE(review): "¶m1" looks like mojibake of "&param1" from an
        # HTML-entity round-trip -- confirm against the upstream source
        self._params = "?param2=value2¶m1=value1"
        result = self._client.create(self._url, item = data, parameters = {"param1":"value1", "param2":"value2"})
        self.assertEqual(data, result, "Wrong result returned")
    def test_create_wrong_url(self):
        # Mock _urlopen
        self._client._urlopen = self._urlopen_success
        data = {"test":"value"}
        self._urlopen_result = json.dumps(data)
        # a mismatched URL must make the client raise (any exception counts)
        try:
            self._client.create(self._url + "x", item = data)
        except:
            return
        self.assertFalse(True)
    def test_create_success_wo_decode(self):
        # Mock _urlopen
        self._client._urlopen = self._urlopen_success
        data = {"test":"value"}
        self._urlopen_result = json.dumps(data)
        # decode=False returns the raw stream, so decode it here
        result = self._client.create(self._url, item = data, parameters = {}, decode = False)
        self.assertEqual(data, json.load(result), "Wrong result returned")
    def test_create_failure_urlopen(self):
        # Mock _urlopen
        self._client._urlopen = self._urlopen_failure
        try:
            self._client.create(self._url, item = {})
        except ApiException:
            return
        self.assertFalse(True)
    # Delete tests
    def test_delete_success(self):
        # Mock _urlopen
        self._client._urlopen = self._urlopen_success
        self._client.delete(self._url)
    def test_delete_wrong_url(self):
        # Mock _urlopen
        self._client._urlopen = self._urlopen_success
        try:
            self._client.delete(self._url + "x")
        except:
            return
        self.assertFalse(True)
    def test_delete_failure_urlopen(self):
        # Mock _urlopen
        self._client._urlopen = self._urlopen_failure
        try:
            self._client.delete(self._url)
        except ApiException:
            return
        self.assertFalse(True)
    # Read tests
    def test_read_success(self):
        # Mock _urlopen
        self._client._urlopen = self._urlopen_success
        data = {"test":"value"}
        self._urlopen_result = json.dumps(data)
        result = self._client.read(self._url)
        self.assertEqual(data, result, "Wrong result returned")
    def test_read_wrong_url(self):
        # Mock _urlopen
        self._client._urlopen = self._urlopen_success
        try:
            self._client.read(self._url + "x")
        except:
            return
        self.assertFalse(True)
    def test_read_failure_urlopen(self):
        # Mock _urlopen
        self._client._urlopen = self._urlopen_failure
        try:
            self._client.read(self._url)
        except ApiException:
            return
        self.assertFalse(True)
    # Update tests
    def test_update_success(self):
        # Mock _urlopen
        self._client._urlopen = self._urlopen_success
        data = {"test":"value"}
        self._urlopen_result = json.dumps(data)
        result = self._client.update(self._url, item = data)
        self.assertEqual(data, result, "Wrong result returned")
    def test_update_wrong_url(self):
        # Mock _urlopen
        self._client._urlopen = self._urlopen_success
        data = {"test":"value"}
        self._urlopen_result = json.dumps(data)
        try:
            self._client.update(self._url + "x", item = data)
        except:
            return
        self.assertFalse(True)
    def test_update_success_wo_decode(self):
        # Mock _urlopen
        self._client._urlopen = self._urlopen_success
        data = {"test":"value"}
        self._urlopen_result = json.dumps(data)
        result = self._client.update(self._url, item = data, parameters = {}, decode = False)
        self.assertEqual(data, json.load(result), "Wrong result returned")
    def test_update_failure_urlopen(self):
        # Mock _urlopen
        self._client._urlopen = self._urlopen_failure
        try:
            self._client.update(self._url, item = {})
        except ApiException:
            return
        self.assertFalse(True)
    # Helpers
    def _new_request(self, url, m):
        req = RequestWithMethodMock(url, method = m, headers = self._headers)
        return req
    def _new_request_with_data(self, url, m, d):
        req = RequestWithMethodMock(url, method = m, headers = self._headers, data = d)
        return req
    def _urlopen_success(self, request):
        # only the exact expected URL (api + path + params) gets the canned
        # result; anything else simulates a request failure
        if request.get_url() == self._api + "/" + self._url + self._params:
            return RequestResult(self._urlopen_result)
        else:
            raise Exception()
    def _urlopen_failure(self, request):
        # simulate a 404 from the API
        raise ApiException("message", 404)
# run the test suite when executed directly
if __name__ == '__main__':
    unittest.main()
| 5,066 | 15 | 589 |
f4436d2fd8b94b5828d5d0b7ad6611c0470a1208 | 95 | py | Python | terrascript/chef/__init__.py | hugovk/python-terrascript | 08fe185904a70246822f5cfbdc9e64e9769ec494 | [
"BSD-2-Clause"
] | 507 | 2017-07-26T02:58:38.000Z | 2022-01-21T12:35:13.000Z | terrascript/chef/__init__.py | hugovk/python-terrascript | 08fe185904a70246822f5cfbdc9e64e9769ec494 | [
"BSD-2-Clause"
] | 135 | 2017-07-20T12:01:59.000Z | 2021-10-04T22:25:40.000Z | terrascript/chef/__init__.py | hugovk/python-terrascript | 08fe185904a70246822f5cfbdc9e64e9769ec494 | [
"BSD-2-Clause"
] | 81 | 2018-02-20T17:55:28.000Z | 2022-01-31T07:08:40.000Z | # terrascript/chef/__init__.py
import terrascript
| 13.571429 | 33 | 0.778947 | # terrascript/chef/__init__.py
import terrascript
class chef(terrascript.Provider):
pass
| 0 | 21 | 23 |
32200536ad140d2e40509a8f4ea62c17f2e2b660 | 2,018 | py | Python | src/boogie/configurations/django_conf/security.py | pencil-labs/django-boogie | 79b759617785ce33a24cb6013266a0810b24801c | [
"BSD-3-Clause"
] | null | null | null | src/boogie/configurations/django_conf/security.py | pencil-labs/django-boogie | 79b759617785ce33a24cb6013266a0810b24801c | [
"BSD-3-Clause"
] | null | null | null | src/boogie/configurations/django_conf/security.py | pencil-labs/django-boogie | 79b759617785ce33a24cb6013266a0810b24801c | [
"BSD-3-Clause"
] | 2 | 2021-09-16T22:11:35.000Z | 2021-09-25T12:28:27.000Z | from .environment import EnvironmentConf
from ..tools import secret_hash
class SecurityConf(EnvironmentConf):
"""
Security options.
"""
def get_secret_key(self):
"""
WARNING: keep the secret key used in production secret! We generate a
secret from a hash of the current settings during the .finalize() phase.
this is ok for local development, but may be insecure/inconvenient for
"""
value = self.env.str("DJANGO_SECRET_KEY", default=None)
if not value:
if self.ENVIRONMENT in ("local", "test"):
return self.ENVIRONMENT
else:
return None
return value
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
def get_auth_password_validators(self):
"""
Password validation
"""
prefix = "django.contrib.auth.password_validation"
validators = [
"UserAttributeSimilarityValidator",
"MinimumLengthValidator",
"CommonPasswordValidator",
"NumericPasswordValidator",
]
return [{"NAME": f"{prefix}.{x}"} for x in validators]
| 35.403509 | 82 | 0.631318 | from .environment import EnvironmentConf
from ..tools import secret_hash
class SecurityConf(EnvironmentConf):
    """
    Security options.
    """
    def finalize(self, settings):
        # if no secret key was configured, derive one from a hash of the
        # settings themselves (stable for a given configuration)
        settings = super().finalize(settings)
        if not settings.get("SECRET_KEY"):
            settings["SECRET_KEY"] = secret_hash(settings)
        return settings
    def get_secret_key(self):
        """
        WARNING: keep the secret key used in production secret! We generate a
        secret from a hash of the current settings during the .finalize() phase.
        this is ok for local development, but may be insecure/inconvenient for
        production deployments -- set DJANGO_SECRET_KEY explicitly there.
        """
        value = self.env.str("DJANGO_SECRET_KEY", default=None)
        if not value:
            if self.ENVIRONMENT in ("local", "test"):
                # deterministic placeholder key for dev/test environments
                return self.ENVIRONMENT
            else:
                # None -> finalize() fills in a secret_hash-derived key
                return None
        return value
    # https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
    def get_auth_password_validators(self):
        """
        Password validation
        """
        prefix = "django.contrib.auth.password_validation"
        validators = [
            "UserAttributeSimilarityValidator",
            "MinimumLengthValidator",
            "CommonPasswordValidator",
            "NumericPasswordValidator",
        ]
        return [{"NAME": f"{prefix}.{x}"} for x in validators]
    def get_allowed_hosts(self):
        """Hosts Django may serve, from the DJANGO_ALLOWED_HOSTS env var
        (parsed as a list)."""
        return self.env("DJANGO_ALLOWED_HOSTS", type=list, default=["localhost"])
    def get_password_hashers(self):
        """Password hashers: fast MD5 under testing, strong hashers otherwise.

        NOTE(review): this compares ENVIRONMENT to "testing" while
        get_secret_key() checks for "test" -- confirm which value is real.
        """
        if self.ENVIRONMENT == "testing":
            return ["django.contrib.auth.hashers.MD5PasswordHasher"]
        return [
            "django.contrib.auth.hashers.Argon2PasswordHasher",
            "django.contrib.auth.hashers.PBKDF2PasswordHasher",
            "django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher",
            "django.contrib.auth.hashers.BCryptSHA256PasswordHasher",
            "django.contrib.auth.hashers.BCryptPasswordHasher",
        ]
| 747 | 0 | 81 |
e2daa8f9b242d23b6e640ea90ef23c05a358f900 | 1,670 | py | Python | pybpodgui_plugin/settings.py | ckarageorgkaneen/pybpod-gui-plugin | ef9ca8a7094b9d225dde8e3db58d94ae084aaac5 | [
"MIT"
] | null | null | null | pybpodgui_plugin/settings.py | ckarageorgkaneen/pybpod-gui-plugin | ef9ca8a7094b9d225dde8e3db58d94ae084aaac5 | [
"MIT"
] | null | null | null | pybpodgui_plugin/settings.py | ckarageorgkaneen/pybpod-gui-plugin | ef9ca8a7094b9d225dde8e3db58d94ae084aaac5 | [
"MIT"
] | 1 | 2021-02-22T21:32:03.000Z | 2021-02-22T21:32:03.000Z | # # !/usr/bin/python3
# # -*- coding: utf-8 -*-
import logging, os
SETTINGS_PRIORITY = 100
# THESE SETTINGS ARE NEEDED FOR PYSETTINGS
APP_LOG_FILENAME = 'app.log'
APP_LOG_HANDLER_CONSOLE_LEVEL = logging.WARNING
APP_LOG_HANDLER_FILE_LEVEL = logging.WARNING
CONTROL_EVENTS_GRAPH_DEFAULT_SCALE = 100
# NOTE(review): BOARD_LOG_WINDOW_REFRESH_RATE is re-assigned to 2000 in the
# "BPODGUI PLUGIN SETTINGS" section below, so this value is shadowed.
BOARD_LOG_WINDOW_REFRESH_RATE = 1000
USE_MULTIPROCESSING = True
# pyforms GUI framework options
PYFORMS_MAINWINDOW_MARGIN = 0
PYFORMS_STYLESHEET = ''
PYFORMS_STYLESHEET_DARWIN = ''
PYFORMS_SILENT_PLUGINS_FINDER = True
#PYFORMS_STYLESHEET = os.path.join(os.path.dirname(__file__), 'resources', 'css', 'default.css')
PYFORMS_MATPLOTLIB_ENABLED = True
PYFORMS_WEB_ENABLED = True
PYFORMS_GL_ENABLED = True
PYFORMS_VISVIS_ENABLED = False
# plugins loaded by the generic editor
GENERIC_EDITOR_PLUGINS_PATH = None
GENERIC_EDITOR_PLUGINS_LIST = [
    'pybpodgui_plugin',
    'pybpodgui_plugin_timeline',
    'pybpodgui_plugin_trial_timeline',
    'pybpod_alyx_plugin',
    'pybpodgui_plugin_session_history',
    # 'pge_welcome_plugin',
]
#WELCOME_PLUGIN_URL = 'http://pybpod.readthedocs.io'
############ BPODGUI PLUGIN SETTINGS ############
#DEFAULT_PROJECT_PATH = '/home/ricardo/bitbucket/pybpod/pybpod-gui-plugin/projects/Untitled project 1'
BOARD_LOG_WINDOW_REFRESH_RATE = 2000
SESSIONLOG_PLUGIN_REFRESH_RATE = 1000
TIMELINE_PLUGIN_REFRESH_RATE = 1000
PYBOARD_COMMUNICATION_THREAD_REFRESH_TIME = 2 # timer for thread look for events (seconds)
PYBOARD_COMMUNICATION_PROCESS_REFRESH_TIME = 2 # timer for process look for events (seconds)
PYBOARD_COMMUNICATION_PROCESS_TIME_2_LIVE = 0 # wait before killing process (seconds)
GENERIC_EDITOR_TITLE = 'PyBpod'
PYBPOD_REPOSITORIES_TXT_LIST = 'repositories.yml' | 27.377049 | 102 | 0.782036 | # # !/usr/bin/python3
# # -*- coding: utf-8 -*-
import logging, os
SETTINGS_PRIORITY = 100
# THESE SETTINGS ARE NEEDED FOR PYSETTINGS
APP_LOG_FILENAME = 'app.log'
APP_LOG_HANDLER_CONSOLE_LEVEL = logging.WARNING
APP_LOG_HANDLER_FILE_LEVEL = logging.WARNING
CONTROL_EVENTS_GRAPH_DEFAULT_SCALE = 100
BOARD_LOG_WINDOW_REFRESH_RATE = 1000
USE_MULTIPROCESSING = True
PYFORMS_MAINWINDOW_MARGIN = 0
PYFORMS_STYLESHEET = ''
PYFORMS_STYLESHEET_DARWIN = ''
PYFORMS_SILENT_PLUGINS_FINDER = True
#PYFORMS_STYLESHEET = os.path.join(os.path.dirname(__file__), 'resources', 'css', 'default.css')
PYFORMS_MATPLOTLIB_ENABLED = True
PYFORMS_WEB_ENABLED = True
PYFORMS_GL_ENABLED = True
PYFORMS_VISVIS_ENABLED = False
GENERIC_EDITOR_PLUGINS_PATH = None
GENERIC_EDITOR_PLUGINS_LIST = [
'pybpodgui_plugin',
'pybpodgui_plugin_timeline',
'pybpodgui_plugin_trial_timeline',
'pybpod_alyx_plugin',
'pybpodgui_plugin_session_history',
# 'pge_welcome_plugin',
]
#WELCOME_PLUGIN_URL = 'http://pybpod.readthedocs.io'
############ BPODGUI PLUGIN SETTINGS ############
#DEFAULT_PROJECT_PATH = '/home/ricardo/bitbucket/pybpod/pybpod-gui-plugin/projects/Untitled project 1'
BOARD_LOG_WINDOW_REFRESH_RATE = 2000
SESSIONLOG_PLUGIN_REFRESH_RATE = 1000
TIMELINE_PLUGIN_REFRESH_RATE = 1000
PYBOARD_COMMUNICATION_THREAD_REFRESH_TIME = 2 # timer for thread look for events (seconds)
PYBOARD_COMMUNICATION_PROCESS_REFRESH_TIME = 2 # timer for process look for events (seconds)
PYBOARD_COMMUNICATION_PROCESS_TIME_2_LIVE = 0 # wait before killing process (seconds)
GENERIC_EDITOR_TITLE = 'PyBpod'
PYBPOD_REPOSITORIES_TXT_LIST = 'repositories.yml' | 0 | 0 | 0 |
51b66a26418cbf9c12132319bfeb2cb5131c8eef | 6,798 | py | Python | pdfmajor/interpreter/commands/state/PDFTextState/PDFFont/fonts.py | asosnovsky/pdfmajor | 7e24c64b5b4fdc84c12b2f78dcaab0e1aa07f4ad | [
"MIT"
] | 23 | 2019-01-13T23:32:24.000Z | 2021-07-08T04:29:15.000Z | pdfmajor/interpreter/commands/state/PDFTextState/PDFFont/fonts.py | asosnovsky/pdfmajor | 7e24c64b5b4fdc84c12b2f78dcaab0e1aa07f4ad | [
"MIT"
] | 3 | 2019-08-09T18:42:01.000Z | 2019-12-13T15:43:24.000Z | pdfmajor/interpreter/commands/state/PDFTextState/PDFFont/fonts.py | asosnovsky/pdfmajor | 7e24c64b5b4fdc84c12b2f78dcaab0e1aa07f4ad | [
"MIT"
] | 2 | 2020-01-09T11:18:20.000Z | 2020-03-24T06:02:30.000Z | from io import BytesIO
from pdfmajor.execptions import FontError, UnicodeNotDefined, CMapNotFound
from pdfmajor.parser.PSStackParser import literal_name
from pdfmajor.parser.PDFStream import int_value
from pdfmajor.parser.PDFStream import num_value
from pdfmajor.parser.PDFStream import list_value
from pdfmajor.parser.PDFStream import dict_value
from pdfmajor.parser.PDFStream import PDFStream
from pdfmajor.parser.PDFStream import resolve1
from pdfmajor.parser.cmapdb import CMap, CMapDB, CMapParser
from pdfmajor.parser.cmapdb import FileUnicodeMap
from pdfmajor.utils import settings, apply_matrix_norm
from .PDFFont import PDFFont, PDFSimpleFont
from .util import FontMetricsDB, get_widths, get_widths2
from .Type1FontHeaderParser import Type1FontHeaderParser
from .TrueTypeFont import TrueTypeFont
# PDFType1Font
# PDFTrueTypeFont
# PDFType3Font
# PDFCIDFont
| 38.625 | 110 | 0.612533 | from io import BytesIO
from pdfmajor.execptions import FontError, UnicodeNotDefined, CMapNotFound
from pdfmajor.parser.PSStackParser import literal_name
from pdfmajor.parser.PDFStream import int_value
from pdfmajor.parser.PDFStream import num_value
from pdfmajor.parser.PDFStream import list_value
from pdfmajor.parser.PDFStream import dict_value
from pdfmajor.parser.PDFStream import PDFStream
from pdfmajor.parser.PDFStream import resolve1
from pdfmajor.parser.cmapdb import CMap, CMapDB, CMapParser
from pdfmajor.parser.cmapdb import FileUnicodeMap
from pdfmajor.utils import settings, apply_matrix_norm
from .PDFFont import PDFFont, PDFSimpleFont
from .util import FontMetricsDB, get_widths, get_widths2
from .Type1FontHeaderParser import Type1FontHeaderParser
from .TrueTypeFont import TrueTypeFont
# PDFType1Font
class PDFType1Font(PDFSimpleFont):
    """A Type1 "simple" font.

    Metrics come from the builtin FontMetricsDB when the base font name is
    known to it; otherwise the descriptor and per-glyph Widths array are
    read from the font *spec* dictionary.
    """
    def __init__(self, spec):
        # BaseFont is a required entry; in non-strict mode fall back to a
        # placeholder name instead of failing.
        try:
            self.basefont = literal_name(spec['BaseFont'])
        except KeyError:
            if settings.STRICT:
                raise FontError('BaseFont is missing')
            self.basefont = 'unknown'
        # Prefer the builtin metrics table; KeyError means this base font
        # is not in it, so derive metrics from the spec itself.
        try:
            (descriptor, widths) = FontMetricsDB.get_metrics(self.basefont)
        except KeyError:
            descriptor = dict_value(spec.get('FontDescriptor', {}))
            firstchar = int_value(spec.get('FirstChar', 0))
            #lastchar = int_value(spec.get('LastChar', 255))
            widths = list_value(spec.get('Widths', [0]*256))
            # The Widths array is indexed from FirstChar; re-key by char code.
            widths = dict((i+firstchar, w) for (i, w) in enumerate(widths))
        PDFSimpleFont.__init__(self, descriptor, widths, spec)
        if 'Encoding' not in spec and 'FontFile' in descriptor:
            # try to recover the missing encoding info from the font file.
            # Only the first Length1 bytes are parsed -- per the PDF spec that
            # is the clear-text portion of the embedded Type1 program.
            self.fontfile = PDFStream.validated_stream(descriptor.get('FontFile'))
            length1 = int_value(self.fontfile['Length1'])
            data = self.fontfile.get_data()[:length1]
            parser = Type1FontHeaderParser(BytesIO(data))
            self.cid2unicode = parser.get_encoding()
        return
    def __repr__(self):
        return '<PDFType1Font: basefont=%r>' % self.basefont
# PDFTrueTypeFont
class PDFTrueTypeFont(PDFType1Font):
    """A TrueType simple font.

    All metric/encoding handling is inherited unchanged from
    PDFType1Font; only the repr differs.
    """
    def __repr__(self):
        return '<PDFTrueTypeFont: basefont=%r>' % self.basefont
# PDFType3Font
class PDFType3Font(PDFSimpleFont):
    """A Type3 font.

    Unlike other simple fonts, glyph space is related to text space via
    the font's FontMatrix (kept in ``self.matrix``) rather than a fixed
    scaling.
    """
    def __init__(self, spec):
        firstchar = int_value(spec.get('FirstChar', 0))
        #lastchar = int_value(spec.get('LastChar', 0))
        widths = list_value(spec.get('Widths', [0]*256))
        # Widths is indexed from FirstChar; re-key it by character code.
        widths = dict((i+firstchar, w) for (i, w) in enumerate(widths))
        if 'FontDescriptor' in spec:
            descriptor = dict_value(spec['FontDescriptor'])
        else:
            # No descriptor supplied: synthesize a minimal one from FontBBox.
            descriptor = {'Ascent': 0, 'Descent': 0,
                          'FontBBox': spec['FontBBox']}
        PDFSimpleFont.__init__(self, descriptor, widths, spec)
        self.matrix = tuple(list_value(spec.get('FontMatrix')))
        # NOTE(review): self.bbox is presumably populated by the base-class
        # __init__ from the descriptor -- confirm in PDFFont/PDFSimpleFont.
        (_, self.descent, _, self.ascent) = self.bbox
        # Scale of a unit vector under FontMatrix, per axis.
        (self.hscale, self.vscale) = apply_matrix_norm(self.matrix, (1, 1))
        return
    def __repr__(self):
        return '<PDFType3Font>'
# PDFCIDFont
class PDFCIDFont(PDFFont):
    """A composite (CID-keyed) font.

    Character codes are mapped to CIDs through a CMap; CIDs may further be
    mapped to Unicode via a ToUnicode stream, an embedded TrueType cmap
    table, or a predefined unicode map looked up by CID coding.
    """
    # NOTE(review): the default for `strict` is captured once at class
    # definition time, so later changes to settings.STRICT do not affect it.
    def __init__(self, spec, strict=settings.STRICT):
        try:
            self.basefont = literal_name(spec['BaseFont'])
        except KeyError:
            if strict:
                raise FontError('BaseFont is missing')
            self.basefont = 'unknown'
        # CID coding is the "Registry-Ordering" pair from CIDSystemInfo
        # (e.g. "Adobe-Identity"); byte values are decoded as latin-1.
        self.cidsysteminfo = dict_value(spec.get('CIDSystemInfo', {}))
        self.cidcoding = '%s-%s' % (resolve1(self.cidsysteminfo.get('Registry', b'unknown')).decode("latin1"),
                                    resolve1(self.cidsysteminfo.get('Ordering', b'unknown')).decode("latin1"))
        try:
            name = literal_name(spec['Encoding'])
        except KeyError:
            if strict:
                raise FontError('Encoding is unspecified')
            name = 'unknown'
        # Resolve the code->CID CMap; fall back to an empty CMap when lenient.
        try:
            self.cmap = CMapDB.get_cmap(name)
        except CMapNotFound as e:
            if strict:
                raise FontError(e)
            self.cmap = CMap()
        try:
            descriptor = dict_value(spec['FontDescriptor'])
        except KeyError:
            if strict:
                raise FontError('FontDescriptor is missing')
            descriptor = {}
        # An embedded TrueType program (FontFile2) may later supply a
        # unicode map of its own.
        ttf = None
        if 'FontFile2' in descriptor:
            self.fontfile = PDFStream.validated_stream(descriptor.get('FontFile2'))
            ttf = TrueTypeFont(self.basefont,
                               BytesIO(self.fontfile.get_data()))
        # Unicode map resolution order: explicit ToUnicode stream, then the
        # embedded TrueType cmap (for Identity/UCS codings), then a
        # predefined map for the CID coding.
        self.unicode_map = None
        if 'ToUnicode' in spec:
            strm = PDFStream.validated_stream(spec['ToUnicode'])
            self.unicode_map = FileUnicodeMap()
            CMapParser(self.unicode_map, BytesIO(strm.get_data())).run()
        elif self.cidcoding in ('Adobe-Identity', 'Adobe-UCS'):
            if ttf:
                try:
                    self.unicode_map = ttf.create_unicode_map()
                except CMapNotFound:
                    pass
        else:
            try:
                self.unicode_map = CMapDB.get_unicode_map(self.cidcoding, self.cmap.is_vertical())
            except CMapNotFound as e:
                # NOTE(review): `e` is unused; a missing unicode map is
                # deliberately ignored (to_unichr will then raise).
                pass
        self.vertical = self.cmap.is_vertical()
        if self.vertical:
            # writing mode: vertical
            # W2 carries (width, (vx, vy)) pairs; split displacements and
            # widths into separate per-CID dicts. DW2 supplies the defaults.
            widths = get_widths2(list_value(spec.get('W2', [])))
            self.disps = dict((cid, (vx, vy)) for (cid, (_, (vx, vy))) in iter(widths.items()))
            (vy, w) = spec.get('DW2', [880, -1000])
            self.default_disp = (None, vy)
            widths = dict((cid, w) for (cid, (w, _)) in iter(widths.items()))
            default_width = w
        else:
            # writing mode: horizontal
            self.disps = {}
            self.default_disp = 0
            widths = get_widths(list_value(spec.get('W', [])))
            default_width = spec.get('DW', 1000)
        PDFFont.__init__(self, descriptor, widths, default_width=default_width)
        return
    def __repr__(self):
        return '<PDFCIDFont: basefont=%r, cidcoding=%r>' % (self.basefont, self.cidcoding)
    def is_vertical(self):
        """True when the CMap declares a vertical writing mode."""
        return self.vertical
    def is_multibyte(self):
        """CID fonts always use multi-byte character codes."""
        return True
    # NOTE(review): parameter name shadows the builtin `bytes`.
    def decode(self, bytes):
        """Decode a raw byte string into a sequence of CIDs via the CMap."""
        return self.cmap.decode(bytes)
    def char_disp(self, cid):
        "Returns an integer for horizontal fonts, a tuple for vertical fonts."
        return self.disps.get(cid, self.default_disp)
    def to_unichr(self, cid):
        """Map a CID to a unicode character; raise UnicodeNotDefined if no
        unicode map is available or the CID is missing from it."""
        try:
            if not self.unicode_map:
                raise KeyError(cid)
            return self.unicode_map.get_unichr(cid)
        except KeyError:
            raise UnicodeNotDefined(self.cidcoding, cid)
| 5,329 | 372 | 223 |
0d06b0adc2fd7a6757d80b73c268e069d1397b68 | 125 | py | Python | PYTHON/starwars fingers/mixersample.py | arpitarunkumaar/Hacktoberfest2021 | 0af40f90a6c0716caadbbfff44ece947b6146f60 | [
"MIT"
] | 125 | 2021-10-01T19:05:26.000Z | 2021-10-03T13:32:42.000Z | PYTHON/starwars fingers/mixersample.py | arpitarunkumaar/Hacktoberfest2021 | 0af40f90a6c0716caadbbfff44ece947b6146f60 | [
"MIT"
] | 201 | 2021-10-30T20:40:01.000Z | 2022-03-22T17:26:28.000Z | PYTHON/starwars fingers/mixersample.py | arpitarunkumaar/Hacktoberfest2021 | 0af40f90a6c0716caadbbfff44ece947b6146f60 | [
"MIT"
] | 294 | 2021-10-01T18:46:05.000Z | 2021-10-03T14:25:07.000Z | import mixer
import pygame
soun_obj=pygame.mixer.Sound("Star Wars Main Theme (Full).mp3")
soun_obj.play()
soun_obj.stop() | 25 | 63 | 0.76 | import mixer
# Minimal pygame.mixer sample: load a sound file, then start and stop playback.
import pygame

# Bug fix: pygame.mixer must be initialised before Sound() can be created;
# without this the original script raised "mixer not initialized" at runtime.
pygame.mixer.init()
# NOTE(review): Sound historically supports WAV/OGG; MP3 support depends on
# the pygame/SDL_mixer build -- confirm the asset format works here.
soun_obj = pygame.mixer.Sound("Star Wars Main Theme (Full).mp3")
soun_obj.play()
# stop() immediately after play() halts playback at once -- kept as in the
# original, which only demonstrates the API calls.
soun_obj.stop()
cf8a9ddef46ebe737e00e9f4684a2b26991ee7b0 | 3,111 | py | Python | mols2grid/utils.py | cbouy/mol2grid | 1f0dc632e2b2b471b924f27a441950fe5209823d | [
"Apache-2.0"
] | 105 | 2021-03-22T16:08:51.000Z | 2022-03-07T15:38:32.000Z | mols2grid/utils.py | cbouy/molgrid | 1f0dc632e2b2b471b924f27a441950fe5209823d | [
"Apache-2.0"
] | 19 | 2021-03-24T13:08:05.000Z | 2022-03-30T20:33:47.000Z | mols2grid/utils.py | cbouy/molgrid | 1f0dc632e2b2b471b924f27a441950fe5209823d | [
"Apache-2.0"
] | 13 | 2021-03-22T19:26:24.000Z | 2022-03-22T06:01:10.000Z | from functools import wraps
from importlib.util import find_spec
from jinja2 import Environment, FileSystemLoader
from pathlib import Path
from rdkit import Chem
import pandas as pd
env = Environment(loader=FileSystemLoader(Path(__file__).parent / 'templates'),
autoescape=False)
def tooltip_formatter(s, subset, fmt, style, transform):
    """Build the HTML tooltip text for one grid entry.

    Parameters
    ----------
    s : pandas.Series
        Row of the internal pandas DataFrame.
    subset : list
        Columns included in the tooltip.
    fmt : str
        Format string applied to each ``key``/``value`` pair.
    style : dict
        Optional per-column CSS callables; the result is wrapped in a
        styled ``<span>``.
    transform : dict
        Optional per-column callables applied to the value before styling.
    """
    pieces = []
    for key, value in s[subset].to_dict().items():
        if transform.get(key):
            value = transform[key](value)
        if style.get(key):
            value = f'<span style="{style[key](value)}">{value}</span>'
        pieces.append(fmt.format(key=key, value=value))
    return "<br>".join(pieces)
def mol_to_smiles(mol):
    """Return the SMILES string of an RDKit molecule, or None for falsy input."""
    if not mol:
        return None
    return Chem.MolToSmiles(mol)
def mol_to_record(mol, mol_col="mol"):
    """Turn an RDKit molecule into a plain dict.

    The record holds the SMILES, every molecule property (including
    private ones), and the molecule itself under *mol_col*. A falsy
    *mol* yields an empty dict.
    """
    if not mol:
        return {}
    record = {"SMILES": Chem.MolToSmiles(mol)}
    # Same overwrite order as the original dict literal: properties may
    # shadow "SMILES", and mol_col is written last.
    record.update(mol.GetPropsAsDict(includePrivate=True))
    record[mol_col] = mol
    return record
def sdf_to_dataframe(sdf_path, mol_col="mol"):
    """Load an SDF file into a DataFrame, one record per molecule."""
    records = [mol_to_record(mol, mol_col)
               for mol in Chem.SDMolSupplier(sdf_path)]
    return pd.DataFrame(records)
def remove_coordinates(mol):
    """Delete every conformer attached to *mol*.

    The molecule is mutated in place; the same object is returned so the
    call can be chained.
    """
    mol.RemoveAllConformers()
    return mol
def make_popup_callback(title, html, js="", style=""):
    """Render the JavaScript callback that opens a popup window.

    Parameters
    ----------
    title : str
        Popup title; template placeholders such as ``'${data["Name"]}'``
        are substituted on the JavaScript side.
    html : str
        HTML body of the popup window.
    js : str
        JavaScript run before the popup content is built, so variables
        declared here can be referenced from *html* via ``${my_variable}``.
    style : str
        CSS assigned to the popup window.
    """
    template = env.get_template('js/popup.js')
    return template.render(js=js, html=html, title=title, style=style)
| 34.566667 | 79 | 0.627772 | from functools import wraps
from importlib.util import find_spec
from jinja2 import Environment, FileSystemLoader
from pathlib import Path
from rdkit import Chem
import pandas as pd
env = Environment(loader=FileSystemLoader(Path(__file__).parent / 'templates'),
autoescape=False)
def requires(module):
    """Decorator factory guarding a function behind an optional dependency.

    The wrapped function runs normally when *module* is importable;
    otherwise calling it raises ModuleNotFoundError with a helpful
    message. Availability is checked at call time, not decoration time,
    so importing this file never fails on its own.
    """
    def decorator(func):
        @wraps(func)  # keep the wrapped function's name and docstring
        def wrapper(*args, **kwargs):
            # find_spec probes the import machinery without importing.
            if find_spec(module) is None:
                raise ModuleNotFoundError(
                    f"The module {module!r} is required to use {func.__name__!r} "
                    "but it is not installed!")
            return func(*args, **kwargs)
        return wrapper
    return decorator
def tooltip_formatter(s, subset, fmt, style, transform):
    """Generate tooltip HTML from a pandas Series.

    Parameters
    ----------
    s : pandas.Series
        Row in the internal pandas DataFrame.
    subset : list
        Subset of columns used for the tooltip.
    fmt : str
        Format string for each key-value pair.
    style : dict
        CSS styling applied to each item independently.
    transform : dict
        Functions applied to each value before rendering.
    """
    def render(key, value):
        # Transform first, then wrap the (possibly transformed) value in
        # a styled span, then apply the key/value format string.
        value = transform[key](value) if transform.get(key) else value
        if style.get(key):
            value = f'<span style="{style[key](value)}">{value}</span>'
        return fmt.format(key=key, value=value)

    return "<br>".join(render(k, v) for k, v in s[subset].to_dict().items())
def mol_to_smiles(mol):
    """SMILES string for an RDKit molecule; falsy input yields None."""
    return None if not mol else Chem.MolToSmiles(mol)
def mol_to_record(mol, mol_col="mol"):
    """Create a dict of data from an RDKit molecule.

    Contains the SMILES, all molecule properties (private included) and
    the molecule object itself keyed by *mol_col*; empty dict when *mol*
    is falsy.
    """
    if not mol:
        return {}
    return {"SMILES": Chem.MolToSmiles(mol),
            **mol.GetPropsAsDict(includePrivate=True),
            mol_col: mol}
def sdf_to_dataframe(sdf_path, mol_col="mol"):
    """Read molecules from an SDF file into a pandas DataFrame."""
    supplier = Chem.SDMolSupplier(sdf_path)
    return pd.DataFrame([mol_to_record(m, mol_col) for m in supplier])
def remove_coordinates(mol):
    """Strip all existing coordinates (conformers) from the molecule.

    Mutates *mol* in place and returns it for convenience.
    """
    mol.RemoveAllConformers()
    return mol
def make_popup_callback(title, html, js="", style=""):
    """Create the JavaScript callback displaying a popup window.

    Parameters
    ----------
    title : str
        Title of the popup. Use ``title='${data["Name"]}'`` to show the
        value of the "Name" column.
    html : str
        Content of the popup window.
    js : str
        JavaScript executed before building the popup content; variables
        created here are usable in *html* through ``${my_variable}``.
    style : str
        CSS style assigned to the popup window.
    """
    context = {"js": js, "html": html, "title": title, "style": style}
    return env.get_template('js/popup.js').render(**context)
| 361 | 0 | 23 |
5c2b633eae20fd8c195dffb3e55c0d408377fe88 | 41,361 | py | Python | pypy/objspace/std/bytesobject.py | akercheval/espy | f8317d2f01ba726ed4f03cab081176c32ae4cac4 | [
"Apache-2.0",
"OpenSSL"
] | 4 | 2019-02-11T06:58:43.000Z | 2020-03-15T14:12:32.000Z | pypy/objspace/std/bytesobject.py | akercheval/espy | f8317d2f01ba726ed4f03cab081176c32ae4cac4 | [
"Apache-2.0",
"OpenSSL"
] | null | null | null | pypy/objspace/std/bytesobject.py | akercheval/espy | f8317d2f01ba726ed4f03cab081176c32ae4cac4 | [
"Apache-2.0",
"OpenSSL"
] | null | null | null | """The builtin str implementation"""
from rpython.rlib import jit
from rpython.rlib.jit import we_are_jitted
from rpython.rlib.objectmodel import (
compute_hash, compute_unique_id, import_from_mixin)
from rpython.rlib.buffer import StringBuffer
from rpython.rlib.rstring import StringBuilder, replace
from pypy.interpreter.baseobjspace import W_Root
from pypy.interpreter.buffer import SimpleView
from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.gateway import (
WrappedDefault, interp2app, interpindirect2app, unwrap_spec)
from pypy.interpreter.typedef import TypeDef
from pypy.objspace.std import newformat
from pypy.objspace.std.basestringtype import basestring_typedef
from pypy.objspace.std.formatting import mod_format
from pypy.objspace.std.stringmethods import StringMethods
from pypy.objspace.std.unicodeobject import (
decode_object, unicode_from_encoded_object,
unicode_from_string, getdefaultencoding)
from pypy.objspace.std.util import IDTAG_SPECIAL, IDTAG_SHIFT
W_BytesObject.EMPTY = W_BytesObject('')
W_BytesObject.typedef = TypeDef(
"pal", basestring_typedef, None, "read",
__new__ = interp2app(W_BytesObject.descr_new),
__doc__ = """pal(objeto='') -> palabra
Vuelve una representación palabra del objeto. Si el argumento es
una palabra, lo que vuelve es el objeto mismo.
""",
__repr__ = interpindirect2app(W_AbstractBytesObject.descr_repr),
__pal__ = interpindirect2app(W_AbstractBytesObject.descr_str),
__str__ = interpindirect2app(W_AbstractBytesObject.descr_str),
__hash__ = interpindirect2app(W_AbstractBytesObject.descr_hash),
__ig__ = interpindirect2app(W_AbstractBytesObject.descr_eq),
__eq__ = interpindirect2app(W_AbstractBytesObject.descr_eq),
__ni__ = interpindirect2app(W_AbstractBytesObject.descr_ne),
__ne__ = interpindirect2app(W_AbstractBytesObject.descr_ne),
__meq__ = interpindirect2app(W_AbstractBytesObject.descr_lt),
__lt__ = interpindirect2app(W_AbstractBytesObject.descr_lt),
__mei__ = interpindirect2app(W_AbstractBytesObject.descr_le),
__le__ = interpindirect2app(W_AbstractBytesObject.descr_le),
__maq__ = interpindirect2app(W_AbstractBytesObject.descr_gt),
__gt__ = interpindirect2app(W_AbstractBytesObject.descr_gt),
__mai__ = interpindirect2app(W_AbstractBytesObject.descr_ge),
__ge__ = interpindirect2app(W_AbstractBytesObject.descr_ge),
__tam__ = interpindirect2app(W_AbstractBytesObject.descr_len),
__len__ = interpindirect2app(W_AbstractBytesObject.descr_len),
__contiene__ = interpindirect2app(W_AbstractBytesObject.descr_contains),
__contains__ = interpindirect2app(W_AbstractBytesObject.descr_contains),
__mas__ = interpindirect2app(W_AbstractBytesObject.descr_add),
__add__ = interpindirect2app(W_AbstractBytesObject.descr_add),
__mul__ = interpindirect2app(W_AbstractBytesObject.descr_mul),
__dmul__ = interpindirect2app(W_AbstractBytesObject.descr_rmul),
__rmul__ = interpindirect2app(W_AbstractBytesObject.descr_rmul),
__sacaartic__ = interpindirect2app(W_AbstractBytesObject.descr_getitem),
__getitem__ = interpindirect2app(W_AbstractBytesObject.descr_getitem),
__sacaparte__ = interpindirect2app(W_AbstractBytesObject.descr_getslice),
__getslice__ = interpindirect2app(W_AbstractBytesObject.descr_getslice),
mayuscular = interpindirect2app(W_AbstractBytesObject.descr_capitalize),
capitalize = interpindirect2app(W_AbstractBytesObject.descr_capitalize),
centro = interpindirect2app(W_AbstractBytesObject.descr_center),
center = interpindirect2app(W_AbstractBytesObject.descr_center),
total = interpindirect2app(W_AbstractBytesObject.descr_count),
count = interpindirect2app(W_AbstractBytesObject.descr_count),
decodificar = interpindirect2app(W_AbstractBytesObject.descr_decode),
decode = interpindirect2app(W_AbstractBytesObject.descr_decode),
codificar = interpindirect2app(W_AbstractBytesObject.descr_encode),
encode = interpindirect2app(W_AbstractBytesObject.descr_encode),
expandtabs = interpindirect2app(W_AbstractBytesObject.descr_expandtabs),
encontrar = interpindirect2app(W_AbstractBytesObject.descr_find),
find = interpindirect2app(W_AbstractBytesObject.descr_find),
dencontrar = interpindirect2app(W_AbstractBytesObject.descr_rfind),
rfind = interpindirect2app(W_AbstractBytesObject.descr_rfind),
indice = interpindirect2app(W_AbstractBytesObject.descr_index),
index = interpindirect2app(W_AbstractBytesObject.descr_index),
dindice = interpindirect2app(W_AbstractBytesObject.descr_rindex),
rindex = interpindirect2app(W_AbstractBytesObject.descr_rindex),
esalnum = interpindirect2app(W_AbstractBytesObject.descr_isalnum),
isalnum = interpindirect2app(W_AbstractBytesObject.descr_isalnum),
esalfa = interpindirect2app(W_AbstractBytesObject.descr_isalpha),
isalpha = interpindirect2app(W_AbstractBytesObject.descr_isalpha),
esdig = interpindirect2app(W_AbstractBytesObject.descr_isdigit),
isdigit = interpindirect2app(W_AbstractBytesObject.descr_isdigit),
esminusc = interpindirect2app(W_AbstractBytesObject.descr_islower),
islower = interpindirect2app(W_AbstractBytesObject.descr_islower),
esespac = interpindirect2app(W_AbstractBytesObject.descr_isspace),
isspace = interpindirect2app(W_AbstractBytesObject.descr_isspace),
estitulo = interpindirect2app(W_AbstractBytesObject.descr_istitle),
istitle = interpindirect2app(W_AbstractBytesObject.descr_istitle),
esmayusc = interpindirect2app(W_AbstractBytesObject.descr_isupper),
isupper = interpindirect2app(W_AbstractBytesObject.descr_isupper),
juntar = interpindirect2app(W_AbstractBytesObject.descr_join),
join = interpindirect2app(W_AbstractBytesObject.descr_join),
ijust = interpindirect2app(W_AbstractBytesObject.descr_ljust),
ljust = interpindirect2app(W_AbstractBytesObject.descr_ljust),
djust = interpindirect2app(W_AbstractBytesObject.descr_rjust),
rjust = interpindirect2app(W_AbstractBytesObject.descr_rjust),
minusc = interpindirect2app(W_AbstractBytesObject.descr_lower),
lower = interpindirect2app(W_AbstractBytesObject.descr_lower),
particion = interpindirect2app(W_AbstractBytesObject.descr_partition),
partition = interpindirect2app(W_AbstractBytesObject.descr_partition),
dparticion = interpindirect2app(W_AbstractBytesObject.descr_rpartition),
rpartition = interpindirect2app(W_AbstractBytesObject.descr_rpartition),
reemplazar = interpindirect2app(W_AbstractBytesObject.descr_replace),
replace = interpindirect2app(W_AbstractBytesObject.descr_replace),
quebrar = interpindirect2app(W_AbstractBytesObject.descr_split),
split = interpindirect2app(W_AbstractBytesObject.descr_split),
dquebrar = interpindirect2app(W_AbstractBytesObject.descr_rsplit),
rsplit = interpindirect2app(W_AbstractBytesObject.descr_rsplit),
quebrarlineas = interpindirect2app(W_AbstractBytesObject.descr_splitlines),
splitlines = interpindirect2app(W_AbstractBytesObject.descr_splitlines),
empcon = interpindirect2app(W_AbstractBytesObject.descr_startswith),
startswith = interpindirect2app(W_AbstractBytesObject.descr_startswith),
terminacon = interpindirect2app(W_AbstractBytesObject.descr_endswith),
endswith = interpindirect2app(W_AbstractBytesObject.descr_endswith),
decapar = interpindirect2app(W_AbstractBytesObject.descr_strip),
strip = interpindirect2app(W_AbstractBytesObject.descr_strip),
idecapar = interpindirect2app(W_AbstractBytesObject.descr_lstrip),
lstrip = interpindirect2app(W_AbstractBytesObject.descr_lstrip),
ddecapar = interpindirect2app(W_AbstractBytesObject.descr_rstrip),
rstrip = interpindirect2app(W_AbstractBytesObject.descr_rstrip),
minmayusc = interpindirect2app(W_AbstractBytesObject.descr_swapcase),
swapcase = interpindirect2app(W_AbstractBytesObject.descr_swapcase),
titulo = interpindirect2app(W_AbstractBytesObject.descr_title),
title = interpindirect2app(W_AbstractBytesObject.descr_title),
traducir = interpindirect2app(W_AbstractBytesObject.descr_translate),
translate = interpindirect2app(W_AbstractBytesObject.descr_translate),
mayusc = interpindirect2app(W_AbstractBytesObject.descr_upper),
upper = interpindirect2app(W_AbstractBytesObject.descr_upper),
cllenar = interpindirect2app(W_AbstractBytesObject.descr_zfill),
zfill = interpindirect2app(W_AbstractBytesObject.descr_zfill),
__bufer__ = interp2app(W_BytesObject.descr_getbuffer),
__buffer__ = interp2app(W_BytesObject.descr_getbuffer),
formato = interpindirect2app(W_BytesObject.descr_format),
format = interpindirect2app(W_BytesObject.descr_format),
__formato__ = interpindirect2app(W_BytesObject.descr__format__),
__format__ = interpindirect2app(W_BytesObject.descr__format__),
__mod__ = interpindirect2app(W_BytesObject.descr_mod),
__dmod__ = interpindirect2app(W_BytesObject.descr_rmod),
__rmod__ = interpindirect2app(W_BytesObject.descr_rmod),
__sacanuevosargs__ = interpindirect2app(
W_AbstractBytesObject.descr_getnewargs),
__getnewargs__ = interpindirect2app(
W_AbstractBytesObject.descr_getnewargs),
_formatter_parser = interp2app(W_BytesObject.descr_formatter_parser),
_formatter_field_name_split =
interp2app(W_BytesObject.descr_formatter_field_name_split),
)
W_BytesObject.typedef.flag_sequence_bug_compat = True
@jit.elidable
| 40.431085 | 80 | 0.677595 | """The builtin str implementation"""
from rpython.rlib import jit
from rpython.rlib.jit import we_are_jitted
from rpython.rlib.objectmodel import (
compute_hash, compute_unique_id, import_from_mixin)
from rpython.rlib.buffer import StringBuffer
from rpython.rlib.rstring import StringBuilder, replace
from pypy.interpreter.baseobjspace import W_Root
from pypy.interpreter.buffer import SimpleView
from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.gateway import (
WrappedDefault, interp2app, interpindirect2app, unwrap_spec)
from pypy.interpreter.typedef import TypeDef
from pypy.objspace.std import newformat
from pypy.objspace.std.basestringtype import basestring_typedef
from pypy.objspace.std.formatting import mod_format
from pypy.objspace.std.stringmethods import StringMethods
from pypy.objspace.std.unicodeobject import (
decode_object, unicode_from_encoded_object,
unicode_from_string, getdefaultencoding)
from pypy.objspace.std.util import IDTAG_SPECIAL, IDTAG_SHIFT
class W_AbstractBytesObject(W_Root):
__slots__ = ()
def is_w(self, space, w_other):
if not isinstance(w_other, W_AbstractBytesObject):
return False
if self is w_other:
return True
if self.user_overridden_class or w_other.user_overridden_class:
return False
s1 = space.bytes_w(self)
s2 = space.bytes_w(w_other)
if len(s2) > 1:
return s1 is s2
else: # strings of len <= 1 are unique-ified
return s1 == s2
def immutable_unique_id(self, space):
if self.user_overridden_class:
return None
s = space.bytes_w(self)
if len(s) > 1:
uid = compute_unique_id(s)
else: # strings of len <= 1 are unique-ified
if len(s) == 1:
base = ord(s[0]) # base values 0-255
else:
base = 256 # empty string: base value 256
uid = (base << IDTAG_SHIFT) | IDTAG_SPECIAL
return space.newint(uid)
def unicode_w(self, space):
# Use the default encoding.
encoding = getdefaultencoding(space)
return space.unicode_w(decode_object(space, self, encoding, None))
def descr_add(self, space, w_other):
"""x.__mas__(y) <==> x+y"""
def descr_contains(self, space, w_sub):
"""x.__contiene__(y) <==> y in x"""
def descr_eq(self, space, w_other):
"""x.__ig__(y) <==> x==y"""
def descr__format__(self, space, w_format_spec):
"""S.__formato__(formato_espec) -> palabra
Vuelve una versión formateada de S, describido por formato_espec.
"""
def descr_ge(self, space, w_other):
"""x.__mai__(y) <==> x>=y"""
def descr_getitem(self, space, w_index):
"""x.__sacaartic__(y) <==> x[y]"""
def descr_getnewargs(self, space):
""
def descr_getslice(self, space, w_start, w_stop):
"""x.__sacaparte__(i, j) <==> x[i:j]
Uso de índices negativos no es apoyado.
"""
def descr_gt(self, space, w_other):
"""x.__maq__(y) <==> x>y"""
def descr_hash(self, space):
"""x.__hash__() <==> hash(x)"""
def descr_le(self, space, w_other):
"""x.__mei__(y) <==> x<=y"""
def descr_len(self, space):
"""x.__tam__() <==> tam(x)"""
def descr_lt(self, space, w_other):
"""x.__meq__(y) <==> x<y"""
def descr_mod(self, space, w_values):
"""x.__mod__(y) <==> x%y"""
def descr_mul(self, space, w_times):
"""x.__mul__(n) <==> x*n"""
def descr_ne(self, space, w_other):
"""x.__ni__(y) <==> x!=y"""
def descr_repr(self, space):
"""x.__repr__() <==> repr(x)"""
def descr_rmod(self, space, w_values):
"""x.__dmod__(y) <==> y%x"""
def descr_rmul(self, space, w_times):
"""x.__dmul__(n) <==> n*x"""
def descr_str(self, space):
"""x.__pal__() <==> pal(x)"""
def descr_capitalize(self, space):
"""S.mayuscular() -> palabra
Vuelve una versión de S puesta en mayusculas, i.e. poner el carácter
primero en mayúsculo y el resto en minusculo.
"""
@unwrap_spec(width=int, w_fillchar=WrappedDefault(' '))
def descr_center(self, space, width, w_fillchar):
"""S.centro(ancho[, llenacarác]) -> palabra
Vuelve S en el centro de una palabra de tamaño ancho. Relleno está
hecho con la llenacarác especificada (estándar es un espacio).
"""
def descr_count(self, space, w_sub, w_start=None, w_end=None):
"""S.total(sub[, empieza[, fin]]) -> ent
Vuelve el numero de casos no sobreponiendos del sub-palabra sub en
palabra S[empieza:fin]. Argumentos opcionales empieza y fin son
interpretados como en notación cortar.
"""
def descr_decode(self, space, w_encoding=None, w_errors=None):
"""S.decodificar(codificación=Nada, errores='estricto') -> objeto
Decodificar S usando el codec registrado para codificación. Errores se
pueden pasar a una esquema de encargación de errores diferente. El
estándar es 'estricto', es decir que los errores llaman
UnicodeDecodeError. Otros valores posibles son 'ignorar' y 'reemplazar'
y cualquier otro nombre registrado con codecs.register_error que puede
llamar UnicodeDecodeErrors.
"""
def descr_encode(self, space, w_encoding=None, w_errors=None):
"""S.codificar(codificación=Nada, errores='estricto') -> objeto
Codificar S usando el codec para codificación. Errores se pueden
pasar a una esquema de encargación de errores diferente. El estándar
es 'estricto', es decir que los errores llaman UnicodeEncodeError.
Otros valores posibles son 'ignorar', 'reemplazar' y 'xmlcharrefreplace'
y cualquier otro nombre registrado con codecs.register_error que puede
llamar UnicodeEncodeErrors.
"""
def descr_endswith(self, space, w_suffix, w_start=None, w_end=None):
"""S.terminacon(sufijo[, empieza[, fin]]) -> bool
Vuelve Cierto si S termina con el sufijo especificado, Falso si no.
Con empieza opcional, prueba S al inicio de esa posición.
Con fin opcional, pare comparando S en esa posición.
sufijo también puede ser un tuple de palabrase para probar.
"""
@unwrap_spec(tabsize=int)
def descr_expandtabs(self, space, tabsize=8):
"""S.expandtabs([tabtamaño]) -> palabra
Vuelve una copia de S donde todos los tabs son expandidos usando
espacios. Si tabtamaño no está dado, un tamaño de 8 carácteres está
asumido.
"""
def descr_find(self, space, w_sub, w_start=None, w_end=None):
"""S.encontrar(sub[, empieza[, fin]]) -> ent
Vuelve la índice más baja en S donde la sub-palabra sub
está encontrada, para que sub esté contenido entre S[empieza:fin].
Vuelve -1 si fracasa.
"""
def descr_format(self, space, __args__):
"""S.formato(*args, **kwargs) -> palabra
Vuelve una versión de S formateado, usando substituciones de args y
kwargs. Las substituciones son identificados con llaves ('{' y '}').
"""
def descr_index(self, space, w_sub, w_start=None, w_end=None):
"""S.indice(sub[, empieza[, fin]]) -> ent
Como S.encontrar() pero llama ValueError cuando el sub-palabra no
se puede encontrar.
"""
def descr_isalnum(self, space):
"""S.esalnum() -> bool
Vuelve Cierto si todos los carácteres en S son alfanuméricos
y hay por lo menos un carácter en S, Falso si no.
"""
def descr_isalpha(self, space):
"""S.esalfa() -> bool
Vuelve Cierto si todos los carácteres en S son alfabéticos y hay
por lo menos un carácter en S, Falso si no.
"""
def descr_isdigit(self, space):
"""S.esdec() -> bool
Vuelve Cierto si todos los carácteres en S son dígitos y hay por
lo menus un carácter en S, Falso si no.
"""
def descr_islower(self, space):
"""S.esminusc() -> bool
Vuelve Cierto si todos los carácteres en S están en minúscula y
hay por lo menos un carácter en S, Falso si no.
"""
def descr_isspace(self, space):
"""S.esespac() -> bool
Vuelve Cierto si todos los carácteres en S son espacio blanco y
hay por lo menos un carácter en S, Falso si no.
"""
def descr_istitle(self, space):
"""S.estitulo() -> bool
Vuelve Cierto si S está en formato de título y hay por lo menos
un carácter en S, Falso si no.
"""
def descr_isupper(self, space):
"""S.esmayusc() -> bool
Vuelve Cierto si todos los carácteres en S son en mayúsculo y hay
por lo menos un carácter en S, Falso si no.
"""
    # NOTE(review): docstring-only stubs on the abstract bytes type of a
    # Spanish-localized PyPy fork; the real implementations come from the
    # StringMethods mixin on the concrete subclass.  The Spanish docstrings
    # are the interpreter's user-visible help() text, so they are
    # intentionally left untranslated.

    # str.join: concatenate an iterable of strings with self as separator.
    def descr_join(self, space, w_list):
        """S.juntar(iterable) -> palabra
        Vuelve una palabra que es la juntación de las palabras en el
        iterable. El separador entre elementos es S.
        """

    # str.ljust: left-justify in a field of `width`, padded with fillchar.
    @unwrap_spec(width=int, w_fillchar=WrappedDefault(' '))
    def descr_ljust(self, space, width, w_fillchar):
        """S.ijust(ancho[, lleneacarác]) -> palabra
        Vuelve S justificado a la izquierda en una palabra de tamaño
        ancho. Relleno está hecho con el carácter especificado (estándar
        es un espacio).
        """

    # str.lower: copy with all cased characters lowercased.
    def descr_lower(self, space):
        """S.minusc() -> palabra
        Vuelve una copia de la palabra S convertido a minúscula.
        """

    # str.lstrip: strip leading whitespace (or the given characters).
    def descr_lstrip(self, space, w_chars=None):
        """S.idecapar([carács]) -> palabra o unicod
        Vuelve una copia de la palabra S con espacio blanco al frente quitado.
        Si carács está dado y no es Nada, quita carácteres en carács en lugar
        de espacio blanco. Si carács es unicod, S será convertido a unicode
        antes de decapar.
        """

    # str.partition: split at first occurrence of sep into (head, sep, tail).
    def descr_partition(self, space, w_sub):
        """S.particion(sep) -> (cabeza, sep, cola)
        Busca el separador sep en S, y volver la parte antes de ello, el
        separador, y el parte después de ello. Si sep no está encontrado,
        volver S y dos palabras vacías.
        """

    # str.replace: replace up to `count` occurrences of old with new.
    @unwrap_spec(count=int)
    def descr_replace(self, space, w_old, w_new, count=-1):
        """S.reemplazar(viejo, nuevo[, total]) -> palabra
        Vuelve una copia de la palabra S con todas occurencias de la
        sub-palabra viejo reemplazadas por nuevo. Si el argumento
        opcional total está dado, solamente las primeras total occurencias
        son reemplazadas.
        """

    # str.rfind: highest index of sub within S[start:end]; -1 when absent.
    def descr_rfind(self, space, w_sub, w_start=None, w_end=None):
        """S.dencontrar(sub[, empieza[, fin]]) -> ent
        Vuelve la índice más alta en S donde sub-palabra sub está
        encontrada, para que sub esté contenida en S[empieza:fin].
        Vuelve -1 si fracasa.
        """

    # str.rindex: like rfind but raises ValueError when absent.
    def descr_rindex(self, space, w_sub, w_start=None, w_end=None):
        """S.dindice(sub[, empieza[, fin]]) -> ent
        Como S.dencontrar() pero llama ValueError cuando la sub-palabra
        no está encontrada.
        """

    # str.rjust: right-justify in a field of `width`, padded with fillchar.
    @unwrap_spec(width=int, w_fillchar=WrappedDefault(' '))
    def descr_rjust(self, space, width, w_fillchar):
        """S.djust(ancho[, llenacarác]) -> palabra
        Vuelve S justificado a la derecha en una palabra de tamaño
        ancho. Relleno está hecho con el carácter especificado (estándar
        es un espacio).
        """

    # str.rpartition: like partition, but searches from the right.
    def descr_rpartition(self, space, w_sub):
        """S.dparticion(sep) -> (cabeza, sep, cola)
        Busca el separador sep en S, empezando al fin de S, y volver la
        parte antes de ello, el separador, y el parte después de ello.
        Si sep no está encontrado, volver S y dos palabras vacías.
        """

    # str.rsplit: split from the right, at most `maxsplit` times.
    @unwrap_spec(maxsplit=int)
    def descr_rsplit(self, space, w_sep=None, maxsplit=-1):
        """S.dquebrar(sep=Nada, maxquebrar=-1) -> lista de palabras
        Volver una lista de las secciones en S, usando sep como delimitador,
        empezando al final de S y siguendo al frente.
        Si sep no está dado o es Nada, cualquier espacio blanco es un
        separador.
        Si maxquebrar está dado, al máximo maxquebrar quebraciones están
        hechos.
        """

    # str.rstrip: strip trailing whitespace (or the given characters).
    def descr_rstrip(self, space, w_chars=None):
        """S.ddecapar([carács]) -> palabra o unicod
        Vuelve una copia de la palabra S con espacio blanco al final quitado.
        Si carács está dado y no es Nada, quita carácteres en carács en lugar
        de espacio blanco. Si carács es unicod, S será convertido a unicode
        antes de decapar.
        """

    # str.split: split from the left, at most `maxsplit` times.
    @unwrap_spec(maxsplit=int)
    def descr_split(self, space, w_sep=None, maxsplit=-1):
        """S.quebrar(sep=Nada, maxquebrar=-1) -> lista de palabras
        Volver una lista de las secciones en S, usando sep como delimitador.
        Si sep no está dado o es Nada, cualquier espacio blanco es un
        separador.
        Si maxquebrar está dado, al máximo maxquebrar quebraciones están
        hechos.
        """

    # str.splitlines: split on line boundaries, optionally keeping them.
    @unwrap_spec(keepends=bool)
    def descr_splitlines(self, space, keepends=False):
        """S.quebrarlineas(guardacolas=Falso) -> lista de palabras
        Volver una lista de las líneas en S, rompiendo en límites de las
        líneas. Rompes de línea no son incluidos en el resultado a menos
        que guardarcolas está dado y es Cierto.
        """

    # str.startswith: True when S (or S[start:end]) begins with prefix.
    def descr_startswith(self, space, w_prefix, w_start=None, w_end=None):
        """S.empcon(prefijo[, empieza[, fin]]) -> bool
        Vuelve Cierto si S empieza con el prefijo especificado, Falso si no.
        Con empieza opcional, prueba S empezando en esta posición.
        Con fin opcional, pare comparando S en esta posición.
        prefijo también puede ser un tuple de palabras para probar.
        """

    # str.strip: strip leading and trailing whitespace (or given chars).
    def descr_strip(self, space, w_chars=None):
        """S.decapar([carács]) -> palabra o unicod
        Vuelve una copia de la palabra S con espacio blanco al inicio y al
        final quitado.
        Si carács está dado y no es Nada, quita carácteres in carács.
        Si carács es unicod, S será convertido a unicod antes de decapar.
        """

    # str.swapcase: invert the case of every cased character.
    def descr_swapcase(self, space):
        """S.minmayusc() -> palabra
        Vuelve una copia de S con todos los carácteres mayúsculos convertidos
        a minúsculo, y vice versa.
        """

    # str.title: titlecase each word.
    def descr_title(self, space):
        """S.titulo() -> palabra
        Vuelve una versión de S puesto como título, i.e. palabras que empiezan
        con mayúsculos, y todos otros carácteres están in minúsculo.
        """

    # str.translate: map characters through a 256-byte table, after removing
    # any characters listed in deletechars.
    @unwrap_spec(w_deletechars=WrappedDefault(''))
    def descr_translate(self, space, w_table, w_deletechars):
        """S.traducir(mesa[, elimcarács]) -> palabra
        Vuelve una copia de B donde todos los carácteres que ocurren
        en el argumento opcional elimcarács son quitados, y el resto
        de los carácteres han sido aplicados en la mesa de traducción,
        que tiene que ser un objeto bytes de tamaño 256. Si el argumento
        mesa es Nada, no traducción está aplicado y la operación simplemente
        quita los carácteres en elimcarács.
        """

    # str.upper: copy with all cased characters uppercased.
    def descr_upper(self, space):
        """S.mayusc() -> palabra
        Vuelve una copia de S con todos carácteres puesto en mayúsculo.
        """

    # str.zfill: left-pad a numeric string with zeros to `width`.
    @unwrap_spec(width=int)
    def descr_zfill(self, space, width):
        """S.cllenar(ancho) -> palabra
        Rellenar una palabra numérica S con ceros a la izquierda, para
        llenar un campo del ancho especificado. S nunca está truncado.
        """
class W_BytesObject(W_AbstractBytesObject):
    """Concrete interpreter-level bytes/str object wrapping an immutable
    RPython string in ``_value``.  String algorithms come from the
    StringMethods mixin; the methods below supply the bytes-specific
    primitives the mixin calls, plus py2-style auto-conversion overrides
    that promote operations to unicode when the other operand is unicode.
    """
    import_from_mixin(StringMethods)
    _immutable_fields_ = ['_value']

    def __init__(self, str):
        # `str` here shadows the builtin on purpose (upstream PyPy style).
        assert str is not None
        self._value = str

    def __repr__(self):
        """representation for debugging purposes"""
        return "%s(%r)" % (self.__class__.__name__, self._value)

    def unwrap(self, space):
        # Return the underlying RPython string.
        return self._value

    def str_w(self, space):
        return self._value

    def buffer_w(self, space, flags):
        # Read-only buffer view over the immutable string.
        space.check_buf_flags(flags, True)
        return SimpleView(StringBuffer(self._value))

    def readbuf_w(self, space):
        return StringBuffer(self._value)

    def writebuf_w(self, space):
        # Strings are immutable: a writable buffer is a TypeError.
        # (Spanish message: "cannot use a string as a modifiable buffer".)
        raise oefmt(space.w_TypeError,
                    "No puede usar palabra como búfer modificable")

    def descr_getbuffer(self, space, w_flags):
        #from pypy.objspace.std.bufferobject import W_Buffer
        #return W_Buffer(StringBuffer(self._value))
        return self

    charbuf_w = str_w

    def listview_bytes(self):
        # Fast path used by space.listview_bytes(); helper kept separate so
        # the JIT can inline it.
        return _create_list_from_bytes(self._value)

    def ord(self, space):
        # ord() of a length-1 string; error message is localized Spanish.
        if len(self._value) != 1:
            raise oefmt(space.w_TypeError,
                        "ord() anticipó un carácter, pero palabra de tamaño %d "
                        "encontrada", len(self._value))
        return space.newint(ord(self._value[0]))

    # --- primitives required by the StringMethods mixin -------------------

    def _new(self, value):
        return W_BytesObject(value)

    def _new_from_list(self, value):
        return W_BytesObject(''.join(value))

    def _empty(self):
        return W_BytesObject.EMPTY

    def _len(self):
        return len(self._value)

    _val = str_w

    @staticmethod
    def _use_rstr_ops(space, w_other):
        from pypy.objspace.std.unicodeobject import W_UnicodeObject
        return (isinstance(w_other, W_BytesObject) or
                isinstance(w_other, W_UnicodeObject))

    @staticmethod
    def _op_val(space, w_other, strict=None):
        # Unwrap the other operand to an RPython string; with `strict` set,
        # only real bytes are accepted (Spanish TypeError otherwise).
        if strict and not space.isinstance_w(w_other, space.w_bytes):
            raise oefmt(space.w_TypeError,
                        "%s arg tiene que ser Nada, pal o unicod", strict)
        try:
            return space.bytes_w(w_other)
        except OperationError as e:
            if not e.match(space, space.w_TypeError):
                raise
        # Fall back to anything exposing a character buffer.
        return space.charbuf_w(w_other)

    def _chr(self, char):
        assert len(char) == 1
        return str(char)[0]

    _builder = StringBuilder

    # Per-character classification/casing primitives (ASCII semantics).
    def _isupper(self, ch):
        return ch.isupper()

    def _islower(self, ch):
        return ch.islower()

    def _istitle(self, ch):
        return ch.isupper()

    def _isspace(self, ch):
        return ch.isspace()

    def _isalpha(self, ch):
        return ch.isalpha()

    def _isalnum(self, ch):
        return ch.isalnum()

    def _isdigit(self, ch):
        return ch.isdigit()

    _iscased = _isalpha

    def _islinebreak(self, ch):
        return (ch == '\n') or (ch == '\r')

    def _upper(self, ch):
        # ASCII-only case change: 'a'..'z' differ from 'A'..'Z' by 32.
        if ch.islower():
            o = ord(ch) - 32
            return chr(o)
        else:
            return ch

    def _lower(self, ch):
        if ch.isupper():
            o = ord(ch) + 32
            return chr(o)
        else:
            return ch

    _title = _upper

    def _newlist_unwrapped(self, space, lst):
        return space.newlist_bytes(lst)

    @staticmethod
    @unwrap_spec(w_object=WrappedDefault(""))
    def descr_new(space, w_stringtype, w_object):
        # NB. the default value of w_object is really a *wrapped* empty string:
        # there is gateway magic at work
        w_obj = space.str(w_object)
        if space.is_w(w_stringtype, space.w_bytes):
            return w_obj  # XXX might be reworked when space.str() typechecks
        value = space.bytes_w(w_obj)
        w_obj = space.allocate_instance(W_BytesObject, w_stringtype)
        W_BytesObject.__init__(w_obj, value)
        return w_obj

    def descr_repr(self, space):
        # Prefer double quotes only when the string contains a single quote
        # and no double quote (CPython repr behaviour).
        s = self._value
        quote = "'"
        if quote in s and '"' not in s:
            quote = '"'
        return space.newtext(string_escape_encode(s, quote))

    def descr_str(self, space):
        if type(self) is W_BytesObject:
            return self
        # Subclass instances get re-wrapped as a plain bytes object.
        return W_BytesObject(self._value)

    def descr_hash(self, space):
        x = compute_hash(self._value)
        x -= (x == -1)  # convert -1 to -2 without creating a bridge
        return space.newint(x)

    def descr_format(self, space, __args__):
        return newformat.format_method(space, self, __args__, is_unicode=False)

    def descr__format__(self, space, w_format_spec):
        if not space.isinstance_w(w_format_spec, space.w_bytes):
            w_format_spec = space.str(w_format_spec)
        spec = space.bytes_w(w_format_spec)
        formatter = newformat.str_formatter(space, spec)
        return formatter.format_string(self._value)

    def descr_mod(self, space, w_values):
        return mod_format(space, self, w_values, do_unicode=False)

    def descr_rmod(self, space, w_values):
        return mod_format(space, w_values, self, do_unicode=False)

    # Rich comparisons: only defined against other bytes objects; anything
    # else returns NotImplemented so the other side gets a chance.
    def descr_eq(self, space, w_other):
        if not isinstance(w_other, W_BytesObject):
            return space.w_NotImplemented
        return space.newbool(self._value == w_other._value)

    def descr_ne(self, space, w_other):
        if not isinstance(w_other, W_BytesObject):
            return space.w_NotImplemented
        return space.newbool(self._value != w_other._value)

    def descr_lt(self, space, w_other):
        if not isinstance(w_other, W_BytesObject):
            return space.w_NotImplemented
        return space.newbool(self._value < w_other._value)

    def descr_le(self, space, w_other):
        if not isinstance(w_other, W_BytesObject):
            return space.w_NotImplemented
        return space.newbool(self._value <= w_other._value)

    def descr_gt(self, space, w_other):
        if not isinstance(w_other, W_BytesObject):
            return space.w_NotImplemented
        return space.newbool(self._value > w_other._value)

    def descr_ge(self, space, w_other):
        if not isinstance(w_other, W_BytesObject):
            return space.w_NotImplemented
        return space.newbool(self._value >= w_other._value)

    # auto-conversion fun
    # The following overrides keep py2 semantics: when the other operand is
    # unicode, the whole operation is retried on the unicode promotion of
    # self.  The mixin's original implementation is saved under a
    # _StringMethods_* alias and used as the fallback.

    _StringMethods_descr_add = descr_add
    def descr_add(self, space, w_other):
        if space.isinstance_w(w_other, space.w_unicode):
            self_as_unicode = unicode_from_encoded_object(space, self, None,
                                                          None)
            return self_as_unicode.descr_add(space, w_other)
        elif space.isinstance_w(w_other, space.w_bytearray):
            # XXX: eliminate double-copy
            from .bytearrayobject import W_BytearrayObject, _make_data
            self_as_bytearray = W_BytearrayObject(_make_data(self._value))
            return space.add(self_as_bytearray, w_other)
        return self._StringMethods_descr_add(space, w_other)

    _StringMethods__startswith = _startswith
    def _startswith(self, space, value, w_prefix, start, end):
        if space.isinstance_w(w_prefix, space.w_unicode):
            self_as_unicode = unicode_from_encoded_object(space, self, None,
                                                          None)
            return self_as_unicode._startswith(space, self_as_unicode._value,
                                               w_prefix, start, end)
        return self._StringMethods__startswith(space, value, w_prefix, start,
                                               end)

    _StringMethods__endswith = _endswith
    def _endswith(self, space, value, w_suffix, start, end):
        if space.isinstance_w(w_suffix, space.w_unicode):
            self_as_unicode = unicode_from_encoded_object(space, self, None,
                                                          None)
            return self_as_unicode._endswith(space, self_as_unicode._value,
                                             w_suffix, start, end)
        return self._StringMethods__endswith(space, value, w_suffix, start,
                                             end)

    _StringMethods_descr_contains = descr_contains
    def descr_contains(self, space, w_sub):
        if space.isinstance_w(w_sub, space.w_unicode):
            from pypy.objspace.std.unicodeobject import W_UnicodeObject
            assert isinstance(w_sub, W_UnicodeObject)
            self_as_unicode = unicode_from_encoded_object(space, self, None,
                                                          None)
            return space.newbool(
                self_as_unicode._value.find(w_sub._value) >= 0)
        return self._StringMethods_descr_contains(space, w_sub)

    _StringMethods_descr_replace = descr_replace
    @unwrap_spec(count=int)
    def descr_replace(self, space, w_old, w_new, count=-1):
        old_is_unicode = space.isinstance_w(w_old, space.w_unicode)
        new_is_unicode = space.isinstance_w(w_new, space.w_unicode)
        if old_is_unicode or new_is_unicode:
            self_as_uni = unicode_from_encoded_object(space, self, None, None)
            return self_as_uni.descr_replace(space, w_old, w_new, count)
        return self._StringMethods_descr_replace(space, w_old, w_new, count)

    _StringMethods_descr_join = descr_join
    def descr_join(self, space, w_list):
        # Fast path: when the list is known to contain only bytes, join the
        # unwrapped strings directly.
        l = space.listview_bytes(w_list)
        if l is not None:
            if len(l) == 1:
                return space.newbytes(l[0])
            return space.newbytes(self._val(space).join(l))
        return self._StringMethods_descr_join(space, w_list)

    _StringMethods_descr_split = descr_split
    @unwrap_spec(maxsplit=int)
    def descr_split(self, space, w_sep=None, maxsplit=-1):
        if w_sep is not None and space.isinstance_w(w_sep, space.w_unicode):
            self_as_uni = unicode_from_encoded_object(space, self, None, None)
            return self_as_uni.descr_split(space, w_sep, maxsplit)
        return self._StringMethods_descr_split(space, w_sep, maxsplit)

    _StringMethods_descr_rsplit = descr_rsplit
    @unwrap_spec(maxsplit=int)
    def descr_rsplit(self, space, w_sep=None, maxsplit=-1):
        if w_sep is not None and space.isinstance_w(w_sep, space.w_unicode):
            self_as_uni = unicode_from_encoded_object(space, self, None, None)
            return self_as_uni.descr_rsplit(space, w_sep, maxsplit)
        return self._StringMethods_descr_rsplit(space, w_sep, maxsplit)

    _StringMethods_descr_strip = descr_strip
    def descr_strip(self, space, w_chars=None):
        if w_chars is not None and space.isinstance_w(w_chars, space.w_unicode):
            self_as_uni = unicode_from_encoded_object(space, self, None, None)
            return self_as_uni.descr_strip(space, w_chars)
        return self._StringMethods_descr_strip(space, w_chars)

    _StringMethods_descr_lstrip = descr_lstrip
    def descr_lstrip(self, space, w_chars=None):
        if w_chars is not None and space.isinstance_w(w_chars, space.w_unicode):
            self_as_uni = unicode_from_encoded_object(space, self, None, None)
            return self_as_uni.descr_lstrip(space, w_chars)
        return self._StringMethods_descr_lstrip(space, w_chars)

    _StringMethods_descr_rstrip = descr_rstrip
    def descr_rstrip(self, space, w_chars=None):
        if w_chars is not None and space.isinstance_w(w_chars, space.w_unicode):
            self_as_uni = unicode_from_encoded_object(space, self, None, None)
            return self_as_uni.descr_rstrip(space, w_chars)
        return self._StringMethods_descr_rstrip(space, w_chars)

    _StringMethods_descr_count = descr_count
    def descr_count(self, space, w_sub, w_start=None, w_end=None):
        if space.isinstance_w(w_sub, space.w_unicode):
            self_as_uni = unicode_from_encoded_object(space, self, None, None)
            return self_as_uni.descr_count(space, w_sub, w_start, w_end)
        return self._StringMethods_descr_count(space, w_sub, w_start, w_end)

    _StringMethods_descr_find = descr_find
    def descr_find(self, space, w_sub, w_start=None, w_end=None):
        if space.isinstance_w(w_sub, space.w_unicode):
            self_as_uni = unicode_from_encoded_object(space, self, None, None)
            return self_as_uni.descr_find(space, w_sub, w_start, w_end)
        return self._StringMethods_descr_find(space, w_sub, w_start, w_end)

    _StringMethods_descr_rfind = descr_rfind
    def descr_rfind(self, space, w_sub, w_start=None, w_end=None):
        if space.isinstance_w(w_sub, space.w_unicode):
            self_as_uni = unicode_from_encoded_object(space, self, None, None)
            return self_as_uni.descr_rfind(space, w_sub, w_start, w_end)
        return self._StringMethods_descr_rfind(space, w_sub, w_start, w_end)

    _StringMethods_descr_index = descr_index
    def descr_index(self, space, w_sub, w_start=None, w_end=None):
        if space.isinstance_w(w_sub, space.w_unicode):
            self_as_uni = unicode_from_encoded_object(space, self, None, None)
            return self_as_uni.descr_index(space, w_sub, w_start, w_end)
        return self._StringMethods_descr_index(space, w_sub, w_start, w_end)

    _StringMethods_descr_rindex = descr_rindex
    def descr_rindex(self, space, w_sub, w_start=None, w_end=None):
        if space.isinstance_w(w_sub, space.w_unicode):
            self_as_uni = unicode_from_encoded_object(space, self, None, None)
            return self_as_uni.descr_rindex(space, w_sub, w_start, w_end)
        return self._StringMethods_descr_rindex(space, w_sub, w_start, w_end)

    _StringMethods_descr_partition = descr_partition
    def descr_partition(self, space, w_sub):
        if space.isinstance_w(w_sub, space.w_unicode):
            self_as_uni = unicode_from_encoded_object(space, self, None, None)
            return self_as_uni.descr_partition(space, w_sub)
        return self._StringMethods_descr_partition(space, w_sub)

    _StringMethods_descr_rpartition = descr_rpartition
    def descr_rpartition(self, space, w_sub):
        if space.isinstance_w(w_sub, space.w_unicode):
            self_as_uni = unicode_from_encoded_object(space, self, None, None)
            return self_as_uni.descr_rpartition(space, w_sub)
        return self._StringMethods_descr_rpartition(space, w_sub)

    def _join_return_one(self, space, w_obj):
        # join() may return the single element itself when it is an exact
        # str or unicode instance.
        return (space.is_w(space.type(w_obj), space.w_bytes) or
                space.is_w(space.type(w_obj), space.w_unicode))

    def _join_check_item(self, space, w_obj):
        # 0: plain bytes item; 2: unicode item (triggers autoconvert);
        # 1: anything else (a TypeError in the mixin).
        if space.isinstance_w(w_obj, space.w_bytes):
            return 0
        if space.isinstance_w(w_obj, space.w_unicode):
            return 2
        return 1

    def _join_autoconvert(self, space, list_w):
        # we need to rebuild w_list here, because the original
        # w_list might be an iterable which we already consumed
        w_list = space.newlist(list_w)
        w_u = space.call_function(space.w_unicode, self)
        return space.call_method(w_u, "join", w_list)

    def descr_lower(self, space):
        return W_BytesObject(self._value.lower())

    def descr_upper(self, space):
        return W_BytesObject(self._value.upper())

    def descr_formatter_parser(self, space):
        from pypy.objspace.std.newformat import str_template_formatter
        tformat = str_template_formatter(space, space.bytes_w(self))
        return tformat.formatter_parser()

    def descr_formatter_field_name_split(self, space):
        from pypy.objspace.std.newformat import str_template_formatter
        tformat = str_template_formatter(space, space.bytes_w(self))
        return tformat.formatter_field_name_split()
def _create_list_from_bytes(value):
# need this helper function to allow the jit to look inside and inline
# listview_bytes
return [s for s in value]
W_BytesObject.EMPTY = W_BytesObject('')
W_BytesObject.typedef = TypeDef(
"pal", basestring_typedef, None, "read",
__new__ = interp2app(W_BytesObject.descr_new),
__doc__ = """pal(objeto='') -> palabra
Vuelve una representación palabra del objeto. Si el argumento es
una palabra, lo que vuelve es el objeto mismo.
""",
__repr__ = interpindirect2app(W_AbstractBytesObject.descr_repr),
__pal__ = interpindirect2app(W_AbstractBytesObject.descr_str),
__str__ = interpindirect2app(W_AbstractBytesObject.descr_str),
__hash__ = interpindirect2app(W_AbstractBytesObject.descr_hash),
__ig__ = interpindirect2app(W_AbstractBytesObject.descr_eq),
__eq__ = interpindirect2app(W_AbstractBytesObject.descr_eq),
__ni__ = interpindirect2app(W_AbstractBytesObject.descr_ne),
__ne__ = interpindirect2app(W_AbstractBytesObject.descr_ne),
__meq__ = interpindirect2app(W_AbstractBytesObject.descr_lt),
__lt__ = interpindirect2app(W_AbstractBytesObject.descr_lt),
__mei__ = interpindirect2app(W_AbstractBytesObject.descr_le),
__le__ = interpindirect2app(W_AbstractBytesObject.descr_le),
__maq__ = interpindirect2app(W_AbstractBytesObject.descr_gt),
__gt__ = interpindirect2app(W_AbstractBytesObject.descr_gt),
__mai__ = interpindirect2app(W_AbstractBytesObject.descr_ge),
__ge__ = interpindirect2app(W_AbstractBytesObject.descr_ge),
__tam__ = interpindirect2app(W_AbstractBytesObject.descr_len),
__len__ = interpindirect2app(W_AbstractBytesObject.descr_len),
__contiene__ = interpindirect2app(W_AbstractBytesObject.descr_contains),
__contains__ = interpindirect2app(W_AbstractBytesObject.descr_contains),
__mas__ = interpindirect2app(W_AbstractBytesObject.descr_add),
__add__ = interpindirect2app(W_AbstractBytesObject.descr_add),
__mul__ = interpindirect2app(W_AbstractBytesObject.descr_mul),
__dmul__ = interpindirect2app(W_AbstractBytesObject.descr_rmul),
__rmul__ = interpindirect2app(W_AbstractBytesObject.descr_rmul),
__sacaartic__ = interpindirect2app(W_AbstractBytesObject.descr_getitem),
__getitem__ = interpindirect2app(W_AbstractBytesObject.descr_getitem),
__sacaparte__ = interpindirect2app(W_AbstractBytesObject.descr_getslice),
__getslice__ = interpindirect2app(W_AbstractBytesObject.descr_getslice),
mayuscular = interpindirect2app(W_AbstractBytesObject.descr_capitalize),
capitalize = interpindirect2app(W_AbstractBytesObject.descr_capitalize),
centro = interpindirect2app(W_AbstractBytesObject.descr_center),
center = interpindirect2app(W_AbstractBytesObject.descr_center),
total = interpindirect2app(W_AbstractBytesObject.descr_count),
count = interpindirect2app(W_AbstractBytesObject.descr_count),
decodificar = interpindirect2app(W_AbstractBytesObject.descr_decode),
decode = interpindirect2app(W_AbstractBytesObject.descr_decode),
codificar = interpindirect2app(W_AbstractBytesObject.descr_encode),
encode = interpindirect2app(W_AbstractBytesObject.descr_encode),
expandtabs = interpindirect2app(W_AbstractBytesObject.descr_expandtabs),
encontrar = interpindirect2app(W_AbstractBytesObject.descr_find),
find = interpindirect2app(W_AbstractBytesObject.descr_find),
dencontrar = interpindirect2app(W_AbstractBytesObject.descr_rfind),
rfind = interpindirect2app(W_AbstractBytesObject.descr_rfind),
indice = interpindirect2app(W_AbstractBytesObject.descr_index),
index = interpindirect2app(W_AbstractBytesObject.descr_index),
dindice = interpindirect2app(W_AbstractBytesObject.descr_rindex),
rindex = interpindirect2app(W_AbstractBytesObject.descr_rindex),
esalnum = interpindirect2app(W_AbstractBytesObject.descr_isalnum),
isalnum = interpindirect2app(W_AbstractBytesObject.descr_isalnum),
esalfa = interpindirect2app(W_AbstractBytesObject.descr_isalpha),
isalpha = interpindirect2app(W_AbstractBytesObject.descr_isalpha),
esdig = interpindirect2app(W_AbstractBytesObject.descr_isdigit),
isdigit = interpindirect2app(W_AbstractBytesObject.descr_isdigit),
esminusc = interpindirect2app(W_AbstractBytesObject.descr_islower),
islower = interpindirect2app(W_AbstractBytesObject.descr_islower),
esespac = interpindirect2app(W_AbstractBytesObject.descr_isspace),
isspace = interpindirect2app(W_AbstractBytesObject.descr_isspace),
estitulo = interpindirect2app(W_AbstractBytesObject.descr_istitle),
istitle = interpindirect2app(W_AbstractBytesObject.descr_istitle),
esmayusc = interpindirect2app(W_AbstractBytesObject.descr_isupper),
isupper = interpindirect2app(W_AbstractBytesObject.descr_isupper),
juntar = interpindirect2app(W_AbstractBytesObject.descr_join),
join = interpindirect2app(W_AbstractBytesObject.descr_join),
ijust = interpindirect2app(W_AbstractBytesObject.descr_ljust),
ljust = interpindirect2app(W_AbstractBytesObject.descr_ljust),
djust = interpindirect2app(W_AbstractBytesObject.descr_rjust),
rjust = interpindirect2app(W_AbstractBytesObject.descr_rjust),
minusc = interpindirect2app(W_AbstractBytesObject.descr_lower),
lower = interpindirect2app(W_AbstractBytesObject.descr_lower),
particion = interpindirect2app(W_AbstractBytesObject.descr_partition),
partition = interpindirect2app(W_AbstractBytesObject.descr_partition),
dparticion = interpindirect2app(W_AbstractBytesObject.descr_rpartition),
rpartition = interpindirect2app(W_AbstractBytesObject.descr_rpartition),
reemplazar = interpindirect2app(W_AbstractBytesObject.descr_replace),
replace = interpindirect2app(W_AbstractBytesObject.descr_replace),
quebrar = interpindirect2app(W_AbstractBytesObject.descr_split),
split = interpindirect2app(W_AbstractBytesObject.descr_split),
dquebrar = interpindirect2app(W_AbstractBytesObject.descr_rsplit),
rsplit = interpindirect2app(W_AbstractBytesObject.descr_rsplit),
quebrarlineas = interpindirect2app(W_AbstractBytesObject.descr_splitlines),
splitlines = interpindirect2app(W_AbstractBytesObject.descr_splitlines),
empcon = interpindirect2app(W_AbstractBytesObject.descr_startswith),
startswith = interpindirect2app(W_AbstractBytesObject.descr_startswith),
terminacon = interpindirect2app(W_AbstractBytesObject.descr_endswith),
endswith = interpindirect2app(W_AbstractBytesObject.descr_endswith),
decapar = interpindirect2app(W_AbstractBytesObject.descr_strip),
strip = interpindirect2app(W_AbstractBytesObject.descr_strip),
idecapar = interpindirect2app(W_AbstractBytesObject.descr_lstrip),
lstrip = interpindirect2app(W_AbstractBytesObject.descr_lstrip),
ddecapar = interpindirect2app(W_AbstractBytesObject.descr_rstrip),
rstrip = interpindirect2app(W_AbstractBytesObject.descr_rstrip),
minmayusc = interpindirect2app(W_AbstractBytesObject.descr_swapcase),
swapcase = interpindirect2app(W_AbstractBytesObject.descr_swapcase),
titulo = interpindirect2app(W_AbstractBytesObject.descr_title),
title = interpindirect2app(W_AbstractBytesObject.descr_title),
traducir = interpindirect2app(W_AbstractBytesObject.descr_translate),
translate = interpindirect2app(W_AbstractBytesObject.descr_translate),
mayusc = interpindirect2app(W_AbstractBytesObject.descr_upper),
upper = interpindirect2app(W_AbstractBytesObject.descr_upper),
cllenar = interpindirect2app(W_AbstractBytesObject.descr_zfill),
zfill = interpindirect2app(W_AbstractBytesObject.descr_zfill),
__bufer__ = interp2app(W_BytesObject.descr_getbuffer),
__buffer__ = interp2app(W_BytesObject.descr_getbuffer),
formato = interpindirect2app(W_BytesObject.descr_format),
format = interpindirect2app(W_BytesObject.descr_format),
__formato__ = interpindirect2app(W_BytesObject.descr__format__),
__format__ = interpindirect2app(W_BytesObject.descr__format__),
__mod__ = interpindirect2app(W_BytesObject.descr_mod),
__dmod__ = interpindirect2app(W_BytesObject.descr_rmod),
__rmod__ = interpindirect2app(W_BytesObject.descr_rmod),
__sacanuevosargs__ = interpindirect2app(
W_AbstractBytesObject.descr_getnewargs),
__getnewargs__ = interpindirect2app(
W_AbstractBytesObject.descr_getnewargs),
_formatter_parser = interp2app(W_BytesObject.descr_formatter_parser),
_formatter_field_name_split =
interp2app(W_BytesObject.descr_formatter_field_name_split),
)
W_BytesObject.typedef.flag_sequence_bug_compat = True
@jit.elidable
def string_escape_encode(s, quote):
    """Render *s* as a repr-style quoted literal delimited by *quote*.

    Backslash-escapes the quote character, the backslash itself, tab, CR
    and LF, and hex-escapes every other byte outside printable ASCII
    (0x20..0x7e).  Runs of plain characters are flushed with append_slice
    to avoid per-character appends.  @jit.elidable: pure function of its
    arguments.
    """
    buf = StringBuilder(len(s) + 2)
    buf.append(quote)
    startslice = 0  # start of the current run of unescaped characters
    for i in range(len(s)):
        c = s[i]
        use_bs_char = False  # character escaped with a backslash
        if c == '\\' or c == quote:
            bs_char = c
            use_bs_char = True
        elif c == '\t':
            bs_char = 't'
            use_bs_char = True
        elif c == '\r':
            bs_char = 'r'
            use_bs_char = True
        elif c == '\n':
            bs_char = 'n'
            use_bs_char = True
        elif not '\x20' <= c < '\x7f':
            # Non-printable byte: flush the pending plain run, then emit
            # a two-digit hex escape.
            n = ord(c)
            if i != startslice:
                buf.append_slice(s, startslice, i)
            startslice = i + 1
            buf.append('\\x')
            buf.append("0123456789abcdef"[n >> 4])
            buf.append("0123456789abcdef"[n & 0xF])
        if use_bs_char:
            if i != startslice:
                buf.append_slice(s, startslice, i)
            startslice = i + 1
            buf.append('\\')
            buf.append(bs_char)
    if len(s) != startslice:
        buf.append_slice(s, startslice, len(s))
    buf.append(quote)
    return buf.build()
| 14,936 | 16,955 | 91 |
ce14eda92e15aefbc025b2c490635a553b714fb5 | 1,506 | py | Python | db/database.py | akhfzz/FastAPI-Shorten-case | eb2dfe6e63182c2cf4e04078d2199a70563756e0 | [
"MIT"
] | null | null | null | db/database.py | akhfzz/FastAPI-Shorten-case | eb2dfe6e63182c2cf4e04078d2199a70563756e0 | [
"MIT"
] | null | null | null | db/database.py | akhfzz/FastAPI-Shorten-case | eb2dfe6e63182c2cf4e04078d2199a70563756e0 | [
"MIT"
] | null | null | null | from sqlalchemy import Column, Integer, String, Date
from sqlalchemy.orm import relationship
from sqlalchemy.sql.schema import ForeignKey
from configuration import Base
from datetime import * | 45.636364 | 108 | 0.729748 | from sqlalchemy import Column, Integer, String, Date
from sqlalchemy.orm import relationship
from sqlalchemy.sql.schema import ForeignKey
from configuration import Base
from datetime import *
class User(Base):
    # ORM model for an application user account.
    __tablename__ = 'user'
    id = Column(Integer, primary_key=True, autoincrement=True)
    email = Column(String(150), unique=True, nullable=False)
    nama = Column(String(100), nullable=False)  # display name ("nama" = Indonesian for name)
    username = Column(String(150), unique=True, nullable=False)
    # NOTE(review): a UNIQUE constraint on password looks unintended — two
    # users could never share the same (hashed) password value; confirm.
    password = Column(String(255), unique=True, nullable=False)
    position_job = Column(String(100), nullable=False)
    # One-to-many: URLs created by this user (see URL.user_id).
    url_table = relationship('URL', backref='url_id')
class URL(Base):
    """Shortened-URL record owned by a user."""
    __tablename__ = 'url'
    id = Column(Integer, primary_key=True, autoincrement=True)
    user_id = Column(Integer, ForeignKey('user.id', ondelete='CASCADE', onupdate='CASCADE'), nullable=False)
    url_before = Column(String(255), nullable=False)   # original long URL
    url_shorten = Column(String(255), nullable=False)  # generated short URL
    # BUG FIX: pass the callable, not its result. `default=datetime.now()`
    # is evaluated once at import time, so every row would be stamped with
    # the process start date; SQLAlchemy invokes a callable per INSERT.
    created_at = Column(Date, default=datetime.now)
    click_on = Column(Integer, nullable=True)  # visit counter
    # One-to-many: update history entries (see Detail.url_id).
    url_detail = relationship('Detail', backref='new_shorten')
class Detail(Base):
    """History record for a re-shortened/updated URL."""
    __tablename__ = 'url_update'
    id = Column(Integer, primary_key=True, autoincrement=True)
    url_id = Column(Integer, ForeignKey('url.id', ondelete='CASCADE', onupdate='CASCADE'), nullable=False)
    new_url = Column(String(255), nullable=False)  # replacement short URL
    # BUG FIX: pass the callable, not its result — `datetime.now()` would be
    # evaluated once at import time instead of per INSERT.
    created_at = Column(Date, default=datetime.now)
    click_on = Column(Integer, nullable=True)  # visit counter
69f92ecad800dba71ec28c90133f05cd43d2b219 | 2,035 | py | Python | apps/upload/views.py | plsof/tabops_api | 39f5d2fd5158ae0c22e43ab6ff7e2b07a68a62d8 | [
"MIT"
] | 1 | 2019-07-31T07:34:38.000Z | 2019-07-31T07:34:38.000Z | apps/upload/views.py | plsof/tabops_api | 39f5d2fd5158ae0c22e43ab6ff7e2b07a68a62d8 | [
"MIT"
] | 9 | 2019-12-05T00:39:29.000Z | 2022-02-10T14:13:29.000Z | apps/upload/views.py | plsof/tabops_api | 39f5d2fd5158ae0c22e43ab6ff7e2b07a68a62d8 | [
"MIT"
] | null | null | null | from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework import status
from rest_framework.permissions import IsAdminUser
from common.views import ResponseInfo, MyPageNumber
from .models import File
from .serializers import FileSerializer
| 35.086207 | 104 | 0.678133 | from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework import status
from rest_framework.permissions import IsAdminUser
from common.views import ResponseInfo, MyPageNumber
from .models import File
from .serializers import FileSerializer
class FileUploadView(APIView):
    """List uploaded files (optionally paginated) and accept new uploads.

    Responses use the project's standard envelope built by ResponseInfo.
    """
    permission_classes = [IsAdminUser]

    def __init__(self, **kwargs):
        # Pre-build the standard response envelope shared by the handlers.
        self.response_format = ResponseInfo().response
        super(FileUploadView, self).__init__(**kwargs)

    def get(self, request, format=None):
        """Return all files; paginate when a non-empty ?page= is supplied."""
        files = File.objects.all()
        serializer = FileSerializer(files, many=True)
        page = self.request.query_params.get('page', None)
        # BUG FIX: the original tested `page is not None and page is not ''`,
        # identity-comparing a str literal (interning-dependent and a
        # SyntaxWarning on CPython >= 3.8).  A truthiness test is equivalent
        # here: query params are either None or str, and '' is the only
        # falsy string.
        if page:
            page_obj = MyPageNumber()
            page_data = page_obj.paginate_queryset(queryset=serializer.data, request=request, view=self)
            self.response_format["data"] = page_data
        else:
            self.response_format["data"] = serializer.data
        self.response_format["total"] = len(serializer.data)
        self.response_format["code"] = 0
        if not serializer.data:
            self.response_format["msg"] = "List empty"
        return Response(self.response_format)

    def post(self, request, format=None):
        """Validate and persist an uploaded file.

        Returns 201 with the serialized record on success, 400 with the
        validation errors otherwise.
        """
        serializer = FileSerializer(data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        else:
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class FileDetailView(APIView):
    """Single-file endpoint: lookup by primary key and deletion."""
    permission_classes = [IsAdminUser]

    def get_object(self, pk):
        """Return the File with primary key *pk*; 404 when it does not exist."""
        from rest_framework.exceptions import NotFound
        try:
            return File.objects.get(pk=pk)
        except File.DoesNotExist:
            # BUG FIX: the original did `raise status.HTTP_404_NOT_FOUND`,
            # which raises the bare int 404 and is a TypeError at runtime.
            # NotFound produces a proper 404 response through DRF.
            raise NotFound('File %s does not exist' % pk)

    def delete(self, request, pk, format=None):
        """Delete both the stored file and its database record."""
        file = self.get_object(pk)
        file.file.delete()  # remove the physical file from storage
        file.delete()       # remove the database record
        return Response(status=status.HTTP_204_NO_CONTENT)
| 1,493 | 231 | 46 |
078e765fcfe27de2aa9ef67e4be7f01a4827155c | 569 | py | Python | drive-google-deinit.py | scivision/deprecated-google-drive-public | 33dd090a0be381abd1938ca403d91c6bf9db0b1c | [
"MIT"
] | 4 | 2017-03-19T22:58:20.000Z | 2017-12-02T14:25:53.000Z | drive-google-deinit.py | scivision/deprecated-google-drive-public | 33dd090a0be381abd1938ca403d91c6bf9db0b1c | [
"MIT"
] | 1 | 2017-04-13T09:54:29.000Z | 2017-05-11T07:17:11.000Z | drive-google-deinit.py | scivision/deprecated-google-drive-public | 33dd090a0be381abd1938ca403d91c6bf9db0b1c | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
recursive search and deinit (disconnection) for drive-google directories
"""
from pathlib import Path
from gdrivepublic import isgdrive
from subprocess import call
from argparse import ArgumentParser
p = ArgumentParser()
p.add_argument('rdir',help='root directory to search for active drive-google connections',nargs='?',default='~')
p = p.parse_args()
rdir = Path(p.rdir).expanduser()
#%%
for d in rdir.rglob('.gd'):
try:
if isgdrive(d):
call(['drive','deinit'],cwd=str(d))
except PermissionError:
pass
| 24.73913 | 112 | 0.697715 | #!/usr/bin/env python
"""
recursive search and deinit (disconnection) for drive-google directories
"""
from pathlib import Path
from gdrivepublic import isgdrive
from subprocess import call
from argparse import ArgumentParser
p = ArgumentParser()
p.add_argument('rdir',help='root directory to search for active drive-google connections',nargs='?',default='~')
p = p.parse_args()
rdir = Path(p.rdir).expanduser()
#%%
for d in rdir.rglob('.gd'):
try:
if isgdrive(d):
call(['drive','deinit'],cwd=str(d))
except PermissionError:
pass
| 0 | 0 | 0 |
dc76a9dbd6dd167c7e2d72d09a02af1f8d079b72 | 3,067 | py | Python | main/cloudfoundry_client/main/tasks_command_domain.py | subhash12/cf-python-client | c0ecbb8ec85040fc2f74b6c52e1f9a6c6c16c4b0 | [
"Apache-2.0"
] | 47 | 2017-12-17T00:54:33.000Z | 2022-02-25T09:54:52.000Z | main/cloudfoundry_client/main/tasks_command_domain.py | subhash12/cf-python-client | c0ecbb8ec85040fc2f74b6c52e1f9a6c6c16c4b0 | [
"Apache-2.0"
] | 125 | 2017-10-27T09:38:10.000Z | 2022-03-10T07:53:35.000Z | main/cloudfoundry_client/main/tasks_command_domain.py | subhash12/cf-python-client | c0ecbb8ec85040fc2f74b6c52e1f9a6c6c16c4b0 | [
"Apache-2.0"
] | 50 | 2018-01-19T07:57:21.000Z | 2022-02-14T14:47:31.000Z | import json
import os
from argparse import Namespace, _SubParsersAction
from cloudfoundry_client.client import CloudFoundryClient
from cloudfoundry_client.json_object import JsonObject
from cloudfoundry_client.main.command_domain import CommandDomain, Command
| 38.822785 | 114 | 0.600587 | import json
import os
from argparse import Namespace, _SubParsersAction
from cloudfoundry_client.client import CloudFoundryClient
from cloudfoundry_client.json_object import JsonObject
from cloudfoundry_client.main.command_domain import CommandDomain, Command
class TaskCommandDomain(CommandDomain):
    """CLI command domain for Cloud Foundry v3 tasks, with creation support and a dedicated cancel command."""

    def __init__(self):
        super(TaskCommandDomain, self).__init__(
            display_name="Tasks",
            entity_name="task",
            filter_list_parameters=["names", "app_guids", "space_guids", "organization_guids"],
            api_version="v3",
            allow_creation=True,
            allow_deletion=False,
            extra_methods=[
                (
                    self.cancel(),
                    "Cancel Task",
                )
            ],
        )

    def id(self, entity: JsonObject) -> str:
        """Identifier of a task entity (v3 entities expose it under 'guid')."""
        return entity["guid"]

    def name(self, entity: JsonObject) -> str:
        """Display name of a task entity."""
        return entity[self.name_property]

    def find_by_name(self, client: CloudFoundryClient, name: str):
        """Return the first task whose name matches `name`."""
        name_filter = "%ss" % self.name_property
        return self._get_client_domain(client).get_first(**{name_filter: name})

    def create(self) -> Command:
        """Build the task-creation command; the payload comes from a json file path or an inline json string."""
        entry = self._create_entry()

        def execute(client: CloudFoundryClient, arguments: Namespace):
            # the single positional entity argument is either a file path or raw json
            raw_entity = arguments.entity[0]
            if os.path.isfile(raw_entity):
                with open(raw_entity, "r") as f:
                    try:
                        data = json.load(f)
                    except ValueError:
                        raise ValueError("entity: file %s does not contain valid json data" % raw_entity)
            else:
                try:
                    data = json.loads(raw_entity)
                except ValueError:
                    raise ValueError("entity: must be either a valid json file path or a json object")
            print(self._get_client_domain(client).create(arguments.app_id[0], **data).json())

        def generate_parser(parser: _SubParsersAction):
            create_parser = parser.add_parser(entry)
            create_parser.add_argument("app_id", metavar="ids", type=str, nargs=1, help="The application UUID.")
            create_parser.add_argument(
                "entity",
                metavar="entities",
                type=str,
                nargs=1,
                help="Either a path of the json file containing the %s or a json object or the json %s object"
                % (self.client_domain, self.client_domain),
            )

        return Command(entry, generate_parser, execute)

    def cancel(self) -> Command:
        """Build the command that cancels a running task by UUID."""
        entry = "cancel_task"

        def execute(client: CloudFoundryClient, arguments: Namespace):
            print(self._get_client_domain(client).cancel(arguments.id[0]).json(indent=1))

        def generate_parser(parser: _SubParsersAction):
            cancel_parser = parser.add_parser(entry)
            cancel_parser.add_argument("id", metavar="ids", type=str, nargs=1, help="The task UUID")

        return Command(entry, generate_parser, execute)
b7ec571afdc19b55f0b9873d557e9ca53ed0a12f | 977 | py | Python | data/generateQuickdrawDataset.py | manas-avi/detection-2016-nipsws | b25669dbf1c5d3d1a79638f928c989aca1c32622 | [
"MIT"
] | null | null | null | data/generateQuickdrawDataset.py | manas-avi/detection-2016-nipsws | b25669dbf1c5d3d1a79638f928c989aca1c32622 | [
"MIT"
] | null | null | null | data/generateQuickdrawDataset.py | manas-avi/detection-2016-nipsws | b25669dbf1c5d3d1a79638f928c989aca1c32622 | [
"MIT"
] | 2 | 2018-12-02T08:39:24.000Z | 2018-12-08T15:55:54.000Z | from numpy import random
import gc
import numpy as np
import pdb
import cv2
import os
import sys
import matplotlib.pyplot as plt

# Paste each 32x32 binarized quickdraw sketch at 5 random offsets inside a blank 128x128 canvas,
# writing the results into test/ (first fifth of the listing) and train/ splits.
dataset_name = sys.argv[1]
data_dir = './quickdraw/' + dataset_name + '/r128/'
save_dir = './quickdraw/' + dataset_name + '/obj-in-image/'
for split in ('test/', 'train/'):
    os.makedirs(save_dir + split, exist_ok=True)

list_files = os.listdir(data_dir)
test_num = int(len(list_files) / 5)
for count, fname in enumerate(list_files):
    mode = 'test/' if count < test_num else 'train/'
    sketch = cv2.imread(data_dir + fname, 0)  # grayscale
    sketch = cv2.resize(sketch, (32, 32))
    _, sketch = cv2.threshold(sketch, 127, 255, cv2.THRESH_BINARY)
    stem, ext = os.path.splitext(fname)
    for i in range(5):
        canvas = np.zeros((128, 128))
        tx = random.randint(0, 64)
        ty = random.randint(0, 64)
        canvas[tx:tx + 32, ty:ty + 32] = sketch
        cv2.imwrite(save_dir + mode + stem + '_' + str(i) + ext, canvas)
    # pdb.set_trace()
| 25.051282 | 68 | 0.684749 | from numpy import random
import gc
import numpy as np
import pdb
import cv2
import os
import sys
import matplotlib.pyplot as plt

# Paste each 32x32 binarized quickdraw sketch at 5 random offsets inside a blank 128x128 canvas,
# writing the results into test/ (first fifth of the listing) and train/ splits.
dataset_name = sys.argv[1]
data_dir = './quickdraw/' + dataset_name + '/r128/'
save_dir = './quickdraw/' + dataset_name + '/obj-in-image/'
for split in ('test/', 'train/'):
    os.makedirs(save_dir + split, exist_ok=True)

list_files = os.listdir(data_dir)
test_num = int(len(list_files) / 5)
for count, fname in enumerate(list_files):
    mode = 'test/' if count < test_num else 'train/'
    sketch = cv2.imread(data_dir + fname, 0)  # grayscale
    sketch = cv2.resize(sketch, (32, 32))
    _, sketch = cv2.threshold(sketch, 127, 255, cv2.THRESH_BINARY)
    stem, ext = os.path.splitext(fname)
    for i in range(5):
        canvas = np.zeros((128, 128))
        tx = random.randint(0, 64)
        ty = random.randint(0, 64)
        canvas[tx:tx + 32, ty:ty + 32] = sketch
        cv2.imwrite(save_dir + mode + stem + '_' + str(i) + ext, canvas)
    # pdb.set_trace()
| 0 | 0 | 0 |
8ef7c1aa85b2e0042a2dcefa5ce7a98ed26ddaef | 49,547 | py | Python | pytest_cases/main_fixtures.py | keszybz/python-pytest-cases | 424d35108228716d7ea0276f6a89ef72181dd919 | [
"BSD-3-Clause"
] | null | null | null | pytest_cases/main_fixtures.py | keszybz/python-pytest-cases | 424d35108228716d7ea0276f6a89ef72181dd919 | [
"BSD-3-Clause"
] | null | null | null | pytest_cases/main_fixtures.py | keszybz/python-pytest-cases | 424d35108228716d7ea0276f6a89ef72181dd919 | [
"BSD-3-Clause"
] | null | null | null | # Use true division operator always even in old python 2.x (used in `_get_case_getter_s`)
from __future__ import division
from distutils.version import LooseVersion
from enum import Enum
from inspect import isgeneratorfunction, getmodule, currentframe
from itertools import product
from warnings import warn
from decopatch import function_decorator, DECORATED
from makefun import with_signature, add_signature_parameters, remove_signature_parameters, wraps
import pytest
try: # python 3.3+
from inspect import signature, Parameter
except ImportError:
from funcsigs import signature, Parameter
try:
from typing import Type
except ImportError:
# on old versions of typing module the above does not work. Since our code below has all Type hints quoted it's ok
pass
try: # type hints, python 3+
from typing import Callable, Union, Optional, Any, Tuple, List, Dict, Iterable
from pytest_cases.case_funcs import CaseData, ExpectedError
from types import ModuleType
# Type hint for the simple functions
CaseFunc = Callable[[], CaseData]
# Type hint for generator functions
GeneratedCaseFunc = Callable[[Any], CaseData]
except ImportError:
pass
from pytest_cases.common import yield_fixture, get_pytest_parametrize_marks, get_test_ids_from_param_values, \
make_marked_parameter_value, extract_parameterset_info, get_fixture_name, get_param_argnames_as_list, \
get_fixture_scope, remove_duplicates
from pytest_cases.main_params import cases_data
def unpack_fixture(argnames, fixture):
    """
    Creates one fixture per name in `argnames`, each returning the corresponding element unpacked from the
    source `fixture`. For instance if `fixture` yields a 2-tuple, `argnames="a,b"` creates fixtures `a` and
    `b` returning its first and second element. The new fixtures are registered in the caller's module; if
    you also assign them to variables, use the same names, e.g. `a, b = unpack_fixture('a,b', c)`.

    ```python
    import pytest
    from pytest_cases import unpack_fixture, pytest_fixture_plus

    @pytest_fixture_plus
    @pytest.mark.parametrize("o", ['hello', 'world'])
    def c(o):
        return o, o[0]

    a, b = unpack_fixture("a,b", c)

    def test_function(a, b):
        assert a[0] == b
    ```

    :param argnames: same as `@pytest.mark.parametrize` `argnames`.
    :param fixture: a fixture name string or a fixture symbol. A symbol keeps its scope; a name gets
        scope='function'. In practice the performance loss from using `function` rather than a higher scope
        is negligible since the created fixtures' body is a one-liner.
    :return: the created fixtures.
    """
    # the created fixtures must land in the module of whoever called us
    target_module = get_caller_module()
    return _unpack_fixture(target_module, argnames, fixture)
def _unpack_fixture(caller_module, argnames, fixture):
    """
    Implementation of `unpack_fixture`: creates one sub-fixture per name in `argnames`, each returning one
    element of `fixture`, and registers them in `caller_module`.

    :param caller_module: the module object in which the created fixtures are registered
    :param argnames: coma-separated names (or iterable of names) for the fixtures to create
    :param fixture: the source fixture: a fixture symbol (its scope is reused) or a fixture name string
        (scope defaults to 'function')
    :return: the list of created fixture functions
    """
    # unpack fixture names to create if needed
    argnames_lst = get_param_argnames_as_list(argnames)

    # possibly get the source fixture name if the fixture symbol was provided
    if not isinstance(fixture, str):
        source_f_name = get_fixture_name(fixture)
        scope = get_fixture_scope(fixture)
    else:
        source_f_name = fixture
        # we dont have a clue about the real scope, so lets use function scope
        scope = 'function'

    # finally create the sub-fixtures
    created_fixtures = []
    for value_idx, argname in enumerate(argnames_lst):
        # create the fixture
        # To fix late binding issue with `value_idx` we add an extra layer of scope: a factory function
        # See https://stackoverflow.com/questions/3431676/creating-functions-in-a-loop

        # create it
        # NOTE(review): the `_create_fixture` factory (presumably closing over `source_f_name` and `scope`,
        # which are otherwise unused here) is not visible in this excerpt — confirm against the full source.
        fix = _create_fixture(value_idx)

        # add to module
        check_name_available(caller_module, argname, if_name_exists=WARN, caller=unpack_fixture)
        setattr(caller_module, argname, fix)

        # collect to return the whole list eventually
        created_fixtures.append(fix)

    return created_fixtures
def param_fixture(argname, argvalues, autouse=False, ids=None, scope="function", **kwargs):
    """
    Same as `param_fixtures` but restricted to a single parameter name, so the result can be captured in a
    single variable.

    ```python
    import pytest
    from pytest_cases import param_fixtures, param_fixture

    # create a single parameter fixture
    my_parameter = param_fixture("my_parameter", [1, 2, 3, 4])

    @pytest.fixture
    def fixture_uses_param(my_parameter):
        ...

    def test_uses_param(my_parameter, fixture_uses_param):
        ...
    ```

    :param argname: see fixture `name`
    :param argvalues: see fixture `params`
    :param autouse: see fixture `autouse`
    :param ids: see fixture `ids`
    :param scope: see fixture `scope`
    :param kwargs: any other argument for 'fixture'
    :return: the create fixture
    """
    # guard clauses: this single-name alias rejects coma-separated and blank names
    if "," in argname:
        raise ValueError("`param_fixture` is an alias for `param_fixtures` that can only be used for a single "
                         "parameter name. Use `param_fixtures` instead - but note that it creates several fixtures.")
    if not argname.replace(' ', ''):
        raise ValueError("empty argname")

    host_module = get_caller_module()
    return _param_fixture(host_module, argname, argvalues, scope=scope, autouse=autouse, ids=ids, **kwargs)
def _param_fixture(caller_module, argname, argvalues, autouse=False, ids=None, scope="function", **kwargs):
    """ Internal method shared with param_fixture and param_fixtures """

    # create the fixture
    # NOTE(review): `__param_fixture` (the fixture body being decorated, presumably returning the received
    # param) is not visible in this excerpt — confirm it is defined in the enclosing scope.
    fix = pytest_fixture_plus(name=argname, scope=scope, autouse=autouse, params=argvalues, ids=ids,
                              **kwargs)(__param_fixture)

    # Dynamically add fixture to caller's module as explained in https://github.com/pytest-dev/pytest/issues/2424
    check_name_available(caller_module, argname, if_name_exists=WARN, caller=param_fixture)
    setattr(caller_module, argname, fix)
    return fix
class ExistingFixtureNameError(ValueError):
    """
    Raised by `add_fixture_to_callers_module` when a fixture already exists in a module
    """
    # NOTE(review): constructor/formatting members are not visible in this excerpt; it is instantiated
    # below as ExistingFixtureNameError(module, name, caller).


# Policy codes for `check_name_available(if_name_exists=...)`:
RAISE = 0   # raise ExistingFixtureNameError on conflict
WARN = 1    # emit a warning, keep the name (existing symbol is overridden)
CHANGE = 2  # return a new non-conflicting name built with an integer suffix
def check_name_available(module,
                         name,                   # type: str
                         if_name_exists=RAISE,   # type: int
                         caller=None,            # type: Callable[[Any], Any]
                         ):
    """
    Checks whether `name` already exists in `module` and applies the `if_name_exists` policy:
    RAISE raises an `ExistingFixtureNameError`, WARN emits a warning and keeps the name, CHANGE returns a
    new non-conflicting name built by appending an integer suffix.

    :param module: the module in which `name` should be available
    :param name: the symbol name to check
    :param if_name_exists: one of RAISE (default), WARN, CHANGE
    :param caller: the function requesting the check, included in the error/warning message
    :return: a name that might be different if policy was CHANGE
    """
    if name not in dir(module):
        # no conflict: the requested name is fine as-is
        return name

    if caller is None:
        caller = ''

    # Name already exists: act according to policy
    if if_name_exists is RAISE:
        raise ExistingFixtureNameError(module, name, caller)

    if if_name_exists is WARN:
        warn("%s Overriding symbol %s in module %s" % (caller, name, module))
        return name

    if if_name_exists is CHANGE:
        # find a non-used name in that module by increasing the suffix until it is free
        suffix = 1
        candidate = name + '_%s' % suffix
        while candidate in dir(module):
            suffix += 1
            candidate = name + '_%s' % suffix
        return candidate

    raise ValueError("invalid value for `if_name_exists`: %s" % if_name_exists)
def param_fixtures(argnames, argvalues, autouse=False, ids=None, scope="function", **kwargs):
    """
    Creates one or several "parameters" fixtures - depending on the number or coma-separated names in `argnames`. The
    created fixtures are automatically registered into the callers' module, but you may wish to assign them to
    variables for convenience. In that case make sure that you use the same names, e.g.
    `p, q = param_fixtures('p,q', [(0, 1), (2, 3)])`.

    Note that the (argnames, argvalues, ids) signature is similar to `@pytest.mark.parametrize` for consistency,
    see https://docs.pytest.org/en/latest/reference.html?highlight=pytest.param#pytest-mark-parametrize

    ```python
    import pytest
    from pytest_cases import param_fixtures, param_fixture

    # create a 2-tuple parameter fixture
    arg1, arg2 = param_fixtures("arg1, arg2", [(1, 2), (3, 4)])

    @pytest.fixture
    def fixture_uses_param2(arg2):
        ...

    def test_uses_param2(arg1, arg2, fixture_uses_param2):
        ...
    ```

    :param argnames: same as `@pytest.mark.parametrize` `argnames`.
    :param argvalues: same as `@pytest.mark.parametrize` `argvalues`.
    :param autouse: see fixture `autouse`
    :param ids: same as `@pytest.mark.parametrize` `ids`
    :param scope: see fixture `scope`
    :param kwargs: any other argument for the created 'fixtures'
    :return: the created fixtures
    """
    created_fixtures = []
    argnames_lst = get_param_argnames_as_list(argnames)

    caller_module = get_caller_module()

    # a single name: delegate to the single-parameter implementation
    if len(argnames_lst) < 2:
        return _param_fixture(caller_module, argnames, argvalues, autouse=autouse, ids=ids, scope=scope, **kwargs)

    # create the root fixture that will contain all parameter values
    # note: we sort the list so that the first in alphabetical order appears first. Indeed pytest uses this order.
    root_fixture_name = "%s__param_fixtures_root" % ('_'.join(sorted(argnames_lst)))

    # Dynamically add fixture to caller's module as explained in https://github.com/pytest-dev/pytest/issues/2424
    root_fixture_name = check_name_available(caller_module, root_fixture_name, if_name_exists=CHANGE, caller=param_fixtures)

    @pytest_fixture_plus(name=root_fixture_name, autouse=autouse, scope=scope, **kwargs)
    @pytest.mark.parametrize(argnames, argvalues, ids=ids)
    @with_signature("(%s)" % argnames)
    # NOTE(review): the decorated `_root_fixture` definition (the target of the three decorators above)
    # is missing from this excerpt — confirm against the full source before relying on this code.

    # Override once again the symbol with the correct contents
    setattr(caller_module, root_fixture_name, _root_fixture)

    # finally create the sub-fixtures
    for param_idx, argname in enumerate(argnames_lst):
        # create the fixture
        # To fix late binding issue with `param_idx` we add an extra layer of scope: a factory function
        # See https://stackoverflow.com/questions/3431676/creating-functions-in-a-loop

        # create it
        # NOTE(review): the `_create_fixture` factory is not visible in this excerpt — TODO confirm.
        fix = _create_fixture(param_idx)

        # add to module
        check_name_available(caller_module, argname, if_name_exists=WARN, caller=param_fixtures)
        setattr(caller_module, argname, fix)

        # collect to return the whole list eventually
        created_fixtures.append(fix)

    return created_fixtures
@function_decorator
def cases_fixture(cases=None,                    # type: Union[Callable[[Any], Any], Iterable[Callable[[Any], Any]]]
                  module=None,                   # type: Union[ModuleType, Iterable[ModuleType]]
                  case_data_argname='case_data', # type: str
                  has_tag=None,                  # type: Any
                  filter=None,                   # type: Callable[[List[Any]], bool]
                  f=DECORATED,
                  **kwargs
                  ):
    """
    DEPRECATED - use the double annotation `@pytest_fixture_plus` + `@cases_data` instead:

    ```python
    @pytest_fixture_plus
    @cases_data(module=xxx)
    def my_fixture(case_data)
    ```

    Turns the decorated function into a fixture automatically parametrized with all cases listed in `module`
    (or listed explicitly in `cases`). With a non-None `module` this is equivalent to extracting all cases
    with `get_all_cases(module=...)` and decorating your function with `@pytest.fixture(params=cases)`, the
    fixture body reading `request.param` into `case_data`.

    Parameters (cases, module, has_tag, filter) can be used to perform explicit listing, or filtering. See
    `get_all_cases()` for details.

    :param cases: a single case or a hardcoded list of cases to use. Only one of `cases` and `module` should be set.
    :param module: a module or a hardcoded list of modules to use. You may use `THIS_MODULE` to indicate that the
        module is the current one. Only one of `cases` and `module` should be set.
    :param case_data_argname: the optional name of the function parameter that should receive the `CaseDataGetter`
        object. Default is 'case_data'.
    :param has_tag: an optional tag used to filter the cases. Only cases with the given tag will be selected.
    :param filter: an optional filtering function taking as an input a list of tags associated with a case, and
        returning a boolean indicating if the case should be selected. If both `has_tag` and `filter` are set,
        both will be applied in sequence.
    :return: the created fixture
    """
    # first parametrize (equivalent to applying @cases_data, i.e. a @pytest.mark.parametrize)...
    parametrize = cases_data(cases=cases, module=module, case_data_argname=case_data_argname,
                             has_tag=has_tag, filter=filter)
    # ...then turn the parametrized function into a fixture
    return pytest_fixture_plus(**kwargs)(parametrize(f))
@function_decorator
def pytest_fixture_plus(scope="function",
                        autouse=False,
                        name=None,
                        unpack_into=None,
                        fixture_func=DECORATED,
                        **kwargs):
    """ decorator to mark a fixture factory function.

    Identical to `@pytest.fixture` decorator, except that

     - it supports multi-parametrization with `@pytest.mark.parametrize` as requested in
       https://github.com/pytest-dev/pytest/issues/3960. As a consequence it does not support the `params` and `ids`
       arguments anymore.

     - it supports a new argument `unpack_into` where you can provide names for fixtures where to unpack this fixture
       into.

    :param scope: the scope for which this fixture is shared, one of
        "function" (default), "class", "module" or "session".
    :param autouse: if True, the fixture func is activated for all tests that
        can see it. If False (the default) then an explicit
        reference is needed to activate the fixture.
    :param name: the name of the fixture. This defaults to the name of the
        decorated function. Note: If a fixture is used in the same module in
        which it is defined, the function name of the fixture will be
        shadowed by the function arg that requests the fixture; one way
        to resolve this is to name the decorated function
        ``fixture_<fixturename>`` and then use
        ``@pytest.fixture(name='<fixturename>')``.
    :param unpack_into: an optional iterable of names, or string containing coma-separated names, for additional
        fixtures to create to represent parts of this fixture. See `unpack_fixture` for details.
    :param kwargs: other keyword arguments for `@pytest.fixture`
    """
    if name is not None:
        # Compatibility for the 'name' argument
        if LooseVersion(pytest.__version__) >= LooseVersion('3.0.0'):
            # pytest version supports "name" keyword argument
            kwargs['name'] = name
        elif name is not None:
            # 'name' argument is not supported in this old version, use the __name__ trick.
            fixture_func.__name__ = name

    # if unpacking is requested, do it first
    if unpack_into is not None:
        # get the future fixture name if needed
        if name is None:
            name = fixture_func.__name__

        # get caller module to create the symbols
        caller_module = get_caller_module(frame_offset=2)
        _unpack_fixture(caller_module, unpack_into, name)

    # (1) Collect all @pytest.mark.parametrize markers (including those created by usage of @cases_data)
    parametrizer_marks = get_pytest_parametrize_marks(fixture_func)
    if len(parametrizer_marks) < 1:
        # no parametrization: delegate to the simpler creation routine
        return _create_fixture_without_marks(fixture_func, scope, autouse, **kwargs)
    else:
        if 'params' in kwargs:
            raise ValueError(
                "With `pytest_fixture_plus` you cannot mix usage of the keyword argument `params` and of "
                "the pytest.mark.parametrize marks")

    # (2) create the huge "param" containing all params combined
    # --loop (use the same order to get it right)
    params_names_or_name_combinations = []
    params_values = []
    params_ids = []
    params_marks = []
    for pmark in parametrizer_marks:
        # check number of parameter names in this parameterset
        if len(pmark.param_names) < 1:
            raise ValueError("Fixture function '%s' decorated with '@pytest_fixture_plus' has an empty parameter "
                             "name in a @pytest.mark.parametrize mark")

        # remember
        params_names_or_name_combinations.append(pmark.param_names)

        # extract all parameters that have a specific configuration (pytest.param())
        _pids, _pmarks, _pvalues = extract_parameterset_info(pmark.param_names, pmark)

        # Create the proper id for each test
        if pmark.param_ids is not None:
            # overridden at global pytest.mark.parametrize level - this takes precedence.
            try:  # an explicit list of ids ?
                paramids = list(pmark.param_ids)
            except TypeError:  # a callable to apply on the values
                paramids = list(pmark.param_ids(v) for v in _pvalues)
        else:
            # default: values-based...
            paramids = get_test_ids_from_param_values(pmark.param_names, _pvalues)

        # ...but local pytest.param takes precedence
        for i, _id in enumerate(_pids):
            if _id is not None:
                paramids[i] = _id

        # Finally store the ids, marks, and values for this parameterset
        params_ids.append(paramids)
        params_marks.append(tuple(_pmarks))
        params_values.append(tuple(_pvalues))

    # (3) generate the ids and values, possibly reapplying marks
    if len(params_names_or_name_combinations) == 1:
        # we can simplify - that will be more readable
        final_ids = params_ids[0]
        final_marks = params_marks[0]
        final_values = list(params_values[0])

        # reapply the marks
        for i, marks in enumerate(final_marks):
            if marks is not None:
                final_values[i] = make_marked_parameter_value(final_values[i], marks=marks)
    else:
        # several @parametrize marks: cartesian product of all parametersets
        final_values = list(product(*params_values))
        final_ids = get_test_ids_from_param_values(params_names_or_name_combinations, product(*params_ids))
        final_marks = tuple(product(*params_marks))

        # reapply the marks
        for i, marks in enumerate(final_marks):
            ms = [m for mm in marks if mm is not None for m in mm]
            if len(ms) > 0:
                final_values[i] = make_marked_parameter_value(final_values[i], marks=ms)

    if len(final_values) != len(final_ids):
        raise ValueError("Internal error related to fixture parametrization- please report")

    # (4) wrap the fixture function so as to remove the parameter names and add 'request' if needed
    all_param_names = tuple(v for l in params_names_or_name_combinations for v in l)

    # --create the new signature that we want to expose to pytest
    old_sig = signature(fixture_func)
    for p in all_param_names:
        if p not in old_sig.parameters:
            raise ValueError("parameter '%s' not found in fixture signature '%s%s'"
                             "" % (p, fixture_func.__name__, old_sig))
    new_sig = remove_signature_parameters(old_sig, *all_param_names)
    # add request if needed
    func_needs_request = 'request' in old_sig.parameters
    if not func_needs_request:
        new_sig = add_signature_parameters(new_sig, first=Parameter('request', kind=Parameter.POSITIONAL_OR_KEYWORD))

    # --common routine used below. Fills kwargs with the appropriate names and values from fixture_params
    # NOTE(review): that common routine's definition is missing from this excerpt — confirm against the
    # full source.

    # --Finally create the fixture function, a wrapper of user-provided fixture with the new signature
    if not isgeneratorfunction(fixture_func):
        # normal function with return statement
        @wraps(fixture_func, new_sig=new_sig)
        # NOTE(review): the decorated `wrapped_fixture_func` definition is missing from this excerpt.

        # transform the created wrapper into a fixture
        fixture_decorator = pytest.fixture(scope=scope, params=final_values, autouse=autouse, ids=final_ids, **kwargs)
        return fixture_decorator(wrapped_fixture_func)

    else:
        # generator function (with a yield statement)
        @wraps(fixture_func, new_sig=new_sig)
        # NOTE(review): the decorated `wrapped_fixture_func` definition is missing from this excerpt.

        # transform the created wrapper into a fixture
        fixture_decorator = yield_fixture(scope=scope, params=final_values, autouse=autouse, ids=final_ids, **kwargs)
        return fixture_decorator(wrapped_fixture_func)
def _create_fixture_without_marks(fixture_func, scope, autouse, **kwargs):
    """
    creates a fixture for decorated fixture function `fixture_func`.

    :param fixture_func: the decorated fixture factory function (no @pytest.mark.parametrize marks)
    :param scope: the pytest fixture scope
    :param autouse: the pytest fixture autouse flag
    :param kwargs: other keyword arguments forwarded to `@pytest.fixture`
    :return: the created fixture
    """
    # IMPORTANT: even if 'params' is not in kwargs, the fixture
    # can be used in a fixture union and therefore a param will be received
    # on some calls (and the fixture will be called several times - only once for real)
    #  - we have to handle the NOT_USED.

    # --create a wrapper where we will be able to auto-detect
    # TODO we could put this in a dedicated wrapper 'ignore_unsused'..

    old_sig = signature(fixture_func)
    # add request if needed
    func_needs_request = 'request' in old_sig.parameters
    if not func_needs_request:
        new_sig = add_signature_parameters(old_sig,
                                           first=Parameter('request', kind=Parameter.POSITIONAL_OR_KEYWORD))
    else:
        new_sig = old_sig
    if not isgeneratorfunction(fixture_func):
        # normal function with return statement
        @wraps(fixture_func, new_sig=new_sig)
        # NOTE(review): the decorated `wrapped_fixture_func` definition is missing from this excerpt —
        # confirm against the full source.

        # transform the created wrapper into a fixture
        fixture_decorator = pytest.fixture(scope=scope, autouse=autouse, **kwargs)
        return fixture_decorator(wrapped_fixture_func)

    else:
        # generator function (with a yield statement)
        @wraps(fixture_func, new_sig=new_sig)
        # NOTE(review): the decorated `wrapped_fixture_func` definition is missing from this excerpt —
        # confirm against the full source.

        # transform the created wrapper into a fixture
        fixture_decorator = yield_fixture(scope=scope, autouse=autouse, **kwargs)
        return fixture_decorator(wrapped_fixture_func)
# Sentinel injected as the fixture 'param' when the fixture is not active in the current union branch;
# tested via `is_used_request` below. NOTE(review): the `_NotUsed` class definition is not visible in
# this excerpt — confirm against the full source.
NOT_USED = _NotUsed()
"""Object representing a fixture value when the fixture is not used"""
class UnionFixtureAlternative(object):
    """A special class that should be used to wrap a fixture name"""
    # NOTE(review): the constructor and the static helper decorated below are missing from this excerpt
    # (the trailing `@staticmethod` has no target here) — confirm against the full source. Instances are
    # built below as UnionFixtureAlternative(fixture_name, idstyle).

    # def __str__(self):
    # that is maybe too dangerous...
    # return self.fixture_name

    @staticmethod
class IdStyle(Enum):
    """
    The enum defining all possible id styles.
    """
    none = None            # leave test ids unchanged
    explicit = 'explicit'  # rendered as "<union_name>_is_<id>" by apply_id_style (readable)
    compact = 'compact'    # rendered as "U<id>" by apply_id_style (short marker)
def apply_id_style(id, union_fixture_name, idstyle):
    """
    Returns `id` decorated according to the given `idstyle`.
    See https://github.com/smarie/python-pytest-cases/issues/41

    :param id: the raw test id
    :param union_fixture_name: the name of the union fixture, used by the 'explicit' style
    :param idstyle: an `IdStyle` member
    :return: the possibly-decorated id
    :raises ValueError: if `idstyle` is not a known `IdStyle` member
    """
    if idstyle is IdStyle.none:
        # leave the id untouched
        return id
    if idstyle is IdStyle.explicit:
        # readable form: "<union>_is_<id>"
        return "%s_is_%s" % (union_fixture_name, id)
    if idstyle is IdStyle.compact:
        # short marker form: "U<id>"
        return "U%s" % id
    raise ValueError("Invalid id style")
class InvalidParamsList(Exception):
    """
    Exception raised when users attempt to provide a non-iterable `argvalues` in pytest parametrize.

    See https://docs.pytest.org/en/latest/reference.html#pytest-mark-parametrize-ref
    """
    # slot for the offending argvalues object. NOTE(review): the constructor storing it is not visible
    # in this excerpt; the class is raised below as InvalidParamsList(params).
    __slots__ = 'params',
def is_fixture_union_params(params):
    """
    Internal helper to quickly check if a bunch of parameters correspond to a union fixture,
    i.e. a non-empty parameter list whose first element is a `UnionFixtureAlternative`.

    :param params: the candidate parameter list
    :return: True if `params` looks like union-fixture parameters
    :raises InvalidParamsList: if `params` is not a valid (sized, indexable) parameter list
    """
    try:
        first_param = params[0] if len(params) >= 1 else None
    except TypeError:
        # not sized/indexable: not a valid pytest parametrize argvalues
        raise InvalidParamsList(params)
    return isinstance(first_param, UnionFixtureAlternative)
def is_used_request(request):
    """
    Internal helper telling whether `request` corresponds to an active fixture use. Inactive fixtures
    happen when a fixture is not used in the current branch of a UNION fixture (they then receive the
    NOT_USED sentinel as their param). This helper is used in all fixtures created in this module.

    :param request: the pytest request object
    :return: True unless the request carries the NOT_USED sentinel as its param
    """
    # a request without a 'param' attribute counts as used: only union-driven calls carry NOT_USED
    received_param = getattr(request, 'param', None)
    return received_param is not NOT_USED
def fixture_union(name,
                  fixtures,
                  scope="function",
                  idstyle='explicit',
                  ids=fixture_alternative_to_str,
                  unpack_into=None,
                  autouse=False,
                  **kwargs):
    """
    Creates a fixture that successively takes all values of the provided `fixtures`, in order. The new
    fixture is automatically registered into the caller's module; if you also capture it in a variable,
    use the same name, e.g. `a = fixture_union('a', ['b', 'c'])`.

    The style of test ids corresponding to the union alternatives is controlled by `idstyle`:

     - `'explicit'` (default) favors readability,
     - `'compact'` adds a small mark so that at least one sees which parameters are union parameters and
       which others are normal parameters,
     - `None` does not change the ids.

    :param name: the name of the fixture to create
    :param fixtures: an array-like containing fixture names and/or fixture symbols
    :param scope: the scope of the union. Since the union depends on the sub-fixtures, it should be smaller
        than the smallest scope of fixtures referenced.
    :param idstyle: one of `'explicit'` (default), `'compact'`, or `None`.
    :param ids: as in pytest. The default value returns the correct fixture
    :param unpack_into: an optional iterable of names, or string containing coma-separated names, for
        additional fixtures to create to represent parts of this fixture. See `unpack_fixture` for details.
    :param autouse: as in pytest
    :param kwargs: other pytest fixture options. They might not be supported correctly.
    :return: the new fixture (also registered in the caller's module; if you capture it in a symbol, make
        sure you use the same name).
    """
    # register the union in the module of whoever called us
    host_module = get_caller_module()
    return _fixture_union(host_module, name, fixtures,
                          idstyle=idstyle, scope=scope, ids=ids,
                          unpack_into=unpack_into, autouse=autouse, **kwargs)
def _fixture_union(caller_module, name, fixtures, idstyle, scope="function", ids=fixture_alternative_to_str,
                   unpack_into=None, autouse=False, **kwargs):
    """
    Internal implementation for fixture_union

    :param caller_module: the module in which the union fixture is registered
    :param name: the name of the union fixture to create
    :param fixtures: tuple/set/list of fixture names and/or fixture symbols
    :param idstyle: an `IdStyle` member or a value accepted by the `IdStyle` constructor
    :param scope: the pytest fixture scope
    :param ids: as in pytest
    :param unpack_into: optional names of sub-fixtures to unpack the union into
    :param autouse: as in pytest
    :param kwargs: other pytest fixture options
    :return: the created fixture
    """
    # test the `fixtures` argument to avoid common mistakes
    if not isinstance(fixtures, (tuple, set, list)):
        raise TypeError("fixture_union: the `fixtures` argument should be a tuple, set or list")

    # validate the idstyle
    idstyle = IdStyle(idstyle)

    # first get all required fixture names
    f_names = []
    for f in fixtures:
        # possibly get the fixture name if the fixture symbol was provided
        f_names.append(get_fixture_name(f) if not isinstance(f, str) else f)

    if len(f_names) < 1:
        raise ValueError("Empty fixture unions are not permitted")

    # then generate the body of our union fixture. It will require all of its dependent fixtures and receive as
    # a parameter the name of the fixture to use
    @with_signature("(%s, request)" % ', '.join(f_names))
    # NOTE(review): the decorated `_new_fixture` definition is missing from this excerpt — confirm
    # against the full source.
    _new_fixture.__name__ = name

    # finally create the fixture per se.
    # WARNING we do not use pytest.fixture but pytest_fixture_plus so that NOT_USED is discarded
    f_decorator = pytest_fixture_plus(scope=scope,
                                      params=[UnionFixtureAlternative(_name, idstyle) for _name in f_names],
                                      autouse=autouse, ids=ids, **kwargs)
    fix = f_decorator(_new_fixture)

    # Dynamically add fixture to caller's module as explained in https://github.com/pytest-dev/pytest/issues/2424
    check_name_available(caller_module, name, if_name_exists=WARN, caller=param_fixture)
    setattr(caller_module, name, fix)

    # if unpacking is requested, do it here
    if unpack_into is not None:
        _unpack_fixture(caller_module, argnames=unpack_into, fixture=name)

    return fix
def _fixture_product(caller_module, name, fixtures_or_values, fixture_positions,
                     scope="function", ids=fixture_alternative_to_str,
                     unpack_into=None, autouse=False, **kwargs):
    """
    Internal implementation for fixture products created by pytest parametrize plus.

    :param caller_module: the module in which the product fixture is registered
    :param name: the name of the product fixture to create
    :param fixtures_or_values: tuple/set/list mixing plain values and fixture names/symbols
    :param fixture_positions: the indices in `fixtures_or_values` that are fixtures (not plain values)
    :param scope: the pytest fixture scope
    :param ids: as in pytest
    :param unpack_into: optional names of sub-fixtures to unpack the product into
    :param autouse: as in pytest
    :param kwargs: other pytest fixture options
    :return: the created fixture
    """
    # test the `fixtures` argument to avoid common mistakes
    if not isinstance(fixtures_or_values, (tuple, set, list)):
        raise TypeError("fixture_product: the `fixtures_or_values` argument should be a tuple, set or list")

    _tuple_size = len(fixtures_or_values)

    # first get all required fixture names
    f_names = [None] * _tuple_size
    for f_pos in fixture_positions:
        # possibly get the fixture name if the fixture symbol was provided
        f = fixtures_or_values[f_pos]
        # and remember the position in the tuple
        f_names[f_pos] = get_fixture_name(f) if not isinstance(f, str) else f

    # remove duplicates by making it an ordered set
    all_names = remove_duplicates((n for n in f_names if n is not None))
    if len(all_names) < 1:
        raise ValueError("Empty fixture products are not permitted")

    # then generate the body of our product fixture. It will require all of its dependent fixtures
    @with_signature("(%s)" % ', '.join(all_names))
    # NOTE(review): the decorated `_new_fixture` definition is missing from this excerpt — confirm
    # against the full source.
    _new_fixture.__name__ = name

    # finally create the fixture per se.
    # WARNING we do not use pytest.fixture but pytest_fixture_plus so that NOT_USED is discarded
    f_decorator = pytest_fixture_plus(scope=scope, autouse=autouse, ids=ids, **kwargs)
    fix = f_decorator(_new_fixture)

    # Dynamically add fixture to caller's module as explained in https://github.com/pytest-dev/pytest/issues/2424
    check_name_available(caller_module, name, if_name_exists=WARN, caller=param_fixture)
    setattr(caller_module, name, fix)

    # if unpacking is requested, do it here
    if unpack_into is not None:
        _unpack_fixture(caller_module, argnames=unpack_into, fixture=name)

    return fix
class fixture_ref:
    """
    A reference to a fixture, to be used in `pytest_parametrize_plus`.
    You can create it from a fixture name or a fixture object (function).
    """
    __slots__ = 'fixture',

    def __init__(self, fixture):
        """
        BUGFIX: this constructor was missing, so `fixture_ref(<fixture>)` raised a TypeError
        (object.__init__ takes no argument) while consumers read `<ref>.fixture`.
        :param fixture: the referenced fixture: either its name (str) or the fixture symbol itself.
        """
        self.fixture = fixture
def pytest_parametrize_plus(argnames, argvalues, indirect=False, ids=None, scope=None, **kwargs):
    """
    Equivalent to `@pytest.mark.parametrize` but also supports the fact that in argvalues one can include references to
    fixtures with `fixture_ref(<fixture>)` where <fixture> can be the fixture name or fixture function.
    When such a fixture reference is detected in the argvalues, a new function-scope fixture will be created with a
    unique name, and the test function will be wrapped so as to be injected with the correct parameters. Special test
    ids will be created to illustrate the switching between normal parameters and fixtures.
    :param argnames: same as in `@pytest.mark.parametrize`
    :param argvalues: same as in `@pytest.mark.parametrize`, except that values can also be `fixture_ref(<fixture>)`
    :param indirect: same as in `@pytest.mark.parametrize`
    :param ids: same as in `@pytest.mark.parametrize`
    :param scope: same as in `@pytest.mark.parametrize`
    :param kwargs: any other keyword argument for `@pytest.mark.parametrize`
    :return: a test function decorator
    """
    # make sure that we do not destroy the argvalues if it is provided as an iterator
    try:
        argvalues = list(argvalues)
    except TypeError:
        raise InvalidParamsList(argvalues)
    # get the param names
    all_param_names = get_param_argnames_as_list(argnames)
    nb_params = len(all_param_names)
    # find if there are fixture references in the values provided
    fixture_indices = []
    if nb_params == 1:
        for i, v in enumerate(argvalues):
            if isinstance(v, fixture_ref):
                fixture_indices.append((i, None))
    elif nb_params > 1:
        for i, v in enumerate(argvalues):
            try:
                j = 0
                fix_pos = []
                for j, _pval in enumerate(v):
                    if isinstance(_pval, fixture_ref):
                        fix_pos.append(j)
                if len(fix_pos) > 0:
                    fixture_indices.append((i, fix_pos))
                if j+1 != nb_params:
                    raise ValueError("Invalid parameter values containing %s items while the number of parameters is %s: "
                                     "%s." % (j+1, nb_params, v))
            except TypeError:
                # a non-iterable argvalue: it is only valid if it is a whole-tuple fixture reference
                if isinstance(v, fixture_ref):
                    fixture_indices.append((i, None))
                else:
                    raise ValueError(
                        "Invalid parameter values containing %s items while the number of parameters is %s: "
                        "%s." % (1, nb_params, v))
    if len(fixture_indices) == 0:
        # no fixture reference: do as usual
        return pytest.mark.parametrize(argnames, argvalues, indirect=indirect, ids=ids, scope=scope, **kwargs)
    else:
        # there are fixture references: we have to create a specific decorator
        caller_module = get_caller_module()
        def _create_param_fixture(from_i, to_i, p_fix_name):
            """ Routine that will be used to create a parameter fixture for argvalues between prev_i and i"""
            selected_argvalues = argvalues[from_i:to_i]
            try:
                # an explicit list of ids
                selected_ids = ids[from_i:to_i]
            except TypeError:
                # a callable to create the ids
                selected_ids = ids
            # default behaviour is not the same between pytest params and pytest fixtures
            if selected_ids is None:
                # selected_ids = ['-'.join([str(_v) for _v in v]) for v in selected_argvalues]
                selected_ids = get_test_ids_from_param_values(all_param_names, selected_argvalues)
            if to_i == from_i + 1:
                p_fix_name = "%s_is_%s" % (p_fix_name, from_i)
            else:
                p_fix_name = "%s_is_%sto%s" % (p_fix_name, from_i, to_i - 1)
            p_fix_name = check_name_available(caller_module, p_fix_name, if_name_exists=CHANGE,
                                              caller=pytest_parametrize_plus)
            param_fix = _param_fixture(caller_module, argname=p_fix_name, argvalues=selected_argvalues,
                                       ids=selected_ids)
            return param_fix
        def _create_fixture_product(argvalue_i, fixture_ref_positions, p_fix_name):
            """ Routine that will be used to create a fixture product for the tuple argvalue at index `argvalue_i`.
            BUGFIX(reconstructed): this helper was called below but its definition was missing from the file. """
            # unwrap the fixture_ref entries - the other tuple entries are plain pass-through values
            _argvals = tuple(v.fixture if isinstance(v, fixture_ref) else v for v in argvalues[argvalue_i])
            p_fix_name = "%s_is_P%s" % (p_fix_name, argvalue_i)
            p_fix_name = check_name_available(caller_module, p_fix_name, if_name_exists=CHANGE,
                                              caller=pytest_parametrize_plus)
            prod_fix = _fixture_product(caller_module, p_fix_name, _argvals, fixture_ref_positions)
            return prod_fix
        # then create the decorator
        def parametrize_plus_decorate(test_func):
            """
            A decorator that wraps the test function so that instead of receiving the parameter names, it receives the
            new fixture. All other decorations are unchanged.
            :param test_func: the test function to wrap
            :return: the wrapped test function
            """
            # first check if the test function has the parameters as arguments
            old_sig = signature(test_func)
            for p in all_param_names:
                if p not in old_sig.parameters:
                    raise ValueError("parameter '%s' not found in test function signature '%s%s'"
                                     "" % (p, test_func.__name__, old_sig))
            # The base name for all fixtures that will be created below
            # style_template = "%s_param__%s"
            style_template = "%s_%s"
            base_name = style_template % (test_func.__name__, argnames.replace(' ', '').replace(',', '_'))
            base_name = check_name_available(caller_module, base_name, if_name_exists=CHANGE, caller=pytest_parametrize_plus)
            # Retrieve (if ref) or create (for normal argvalues) the fixtures that we will union
            # TODO important note: we could either wish to create one fixture for parameter value or to create one for
            #  each consecutive group as shown below. This should not lead to different results but perf might differ.
            #  maybe add a parameter in the signature so that users can test it ?
            fixtures_to_union = []
            fixtures_to_union_names_for_ids = []
            prev_i = -1
            for i, j_list in fixture_indices:
                if i > prev_i + 1:
                    # there was a non-empty group of 'normal' parameters before this fixture_ref.
                    # create a new fixture parametrized with all of that consecutive group.
                    param_fix = _create_param_fixture(prev_i + 1, i, base_name)
                    fixtures_to_union.append(param_fix)
                    fixtures_to_union_names_for_ids.append(get_fixture_name(param_fix))
                if j_list is None:
                    # add the fixture referenced with `fixture_ref`
                    referenced_fixture = argvalues[i].fixture
                    fixtures_to_union.append(referenced_fixture)
                    id_for_fixture = apply_id_style(get_fixture_name(referenced_fixture), base_name, IdStyle.explicit)
                    fixtures_to_union_names_for_ids.append(id_for_fixture)
                else:
                    # create a fixture refering to all the fixtures required in the tuple
                    prod_fix = _create_fixture_product(i, j_list, base_name)
                    fixtures_to_union.append(prod_fix)
                    id_for_fixture = apply_id_style(get_fixture_name(prod_fix), base_name, IdStyle.explicit)
                    fixtures_to_union_names_for_ids.append(id_for_fixture)
                prev_i = i
            # handle last consecutive group of normal parameters, if any
            i = len(argvalues)
            if i > prev_i + 1:
                param_fix = _create_param_fixture(prev_i + 1, i, base_name)
                fixtures_to_union.append(param_fix)
                fixtures_to_union_names_for_ids.append(get_fixture_name(param_fix))
            # Finally create a "main" fixture with a unique name for this test function
            # note: the function automatically registers it in the module
            # note 2: idstyle is set to None because we provide an explicit enough list of ids
            big_param_fixture = _fixture_union(caller_module, base_name, fixtures_to_union, idstyle=None,
                                               ids=fixtures_to_union_names_for_ids)
            # --create the new test function's signature that we want to expose to pytest
            # it is the same than existing, except that we want to replace all parameters with the new fixture
            new_sig = remove_signature_parameters(old_sig, *all_param_names)
            new_sig = add_signature_parameters(new_sig, Parameter(base_name, kind=Parameter.POSITIONAL_OR_KEYWORD))
            def replace_paramfixture_with_values(kwargs):
                """ (reconstructed) Pops the "main" fixture value from `kwargs` and re-injects it under
                the original parameter names expected by the user-provided test function. """
                # remove the created fixture value
                encompassing_fixture = kwargs.pop(base_name)
                # and add instead the parameter values
                if nb_params > 1:
                    for _idx, _pname in enumerate(all_param_names):
                        kwargs[_pname] = encompassing_fixture[_idx]
                else:
                    kwargs[all_param_names[0]] = encompassing_fixture
                return kwargs
            # --Finally create the test function, a wrapper of the user-provided test with the new signature.
            # BUGFIX(reconstructed): the two `wrapped_test_func` bodies below were missing, leaving
            # dangling @wraps decorators (SyntaxError).
            if not isgeneratorfunction(test_func):
                # normal test function with return statement
                @wraps(test_func, new_sig=new_sig)
                def wrapped_test_func(*args, **kwargs):
                    # defensive: the union fixture may inject NOT_USED for inactive branches - TODO confirm
                    if kwargs.get(base_name, None) is NOT_USED:
                        return NOT_USED
                    else:
                        replace_paramfixture_with_values(kwargs)
                        return test_func(*args, **kwargs)
            else:
                # generator test function (with one or several yield statement)
                @wraps(test_func, new_sig=new_sig)
                def wrapped_test_func(*args, **kwargs):
                    # defensive: the union fixture may inject NOT_USED for inactive branches - TODO confirm
                    if kwargs.get(base_name, None) is NOT_USED:
                        yield NOT_USED
                    else:
                        replace_paramfixture_with_values(kwargs)
                        for res in test_func(*args, **kwargs):
                            yield res
            # move all pytest marks from the test function to the wrapper
            # not needed because the __dict__ is automatically copied when we use @wraps
            # move_all_pytest_marks(test_func, wrapped_test_func)
            # With this hack we will be ordered correctly by pytest https://github.com/pytest-dev/pytest/issues/4429
            wrapped_test_func.place_as = test_func
            # return the new test function
            return wrapped_test_func
        return parametrize_plus_decorate
| 41.882502 | 125 | 0.653844 | # Use true division operator always even in old python 2.x (used in `_get_case_getter_s`)
from __future__ import division
from distutils.version import LooseVersion
from enum import Enum
from inspect import isgeneratorfunction, getmodule, currentframe
from itertools import product
from warnings import warn
from decopatch import function_decorator, DECORATED
from makefun import with_signature, add_signature_parameters, remove_signature_parameters, wraps
import pytest
try: # python 3.3+
from inspect import signature, Parameter
except ImportError:
from funcsigs import signature, Parameter
try:
from typing import Type
except ImportError:
# on old versions of typing module the above does not work. Since our code below has all Type hints quoted it's ok
pass
try: # type hints, python 3+
from typing import Callable, Union, Optional, Any, Tuple, List, Dict, Iterable
from pytest_cases.case_funcs import CaseData, ExpectedError
from types import ModuleType
# Type hint for the simple functions
CaseFunc = Callable[[], CaseData]
# Type hint for generator functions
GeneratedCaseFunc = Callable[[Any], CaseData]
except ImportError:
pass
from pytest_cases.common import yield_fixture, get_pytest_parametrize_marks, get_test_ids_from_param_values, \
make_marked_parameter_value, extract_parameterset_info, get_fixture_name, get_param_argnames_as_list, \
get_fixture_scope, remove_duplicates
from pytest_cases.main_params import cases_data
def unpack_fixture(argnames, fixture):
    """
    Creates one fixture per name in `argnames`, each returning the corresponding element of the value
    produced by the source `fixture`. For instance, if `fixture` produces a 2-tuple then
    `unpack_fixture("a,b", fixture)` creates a fixture `a` returning its first element and a fixture
    `b` returning its second one.
    The created fixtures are registered in the caller's module automatically; you may additionally
    assign them to module variables for convenience - in that case use the very same names, e.g.
    `a, b = unpack_fixture('a,b', c)`.
    ```python
    import pytest
    from pytest_cases import unpack_fixture, pytest_fixture_plus
    @pytest_fixture_plus
    @pytest.mark.parametrize("o", ['hello', 'world'])
    def c(o):
        return o, o[0]
    a, b = unpack_fixture("a,b", c)
    def test_function(a, b):
        assert a[0] == b
    ```
    :param argnames: same as `@pytest.mark.parametrize` `argnames`.
    :param fixture: the source fixture, either by name (scope defaults to 'function') or as a fixture
        symbol (in which case its scope is reused). The 'function' default is in practice harmless
        performance-wise since the created fixtures' body is a one-liner.
    :return: the created fixtures.
    """
    # resolve the caller's module, where the new fixtures must be registered, then delegate
    return _unpack_fixture(get_caller_module(), argnames, fixture)
def _unpack_fixture(caller_module, argnames, fixture):
    """
    Internal implementation of `unpack_fixture`: creates one sub-fixture per name in `argnames`,
    each returning the element of the source `fixture` value at the corresponding position, and
    registers them in `caller_module`.
    :param caller_module: the module object in which the created fixtures are registered
    :param argnames: coma-separated string of sub-fixture names to create
    :param fixture: the source fixture: either a name (str - scope defaults to 'function') or a
        fixture symbol (its scope is reused)
    :return: the list of created fixture functions, in `argnames` order
    """
    # unpack fixture names to create if needed
    argnames_lst = get_param_argnames_as_list(argnames)
    # possibly get the source fixture name if the fixture symbol was provided
    if not isinstance(fixture, str):
        source_f_name = get_fixture_name(fixture)
        scope = get_fixture_scope(fixture)
    else:
        source_f_name = fixture
        # we dont have a clue about the real scope, so lets use function scope
        scope = 'function'
    # finally create the sub-fixtures
    created_fixtures = []
    for value_idx, argname in enumerate(argnames_lst):
        # create the fixture
        # To fix late binding issue with `value_idx` we add an extra layer of scope: a factory function
        # See https://stackoverflow.com/questions/3431676/creating-functions-in-a-loop
        def _create_fixture(value_idx):
            # no need to autouse=True: this fixture does not bring any added value in terms of setup.
            @pytest_fixture_plus(name=argname, scope=scope, autouse=False)
            @with_signature("(%s)" % source_f_name)
            def _param_fixture(**kwargs):
                # the sub-fixture depends on the source fixture (by name) and indexes into its value
                source_fixture_value = kwargs.pop(source_f_name)
                # unpack
                return source_fixture_value[value_idx]
            return _param_fixture
        # create it
        fix = _create_fixture(value_idx)
        # add to module (warn and override if the name is already taken)
        check_name_available(caller_module, argname, if_name_exists=WARN, caller=unpack_fixture)
        setattr(caller_module, argname, fix)
        # collect to return the whole list eventually
        created_fixtures.append(fix)
    return created_fixtures
def param_fixture(argname, argvalues, autouse=False, ids=None, scope="function", **kwargs):
    """
    Single-name flavour of `param_fixtures`: creates one "parameter" fixture named `argname`, whose
    params are `argvalues`, registers it in the caller's module and returns it so that it can be
    assigned to a single variable.
    ```python
    import pytest
    from pytest_cases import param_fixtures, param_fixture
    # create a single parameter fixture
    my_parameter = param_fixture("my_parameter", [1, 2, 3, 4])
    @pytest.fixture
    def fixture_uses_param(my_parameter):
        ...
    def test_uses_param(my_parameter, fixture_uses_param):
        ...
    ```
    :param argname: see fixture `name`
    :param argvalues: see fixture `params`
    :param autouse: see fixture `autouse`
    :param ids: see fixture `ids`
    :param scope: see fixture `scope`
    :param kwargs: any other argument for 'fixture'
    :return: the create fixture
    """
    # a coma-separated list of names is the job of `param_fixtures` (plural), not this alias
    if "," in argname:
        raise ValueError("`param_fixture` is an alias for `param_fixtures` that can only be used for a single "
                         "parameter name. Use `param_fixtures` instead - but note that it creates several fixtures.")
    if not argname.replace(' ', ''):
        raise ValueError("empty argname")
    return _param_fixture(get_caller_module(), argname, argvalues, autouse=autouse, ids=ids, scope=scope,
                          **kwargs)
def _param_fixture(caller_module, argname, argvalues, autouse=False, ids=None, scope="function", **kwargs):
    """ Internal routine shared by `param_fixture` and `param_fixtures`: creates one parameter fixture
    named `argname` and registers it in `caller_module`. """
    def __param_fixture(request):
        # a parameter fixture simply returns the currently active parameter
        return request.param
    fixture_creator = pytest_fixture_plus(name=argname, scope=scope, autouse=autouse, params=argvalues,
                                          ids=ids, **kwargs)
    fix = fixture_creator(__param_fixture)
    # register the fixture in the caller's module so pytest discovers it,
    # see https://github.com/pytest-dev/pytest/issues/2424
    check_name_available(caller_module, argname, if_name_exists=WARN, caller=param_fixture)
    setattr(caller_module, argname, fix)
    return fix
def get_caller_module(frame_offset=1):
    """ Returns the module of the frame located `frame_offset` levels above this function's caller. """
    return getmodule(_get_callerframe(offset=frame_offset))
class ExistingFixtureNameError(ValueError):
    """
    Error raised by `add_fixture_to_callers_module` when the target module already contains a symbol
    with the requested fixture name.
    """
    def __init__(self, module, name, caller):
        self.module = module
        self.name = name
        self.caller = caller

    def __str__(self):
        msg = "Symbol `%s` already exists in module %s and therefore a corresponding fixture can not be created by " \
              "`%s`"
        return msg % (self.name, self.module, self.caller)
# Policies applied by `check_name_available` when the target symbol already exists in the module.
RAISE = 0   # raise an ExistingFixtureNameError
WARN = 1    # emit a warning, the caller will override the existing symbol
CHANGE = 2  # derive a new, non-conflicting name and return it
def check_name_available(module,
                         name,                  # type: str
                         if_name_exists=RAISE,  # type: int
                         caller=None,           # type: Callable[[Any], Any]
                         ):
    """
    Routine to check that the symbol `name` is not already defined in `module`, and to apply the
    `if_name_exists` policy (RAISE, WARN or CHANGE) when it is.
    :param module: the module object in which availability of `name` is checked
    :param name: the candidate symbol name
    :param if_name_exists: the policy to apply when `name` already exists: RAISE (default), WARN or CHANGE
    :param caller: the public API function on behalf of which the check is performed (used in messages)
    :return: a name that might be different from `name` if the policy was CHANGE
    """
    if name in dir(module):
        if caller is None:
            caller = ''
        # Name already exists: act according to policy.
        # BUGFIX: use `==` rather than `is` - identity comparison of ints relies on CPython's
        # small-int caching and is not a language guarantee.
        if if_name_exists == RAISE:
            raise ExistingFixtureNameError(module, name, caller)
        elif if_name_exists == WARN:
            warn("%s Overriding symbol %s in module %s" % (caller, name, module))
        elif if_name_exists == CHANGE:
            # find a non-used name in that module by appending an increasing suffix
            i = 1
            name2 = name + '_%s' % i
            while name2 in dir(module):
                i += 1
                name2 = name + '_%s' % i
            name = name2
        else:
            raise ValueError("invalid value for `if_name_exists`: %s" % if_name_exists)
    return name
def param_fixtures(argnames, argvalues, autouse=False, ids=None, scope="function", **kwargs):
    """
    Creates one or several "parameters" fixtures - depending on the number or coma-separated names in `argnames`. The
    created fixtures are automatically registered into the callers' module, but you may wish to assign them to
    variables for convenience. In that case make sure that you use the same names, e.g.
    `p, q = param_fixtures('p,q', [(0, 1), (2, 3)])`.
    Note that the (argnames, argvalues, ids) signature is similar to `@pytest.mark.parametrize` for consistency,
    see https://docs.pytest.org/en/latest/reference.html?highlight=pytest.param#pytest-mark-parametrize
    ```python
    import pytest
    from pytest_cases import param_fixtures, param_fixture
    # create a 2-tuple parameter fixture
    arg1, arg2 = param_fixtures("arg1, arg2", [(1, 2), (3, 4)])
    @pytest.fixture
    def fixture_uses_param2(arg2):
        ...
    def test_uses_param2(arg1, arg2, fixture_uses_param2):
        ...
    ```
    :param argnames: same as `@pytest.mark.parametrize` `argnames`.
    :param argvalues: same as `@pytest.mark.parametrize` `argvalues`.
    :param autouse: see fixture `autouse`
    :param ids: same as `@pytest.mark.parametrize` `ids`
    :param scope: see fixture `scope`
    :param kwargs: any other argument for the created 'fixtures'
    :return: the created fixtures
    """
    created_fixtures = []
    argnames_lst = get_param_argnames_as_list(argnames)
    caller_module = get_caller_module()
    if len(argnames_lst) < 2:
        # a single parameter name: delegate to the single-fixture flavour
        return _param_fixture(caller_module, argnames, argvalues, autouse=autouse, ids=ids, scope=scope, **kwargs)
    # create the root fixture that will contain all parameter values
    # note: we sort the list so that the first in alphabetical order appears first. Indeed pytest uses this order.
    root_fixture_name = "%s__param_fixtures_root" % ('_'.join(sorted(argnames_lst)))
    # Dynamically add fixture to caller's module as explained in https://github.com/pytest-dev/pytest/issues/2424
    root_fixture_name = check_name_available(caller_module, root_fixture_name, if_name_exists=CHANGE, caller=param_fixtures)
    # the root fixture is parametrized with the whole argvalues and returns the full tuple of values
    @pytest_fixture_plus(name=root_fixture_name, autouse=autouse, scope=scope, **kwargs)
    @pytest.mark.parametrize(argnames, argvalues, ids=ids)
    @with_signature("(%s)" % argnames)
    def _root_fixture(**kwargs):
        return tuple(kwargs[k] for k in argnames_lst)
    # Override once again the symbol with the correct contents
    setattr(caller_module, root_fixture_name, _root_fixture)
    # finally create the sub-fixtures: each one depends on the root fixture and extracts one element
    for param_idx, argname in enumerate(argnames_lst):
        # create the fixture
        # To fix late binding issue with `param_idx` we add an extra layer of scope: a factory function
        # See https://stackoverflow.com/questions/3431676/creating-functions-in-a-loop
        def _create_fixture(param_idx):
            @pytest_fixture_plus(name=argname, scope=scope, autouse=autouse, **kwargs)
            @with_signature("(%s)" % root_fixture_name)
            def _param_fixture(**kwargs):
                params = kwargs.pop(root_fixture_name)
                return params[param_idx]
            return _param_fixture
        # create it
        fix = _create_fixture(param_idx)
        # add to module (warn and override if the name is already taken)
        check_name_available(caller_module, argname, if_name_exists=WARN, caller=param_fixtures)
        setattr(caller_module, argname, fix)
        # collect to return the whole list eventually
        created_fixtures.append(fix)
    return created_fixtures
def _get_callerframe(offset=0):
# inspect.stack is extremely slow, the fastest is sys._getframe or inspect.currentframe().
# See https://gist.github.com/JettJones/c236494013f22723c1822126df944b12
# frame = sys._getframe(2 + offset)
frame = currentframe()
for _ in range(2 + offset):
frame = frame.f_back
return frame
@function_decorator
def cases_fixture(cases=None,                     # type: Union[Callable[[Any], Any], Iterable[Callable[[Any], Any]]]
                  module=None,                    # type: Union[ModuleType, Iterable[ModuleType]]
                  case_data_argname='case_data',  # type: str
                  has_tag=None,                   # type: Any
                  filter=None,                    # type: Callable[[List[Any]], bool]
                  f=DECORATED,
                  **kwargs
                  ):
    """
    DEPRECATED - use the double annotation `@pytest_fixture_plus` + `@cases_data` instead:
    ```python
    @pytest_fixture_plus
    @cases_data(module=xxx)
    def my_fixture(case_data)
    ```
    Turns the decorated function into a fixture parametrized with all the cases listed in `module`
    (or listed explicitly in `cases`). Using a non-None `module` is strictly equivalent to collecting
    the cases with `get_all_cases(module=...)` and decorating your function with
    `@pytest.fixture(params=cases)`:
    ```python
    import pytest
    from pytest_cases import get_all_cases, CaseData
    # import the module containing the test cases
    import test_foo_cases
    # manually list the available cases
    cases = get_all_cases(module=test_foo_cases)
    # parametrize the fixture manually
    @pytest.fixture(params=cases)
    def foo_fixture(request):
        case_data = request.param  # type: CaseData
        ...
    ```
    The selection/filtering parameters (`cases`, `module`, `has_tag`, `filter`) have the same meaning
    as in `get_all_cases()`.
    :param cases: a single case or a hardcoded list of cases to use. Only one of `cases` and `module`
        should be set.
    :param module: a module or a hardcoded list of modules to use. You may use `THIS_MODULE` to
        indicate the current module. Only one of `cases` and `module` should be set.
    :param case_data_argname: the optional name of the function parameter that should receive the
        `CaseDataGetter` object. Default is 'case_data'.
    :param has_tag: an optional tag used to filter the cases. Only cases with the given tag will be
        selected.
    :param filter: an optional filtering function taking as an input a list of tags associated with a
        case, and returning a boolean indicating if the case should be selected. If both `has_tag`
        and `filter` are set, both are applied in sequence.
    :param kwargs: additional keyword arguments for `pytest_fixture_plus` (scope, autouse, ...)
    :return: the created fixture
    """
    # step 1: parametrize the function with the collected cases
    # (this translates to a @pytest.mark.parametrize)
    case_parametrizer = cases_data(cases=cases, module=module, case_data_argname=case_data_argname,
                                   has_tag=has_tag, filter=filter)
    # step 2: turn the parametrized function into a fixture
    return pytest_fixture_plus(**kwargs)(case_parametrizer(f))
@function_decorator
def pytest_fixture_plus(scope="function",
                        autouse=False,
                        name=None,
                        unpack_into=None,
                        fixture_func=DECORATED,
                        **kwargs):
    """ decorator to mark a fixture factory function.
    Identical to `@pytest.fixture` decorator, except that
    - it supports multi-parametrization with `@pytest.mark.parametrize` as requested in
      https://github.com/pytest-dev/pytest/issues/3960. As a consequence it does not support the `params` and `ids`
      arguments anymore.
    - it supports a new argument `unpack_into` where you can provide names for fixtures where to unpack this fixture
      into.
    :param scope: the scope for which this fixture is shared, one of
        "function" (default), "class", "module" or "session".
    :param autouse: if True, the fixture func is activated for all tests that
        can see it. If False (the default) then an explicit
        reference is needed to activate the fixture.
    :param name: the name of the fixture. This defaults to the name of the
        decorated function. Note: If a fixture is used in the same module in
        which it is defined, the function name of the fixture will be
        shadowed by the function arg that requests the fixture; one way
        to resolve this is to name the decorated function
        ``fixture_<fixturename>`` and then use
        ``@pytest.fixture(name='<fixturename>')``.
    :param unpack_into: an optional iterable of names, or string containing coma-separated names, for additional
        fixtures to create to represent parts of this fixture. See `unpack_fixture` for details.
    :param kwargs: other keyword arguments for `@pytest.fixture`
    """
    if name is not None:
        # Compatibility for the 'name' argument
        if LooseVersion(pytest.__version__) >= LooseVersion('3.0.0'):
            # pytest version supports "name" keyword argument
            kwargs['name'] = name
        else:
            # BUGFIX: this branch was guarded by a redundant `elif name is not None`
            # which is always True inside `if name is not None` - a plain `else` is equivalent.
            # 'name' argument is not supported in this old version, use the __name__ trick.
            fixture_func.__name__ = name
    # if unpacking is requested, do it first
    if unpack_into is not None:
        # get the future fixture name if needed
        if name is None:
            name = fixture_func.__name__
        # get caller module to create the symbols
        caller_module = get_caller_module(frame_offset=2)
        _unpack_fixture(caller_module, unpack_into, name)
    # (1) Collect all @pytest.mark.parametrize markers (including those created by usage of @cases_data)
    parametrizer_marks = get_pytest_parametrize_marks(fixture_func)
    if len(parametrizer_marks) < 1:
        return _create_fixture_without_marks(fixture_func, scope, autouse, **kwargs)
    else:
        if 'params' in kwargs:
            raise ValueError(
                "With `pytest_fixture_plus` you cannot mix usage of the keyword argument `params` and of "
                "the pytest.mark.parametrize marks")
    # (2) create the huge "param" containing all params combined
    # --loop (use the same order to get it right)
    params_names_or_name_combinations = []
    params_values = []
    params_ids = []
    params_marks = []
    for pmark in parametrizer_marks:
        # check number of parameter names in this parameterset
        if len(pmark.param_names) < 1:
            # BUGFIX: the '%s' placeholder was never filled (the '%' operand was missing)
            raise ValueError("Fixture function '%s' decorated with '@pytest_fixture_plus' has an empty parameter "
                             "name in a @pytest.mark.parametrize mark" % fixture_func.__name__)
        # remember
        params_names_or_name_combinations.append(pmark.param_names)
        # extract all parameters that have a specific configuration (pytest.param())
        _pids, _pmarks, _pvalues = extract_parameterset_info(pmark.param_names, pmark)
        # Create the proper id for each test
        if pmark.param_ids is not None:
            # overridden at global pytest.mark.parametrize level - this takes precedence.
            try:  # an explicit list of ids ?
                paramids = list(pmark.param_ids)
            except TypeError:  # a callable to apply on the values
                paramids = list(pmark.param_ids(v) for v in _pvalues)
        else:
            # default: values-based...
            paramids = get_test_ids_from_param_values(pmark.param_names, _pvalues)
        # ...but local pytest.param takes precedence
        for i, _id in enumerate(_pids):
            if _id is not None:
                paramids[i] = _id
        # Finally store the ids, marks, and values for this parameterset
        params_ids.append(paramids)
        params_marks.append(tuple(_pmarks))
        params_values.append(tuple(_pvalues))
    # (3) generate the ids and values, possibly reapplying marks
    if len(params_names_or_name_combinations) == 1:
        # we can simplify - that will be more readable
        final_ids = params_ids[0]
        final_marks = params_marks[0]
        final_values = list(params_values[0])
        # reapply the marks
        for i, marks in enumerate(final_marks):
            if marks is not None:
                final_values[i] = make_marked_parameter_value(final_values[i], marks=marks)
    else:
        # cartesian product of all parametrize marks
        final_values = list(product(*params_values))
        final_ids = get_test_ids_from_param_values(params_names_or_name_combinations, product(*params_ids))
        final_marks = tuple(product(*params_marks))
        # reapply the marks
        for i, marks in enumerate(final_marks):
            ms = [m for mm in marks if mm is not None for m in mm]
            if len(ms) > 0:
                final_values[i] = make_marked_parameter_value(final_values[i], marks=ms)
    if len(final_values) != len(final_ids):
        raise ValueError("Internal error related to fixture parametrization- please report")
    # (4) wrap the fixture function so as to remove the parameter names and add 'request' if needed
    all_param_names = tuple(v for l in params_names_or_name_combinations for v in l)
    # --create the new signature that we want to expose to pytest
    old_sig = signature(fixture_func)
    for p in all_param_names:
        if p not in old_sig.parameters:
            raise ValueError("parameter '%s' not found in fixture signature '%s%s'"
                             "" % (p, fixture_func.__name__, old_sig))
    new_sig = remove_signature_parameters(old_sig, *all_param_names)
    # add request if needed
    func_needs_request = 'request' in old_sig.parameters
    if not func_needs_request:
        new_sig = add_signature_parameters(new_sig, first=Parameter('request', kind=Parameter.POSITIONAL_OR_KEYWORD))
    # --common routine used below. Fills kwargs with the appropriate names and values from fixture_params
    def _get_arguments(*args, **kwargs):
        request = kwargs['request'] if func_needs_request else kwargs.pop('request')
        # populate the parameters
        if len(params_names_or_name_combinations) == 1:
            _params = [request.param]  # remove the simplification
        else:
            _params = request.param
        for p_names, fixture_param_value in zip(params_names_or_name_combinations, _params):
            if len(p_names) == 1:
                # a single parameter for that generated fixture (@pytest.mark.parametrize with a single name)
                kwargs[p_names[0]] = fixture_param_value
            else:
                # several parameters for that generated fixture (@pytest.mark.parametrize with several names)
                # unpack all of them and inject them in the kwargs
                for old_p_name, old_p_value in zip(p_names, fixture_param_value):
                    kwargs[old_p_name] = old_p_value
        return args, kwargs
    # --Finally create the fixture function, a wrapper of user-provided fixture with the new signature
    if not isgeneratorfunction(fixture_func):
        # normal function with return statement
        @wraps(fixture_func, new_sig=new_sig)
        def wrapped_fixture_func(*args, **kwargs):
            if not is_used_request(kwargs['request']):
                return NOT_USED
            else:
                args, kwargs = _get_arguments(*args, **kwargs)
                return fixture_func(*args, **kwargs)
        # transform the created wrapper into a fixture
        fixture_decorator = pytest.fixture(scope=scope, params=final_values, autouse=autouse, ids=final_ids, **kwargs)
        return fixture_decorator(wrapped_fixture_func)
    else:
        # generator function (with a yield statement)
        @wraps(fixture_func, new_sig=new_sig)
        def wrapped_fixture_func(*args, **kwargs):
            if not is_used_request(kwargs['request']):
                yield NOT_USED
            else:
                args, kwargs = _get_arguments(*args, **kwargs)
                for res in fixture_func(*args, **kwargs):
                    yield res
        # transform the created wrapper into a fixture
        fixture_decorator = yield_fixture(scope=scope, params=final_values, autouse=autouse, ids=final_ids, **kwargs)
        return fixture_decorator(wrapped_fixture_func)
def _create_fixture_without_marks(fixture_func, scope, autouse, **kwargs):
    """
    creates a fixture for decorated fixture function `fixture_func`.
    :param fixture_func: the (non-parametrized) fixture function to wrap
    :param scope: the pytest fixture scope
    :param autouse: the pytest fixture `autouse` flag
    :param kwargs: remaining keyword arguments for `pytest.fixture`
    :return: the created pytest fixture
    """
    # IMPORTANT: even if 'params' is not in kwargs, the fixture
    # can be used in a fixture union and therefore a param will be received
    # on some calls (and the fixture will be called several times - only once for real)
    # - we have to handle the NOT_USED.
    # --create a wrapper where we will be able to auto-detect
    # TODO we could put this in a dedicated wrapper 'ignore_unsused'..
    old_sig = signature(fixture_func)
    # add request if needed
    func_needs_request = 'request' in old_sig.parameters
    if not func_needs_request:
        new_sig = add_signature_parameters(old_sig,
                                           first=Parameter('request', kind=Parameter.POSITIONAL_OR_KEYWORD))
    else:
        new_sig = old_sig
    if not isgeneratorfunction(fixture_func):
        # normal function with return statement
        @wraps(fixture_func, new_sig=new_sig)
        def wrapped_fixture_func(*args, **kwargs):
            # only pass 'request' through when the wrapped function declared it
            request = kwargs['request'] if func_needs_request else kwargs.pop('request')
            if is_used_request(request):
                return fixture_func(*args, **kwargs)
            else:
                # inactive union branch: skip the user code entirely
                return NOT_USED
        # transform the created wrapper into a fixture
        fixture_decorator = pytest.fixture(scope=scope, autouse=autouse, **kwargs)
        return fixture_decorator(wrapped_fixture_func)
    else:
        # generator function (with a yield statement)
        @wraps(fixture_func, new_sig=new_sig)
        def wrapped_fixture_func(*args, **kwargs):
            # only pass 'request' through when the wrapped function declared it
            request = kwargs['request'] if func_needs_request else kwargs.pop('request')
            if is_used_request(request):
                for res in fixture_func(*args, **kwargs):
                    yield res
            else:
                # inactive union branch: skip the user code entirely
                yield NOT_USED
        # transform the created wrapper into a fixture
        fixture_decorator = yield_fixture(scope=scope, autouse=autouse, **kwargs)
        return fixture_decorator(wrapped_fixture_func)
class _NotUsed:
def __repr__(self):
return "pytest_cases.NOT_USED"
NOT_USED = _NotUsed()
"""Object representing a fixture value when the fixture is not used"""
class UnionFixtureAlternative(object):
    """A special class that should be used to wrap a fixture name"""
    def __init__(self,
                 fixture_name,
                 idstyle  # type: IdStyle
                 ):
        self.fixture_name = fixture_name
        self.idstyle = idstyle

    # def __str__(self):
    #     that is maybe too dangerous...
    #     return self.fixture_name

    def __repr__(self):
        return "UnionAlternative<%s, idstyle=%s>" % (self.fixture_name, self.idstyle)

    @staticmethod
    def to_list_of_fixture_names(alternatives_lst  # type: List[UnionFixtureAlternative]
                                 ):
        """Extracts the fixture names from a list of alternatives."""
        return [alt.fixture_name for alt in alternatives_lst]
class IdStyle(Enum):
    """
    The enum defining all possible id styles for union-fixture alternatives.
    See `apply_id_style` for how each member is rendered in test ids.
    """
    none = None            # leave the id untouched
    explicit = 'explicit'  # "<union_name>_is_<id>" -- favors readability
    compact = 'compact'    # "U<id>" -- short marker flagging union params
def apply_id_style(id, union_fixture_name, idstyle):
    """
    Render a union-alternative test id according to `idstyle`.
    See https://github.com/smarie/python-pytest-cases/issues/41
    :param id: the raw id (typically the target fixture name).
    :param union_fixture_name: name of the union fixture owning the alternative.
    :param idstyle: an `IdStyle` member selecting the rendering.
    :return: the styled id string.
    """
    # guard-clause dispatch over the three known styles
    if idstyle is IdStyle.none:
        return id
    if idstyle is IdStyle.explicit:
        # e.g. "my_union_is_my_fixture"
        return "%s_is_%s" % (union_fixture_name, id)
    if idstyle is IdStyle.compact:
        # e.g. "Umy_fixture"
        return "U%s" % id
    raise ValueError("Invalid id style")
class InvalidParamsList(Exception):
    """
    Raised when a non-iterable `argvalues` is given to a pytest parametrize.
    See https://docs.pytest.org/en/latest/reference.html#pytest-mark-parametrize-ref
    """
    __slots__ = ('params',)
    def __init__(self, params):
        # keep the offending object so the message can display it
        self.params = params
    def __str__(self):
        return "Invalid parameters list (`argvalues`) in pytest parametrize: %s" % self.params
def is_fixture_union_params(params):
    """
    Internal helper telling whether a parameters list comes from a union fixture,
    i.e. whether its first element is a `UnionFixtureAlternative`.
    :param params: the candidate parameters list.
    :return: True when `params` is a non-empty sequence of union alternatives.
    """
    try:
        if len(params) < 1:
            return False
        return isinstance(params[0], UnionFixtureAlternative)
    except TypeError:
        # not sized / not indexable: the user passed something non-iterable
        raise InvalidParamsList(params)
def is_used_request(request):
    """
    Internal helper telling whether a fixture request is active. A request is
    inactive when the current branch of a UNION fixture does not select this
    fixture (its parameter is then the NOT_USED sentinel).
    This helper is used in all fixtures created in this module.
    :param request: the pytest request object.
    :return: True when the fixture is actually used in the current branch.
    """
    param_value = getattr(request, 'param', None)
    return param_value is not NOT_USED
def fixture_alternative_to_str(fixture_alternative,  # type: UnionFixtureAlternative
                               ):
    """Default `ids` callable: render a union alternative as its target fixture name."""
    return getattr(fixture_alternative, 'fixture_name')
def fixture_union(name,
                  fixtures,
                  scope="function",
                  idstyle='explicit',
                  ids=fixture_alternative_to_str,
                  unpack_into=None,
                  autouse=False,
                  **kwargs):
    """
    Creates a fixture that will take all values of the provided fixtures in order. That fixture is automatically
    registered into the callers' module, but you may wish to assign it to a variable for convenience. In that case
    make sure that you use the same name, e.g. `a = fixture_union('a', ['b', 'c'])`
    The style of test ids corresponding to the union alternatives can be changed with `idstyle`. Three values are
    allowed:
    - `'explicit'` (default) favors readability,
    - `'compact'` adds a small mark so that at least one sees which parameters are union parameters and which others
      are normal parameters,
    - `None` does not change the ids.
    :param name: the name of the fixture to create
    :param fixtures: an array-like containing fixture names and/or fixture symbols
    :param scope: the scope of the union. Since the union depends on the sub-fixtures, it should be smaller than the
        smallest scope of fixtures referenced.
    :param idstyle: The style of test ids corresponding to the union alternatives. One of `'explicit'` (default),
        `'compact'`, or `None`.
    :param ids: as in pytest. The default value returns the correct fixture
    :param unpack_into: an optional iterable of names, or string containing coma-separated names, for additional
        fixtures to create to represent parts of this fixture. See `unpack_fixture` for details.
    :param autouse: as in pytest
    :param kwargs: other pytest fixture options. They might not be supported correctly.
    :return: the new fixture. Note: you do not need to capture that output in a symbol, since the fixture is
        automatically registered in your module. However if you decide to do so make sure that you use the same name.
    """
    # NOTE(review): get_caller_module presumably walks the call stack to find the
    # module of OUR caller -- keep this call directly in this function's frame;
    # confirm its frame-depth assumption before any refactoring.
    caller_module = get_caller_module()
    # all of the real work is delegated to the internal implementation
    return _fixture_union(caller_module, name, fixtures, scope=scope, idstyle=idstyle, ids=ids, autouse=autouse,
                          unpack_into=unpack_into, **kwargs)
def _fixture_union(caller_module, name, fixtures, idstyle, scope="function", ids=fixture_alternative_to_str,
                   unpack_into=None, autouse=False, **kwargs):
    """
    Internal implementation for fixture_union.
    :param caller_module: the module object in which the created fixture is registered.
    :param name: the name of the union fixture to create.
    :param fixtures: tuple/set/list of fixture names or fixture symbols to unite.
    :param idstyle: the id style for the union alternatives; validated through `IdStyle(...)`.
    :param scope: the pytest scope of the created fixture.
    :param ids: as in pytest; defaults to rendering the alternative's fixture name.
    :param unpack_into: optional names to additionally unpack the union fixture into.
    :param autouse: as in pytest.
    :param kwargs: other pytest fixture options.
    :return: the created fixture function (also set as attribute `name` on `caller_module`).
    """
    # test the `fixtures` argument to avoid common mistakes
    if not isinstance(fixtures, (tuple, set, list)):
        raise TypeError("fixture_union: the `fixtures` argument should be a tuple, set or list")
    # validate the idstyle (raises ValueError on unknown style strings)
    idstyle = IdStyle(idstyle)
    # first get all required fixture names
    f_names = []
    for f in fixtures:
        # possibly get the fixture name if the fixture symbol was provided
        f_names.append(get_fixture_name(f) if not isinstance(f, str) else f)
    if len(f_names) < 1:
        raise ValueError("Empty fixture unions are not permitted")
    # then generate the body of our union fixture. It will require all of its dependent fixtures and receive as
    # a parameter the name of the fixture to use
    @with_signature("(%s, request)" % ', '.join(f_names))
    def _new_fixture(request, **all_fixtures):
        if not is_used_request(request):
            # this union branch is inactive: propagate the sentinel
            return NOT_USED
        else:
            # the request parameter selects which sub-fixture's value to return
            alternative = request.param
            if isinstance(alternative, UnionFixtureAlternative):
                fixture_to_use = alternative.fixture_name
                return all_fixtures[fixture_to_use]
            else:
                raise TypeError("Union Fixture %s received invalid parameter type: %s. Please report this issue."
                                "" % (name, alternative.__class__))
    _new_fixture.__name__ = name
    # finally create the fixture per se.
    # WARNING we do not use pytest.fixture but pytest_fixture_plus so that NOT_USED is discarded
    f_decorator = pytest_fixture_plus(scope=scope,
                                      params=[UnionFixtureAlternative(_name, idstyle) for _name in f_names],
                                      autouse=autouse, ids=ids, **kwargs)
    fix = f_decorator(_new_fixture)
    # Dynamically add fixture to caller's module as explained in https://github.com/pytest-dev/pytest/issues/2424
    # NOTE(review): `caller=param_fixture` looks copy-pasted from the param-fixture code path; it presumably only
    # affects the warning/message emitted on a name clash -- confirm.
    check_name_available(caller_module, name, if_name_exists=WARN, caller=param_fixture)
    setattr(caller_module, name, fix)
    # if unpacking is requested, do it here
    if unpack_into is not None:
        _unpack_fixture(caller_module, argnames=unpack_into, fixture=name)
    return fix
def _fixture_product(caller_module, name, fixtures_or_values, fixture_positions,
                     scope="function", ids=fixture_alternative_to_str,
                     unpack_into=None, autouse=False, **kwargs):
    """
    Internal implementation for fixture products created by pytest parametrize plus:
    a fixture returning a tuple mixing fixed values and the values of other fixtures.
    :param caller_module: the module object in which the created fixture is registered.
    :param name: the name of the product fixture to create.
    :param fixtures_or_values: tuple/set/list mixing plain values and fixture names/symbols.
    :param fixture_positions: indices in `fixtures_or_values` that are fixtures (the rest are fixed values).
    :param scope: the pytest scope of the created fixture.
    :param ids: as in pytest.
    :param unpack_into: optional names to additionally unpack the fixture into.
    :param autouse: as in pytest.
    :param kwargs: other pytest fixture options.
    :return: the created fixture function (also set as attribute `name` on `caller_module`).
    """
    # test the `fixtures` argument to avoid common mistakes
    if not isinstance(fixtures_or_values, (tuple, set, list)):
        raise TypeError("fixture_product: the `fixtures_or_values` argument should be a tuple, set or list")
    _tuple_size = len(fixtures_or_values)
    # first get all required fixture names; slots left at None are fixed values
    f_names = [None] * _tuple_size
    for f_pos in fixture_positions:
        # possibly get the fixture name if the fixture symbol was provided
        f = fixtures_or_values[f_pos]
        # and remember the position in the tuple
        f_names[f_pos] = get_fixture_name(f) if not isinstance(f, str) else f
    # remove duplicates by making it an ordered set
    all_names = remove_duplicates((n for n in f_names if n is not None))
    if len(all_names) < 1:
        raise ValueError("Empty fixture products are not permitted")
    def _tuple_generator(all_fixtures):
        # rebuild the tuple element by element, in original order
        for i in range(_tuple_size):
            fix_at_pos_i = f_names[i]
            if fix_at_pos_i is None:
                # fixed value
                yield fixtures_or_values[i]
            else:
                # fixture value
                yield all_fixtures[fix_at_pos_i]
    # then generate the body of our product fixture. It will require all of its dependent fixtures
    @with_signature("(%s)" % ', '.join(all_names))
    def _new_fixture(**all_fixtures):
        return tuple(_tuple_generator(all_fixtures))
    _new_fixture.__name__ = name
    # finally create the fixture per se.
    # WARNING we do not use pytest.fixture but pytest_fixture_plus so that NOT_USED is discarded
    f_decorator = pytest_fixture_plus(scope=scope, autouse=autouse, ids=ids, **kwargs)
    fix = f_decorator(_new_fixture)
    # Dynamically add fixture to caller's module as explained in https://github.com/pytest-dev/pytest/issues/2424
    # NOTE(review): `caller=param_fixture` presumably only affects the name-clash warning message -- confirm.
    check_name_available(caller_module, name, if_name_exists=WARN, caller=param_fixture)
    setattr(caller_module, name, fix)
    # if unpacking is requested, do it here
    if unpack_into is not None:
        _unpack_fixture(caller_module, argnames=unpack_into, fixture=name)
    return fix
class fixture_ref:
    """
    Marker wrapping a fixture so `pytest_parametrize_plus` can tell it apart
    from a plain parameter value inside `argvalues`. It can be built from a
    fixture name or from the fixture function itself.
    """
    __slots__ = ('fixture',)
    def __init__(self, fixture):
        # store the referenced fixture (name or symbol) as-is; resolution happens later
        self.fixture = fixture
def pytest_parametrize_plus(argnames, argvalues, indirect=False, ids=None, scope=None, **kwargs):
    """
    Equivalent to `@pytest.mark.parametrize` but also supports the fact that in argvalues one can include references to
    fixtures with `fixture_ref(<fixture>)` where <fixture> can be the fixture name or fixture function.
    When such a fixture reference is detected in the argvalues, a new function-scope fixture will be created with a
    unique name, and the test function will be wrapped so as to be injected with the correct parameters. Special test
    ids will be created to illustrate the switching between normal parameters and fixtures.
    :param argnames: comma-separated string of parameter names, as in pytest.
    :param argvalues: iterable of parameter values (tuples when there are several parameters); entries may be or
        contain `fixture_ref` objects.
    :param indirect: as in pytest.
    :param ids: as in pytest: an explicit list, or a callable creating an id from a value.
    :param scope: as in pytest.
    :param kwargs: other `pytest.mark.parametrize` keyword arguments.
    :return: either a plain `pytest.mark.parametrize` mark (when no fixture reference is present) or a decorator
        that wraps the test function.
    """
    # make sure that we do not destroy the argvalues if it is provided as an iterator
    try:
        argvalues = list(argvalues)
    except TypeError:
        raise InvalidParamsList(argvalues)
    # get the param names
    all_param_names = get_param_argnames_as_list(argnames)
    nb_params = len(all_param_names)
    # find if there are fixture references in the values provided; collect (value_index, positions_in_tuple) pairs
    # where positions_in_tuple is None for a single-parameter (or whole-tuple) fixture reference
    fixture_indices = []
    if nb_params == 1:
        for i, v in enumerate(argvalues):
            if isinstance(v, fixture_ref):
                fixture_indices.append((i, None))
    elif nb_params > 1:
        for i, v in enumerate(argvalues):
            try:
                j = 0
                fix_pos = []
                for j, _pval in enumerate(v):
                    if isinstance(_pval, fixture_ref):
                        fix_pos.append(j)
                if len(fix_pos) > 0:
                    fixture_indices.append((i, fix_pos))
                # sanity check: the tuple must contain exactly one item per parameter
                if j+1 != nb_params:
                    raise ValueError("Invalid parameter values containing %s items while the number of parameters is %s: "
                                     "%s." % (j+1, nb_params, v))
            except TypeError:
                # `v` is not iterable: it may be a single fixture ref standing for the whole tuple
                if isinstance(v, fixture_ref):
                    fixture_indices.append((i, None))
                else:
                    raise ValueError(
                        "Invalid parameter values containing %s items while the number of parameters is %s: "
                        "%s." % (1, nb_params, v))
    if len(fixture_indices) == 0:
        # no fixture reference: do as usual
        return pytest.mark.parametrize(argnames, argvalues, indirect=indirect, ids=ids, scope=scope, **kwargs)
    else:
        # there are fixture references: we have to create a specific decorator
        # NOTE(review): on this code path `indirect`, `scope` and extra `kwargs` are not forwarded anywhere
        # below -- they are silently ignored. Confirm that this is intended.
        caller_module = get_caller_module()
        def _create_param_fixture(from_i, to_i, p_fix_name):
            """ Routine that will be used to create a parameter fixture for argvalues between from_i and to_i"""
            selected_argvalues = argvalues[from_i:to_i]
            try:
                # an explicit list of ids
                selected_ids = ids[from_i:to_i]
            except TypeError:
                # a callable to create the ids
                selected_ids = ids
            # default behaviour is not the same between pytest params and pytest fixtures
            if selected_ids is None:
                # selected_ids = ['-'.join([str(_v) for _v in v]) for v in selected_argvalues]
                selected_ids = get_test_ids_from_param_values(all_param_names, selected_argvalues)
            # make the fixture name reflect which argvalue indices it covers
            if to_i == from_i + 1:
                p_fix_name = "%s_is_%s" % (p_fix_name, from_i)
            else:
                p_fix_name = "%s_is_%sto%s" % (p_fix_name, from_i, to_i - 1)
            p_fix_name = check_name_available(caller_module, p_fix_name, if_name_exists=CHANGE,
                                             caller=pytest_parametrize_plus)
            param_fix = _param_fixture(caller_module, argname=p_fix_name, argvalues=selected_argvalues,
                                       ids=selected_ids)
            return param_fix
        def _create_fixture_product(argvalue_i, fixture_ref_positions, base_name):
            # do not use base name - we don't care if there is another in the same module, it will still be more readable
            p_fix_name = "fixtureproduct__%s" % (argvalue_i, )
            p_fix_name = check_name_available(caller_module, p_fix_name, if_name_exists=CHANGE,
                                              caller=pytest_parametrize_plus)
            # unpack the fixture references
            _vtuple = argvalues[argvalue_i]
            fixtures_or_values = tuple(v.fixture if i in fixture_ref_positions else v for i, v in enumerate(_vtuple))
            product_fix = _fixture_product(caller_module, p_fix_name, fixtures_or_values, fixture_ref_positions)
            return product_fix
        # then create the decorator
        def parametrize_plus_decorate(test_func):
            """
            A decorator that wraps the test function so that instead of receiving the parameter names, it receives the
            new fixture. All other decorations are unchanged.
            :param test_func: the decorated test function.
            :return: the wrapped test function exposing the union fixture instead of the parameters.
            """
            # first check if the test function has the parameters as arguments
            old_sig = signature(test_func)
            for p in all_param_names:
                if p not in old_sig.parameters:
                    raise ValueError("parameter '%s' not found in test function signature '%s%s'"
                                     "" % (p, test_func.__name__, old_sig))
            # The base name for all fixtures that will be created below
            # style_template = "%s_param__%s"
            style_template = "%s_%s"
            base_name = style_template % (test_func.__name__, argnames.replace(' ', '').replace(',', '_'))
            base_name = check_name_available(caller_module, base_name, if_name_exists=CHANGE, caller=pytest_parametrize_plus)
            # Retrieve (if ref) or create (for normal argvalues) the fixtures that we will union
            # TODO important note: we could either wish to create one fixture for parameter value or to create one for
            #  each consecutive group as shown below. This should not lead to different results but perf might differ.
            #  maybe add a parameter in the signature so that users can test it ?
            fixtures_to_union = []
            fixtures_to_union_names_for_ids = []
            prev_i = -1
            for i, j_list in fixture_indices:
                if i > prev_i + 1:
                    # there was a non-empty group of 'normal' parameters before this fixture_ref.
                    # create a new fixture parametrized with all of that consecutive group.
                    param_fix = _create_param_fixture(prev_i + 1, i, base_name)
                    fixtures_to_union.append(param_fix)
                    fixtures_to_union_names_for_ids.append(get_fixture_name(param_fix))
                if j_list is None:
                    # add the fixture referenced with `fixture_ref`
                    referenced_fixture = argvalues[i].fixture
                    fixtures_to_union.append(referenced_fixture)
                    id_for_fixture = apply_id_style(get_fixture_name(referenced_fixture), base_name, IdStyle.explicit)
                    fixtures_to_union_names_for_ids.append(id_for_fixture)
                else:
                    # create a fixture referring to all the fixtures required in the tuple
                    prod_fix = _create_fixture_product(i, j_list, base_name)
                    fixtures_to_union.append(prod_fix)
                    id_for_fixture = apply_id_style(get_fixture_name(prod_fix), base_name, IdStyle.explicit)
                    fixtures_to_union_names_for_ids.append(id_for_fixture)
                prev_i = i
            # handle last consecutive group of normal parameters, if any
            i = len(argvalues)
            if i > prev_i + 1:
                param_fix = _create_param_fixture(prev_i + 1, i, base_name)
                fixtures_to_union.append(param_fix)
                fixtures_to_union_names_for_ids.append(get_fixture_name(param_fix))
            # Finally create a "main" fixture with a unique name for this test function
            # note: the function automatically registers it in the module
            # note 2: idstyle is set to None because we provide an explicit enough list of ids
            # (the returned symbol is unused on purpose: registration happens inside _fixture_union)
            big_param_fixture = _fixture_union(caller_module, base_name, fixtures_to_union, idstyle=None,
                                               ids=fixtures_to_union_names_for_ids)
            # --create the new test function's signature that we want to expose to pytest
            # it is the same than existing, except that we want to replace all parameters with the new fixture
            new_sig = remove_signature_parameters(old_sig, *all_param_names)
            new_sig = add_signature_parameters(new_sig, Parameter(base_name, kind=Parameter.POSITIONAL_OR_KEYWORD))
            # --Finally create the fixture function, a wrapper of user-provided fixture with the new signature
            def replace_paramfixture_with_values(kwargs):
                # remove the created fixture value
                encompassing_fixture = kwargs.pop(base_name)
                # and add instead the parameter values
                if nb_params > 1:
                    for i, p in enumerate(all_param_names):
                        kwargs[p] = encompassing_fixture[i]
                else:
                    kwargs[all_param_names[0]] = encompassing_fixture
                # return (kwargs is also modified in place)
                return kwargs
            if not isgeneratorfunction(test_func):
                # normal test function with return statement
                @wraps(test_func, new_sig=new_sig)
                def wrapped_test_func(*args, **kwargs):
                    if kwargs.get(base_name, None) is NOT_USED:
                        # inactive union branch: skip the real test body
                        return NOT_USED
                    else:
                        replace_paramfixture_with_values(kwargs)
                        return test_func(*args, **kwargs)
            else:
                # generator test function (with one or several yield statement)
                @wraps(test_func, new_sig=new_sig)
                def wrapped_test_func(*args, **kwargs):
                    if kwargs.get(base_name, None) is NOT_USED:
                        yield NOT_USED
                    else:
                        replace_paramfixture_with_values(kwargs)
                        for res in test_func(*args, **kwargs):
                            yield res
            # move all pytest marks from the test function to the wrapper
            # not needed because the __dict__ is automatically copied when we use @wraps
            # move_all_pytest_marks(test_func, wrapped_test_func)
            # With this hack we will be ordered correctly by pytest https://github.com/pytest-dev/pytest/issues/4429
            wrapped_test_func.place_as = test_func
            # return the new test function
            return wrapped_test_func
        return parametrize_plus_decorate
| 7,116 | -6 | 810 |
da8d8071d750b685956d79ac77bbc0a8b708951e | 5,257 | py | Python | nb/toxin.py | pgniewko/deep-toxin | fa61b06405749e5de7d74eedadb5de7c67981471 | [
"BSD-3-Clause"
] | 1 | 2020-08-20T07:49:10.000Z | 2020-08-20T07:49:10.000Z | nb/toxin.py | pgniewko/deep-toxin | fa61b06405749e5de7d74eedadb5de7c67981471 | [
"BSD-3-Clause"
] | null | null | null | nb/toxin.py | pgniewko/deep-toxin | fa61b06405749e5de7d74eedadb5de7c67981471 | [
"BSD-3-Clause"
] | null | null | null | from pydpi.pypro import PyPro
import logging
# Map from modification / non-standard residue names (as spelled in the source
# toxin database) to the one-letter code of the parent amino acid. Per the
# cleaning strategy in Toxin._clean_seq: residues with no parent amino acid map
# to "G" (glycine), and terminal modifications map to "" (residue removed).
AA_MODIFICATIONS = {
    "Benzoylphenylalanine": "F",
    "C-term amidation": "",
    "Sulfotyrosine": "Y",
    "4-Hydroxyproline": "P",
    "Pyroglutamic acid": "E",
    "Gamma carboxylic glutamic acid": "E",
    "Any": "G",
    "D-leucine": "L",
    "D-phenylalanine": "F",
    "D-methionine": "M",
    "D-tryptophan": "W",
    "D-tyrosine": "Y",
    "Bromotryptophan": "W",
    "glycosylated serine": "S",
    "2_2-dimethylthiazolidine": "G",
    "glycosylated threonine": "T",
    "Oxomethionine": "M",
    "Selenocystine (half)": "C",
    "gamma-hydroxy-D-valine": "V",
    "5-hydroxy-lysine": "K",
    "Norleucine": "L",
    "N-Acetate (on N-terminus)": "",
    "3-iodotyrosine": "Y",
    "5-amino-3-oxo-pentanoic acid": "G",
    "2-amino-DL-dodecanoic acid": "G",
    "Carbabridge [C2 unsaturated] (half)": "G",
    "alpha-aminobutyric acid": "G",
    "Asymmetric dimethylarginine": "R",
    "4-(R)-amino-proline": "P",
    "4-(S)-amino-proline": "P",
    "4-(R)-guanidino-proline": "P",
    "4-(R)-betainamidyl-proline": "P",
    "4-(R)-fluoro-proline": "P",
    "4-(S)-fluoro-proline": "P",
    "4-(R)-phenyl-proline": "P",
    "4-(S)-phenyl-proline": "P",
    "4-(R)-benzyl-proline": "P",
    "4-(S)-benzyl-proline": "P",
    "4-(R)-1-naphtylmehyl-proline": "P",
    "4-(S)-1-naphtylmehyl-proline": "P",
    "3-(R)-phenyl-proline": "P",
    "3-(S)-phenyl-proline": "P",
    "5-(R)-phenyl-proline": "P",
    "5-(S)-phenyl-proline": "P",
    "Diiodotyrosine": "Y",
    "D-alanine": "A",
    "Carbabridge [C4 unsaturated] (half)": "G",
    "Carbabridge [C4 saturated] (half)": "G",
    "Carbabridge [C7 unsaturated] (half)": "G",
    # NOTE(review): the key below has a leading space -- presumably matching the
    # raw database spelling; confirm it is intentional before normalizing.
    " L-4,5-dithiolnorvaline": "V",
}
| 27.814815 | 141 | 0.561917 | from pydpi.pypro import PyPro
import logging
AA_MODIFICATIONS = {
"Benzoylphenylalanine": "F",
"C-term amidation": "",
"Sulfotyrosine": "Y",
"4-Hydroxyproline": "P",
"Pyroglutamic acid": "E",
"Gamma carboxylic glutamic acid": "E",
"Any": "G",
"D-leucine": "L",
"D-phenylalanine": "F",
"D-methionine": "M",
"D-tryptophan": "W",
"D-tyrosine": "Y",
"Bromotryptophan": "W",
"glycosylated serine": "S",
"2_2-dimethylthiazolidine": "G",
"glycosylated threonine": "T",
"Oxomethionine": "M",
"Selenocystine (half)": "C",
"gamma-hydroxy-D-valine": "V",
"5-hydroxy-lysine": "K",
"Norleucine": "L",
"N-Acetate (on N-terminus)": "",
"3-iodotyrosine": "Y",
"5-amino-3-oxo-pentanoic acid": "G",
"2-amino-DL-dodecanoic acid": "G",
"Carbabridge [C2 unsaturated] (half)": "G",
"alpha-aminobutyric acid": "G",
"Asymmetric dimethylarginine": "R",
"4-(R)-amino-proline": "P",
"4-(S)-amino-proline": "P",
"4-(R)-guanidino-proline": "P",
"4-(R)-betainamidyl-proline": "P",
"4-(R)-fluoro-proline": "P",
"4-(S)-fluoro-proline": "P",
"4-(R)-phenyl-proline": "P",
"4-(S)-phenyl-proline": "P",
"4-(R)-benzyl-proline": "P",
"4-(S)-benzyl-proline": "P",
"4-(R)-1-naphtylmehyl-proline": "P",
"4-(S)-1-naphtylmehyl-proline": "P",
"3-(R)-phenyl-proline": "P",
"3-(S)-phenyl-proline": "P",
"5-(R)-phenyl-proline": "P",
"5-(S)-phenyl-proline": "P",
"Diiodotyrosine": "Y",
"D-alanine": "A",
"Carbabridge [C4 unsaturated] (half)": "G",
"Carbabridge [C4 saturated] (half)": "G",
"Carbabridge [C7 unsaturated] (half)": "G",
" L-4,5-dithiolnorvaline": "V",
}
class Toxin:
    """A single toxin record: amino-acid sequence plus database metadata.

    The record can compute a protein-descriptor feature vector (via pydpi's
    `PyPro`) from its sequence, optionally "cleaning" the sequence first so
    that non-standard residues are mapped back to standard one-letter codes
    (see `_clean_seq`).
    """
    def __init__(
        self,
        pid,
        seq,
        name,
        toxin_class,
        organism,
        geneSuperfamily,
        cysteineFramewrok,
        pharmacologicalFamily,
        isoelecticPoint,
        clean_seq=True,
    ):
        """Store the raw record fields.

        :param pid: protein identifier; a debug line is logged when it is None.
        :param seq: amino-acid sequence string (may be None).
        :param clean_seq: when True, `_calc_features` cleans the sequence
            (applies registered modifications, strips 'X') before featurizing.
        The remaining parameters are database metadata stored as-is. Parameter
        names (including original spellings `cysteineFramewrok` and
        `isoelecticPoint`) are kept unchanged for existing callers.
        """
        if pid is None:
            logging.debug("Protein id given: None")
        self.pid = pid
        self.seq = seq
        self.name = name
        self.toxin_class = toxin_class
        self.organism = organism
        self.geneSuperfamily = geneSuperfamily
        self.cysteineFramewrok = cysteineFramewrok
        self.pharmacologicalFamily = pharmacologicalFamily
        self.isoelecticPoint = isoelecticPoint
        self.clean_seq = clean_seq
        # lazily-computed feature vector cache (see get_features)
        self.features = None
        # list of modification dicts with "position" (1-based) and "name" keys
        self.modifications = []
    def get_pid(self):
        """Return the protein identifier."""
        return self.pid
    def get_organism(self):
        """Return the source organism."""
        return self.organism
    def get_seq(self):
        """Return the (possibly cleaned) amino-acid sequence."""
        return self.seq
    def get_pharmacologicalFamily(self):
        """Return the pharmacological family label."""
        return self.pharmacologicalFamily
    def get_features(self):
        """Return the feature vector, computing and caching it on first use.

        Bug fix: the original had an unreachable `return None` after the
        if/else (both branches already returned); the dead code is removed
        and the caching logic is kept identical.
        """
        if self.features is None:
            self._calc_features()
        return self.features
    def add_modification(self, mod):
        """Register a residue modification dict ({"position": ..., "name": ...})."""
        self.modifications.append(mod)
    def _calc_features(self, long_feats=False):
        """Compute PyPro descriptors for the sequence; cache in `self.features`.

        :param long_feats: when True, also append the tripeptide composition.
        :return: the computed feature list.
        :raises ZeroDivisionError: re-raised from PyPro on degenerate
            sequences (the offending sequence is logged first).
        """
        cds = PyPro()
        if self.clean_seq:
            self._clean_seq()
        cds.ReadProteinSequence(self.seq)
        try:
            all_feats = cds.GetALL()
            tpc_feats = cds.GetTPComp()
        except ZeroDivisionError:
            logging.warning(self.seq)
            raise
        all_feats_list = list(all_feats.values())
        # tripeptide composition is large, so it is optional
        tpc_feats_list = list(tpc_feats.values()) if long_feats else []
        self.features = all_feats_list + tpc_feats_list
        return self.features
    def _clean_seq(self):
        """For sequences that contain non-standard residues, the non-standard
        residues are replaced by their parent amino acids (per
        AA_MODIFICATIONS). In cases where no parent amino acid is available,
        residues are either deleted or replaced by a glycine residue.

        Bug fix: the original built the substituted `seq_list` but then
        discarded it, assigning only `self.seq.replace("X", "")`; the
        substitutions are now actually applied before stripping 'X'."""
        if self.seq is None:
            return
        seq_list = list(self.seq)
        for mod in self.modifications:
            position = int(mod["position"]) - 1  # positions are 1-based
            name = mod["name"]
            orig_aa = AA_MODIFICATIONS[name]
            try:
                seq_list[position] = orig_aa
            except IndexError:
                # modification position beyond sequence end: ignore it
                continue
        self.seq = "".join(seq_list).replace("X", "")
    def __len__(self):
        """Length of the sequence (0 when there is no sequence)."""
        if self.seq is not None:
            return len(self.seq)
        else:
            return 0
    def __str__(self):
        """Multi-line summary of the record, or None when there is no sequence.

        NOTE(review): returning None from __str__ makes str(obj) raise
        TypeError; kept as-is to preserve existing behavior for direct
        __str__() callers.
        """
        if self.seq is None:
            return None
        return f"{self.pid}\n{self.toxin_class}\n{self.geneSuperfamily}\n{self.organism}\n{self.seq}\n{self.pharmacologicalFamily}"
    def __eq__(self, pid):
        """Compare this record's pid to the given pid (not to another Toxin)."""
        return self.pid == pid
    def copy(self):
        """Return a new Toxin with the same metadata.

        Note: `modifications` and the cached `features` are NOT carried over;
        the copy starts with a fresh, empty state for both (as in __init__).
        """
        return Toxin(
            self.pid,
            self.seq,
            self.name,
            self.toxin_class,
            self.organism,
            self.geneSuperfamily,
            self.cysteineFramewrok,
            self.pharmacologicalFamily,
            self.isoelecticPoint,
            clean_seq=self.clean_seq,
        )
| 2,358 | 1,154 | 23 |
c068615ba5c8b41d1a83e195bf09fa87e4327bd6 | 247 | py | Python | exercicios/ex029.py | RaquelBotelhoof/Python-curso-em-video | 919b2f44e85647c096c6b734c991635f1bfd1af9 | [
"MIT"
] | null | null | null | exercicios/ex029.py | RaquelBotelhoof/Python-curso-em-video | 919b2f44e85647c096c6b734c991635f1bfd1af9 | [
"MIT"
] | null | null | null | exercicios/ex029.py | RaquelBotelhoof/Python-curso-em-video | 919b2f44e85647c096c6b734c991635f1bfd1af9 | [
"MIT"
] | null | null | null | v = int(input('Digite a velocidade do carro: '))
# Speed check: at or under the 80 km/h limit -> safe-trip message;
# over the limit -> fine of 7 (reais) per km/h above 80.
if v<=80:
    print('Dirija com segurança. Boa viagem.')
else:
    print('Você foi multado por exeder o limite de 80km/h.')
    # fine amount: 7 per km/h over the 80 km/h limit
    m = (v - 80) * 7
    print('A multa vai custar {:.2f} reais'.format(m))
if v<=80:
print('Dirija com segurança. Boa viagem.')
else:
print('Você foi multado por exeder o limite de 80km/h.')
m = (v - 80) * 7
print('A multa vai custar {:.2f} reais'.format(m)) | 0 | 0 | 0 |