hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2acf5cc827236e1632b4ab5f89604160d95afc87 | 102 | py | Python | yucheng_ner/__init__.py | 131250208/TPLinkerNER | ef7c9a4d9a3324f7a4e4a9f11727367c5ca4e4c0 | [
"MIT"
] | 1 | 2020-11-19T13:02:34.000Z | 2020-11-19T13:02:34.000Z | yucheng_ner/__init__.py | 131250208/TPLinkerNER | ef7c9a4d9a3324f7a4e4a9f11727367c5ca4e4c0 | [
"MIT"
] | null | null | null | yucheng_ner/__init__.py | 131250208/TPLinkerNER | ef7c9a4d9a3324f7a4e4a9f11727367c5ca4e4c0 | [
"MIT"
] | null | null | null | from yucheng_ner.tplinker_ner import tplinker_ner
from yucheng_ner.ner_common import components, utils | 51 | 52 | 0.892157 | from yucheng_ner.tplinker_ner import tplinker_ner
from yucheng_ner.ner_common import components, utils | 0 | 0 | 0 |
2e03545e72140c8b2d853118bc5b211a21e8896e | 1,742 | py | Python | misc/zmqsnoop.py | brgirgis/pyzmqrpc3 | a93339f98686e7f695f7c8a19dac198e4fc56aab | [
"MIT"
] | null | null | null | misc/zmqsnoop.py | brgirgis/pyzmqrpc3 | a93339f98686e7f695f7c8a19dac198e4fc56aab | [
"MIT"
] | null | null | null | misc/zmqsnoop.py | brgirgis/pyzmqrpc3 | a93339f98686e7f695f7c8a19dac198e4fc56aab | [
"MIT"
] | null | null | null |
'''
Created on Apr 2014
Edited on Oct 2020
@author: Jan Verhoeven
@author: Bassem Girgis
@copyright: MIT license, see http://opensource.org/licenses/MIT
'''
import argparse
import signal
import sys
from typing import Optional, Tuple
import zmq
# Handle OS signals (like keyboard interrupt)
signal.signal(signal.SIGINT, _signal_handler)
if __name__ == '__main__':
sys.exit(main())
| 21.775 | 73 | 0.619977 |
'''
Created on Apr 2014
Edited on Oct 2020
@author: Jan Verhoeven
@author: Bassem Girgis
@copyright: MIT license, see http://opensource.org/licenses/MIT
'''
import argparse
import signal
import sys
from typing import Optional, Tuple
import zmq
# Handle OS signals (like keyboard interrupt)
def _signal_handler(_, __):
print('Ctrl+C detected. Exiting...')
sys.exit(0)
signal.signal(signal.SIGINT, _signal_handler)
def _get_args(args) -> argparse.Namespace:
parser = argparse.ArgumentParser(
description='Reads and prints messages from a remote pub socket.'
)
parser.add_argument(
'--sub',
nargs='+',
required=True,
help='The PUB endpoint',
)
return parser.parse_args(args)
def main(args: Optional[Tuple[str]] = None) -> int:
p_args = _get_args(args)
print('Starting zmqsnoop...')
try:
context = zmq.Context()
# Subscribe to all provided end-points
sub_socket = context.socket(zmq.SUB)
sub_socket.setsockopt(zmq.SUBSCRIBE, b'')
for sub in p_args.sub:
sub_socket.connect(sub)
print('Connected to {0}'.format(sub))
while True:
# Process all parts of the message
try:
message_lines = sub_socket.recv_string().splitlines()
except Exception as e:
print('Error occurred with exception {0}'.format(e))
for line in message_lines:
print('>' + line)
except Exception as e:
print('Connection error {0}'.format(e))
# Never gets here, but close anyway
sub_socket.close()
print('Exiting zmqsnoop...')
return 0
if __name__ == '__main__':
sys.exit(main())
| 1,275 | 0 | 68 |
9b5f2a4b67b1ee2b3bd4b1a97fea1b31e9003769 | 2,038 | py | Python | T28_batch_ui.py | NathanMacDiarmid/ECOR-1051-Project | fae9e274ef57a29af511131908dcfb85e791af9a | [
"Unlicense"
] | null | null | null | T28_batch_ui.py | NathanMacDiarmid/ECOR-1051-Project | fae9e274ef57a29af511131908dcfb85e791af9a | [
"Unlicense"
] | null | null | null | T28_batch_ui.py | NathanMacDiarmid/ECOR-1051-Project | fae9e274ef57a29af511131908dcfb85e791af9a | [
"Unlicense"
] | null | null | null | # Submitted April 2, 2020
# Team 28:
# Nathan MacDiarmid 101098993
# Anita Ntomchukwu 101138391
# Sam Hurd 101146639
# Yahya Shah 101169280
# MILESTONE 3
# IMPORTS
from T28_image_filters import *
from Cimpl import *
# DEFINITIONS
def execute_filter(command: tuple) -> Image:
"""
Returns an image with the filters applied that are found in the batch file.
>>>execute_filter('image.jpg', 'test1.jpg', 'T')
image.jpg is saved as test1.jpg with the sepia filter applied
"""
input_filename, output_filename, filters = command
if filters == 'X':
image = extreme_contrast((input_filename))
return image
elif filters == 'T':
image = sepia((input_filename))
return image
elif filters == 'P':
image = posterize((input_filename))
return image
elif filters == 'V':
image = flip_vertical((input_filename))
return image
elif filters == 'H':
image = flip_horizontal((input_filename))
return image
elif filters == '2':
col1 = 'yellow'
col2 = 'cyan'
image = two_tone((input_filename), col1, col2)
return image
elif filters == '3':
col1 = 'yellow'
col2 = 'magenta'
col3 = 'cyan'
image = three_tone((input_filename), col1, col2, col3)
return image
elif filters == 'E':
image = detect_edges((input_filename), 10)
return image
elif filters == 'I':
image = detect_edges_better((input_filename), 10)
return image
# SCRIPTING
filename = input("Please input the name of the batch file: ")
batch_file = open(filename, 'r')
i = 0
count = len(open(filename).readlines())
newlist = [0] * count
for line in batch_file:
newline = line.split()
newlist[i] = tuple(newline)
i += 1
for x in newlist:
lenght = len(x)
i = 2
image = load_image(x[0])
while i < lenght:
image = execute_filter((image, x[1], x[i]))
i += 1
save_as(image, x[1])
batch_file.close()
| 22.395604 | 79 | 0.609421 | # Submitted April 2, 2020
# Team 28:
# Nathan MacDiarmid 101098993
# Anita Ntomchukwu 101138391
# Sam Hurd 101146639
# Yahya Shah 101169280
# MILESTONE 3
# IMPORTS
from T28_image_filters import *
from Cimpl import *
# DEFINITIONS
def execute_filter(command: tuple) -> Image:
"""
Returns an image with the filters applied that are found in the batch file.
>>>execute_filter('image.jpg', 'test1.jpg', 'T')
image.jpg is saved as test1.jpg with the sepia filter applied
"""
input_filename, output_filename, filters = command
if filters == 'X':
image = extreme_contrast((input_filename))
return image
elif filters == 'T':
image = sepia((input_filename))
return image
elif filters == 'P':
image = posterize((input_filename))
return image
elif filters == 'V':
image = flip_vertical((input_filename))
return image
elif filters == 'H':
image = flip_horizontal((input_filename))
return image
elif filters == '2':
col1 = 'yellow'
col2 = 'cyan'
image = two_tone((input_filename), col1, col2)
return image
elif filters == '3':
col1 = 'yellow'
col2 = 'magenta'
col3 = 'cyan'
image = three_tone((input_filename), col1, col2, col3)
return image
elif filters == 'E':
image = detect_edges((input_filename), 10)
return image
elif filters == 'I':
image = detect_edges_better((input_filename), 10)
return image
# SCRIPTING
filename = input("Please input the name of the batch file: ")
batch_file = open(filename, 'r')
i = 0
count = len(open(filename).readlines())
newlist = [0] * count
for line in batch_file:
newline = line.split()
newlist[i] = tuple(newline)
i += 1
for x in newlist:
lenght = len(x)
i = 2
image = load_image(x[0])
while i < lenght:
image = execute_filter((image, x[1], x[i]))
i += 1
save_as(image, x[1])
batch_file.close()
| 0 | 0 | 0 |
88dc25a7bff37aeba8e20d34161d9fc923acd8ac | 544 | py | Python | python/p003.py | livioribeiro/project-euler | 71f915b1ddad90c3a5b805cad7047cd6e4ce64ed | [
"MIT"
] | 2 | 2015-12-16T18:39:23.000Z | 2015-12-19T03:49:07.000Z | python/p003.py | livioribeiro/project-euler | 71f915b1ddad90c3a5b805cad7047cd6e4ce64ed | [
"MIT"
] | null | null | null | python/p003.py | livioribeiro/project-euler | 71f915b1ddad90c3a5b805cad7047cd6e4ce64ed | [
"MIT"
] | null | null | null | """
The prime factors of 13195 are 5, 7, 13 and 29.
What is the largest prime factor of the number 600851475143?
"""
import math
INPUT = 600851475143
if __name__ == '__main__':
for i in range(math.ceil(math.sqrt(INPUT)), 1, -2):
if INPUT % i == 0 and is_prime(i):
print(i)
break
| 18.758621 | 60 | 0.558824 | """
The prime factors of 13195 are 5, 7, 13 and 29.
What is the largest prime factor of the number 600851475143?
"""
import math
def is_prime(num):
if num <= 2:
return True
if num % 2 == 0:
return False
for i in range(3, math.ceil(math.sqrt(num)) + 1, 2):
if num % i == 0:
return False
return True
INPUT = 600851475143
if __name__ == '__main__':
for i in range(math.ceil(math.sqrt(INPUT)), 1, -2):
if INPUT % i == 0 and is_prime(i):
print(i)
break
| 202 | 0 | 23 |
cbbe64b49ba07940c302c5be1b82d33d7e5ea708 | 3,841 | py | Python | gpn/models/matern_ggp.py | WodkaRHR/Graph-Posterior-Network | 139e7c45c37324c9286e0cca60360a4978b3f411 | [
"MIT"
] | 23 | 2021-11-16T01:31:55.000Z | 2022-03-04T05:49:03.000Z | gpn/models/matern_ggp.py | WodkaRHR/Graph-Posterior-Network | 139e7c45c37324c9286e0cca60360a4978b3f411 | [
"MIT"
] | 1 | 2021-12-17T01:25:16.000Z | 2021-12-20T10:38:30.000Z | gpn/models/matern_ggp.py | WodkaRHR/Graph-Posterior-Network | 139e7c45c37324c9286e0cca60360a4978b3f411 | [
"MIT"
] | 7 | 2021-12-03T11:13:44.000Z | 2022-02-06T03:12:10.000Z | from typing import Tuple
import torch
import os
import tensorflow as tf
import networkx as nx
import scipy as sp
import numpy as np
import torch_geometric.utils as tu
from torch_geometric.data import Data
import gpflow
from gpn.utils import ModelConfiguration
from .gpflow_gpp import GPFLOWGGP
from .matern_ggp_utils import GPInducingVariables, GraphMaternKernel, optimize_SVGP
gpflow.config.set_default_float(tf.float64)
gpflow.config.set_default_summary_fmt("notebook")
tf.get_logger().setLevel('ERROR')
class MaternGGP(GPFLOWGGP):
"""model wrapping MaternGGP into our pipeline
code taken from https://github.com/spbu-math-cs/Graph-Gaussian-Processes
"""
| 36.580952 | 106 | 0.660505 | from typing import Tuple
import torch
import os
import tensorflow as tf
import networkx as nx
import scipy as sp
import numpy as np
import torch_geometric.utils as tu
from torch_geometric.data import Data
import gpflow
from gpn.utils import ModelConfiguration
from .gpflow_gpp import GPFLOWGGP
from .matern_ggp_utils import GPInducingVariables, GraphMaternKernel, optimize_SVGP
gpflow.config.set_default_float(tf.float64)
gpflow.config.set_default_summary_fmt("notebook")
tf.get_logger().setLevel('ERROR')
class MaternGGP(GPFLOWGGP):
"""model wrapping MaternGGP into our pipeline
code taken from https://github.com/spbu-math-cs/Graph-Gaussian-Processes
"""
def __init__(self, params: ModelConfiguration):
super().__init__(params)
self.nu = 3/2
self.kappa = 5
self.sigma_f = 1.0
self.epochs = 20_000
self.learning_rate = 0.001
self.num_eigenpairs = 500
def _train_model(self, data: Data) -> None:
num_classes = self.params.num_classes
num_train = data.train_mask.sum().item()
dtype = tf.float64
x_id_all = torch.arange(data.x.size(0)).double().view(-1, 1)
y_all = data.y.double()
x_train = x_id_all[data.train_mask].cpu().numpy()
y_train = y_all[data.train_mask].cpu().numpy()
x_id_all = x_id_all.cpu().numpy()
y_all = y_all.cpu().numpy()
data_train = (x_train, y_train)
eigen_dir = os.path.join(os.getcwd(), 'saved_experiments', 'uncertainty_experiments')
eigen_dir = os.path.join(eigen_dir, 'eigenpairs', self.storage_params['dataset'])
if os.path.exists(eigen_dir):
eigenvalues = tf.convert_to_tensor(np.load(
os.path.join(eigen_dir, 'eigenvalues.npy'), allow_pickle=False))
eigenvectors = tf.convert_to_tensor(
np.load(os.path.join(eigen_dir, 'eigenvectors.npy'), allow_pickle=False))
else:
os.makedirs(eigen_dir)
G = tu.to_networkx(data, to_undirected=True)
laplacian = sp.sparse.csr_matrix(nx.laplacian_matrix(G), dtype=np.float64)
if self.num_eigenpairs >= len(G):
num_eigenpairs = len(G)
else:
num_eigenpairs = self.num_eigenpairs
eigenvalues, eigenvectors = tf.linalg.eigh(laplacian.toarray())
eigenvectors, eigenvalues = eigenvectors[:, :num_eigenpairs], eigenvalues[:num_eigenpairs]
np.save(os.path.join(eigen_dir, 'eigenvalues.npy'), eigenvalues.numpy(), allow_pickle=False)
np.save(os.path.join(eigen_dir, 'eigenvectors.npy'), eigenvectors.numpy(), allow_pickle=False)
eigenvalues = tf.convert_to_tensor(eigenvalues, dtype=dtype)
eigenvectors = tf.convert_to_tensor(eigenvectors, dtype)
inducing_points = GPInducingVariables(x_train)
kernel = GraphMaternKernel(
(eigenvectors, eigenvalues), nu=self.nu, kappa=self.kappa, sigma_f=self.sigma_f,
vertex_dim=0, point_kernel=None, dtype=dtype)
model = gpflow.models.SVGP(
kernel=kernel,
likelihood=gpflow.likelihoods.MultiClass(num_classes),
inducing_variable=inducing_points,
num_latent_gps=num_classes,
whiten=True,
q_diag=True,
)
adam_opt = tf.optimizers.Adam(self.learning_rate)
natgrad_opt = gpflow.optimizers.NaturalGradient(gamma=self.learning_rate)
optimize_SVGP(model, (adam_opt, natgrad_opt), self.epochs, data_train, num_train, True)
self.model = model
def _predict(self, data: Data) -> Tuple[np.array, np.array]:
x_id_all = torch.arange(data.x.size(0)).double().view(-1, 1).cpu().numpy()
mean, var = self.model.predict_y(x_id_all)
return mean, var
| 3,082 | 0 | 80 |
5c8bbe5f5e6b002ce0ad5bbf441e36d1ceb4eeb0 | 3,664 | py | Python | analytics_utils/autocorrelation.py | patricksferraz/analytics-utils | 3b083e1d5eec9825bddf536d1f05db0643b2a710 | [
"MIT"
] | 1 | 2019-08-14T02:41:55.000Z | 2019-08-14T02:41:55.000Z | analytics_utils/autocorrelation.py | patricksferraz/analytics-utils | 3b083e1d5eec9825bddf536d1f05db0643b2a710 | [
"MIT"
] | null | null | null | analytics_utils/autocorrelation.py | patricksferraz/analytics-utils | 3b083e1d5eec9825bddf536d1f05db0643b2a710 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
This is the find module.
The find module supplies one function,
autocorrelation()
"""
from statsmodels.tsa.stattools import acf
import pandas as pd
def autocorrelation(
data_frame: pd.DataFrame,
unbiased: bool = False,
nlags: int = 40,
fft: bool = None,
alpha: float = None,
missing: str = "none",
headers: [str] = None,
) -> pd.DataFrame:
"""Autocorrelation function for 1d arrays. This is a adapted acf function
of statsmodels package.
Parameters
----------
data_frame : pd.DataFrame
Input dataframe
unbiased : bool, optional
See statsmodels.tsa.stattools.acf, by default False
nlags : int, optional
See statsmodels.tsa.stattools.acf, by default 40
fft : bool, optional
See statsmodels.tsa.stattools.acf, by default None
alpha : float, optional
See statsmodels.tsa.stattools.acf, by default None
missing : str, optional
See statsmodels.tsa.stattools.acf, by default "none"
headers : [type], optional
Chosen dataframe headers, by default None
Returns
-------
pd.DataFrame
A object with autocorrelation function.
"""
if headers:
data_frame = data_frame.loc[:, headers]
return pd.DataFrame(
{
"acf": acf(
data_frame,
unbiased=unbiased,
nlags=nlags,
fft=fft,
alpha=alpha,
missing=missing,
)
}
)
if __name__ == "__main__":
import argparse
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument(
"-d", "--dataset", required=True, help="path to input dataset"
)
ap.add_argument(
"-f", "--file-out", type=str, help="path to file of output json"
)
ap.add_argument(
"-o",
"--orient",
type=str,
default="columns",
help="""format json output
{'split', 'records', 'index', 'values', 'table', 'columns'}
(default: 'columns')""",
)
ap.add_argument(
"-pd",
"--parse-dates",
type=str,
nargs="*",
help="""Headers of columns to parse dates. A column named datetime is
created.""",
)
ap.add_argument(
"-i",
"--index",
type=str,
nargs="*",
help="Headers of columns to set as index.",
)
ap.add_argument(
"-hd",
"--headers",
type=str,
nargs="*",
help="an string for the header in the dataset",
)
ap.add_argument("--unbiased", type=bool, default=False)
ap.add_argument("--nlags", type=int, default=40)
ap.add_argument("--fft", type=bool, default=None)
ap.add_argument("--alpha", type=float, default=None)
ap.add_argument("--missing", type=str, default="none")
args = vars(ap.parse_args())
# If exist parse_dates, creates a structure with column name datetime
if args["parse_dates"]:
args["parse_dates"] = {"datetime": args["parse_dates"]}
# Apply
result = autocorrelation(
pd.read_csv(
args["dataset"],
parse_dates=args["parse_dates"],
index_col=args["index"],
),
unbiased=args["unbiased"],
nlags=args["nlags"],
fft=args["fft"],
alpha=args["alpha"],
missing=args["missing"],
headers=args["headers"],
)
# Output in json format
result = result.to_json(
args.get("file_out"), force_ascii=False, orient=args["orient"]
)
if result:
print(result)
| 26.550725 | 77 | 0.567959 | # -*- coding: utf-8 -*-
"""
This is the find module.
The find module supplies one function,
autocorrelation()
"""
from statsmodels.tsa.stattools import acf
import pandas as pd
def autocorrelation(
data_frame: pd.DataFrame,
unbiased: bool = False,
nlags: int = 40,
fft: bool = None,
alpha: float = None,
missing: str = "none",
headers: [str] = None,
) -> pd.DataFrame:
"""Autocorrelation function for 1d arrays. This is a adapted acf function
of statsmodels package.
Parameters
----------
data_frame : pd.DataFrame
Input dataframe
unbiased : bool, optional
See statsmodels.tsa.stattools.acf, by default False
nlags : int, optional
See statsmodels.tsa.stattools.acf, by default 40
fft : bool, optional
See statsmodels.tsa.stattools.acf, by default None
alpha : float, optional
See statsmodels.tsa.stattools.acf, by default None
missing : str, optional
See statsmodels.tsa.stattools.acf, by default "none"
headers : [type], optional
Chosen dataframe headers, by default None
Returns
-------
pd.DataFrame
A object with autocorrelation function.
"""
if headers:
data_frame = data_frame.loc[:, headers]
return pd.DataFrame(
{
"acf": acf(
data_frame,
unbiased=unbiased,
nlags=nlags,
fft=fft,
alpha=alpha,
missing=missing,
)
}
)
if __name__ == "__main__":
import argparse
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument(
"-d", "--dataset", required=True, help="path to input dataset"
)
ap.add_argument(
"-f", "--file-out", type=str, help="path to file of output json"
)
ap.add_argument(
"-o",
"--orient",
type=str,
default="columns",
help="""format json output
{'split', 'records', 'index', 'values', 'table', 'columns'}
(default: 'columns')""",
)
ap.add_argument(
"-pd",
"--parse-dates",
type=str,
nargs="*",
help="""Headers of columns to parse dates. A column named datetime is
created.""",
)
ap.add_argument(
"-i",
"--index",
type=str,
nargs="*",
help="Headers of columns to set as index.",
)
ap.add_argument(
"-hd",
"--headers",
type=str,
nargs="*",
help="an string for the header in the dataset",
)
ap.add_argument("--unbiased", type=bool, default=False)
ap.add_argument("--nlags", type=int, default=40)
ap.add_argument("--fft", type=bool, default=None)
ap.add_argument("--alpha", type=float, default=None)
ap.add_argument("--missing", type=str, default="none")
args = vars(ap.parse_args())
# If exist parse_dates, creates a structure with column name datetime
if args["parse_dates"]:
args["parse_dates"] = {"datetime": args["parse_dates"]}
# Apply
result = autocorrelation(
pd.read_csv(
args["dataset"],
parse_dates=args["parse_dates"],
index_col=args["index"],
),
unbiased=args["unbiased"],
nlags=args["nlags"],
fft=args["fft"],
alpha=args["alpha"],
missing=args["missing"],
headers=args["headers"],
)
# Output in json format
result = result.to_json(
args.get("file_out"), force_ascii=False, orient=args["orient"]
)
if result:
print(result)
| 0 | 0 | 0 |
ebfc27beaee1b32063685d09cce85b000da8edf1 | 431 | py | Python | Ex029.py | raphaeltertuliano/Python | ffa9813aaa13ccca807f7c08be9489a2d88d3d62 | [
"MIT"
] | 1 | 2021-11-23T21:38:46.000Z | 2021-11-23T21:38:46.000Z | Ex029.py | raphaeltertuliano/Python | ffa9813aaa13ccca807f7c08be9489a2d88d3d62 | [
"MIT"
] | null | null | null | Ex029.py | raphaeltertuliano/Python | ffa9813aaa13ccca807f7c08be9489a2d88d3d62 | [
"MIT"
] | null | null | null | #Escreva um progrma que leia a velocidade de um carro.
#Se ele ultrapassar 80Km/h, mostre um mensagem de que ele foi multado
#A multa vai custar R$7,00 por cada Km acima do limite.
v = float(input('Velocidade do carro: '))
if v <= 80:
print('Dentro do limite de velocidade. Boa viagem')
else:
print(f'Velocidade: {v:.1f}Km/h. Acima do limite!')
p = (v - 80)*7
print(f'Você foi multado. Valor da multa: R${p:.2f}')
| 35.916667 | 69 | 0.677494 | #Escreva um progrma que leia a velocidade de um carro.
#Se ele ultrapassar 80Km/h, mostre um mensagem de que ele foi multado
#A multa vai custar R$7,00 por cada Km acima do limite.
v = float(input('Velocidade do carro: '))
if v <= 80:
print('Dentro do limite de velocidade. Boa viagem')
else:
print(f'Velocidade: {v:.1f}Km/h. Acima do limite!')
p = (v - 80)*7
print(f'Você foi multado. Valor da multa: R${p:.2f}')
| 0 | 0 | 0 |
5dce94849acc5a2df23e97ba2c96d11861c4c527 | 455 | py | Python | Python/middle-of-the-linked-list.py | Ravan339/LeetCode | 4276e562aa67e4c39cd92be5d2d6700a9465a579 | [
"MIT"
] | 4 | 2019-12-09T20:23:17.000Z | 2021-11-24T08:59:21.000Z | Python/middle-of-the-linked-list.py | Ravan339/LeetCode | 4276e562aa67e4c39cd92be5d2d6700a9465a579 | [
"MIT"
] | null | null | null | Python/middle-of-the-linked-list.py | Ravan339/LeetCode | 4276e562aa67e4c39cd92be5d2d6700a9465a579 | [
"MIT"
] | 9 | 2020-03-15T23:32:26.000Z | 2022-02-25T05:51:26.000Z | # https://leetcode.com/problems/middle-of-the-linked-list/
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
| 19.782609 | 58 | 0.531868 | # https://leetcode.com/problems/middle-of-the-linked-list/
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
def middleNode(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
p1, p2 = head, head
while p2 and p2.next:
p1 = p1.next
p2 = p2.next.next
return p1
| 0 | 237 | 23 |
bda2a5e2ce26d03c188538f3c96972517acbd7d7 | 1,723 | py | Python | tests/test_field_amount.py | BitySA/swissdta | 046f466610e1197eea1e04683085b7008898c93a | [
"MIT"
] | null | null | null | tests/test_field_amount.py | BitySA/swissdta | 046f466610e1197eea1e04683085b7008898c93a | [
"MIT"
] | 3 | 2017-10-21T08:45:01.000Z | 2021-06-02T00:16:43.000Z | tests/test_field_amount.py | BitySA/swissdta | 046f466610e1197eea1e04683085b7008898c93a | [
"MIT"
] | 2 | 2017-10-20T09:45:52.000Z | 2018-12-03T16:00:40.000Z | """Tests for the Amount field"""
from decimal import Decimal
import pytest
from swissdta.fields import Amount
from swissdta.records.record import DTARecord
FIELD_LENGTH = 8
class ARecord(DTARecord):
"""Subclass of DTARecord for testing the Numeric field"""
field = Amount(length=FIELD_LENGTH)
@pytest.mark.parametrize(('value', 'expected_value'), (
(Decimal('1_4_3'), '143, '),
(Decimal('14_00_0'), '14000, '),
(Decimal(0b11), '3, '),
(Decimal(0B11), '3, '),
(Decimal(0b11_11), '15, '),
(Decimal(0B11_1), '7, '),
(Decimal(0o17), '15, '),
(Decimal(0O31), '25, '),
(Decimal(0o10_42), '546, '),
(Decimal(0O23_5), '157, '),
(Decimal(0xAF), '175, '),
(Decimal(0Xa3), '163, '),
(Decimal(0xf4_4c), '62540, '),
(Decimal(0Xfb_1), '4017, '),
(Decimal('5.34'), '5,34 ')
))
@pytest.mark.parametrize(('value', 'expected_errors'), (
(Decimal('5'), tuple()),
(Decimal('5.'), tuple()),
(Decimal('-5'), ("[field] INVALID: May not be negative",)),
(Decimal('-5.'), ("[field] INVALID: May not be negative",)),
(Decimal('0'), ("[field] INVALID: May not be zero",)),
(Decimal('0.'), ("[field] INVALID: May not be zero",))
))
def test_invalid_values(value, expected_errors):
"""Verify that non positive values are detected"""
record = ARecord()
record.field = value
assert not record.validation_warnings
assert record.validation_errors == expected_errors
| 30.22807 | 64 | 0.612304 | """Tests for the Amount field"""
from decimal import Decimal
import pytest
from swissdta.fields import Amount
from swissdta.records.record import DTARecord
FIELD_LENGTH = 8
class ARecord(DTARecord):
"""Subclass of DTARecord for testing the Numeric field"""
field = Amount(length=FIELD_LENGTH)
@pytest.mark.parametrize(('value', 'expected_value'), (
(Decimal('1_4_3'), '143, '),
(Decimal('14_00_0'), '14000, '),
(Decimal(0b11), '3, '),
(Decimal(0B11), '3, '),
(Decimal(0b11_11), '15, '),
(Decimal(0B11_1), '7, '),
(Decimal(0o17), '15, '),
(Decimal(0O31), '25, '),
(Decimal(0o10_42), '546, '),
(Decimal(0O23_5), '157, '),
(Decimal(0xAF), '175, '),
(Decimal(0Xa3), '163, '),
(Decimal(0xf4_4c), '62540, '),
(Decimal(0Xfb_1), '4017, '),
(Decimal('5.34'), '5,34 ')
))
def test_format_values(value, expected_value):
record = ARecord()
record.field = value
assert record.field == expected_value
assert not record.validation_warnings
assert not record.validation_errors
@pytest.mark.parametrize(('value', 'expected_errors'), (
(Decimal('5'), tuple()),
(Decimal('5.'), tuple()),
(Decimal('-5'), ("[field] INVALID: May not be negative",)),
(Decimal('-5.'), ("[field] INVALID: May not be negative",)),
(Decimal('0'), ("[field] INVALID: May not be zero",)),
(Decimal('0.'), ("[field] INVALID: May not be zero",))
))
def test_invalid_values(value, expected_errors):
"""Verify that non positive values are detected"""
record = ARecord()
record.field = value
assert not record.validation_warnings
assert record.validation_errors == expected_errors
| 197 | 0 | 22 |
ce2e8e0ca9f87c9bcb289954d8f5c250c1e39f69 | 3,064 | py | Python | src/scripts/ct.py | xuanxiaoliqu/CRC4Docker | 5ee26f9a590b727693202d8ad3b6460970304bd9 | [
"MIT"
] | 1 | 2020-10-26T12:02:08.000Z | 2020-10-26T12:02:08.000Z | src/scripts/ct.py | TonyZPW/CRC4Docker | e52a6e88d4469284a071c0b96d009f6684dbb2ea | [
"MIT"
] | null | null | null | src/scripts/ct.py | TonyZPW/CRC4Docker | e52a6e88d4469284a071c0b96d009f6684dbb2ea | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#******************************************************************************
# Name: ct.py
# Purpose: determine classification accuracy and contingency table
# from test data
# Usage:
# python ct.py
#
# Copyright (c) 2018, Mort Canty
import numpy as np
import contextlib
import sys, getopt
@contextlib.contextmanager
if __name__ == '__main__':
main() | 31.265306 | 79 | 0.465731 | #!/usr/bin/env python
#******************************************************************************
# Name: ct.py
# Purpose: determine classification accuracy and contingency table
# from test data
# Usage:
# python ct.py
#
# Copyright (c) 2018, Mort Canty
import numpy as np
import contextlib
import sys, getopt
@contextlib.contextmanager
def printoptions(*args, **kwargs):
original = np.get_printoptions()
np.set_printoptions(*args, **kwargs)
yield
np.set_printoptions(**original)
def main():
usage = '''
Usage:
python %s testfile ''' %sys.argv[0]
options, args = getopt.getopt(sys.argv[1:],'h')
for option, _ in options:
if option == '-h':
print usage
return
if len(args) != 1:
print 'Incorrect number of arguments'
print usage
sys.exit(1)
tstfile = args[0]
if not tstfile:
return
print '========================='
print 'classification statistics'
print '========================='
with open(tstfile,'r') as f:
line = ''
for i in range(4):
line += f.readline()
print line
line = f.readline().split()
n = int(line[0])
K = int(line[1])
CT = np.zeros((K+2,K+2))
# fill the contingency table
y = 0.0
line = f.readline()
while line:
k = map(int,line.split())
k1 = k[0]-1
k2 = k[1]-1
CT[k1,k2] += 1
if k1 != k2:
y += 1
line = f.readline()
f.close()
CT[K,:] = np.sum(CT, axis=0)
CT[:,K] = np.sum(CT, axis=1)
for i in range(K):
CT[K+1,i] = CT[i,i]/CT[K,i]
CT[i,K+1] = CT[i,i]/CT[i,K]
# overall misclassification rate
sigma = np.sqrt(y*(n-y)/n**3)
low = (y+1.921-1.96*np.sqrt(0.96+y*(n-y)/n))/(3.842+n)
high= (y+1.921+1.96*np.sqrt(0.96+y*(n-y)/n))/(3.842+n)
print 'Misclassification rate: %f'%(y/n)
print 'Standard deviation: %f'%sigma
print 'Conf. interval (95 percent): [%f , %f]'%(low, high)
# Kappa coefficient
t1 = float(n-y)/n
t2 = np.sum(CT[K,0:K]*np.transpose(CT[0:K,K]))/n**2
Kappa = (t1 - t2)/(1 - t2)
t3 = 0.0
for i in range(K):
t3 = t3 + CT[i,i]*(CT[K,i]+CT[i,K])
t3 = t3/n**2
t4 = 0.0
for i in range(K):
for j in range(K):
t4 += CT[j,i]*(CT[K,j]+CT[i,K])**2
t4 = t4/n**3
sigma2 = t1*(1-t1)/(1-t2)**2
sigma2 = sigma2 + 2*(1-t1)*(2*t1*t2-t3)/(1-t2)**3
sigma2 = sigma2 + ((1-t1)**2)*(t4-4*t2**2)/(1-t2)**4
sigma = np.sqrt(sigma2/n)
print 'Kappa coefficient: %f'%Kappa
print 'Standard deviation: %f'%sigma
print 'Contingency Table'
with printoptions(precision=3, linewidth = 200, suppress=True):
print CT
if __name__ == '__main__':
main() | 2,585 | 0 | 45 |
2d974f6f2e2ec53dacc65c4d74b242efd37bc595 | 2,085 | py | Python | botcommands/morbidity.py | pastorhudson/mtb-pykeybasebot | af977f5823b178c91fb870058369f8a65205f7d6 | [
"BSD-3-Clause"
] | null | null | null | botcommands/morbidity.py | pastorhudson/mtb-pykeybasebot | af977f5823b178c91fb870058369f8a65205f7d6 | [
"BSD-3-Clause"
] | null | null | null | botcommands/morbidity.py | pastorhudson/mtb-pykeybasebot | af977f5823b178c91fb870058369f8a65205f7d6 | [
"BSD-3-Clause"
] | null | null | null | import gspread
import pandas as pd
from datetime import datetime
import os
import json
# print(os.environ.get('google_p_key'))
credentials = json.loads(os.environ.get('google_p_key'))
gc = gspread.service_account_from_dict(credentials)
sh = gc.open_by_key("1b9o6uDO18sLxBqPwl_Gh9bnhW-ev_dABH83M5Vb5L8o")
worksheet = sh.sheet1
dataframe = pd.DataFrame(worksheet.get_all_records())
last_date = sh.sheet1.get('C2')[0][0]
last_date = datetime.strptime(last_date, "%m/%d/%y")
tspan = datetime.now() - last_date
days_this_year = (datetime.now() - datetime(datetime.now().year, 1, 1)).days
# print(days_this_year)
#
#
# if __name__ == "__main__":
# print(get_morbid()) | 30.661765 | 84 | 0.656595 | import gspread
import pandas as pd
from datetime import datetime
import os
import json
# print(os.environ.get('google_p_key'))
credentials = json.loads(os.environ.get('google_p_key'))
gc = gspread.service_account_from_dict(credentials)
sh = gc.open_by_key("1b9o6uDO18sLxBqPwl_Gh9bnhW-ev_dABH83M5Vb5L8o")
worksheet = sh.sheet1
dataframe = pd.DataFrame(worksheet.get_all_records())
last_date = sh.sheet1.get('C2')[0][0]
last_date = datetime.strptime(last_date, "%m/%d/%y")
tspan = datetime.now() - last_date
days_this_year = (datetime.now() - datetime(datetime.now().year, 1, 1)).days
# print(days_this_year)
def get_years_avarage():
start_year = 1982
year_avg = []
while start_year < datetime.now().year:
df = dataframe[dataframe['year'] == start_year]
avg = int(df.count()[['case']].to_string(index=False)) / 365
year_avg.append(avg)
start_year += 1
chance = 100 - (sum(year_avg) / len(year_avg) * 100)
# print(year_avg)
return round(chance, 2)
def get_weapon():
n = 5
weapons = dataframe['weapon_type'].value_counts()[:n].index.tolist()
w_msg = "Top 5 Frequently used Weapons:\n"
for weapon in weapons:
w_msg += f"- {weapon}\n"
return w_msg
def get_morbid():
    """Build the chat message summarising this year's mass-shooting data:
    case count, injured/fatality totals, days since the last case, the
    percentage from get_years_avarage(), and the top-weapons list."""
    # selecting rows based on condition
    rslt_df = dataframe[dataframe['year'] == datetime.now().year]
    # print(rslt_df[["case", "fatalities", "injured", "mental_health_details"]])
    msg = f"Mass Shooting Data for {datetime.now().year}\n"
    # Triple backticks open a code block in the chat client's markup.
    msg += f"```Cases: {rslt_df.count()[['case']].to_string(index=False)}\n"
    msg += rslt_df.sum()[['injured', 'fatalities']].to_string()
    # msg +=
    # `tspan` is the module-level timedelta since the last recorded case.
    msg += f"\nDays since last case: {tspan.days}\n" \
           f"{get_years_avarage()}% likelyhood there is no mass shooting today```"
    msg += "A mass shooting is 3 or more people being killed.\n" \
           "We are tracking random acts unrelated to other disputes or rivalries.\n"
    msg += "Other Data:\n```"
    msg += f"{get_weapon()}```"
    return msg
#
#
# if __name__ == "__main__":
# print(get_morbid()) | 1,343 | 0 | 69 |
6252e057a7f774fa6c73d66594dcda75d9fbb137 | 689 | py | Python | MetaheuristicOptimization/Assignment2/TEST/NQUEENS_CODE_AND_DATA/convert.py | bhattacharjee/ml-assignments | 631492b1f1aa1ace5365abfa7fec9c187e99d28a | [
"MIT"
] | null | null | null | MetaheuristicOptimization/Assignment2/TEST/NQUEENS_CODE_AND_DATA/convert.py | bhattacharjee/ml-assignments | 631492b1f1aa1ace5365abfa7fec9c187e99d28a | [
"MIT"
] | null | null | null | MetaheuristicOptimization/Assignment2/TEST/NQUEENS_CODE_AND_DATA/convert.py | bhattacharjee/ml-assignments | 631492b1f1aa1ace5365abfa7fec9c187e99d28a | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import sys
# Convert a UTF-16LE report file to CSV: keep only lines starting with '=',
# strip the two-character marker prefix, and emit the "Run..." header first.
filename = sys.argv[1]
# Replace the 3-character extension (e.g. "txt") with "csv".
out_filename = filename[:-3] + "csv"
with open(filename, "r", encoding='utf-16le') as inputFile:
    with open(out_filename, "w") as outputFile:
        lines = [line.strip() for line in inputFile.readlines()]
        # Data lines are marked with a leading '='; drop the marker.
        lines = [line[2:] for line in lines if line.startswith('=')]
        final_lines = []
        header_line = None
        for line in lines:
            if line.startswith("Run"):
                header_line = line
            else:
                final_lines.append(line)
        # NOTE(review): sys.stdout is redirected to the output file and never
        # restored, and the explicit close() calls are redundant inside the
        # with-blocks.
        sys.stdout = outputFile
        print(header_line)
        [print(line) for line in final_lines]
        inputFile.close()
        outputFile.close()
| 28.708333 | 68 | 0.596517 | #!/usr/bin/python3
import sys
# Convert a UTF-16LE report file to CSV: keep only lines starting with '=',
# strip the two-character marker prefix, and emit the "Run..." header first.
filename = sys.argv[1]
# Replace the 3-character extension (e.g. "txt") with "csv".
out_filename = filename[:-3] + "csv"
with open(filename, "r", encoding='utf-16le') as inputFile:
    with open(out_filename, "w") as outputFile:
        lines = [line.strip() for line in inputFile.readlines()]
        # Data lines are marked with a leading '='; drop the marker.
        lines = [line[2:] for line in lines if line.startswith('=')]
        final_lines = []
        header_line = None
        for line in lines:
            if line.startswith("Run"):
                header_line = line
            else:
                final_lines.append(line)
        # Write directly to the file instead of hijacking sys.stdout: the
        # original redirected sys.stdout and never restored it, leaving the
        # interpreter printing to a closed file afterwards.  The explicit
        # close() calls were also redundant inside the with-blocks.
        print(header_line, file=outputFile)
        for line in final_lines:
            print(line, file=outputFile)
| 0 | 0 | 0 |
0782aa9b1306a84d232eb6fb81a8e64dd65ec477 | 1,039 | py | Python | cli/iotexetl/utils/iotex_utils.py | blockchain-etl/iotex-etl | bd350c3190acac35d17532eff383e05d08011e24 | [
"MIT"
] | 3 | 2020-07-04T13:53:38.000Z | 2020-07-30T15:07:35.000Z | cli/iotexetl/utils/iotex_utils.py | blockchain-etl/iotex-etl | bd350c3190acac35d17532eff383e05d08011e24 | [
"MIT"
] | 13 | 2020-07-16T06:07:33.000Z | 2020-08-20T10:35:10.000Z | cli/iotexetl/utils/iotex_utils.py | blockchain-etl/iotex-etl | bd350c3190acac35d17532eff383e05d08011e24 | [
"MIT"
] | 1 | 2021-01-20T10:06:20.000Z | 2021-01-20T10:06:20.000Z | import bech32
from eth_hash.auto import keccak as keccak_256
DEFAULT_ADDRESS_PREFIX = 'io'
def pubkey_to_address(pubkey, prefix=None):
    """This implements the algorithm described here https://github.com/iotexproject/iotex-address"""
    # Fall back to the module-wide human-readable prefix ("io" by default).
    if prefix is None:
        prefix = DEFAULT_ADDRESS_PREFIX
    # Reject missing/empty key material.
    if pubkey is None or len(pubkey) < 1:
        return None
    # Keccak-256 of the key bytes, skipping the first (format) byte.
    pubkey_hash = keccak_256(pubkey[1:])
    if pubkey_hash is None or len(pubkey_hash) < 12:
        return None
    # Address payload is the digest with the first 12 bytes dropped.
    payload = pubkey_hash[12:]
    return bech32_encode(prefix, payload)
| 27.342105 | 100 | 0.732435 | import bech32
from eth_hash.auto import keccak as keccak_256
DEFAULT_ADDRESS_PREFIX = 'io'
def set_iotex_utils_context(address_prefix):
    """Override the module-wide bech32 address prefix used by
    pubkey_to_address() when no explicit prefix is supplied."""
    global DEFAULT_ADDRESS_PREFIX
    DEFAULT_ADDRESS_PREFIX = address_prefix
def pubkey_to_address(pubkey, prefix=None):
    """This implements the algorithm described here https://github.com/iotexproject/iotex-address"""
    # Fall back to the module-wide human-readable prefix when none is given.
    effective_prefix = DEFAULT_ADDRESS_PREFIX if prefix is None else prefix
    if pubkey is None or len(pubkey) < 1:
        return None
    # Keccak-256 of the key material, skipping the first (format) byte.
    digest = keccak_256(pubkey[1:])
    if digest is None or len(digest) < 12:
        return None
    # The address payload is everything after the first 12 digest bytes.
    return bech32_encode(effective_prefix, digest[12:])
def pubkey_hex_to_address(pubkey_hex):
    """Decode a hex-encoded public key and convert it to a bech32 address."""
    if pubkey_hex is None:
        return None
    raw_key = bytearray.fromhex(pubkey_hex)
    return pubkey_to_address(raw_key)
def bech32_encode(hrp, witprog):
    """Bech32-encode `witprog` (bytes) under human-readable part `hrp`,
    returning None if the 8-bit to 5-bit regrouping fails."""
    # bech32 alphabets are 5-bit; regroup the 8-bit payload first.
    five_bit_witprog = bech32.convertbits(witprog, 8, 5)
    if five_bit_witprog is None:
        return None
    ret = bech32.bech32_encode(hrp, five_bit_witprog)
    return ret
| 415 | 0 | 69 |
297572fc491a36c41352e325663d421d04d40933 | 177 | py | Python | Python/src/util/data.py | LN-STEMpunks/VexBot | f7bebe01ab35686cab92b8c2035d32f8f8372d64 | [
"RSA-MD"
] | null | null | null | Python/src/util/data.py | LN-STEMpunks/VexBot | f7bebe01ab35686cab92b8c2035d32f8f8372d64 | [
"RSA-MD"
] | null | null | null | Python/src/util/data.py | LN-STEMpunks/VexBot | f7bebe01ab35686cab92b8c2035d32f8f8372d64 | [
"RSA-MD"
] | null | null | null | """
Input and output data
"""
from networktables import NetworkTables
import logging
logging.basicConfig(level=logging.DEBUG)
SD = NetworkTables.getTable("SmartDashboard")
| 13.615385 | 45 | 0.785311 | """
Input and output data
"""
from networktables import NetworkTables
import logging
logging.basicConfig(level=logging.DEBUG)
SD = NetworkTables.getTable("SmartDashboard")
| 0 | 0 | 0 |
3cb80b80af56503f39e62a6900fb4e57018aac52 | 2,096 | py | Python | NetTtest/res.py | FoyerSociety/QPC-SESAME | 7512f9e038f7fb6070c40783f4b7bda812eb419b | [
"Unlicense"
] | 1 | 2019-06-16T06:13:43.000Z | 2019-06-16T06:13:43.000Z | NetTtest/res.py | FoyerSociety/QPC-SESAME | 7512f9e038f7fb6070c40783f4b7bda812eb419b | [
"Unlicense"
] | null | null | null | NetTtest/res.py | FoyerSociety/QPC-SESAME | 7512f9e038f7fb6070c40783f4b7bda812eb419b | [
"Unlicense"
] | null | null | null | import time, threading
from scapy.all import *
listc = []
lists = []
print('debut')
x = time.time()
p1 = Find(1,50)
p2 = Find(50,100)
p3 = Find(100, 150)
p4 = Find(150,200)
p1.start()
p2.start()
p3.start()
p4.start()
for i in range(200, 250):
print(time.time() - x , 's :', f'192.168.8.{i}')
rep, non_rep = sr(IP(dst=f'192.168.8.{i}') / ICMP(), timeout=0.005)
for elem in rep:
if elem[1].type == 0:
print('**********************************')
print('Connected adress' ,elem[1].src + ' est connecter')
listc.append(elem[1].src)
print('**********************************')
p1.join()
p2.join()
p3.join()
p4.join()
print('temps totaux:', time.time() - x)
print(len(listc), 'connecter')
for i in listc:
print(i)
print(len(lists), 'serveur')
for i in lists:
print(i) | 29.942857 | 122 | 0.435115 | import time, threading
from scapy.all import *
listc = []
lists = []
print('debut')
x = time.time()
class Find(threading.Thread):
    """Worker thread that ping-sweeps 192.168.8.<a>..<b-1>, appending
    reachable hosts to the shared `listc` and hosts answering on TCP/80
    (with elapsed time) to the shared `lists`."""
    def __init__(self, a, b):
        # Start (inclusive) / end (exclusive) of the last-octet range to scan.
        self.a = a
        self.b = b
        threading.Thread.__init__(self)
    def search(self, a, b):
        global listc
        for i in range(a, b):
            # `x` is the module-level sweep start time.
            print(time.time() - x , 's :', f'192.168.8.{i}')
            # ICMP echo probe; very short timeout keeps the sweep fast.
            rep, non_rep = sr(IP(dst=f'192.168.8.{i}') / ICMP(), timeout=0.005)
            for elem in rep:
                # ICMP type 0 == echo-reply: the host is up.
                if elem[1].type == 0:
                    print('**********************************')
                    print('Connected adress' ,elem[1].src + ' est connecter')
                    listc.append(elem[1].src)
                    print('**********************************')
                    # Follow-up TCP probe to port 80 to detect web servers.
                    ans, unans = sr(IP(dst=elem[1].src)/TCP(dport=80), timeout=0.01)
                    for val in ans:
                        if val[1].sport == 80:
                            print('###################')
                            print('Serveur Trouvee', (val[1].src + ' est un serveur avec temps:' + str(time.time() - x)))
                            lists.append((val[1].src, (str(time.time() - x) + 's')))
                            print('###################')
    def run(self):
        # Thread entry point: scan the range given to the constructor.
        self.search(self.a, self.b)
p1 = Find(1,50)
p2 = Find(50,100)
p3 = Find(100, 150)
p4 = Find(150,200)
p1.start()
p2.start()
p3.start()
p4.start()
for i in range(200, 250):
print(time.time() - x , 's :', f'192.168.8.{i}')
rep, non_rep = sr(IP(dst=f'192.168.8.{i}') / ICMP(), timeout=0.005)
for elem in rep:
if elem[1].type == 0:
print('**********************************')
print('Connected adress' ,elem[1].src + ' est connecter')
listc.append(elem[1].src)
print('**********************************')
p1.join()
p2.join()
p3.join()
p4.join()
print('temps totaux:', time.time() - x)
print(len(listc), 'connecter')
for i in listc:
print(i)
print(len(lists), 'serveur')
for i in lists:
print(i) | 1,115 | 8 | 108 |
347fe3f18ce0feef76ca7424f649fe2df32a9534 | 2,825 | py | Python | taco/test/test_bedgraph.py | tacorna/taco | eeaeb879b8622365123edbc61ebc100d84194b80 | [
"MIT"
] | 22 | 2016-04-03T16:30:54.000Z | 2022-03-07T23:01:08.000Z | taco/test/test_bedgraph.py | tacorna/taco | eeaeb879b8622365123edbc61ebc100d84194b80 | [
"MIT"
] | 18 | 2016-04-10T15:33:09.000Z | 2022-02-06T15:53:25.000Z | taco/test/test_bedgraph.py | tacorna/taco | eeaeb879b8622365123edbc61ebc100d84194b80 | [
"MIT"
] | 5 | 2016-11-23T22:26:00.000Z | 2021-06-09T11:23:20.000Z | '''
TACO: Multi-sample transcriptome assembly from RNA-Seq
'''
import os
import cStringIO
import timeit
import numpy as np
from taco.lib.dtypes import FLOAT_DTYPE
from taco.lib.bedgraph import array_to_bedgraph, bedgraph_to_array
from taco.lib.cbedgraph import array_to_bedgraph as c_array_to_bedgraph
| 27.427184 | 71 | 0.647788 | '''
TACO: Multi-sample transcriptome assembly from RNA-Seq
'''
import os
import cStringIO
import timeit
import numpy as np
from taco.lib.dtypes import FLOAT_DTYPE
from taco.lib.bedgraph import array_to_bedgraph, bedgraph_to_array
from taco.lib.cbedgraph import array_to_bedgraph as c_array_to_bedgraph
def write_and_read_array(a, ref='chr1', start=0):
    """Round-trip `a` through the pure-Python bedgraph writer and parser,
    returning the array recovered for `ref` (None if nothing was written)."""
    buf = cStringIO.StringIO()
    array_to_bedgraph(a, ref, start, buf)
    contents = buf.getvalue()
    a = bedgraph_to_array(cStringIO.StringIO(contents))
    return a.get(ref, None)
def c_write_and_read_array(a, ref='chr1', start=0):
    """Round-trip `a` through the C bedgraph writer and the Python parser,
    returning the array recovered for `ref` (None if nothing was written).

    Uses a unique temporary file instead of the original fixed
    "tmp.bedgraph" name, so concurrent test runs (or leftovers from an
    aborted run) cannot collide; the read handle is also closed properly.
    """
    import tempfile
    fd, filename = tempfile.mkstemp(suffix=".bedgraph")
    try:
        with os.fdopen(fd, 'w') as fileh:
            c_array_to_bedgraph(a, ref, start, fileh)
        with open(filename) as fileh:
            result = bedgraph_to_array(fileh)
    finally:
        os.remove(filename)
    return result.get(ref, None)
def test_array1():
    """Round-trip of a stepwise-increasing array must be lossless for both
    the Python and C writers."""
    a = np.array([1, 2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 5],
                 dtype=FLOAT_DTYPE)
    y = write_and_read_array(a)
    assert np.array_equal(a, y)
    y = c_write_and_read_array(a)
    assert np.array_equal(a, y)
def test_array2():
    """Trailing zeros should be dropped on round-trip (array compared
    without its last three elements).
    NOTE(review): the bare `return` below disables this test — presumably
    deliberate, TODO confirm before removing it.
    """
    return
    a = np.array([0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0], dtype=FLOAT_DTYPE)
    y = write_and_read_array(a)
    assert np.array_equal(a[:-3], y)
    y = c_write_and_read_array(a)
    assert np.array_equal(a[:-3], y)
def test_array3():
    """A constant-valued array must round-trip unchanged.
    NOTE(review): the bare `return` below disables this test — presumably
    deliberate, TODO confirm before removing it.
    """
    return
    a = np.ones(5, dtype=FLOAT_DTYPE) * 10
    y = write_and_read_array(a)
    assert np.array_equal(a, y)
    y = c_write_and_read_array(a)
    assert np.array_equal(a, y)
def test_empty():
    """An empty array should produce no bedgraph output (parser returns None).
    NOTE(review): the bare `return` below disables this test — presumably
    deliberate, TODO confirm before removing it.
    """
    return
    a = np.zeros(0, dtype=FLOAT_DTYPE)
    y = write_and_read_array(a)
    assert y is None
    y = c_write_and_read_array(a)
    assert y is None
def test_zeros():
    """An all-zero array should produce no bedgraph output (parser returns None).
    NOTE(review): the bare `return` below disables this test — presumably
    deliberate, TODO confirm before removing it.
    """
    return
    a = np.zeros(5, dtype=FLOAT_DTYPE)
    y = write_and_read_array(a)
    assert y is None
    y = c_write_and_read_array(a)
    assert y is None
def test_performance():
    """Benchmark scaffolding comparing the Python and C bedgraph writers on
    a 100k-sample random array.
    NOTE(review): every timing/profiling call below is commented out, so
    this test currently only defines the two benchmark bodies and does
    nothing when run.
    """
    def stmt1():
        # Python writer, writing into an in-memory buffer.
        a = np.array(np.random.random(100000), dtype=FLOAT_DTYPE)
        buf = cStringIO.StringIO()
        array_to_bedgraph(a, ref='chr1', start=0, fileh=buf)
        # filename = "tmp.bedgraph"
        # with open(filename, 'w') as fileh:
        #     array_to_bedgraph(a, ref='chr1', start=0, fileh=fileh)
        # os.remove(filename)
    def stmt2():
        # C writer, writing to a temp file on disk.
        a = np.array(np.random.random(100000), dtype=FLOAT_DTYPE)
        filename = "tmp.bedgraph"
        with open(filename, 'w') as fileh:
            c_array_to_bedgraph(a, ref='chr1', start=0, fileh=fileh)
        os.remove(filename)
    #t1 = timeit.Timer(stmt1)
    #t2 = timeit.Timer(stmt2)
    #print t1.timeit(number=2)
    #print t2.timeit(number=2)
    # import pstats, cProfile
    # cProfile.runctx("stmt2()", globals(), locals(), "Profile.prof")
    # s = pstats.Stats("Profile.prof")
    # s.strip_dirs().sort_stats("time").print_stats()
| 2,330 | 0 | 184 |
8f1ae0df3d2caf0aac1e3036584ca2faea7b680b | 1,093 | py | Python | tests/test_utils.py | sodre/sqs-workers | 46e14694805c4c2185a29ce2e906143358d06d8c | [
"MIT"
] | 21 | 2018-10-06T21:51:51.000Z | 2021-04-30T19:22:38.000Z | tests/test_utils.py | sodre/sqs-workers | 46e14694805c4c2185a29ce2e906143358d06d8c | [
"MIT"
] | 15 | 2019-02-27T13:19:34.000Z | 2022-03-16T17:40:05.000Z | tests/test_utils.py | sodre/sqs-workers | 46e14694805c4c2185a29ce2e906143358d06d8c | [
"MIT"
] | 4 | 2019-02-27T12:21:26.000Z | 2021-09-20T05:04:09.000Z | from textwrap import TextWrapper
from sqs_workers.utils import (
adv_bind_arguments,
adv_validate_arguments,
instantiate_from_dict,
instantiate_from_string,
string_to_object,
)
| 24.288889 | 70 | 0.675206 | from textwrap import TextWrapper
from sqs_workers.utils import (
adv_bind_arguments,
adv_validate_arguments,
instantiate_from_dict,
instantiate_from_string,
string_to_object,
)
def test_string_to_object():
    """string_to_object must resolve a dotted path to the named callable."""
    resolved = string_to_object("os.path.splitext")
    assert ("foo", ".txt") == resolved("foo.txt")
def test_instantiate_from_dict():
    """instantiate_from_dict must build the object named by the "maker"
    dotted path, forwarding the remaining keys as constructor kwargs."""
    options = {"maker": "textwrap.TextWrapper", "width": 80}
    w = instantiate_from_dict(options)
    assert isinstance(w, TextWrapper)
    assert w.width == 80
def test_instantiate_from_string():
    """instantiate_from_string must build an instance of the class named by
    the dotted path, passing through keyword arguments."""
    w = instantiate_from_string("textwrap.TextWrapper", width=80)
    assert isinstance(w, TextWrapper)
    assert w.width == 80
def test_adv_bind_arguments_converts_to_unicode():
    """Byte-string keyword names must be accepted and normalised to str
    when binding against foo(a, b)."""
    def foo(a, b):
        pass
    kwargs = adv_bind_arguments(foo, [], {b"a": 1, b"b": 2})
    assert kwargs == {"a": 1, "b": 2}
def test_adv_validate_arguments_converts_to_unicode():
    """Byte-string keyword names must validate and come back as positional
    args (in parameter order) with an empty kwargs dict."""
    def foo(a, b):
        pass
    args, kwargs = adv_validate_arguments(foo, [], {b"a": 1, b"b": 2})
    assert args == (1, 2)
    assert kwargs == {}
| 775 | 0 | 115 |
823d5ccd7f4b25190eae376bdf8bf96d5298b941 | 6,202 | py | Python | loaders.py | singleswitch/ticker | 1e793316f2a3252d80339a69672ad81df550875d | [
"MIT"
] | null | null | null | loaders.py | singleswitch/ticker | 1e793316f2a3252d80339a69672ad81df550875d | [
"MIT"
] | 1 | 2018-11-06T09:30:23.000Z | 2018-11-06T09:30:23.000Z | loaders.py | singleswitch/ticker | 1e793316f2a3252d80339a69672ad81df550875d | [
"MIT"
] | 1 | 2019-01-23T14:46:11.000Z | 2019-01-23T14:46:11.000Z |
import numpy as np
import cPickle
"""* This file contains everything that has to be loaded from lookuptables e.g., the sound file lengths, the alphabet etc
* Lookuptables stored in files, all depend on a root directory"""
class AlphabetLoader(FileLoader):
"""This class contains all the loading functions associated with loading the alphabet, and configuring it for multiple channels usage
Input:
* The setChannels functions is expected to be called to change the configuration
* Otherwise the get functions should be called for different representations of the same alphabet."""
###################################### Init functions
##################################### Load the alphabet
##################################### Get functions
##################################### Private functions
##################################### Display functions | 45.270073 | 204 | 0.632215 |
import numpy as np
import cPickle
"""* This file contains everything that has to be loaded from lookuptables e.g., the sound file lengths, the alphabet etc
* Lookuptables stored in files, all depend on a root directory"""
class FileLoader():
    """Holds the root directory that lookup-table files are read from."""
    def __init__(self, i_root_dir):
        # Route through the setter so subclasses that override it keep working.
        self.setRootDir(i_root_dir)
    def setRootDir(self, i_root_dir):
        # Name-mangled private storage for the configured directory.
        self.__root_dir = i_root_dir
    def getRootDir(self):
        return self.__root_dir
class LookupTables(FileLoader):
    """Facade over the per-channel lookup tables: sound-file lengths, the
    channel alphabet, and the letter-utterance ordinal words."""
    def __init__(self, i_dir="./"):
        FileLoader.__init__(self, i_dir)
        self.__file_lengths = SoundFileLengthLoader(self.getRootDir() + "config/channels")
        self.__alphabet = AlphabetLoader(self.getRootDir() + "config/channels")
        self.__letter_utterances = LetterUtteranceLookupTables()
    def setChannels(self, i_nchannels):
        # Reload the alphabet for the new channel count and remember it for
        # subsequent getSoundFileLengths()/getChannels() calls.
        self.__alphabet.load(i_nchannels)
        self.__nchannels = i_nchannels
    def getSoundFileLengths(self):
        return self.__file_lengths.load(self.__nchannels)
    def getAlphabetLoader(self):
        return self.__alphabet
    def getChannels(self):
        return self.__nchannels
    def getLetterUtteranceFromIndex(self, i_index):
        # Bug fix: the bound method already receives self.__letter_utterances
        # implicitly; the original also passed `self`, so every call raised
        # TypeError (three arguments for a two-parameter method).
        return self.__letter_utterances.getLetterStringFromIndex(i_index)
class SoundFileLengthLoader(FileLoader):
    """Loads the pickled sound-file length table for a given channel count."""
    def __init__(self, i_dir):
        FileLoader.__init__(self, i_dir)
    def load(self, i_nchannels):
        # Lengths live in "<root><nchannels>/sound_lengths.cPickle".
        # NOTE(review): Python 2 style — text-mode 'r' and no context manager.
        file_name = self.getRootDir() + str(i_nchannels) + "/sound_lengths.cPickle"
        f = open( file_name, 'r')
        file_lengths = cPickle.load(f)
        f.close()
        return file_lengths
class AlphabetLoader(FileLoader):
    """This class contains all the loading functions associated with loading the alphabet, and configuring it for multiple channels usage
       Input:
            * The setChannels functions is expected to be called to change the configuration
            * Otherwise the get functions should be called for different representations of the same alphabet."""
    ###################################### Init functions
    def __init__(self, i_dir ):
        FileLoader.__init__(self, i_dir)
    ##################################### Load the alphabet
    def load(self, i_nchannels):
        # Read "<root><nchannels>/alphabet.txt"; first whitespace-free token
        # of the first line is the alphabet string (may contain '*' spacers).
        # NOTE(review): Python 2 only — uses the `file()` builtin and, below,
        # a print statement.
        file_name = self.getRootDir() + str(i_nchannels) + "/alphabet.txt"
        file_name = file(file_name)
        alphabet = file_name.read()
        file_name.close()
        alphabet = alphabet.split('\n')[0]
        alphabet = alphabet.split(" ")[0]
        alphabet = [letter for letter in alphabet if not (letter == '') ]
        array_alphabet = np.array(alphabet)
        # Occurrence count of every non-'*' letter; all letters must repeat
        # the same number of times or the file is rejected.
        repeat = np.array([len(np.nonzero(array_alphabet == letter)[0]) for letter in alphabet if not( letter == '*') ])
        idx = np.nonzero(repeat == repeat[0])[0]
        if not ( len(idx) == len(repeat) ):
            print "Repeat = ", repeat
            raise ValueError("Error in alphabet, all letters should repeat the same number of times")
        repeat = repeat[0]
        self.__alphabet = list(alphabet)
        # One repetition's worth of letters forms the unique alphabet.
        # NOTE(review): Python 2 integer division assumed here.
        alphabet_len = len(self.__alphabet) / repeat
        self.__unique_alphabet = list( self.__alphabet[0:alphabet_len])
        self.__alphabet_len = self.__getAlphabetLength(self.__alphabet)
        self.__unique_alphabet_len = self.__getAlphabetLength(self.__unique_alphabet)
    ##################################### Get functions
    def getAlphabet(self, i_with_spaces=True):
        if i_with_spaces:
            return self.__alphabet
        return self.__getSequenceAlphabet(self.__alphabet)
    def getAlphabetLen(self, i_with_spaces=True):
        if i_with_spaces:
            return len(self.__alphabet)
        return self.__alphabet_len
    def getUniqueAlphabet(self, i_with_spaces=True):
        if i_with_spaces:
            return self.__unique_alphabet
        return self.__getSequenceAlphabet(self.__unique_alphabet)
    def getUniqueAlphabetLen(self, i_with_spaces=True):
        if i_with_spaces:
            return len(self.__unique_alphabet)
        return self.__unique_alphabet_len
    ##################################### Private functions
    def __getSequenceAlphabet(self, i_alphabet):
        #Return the alphabet in sequence without the spaces
        return [letter for letter in i_alphabet if not letter == '*']
    def __getAlphabetLength(self, i_alphabet):
        seq_alphabet = self.__getSequenceAlphabet(i_alphabet)
        return len(seq_alphabet)
    ##################################### Display functions
    def plotIntegerDistances(self):
        # NOTE(review): this method cannot run as written — it references the
        # undefined names `i_alphabet`, `pylab` and `dx`, and calls
        # `self.getSequenceAlphabet`, while the actual method is the
        # name-mangled private `__getSequenceAlphabet`.  Left untouched here;
        # needs a deliberate fix or removal.
        alphabet = np.array(i_alphabet)
        sequence = self.getSequenceAlphabet(self.__alphabet)
        for letter in alphabet:
            idx = np.nonzero(sequence == letter)[0]
            if not (len(idx) == 2):
                disp_str = "Letter " + letter + " occurances= " +str(len(idx))
                raise ValueError(disp_str)
            pylab.plot( dx[0], idx[1], '+' )
            pylab.text(idx[0]+0.3, idx[1], letter)
class LetterUtteranceLookupTables():
    """Maps 1-based letter positions (1..50) to the ordinal word used in
    utterances."""
    # Ordinal words for positions 1..50, in order.  Spellings (including the
    # historical "elenvth" / "fourty..." forms) are kept exactly as recorded,
    # since they must match the utterance sound files.
    _ORDINALS = ("first", "second", "third", "fourth", "fifth", "sixth",
                 "seventh", "eighth", "ninth", "tenth", "elenvth", "twelfth",
                 "thirteenth", "fourteenth", "fifteenth", "sixteenth",
                 "seventeenth", "eighteenth", "nineteenth", "twentieth",
                 "twentyfirst", "twentysecond", "twentythird", "twentyfourth",
                 "twentyfifth", "twentysixth", "twentyseventh", "twentyeighth",
                 "twentyninth", "thirtieth", "thirtyfirst", "thirtysecond",
                 "thirtythird", "thirtyfourth", "thirtyfifth", "thirtysixth",
                 "thirtyseventh", "thirtyeighth", "thirtyninth", "fourtieth",
                 "fourtyfirst", "fourtysecond", "fourtythird", "fourtyfourth",
                 "fourtyfifth", "fourtysixth", "fourtyseventh", "fourtyeighth",
                 "fourtyninth", "fiftieth")
    def __init__(self):
        self.__letter_dict = {pos: word for pos, word in enumerate(self._ORDINALS, 1)}
    def getLetterStringFromIndex(self, i_index):
        return self.__letter_dict[i_index]
d257ca048420318e5eb15a7666e242097d2ed7a8 | 394 | py | Python | server.py | michaelrbock/hackers-job-apply | c5f6c26046946316067897cf9ab9b5e6d7310e8a | [
"MIT"
] | 20 | 2015-05-28T20:08:55.000Z | 2020-10-12T21:51:12.000Z | server.py | michaelrbock/hackers-job-apply | c5f6c26046946316067897cf9ab9b5e6d7310e8a | [
"MIT"
] | null | null | null | server.py | michaelrbock/hackers-job-apply | c5f6c26046946316067897cf9ab9b5e6d7310e8a | [
"MIT"
] | 5 | 2016-02-16T13:54:04.000Z | 2020-06-26T18:50:22.000Z | import os
from flask import Flask, request
app = Flask(__name__)
@app.route('/', methods=["GET", "POST"])
if __name__ == "__main__":
app.run(debug=True)
| 23.176471 | 109 | 0.64467 | import os
from flask import Flask, request
app = Flask(__name__)
@app.route('/', methods=["GET", "POST"])
def index():
    """Single endpoint: reveal JOB_EMAIL for the correct answer, otherwise
    pose the question with instructions on how to answer."""
    payload = request.json
    if payload and payload.get("answer") == os.getenv("ANSWER"):
        return os.getenv("JOB_EMAIL") + "\n"
    return "%s %s" % (os.getenv("QUESTION"), "POST json to the server with the answer -> { 'answer': 'xxx' }\n")
if __name__ == "__main__":
app.run(debug=True)
| 212 | 0 | 22 |
b4fa2855a728102eb7e89aa73c52415f48029918 | 1,782 | py | Python | paypaladaptive/settings.py | amineck/django-paypal-adaptive | 98a5d4674a4ae2b619ff4f9ee11240c27d03ac73 | [
"CC-BY-3.0"
] | 4 | 2015-01-21T10:42:21.000Z | 2016-01-19T09:16:55.000Z | paypaladaptive/settings.py | amineck/django-paypal-adaptive | 98a5d4674a4ae2b619ff4f9ee11240c27d03ac73 | [
"CC-BY-3.0"
] | 6 | 2015-01-14T22:13:10.000Z | 2021-06-10T20:34:41.000Z | paypaladaptive/settings.py | amineck/django-paypal-adaptive | 98a5d4674a4ae2b619ff4f9ee11240c27d03ac73 | [
"CC-BY-3.0"
] | 10 | 2015-03-23T14:16:30.000Z | 2021-02-21T02:05:27.000Z | from datetime import timedelta
from django.conf import settings
from money import set_default_currency
DEBUG = getattr(settings, "DEBUG", False)
if DEBUG:
# use sandboxes while in debug mode
PAYPAL_ENDPOINT = 'https://svcs.sandbox.paypal.com/AdaptivePayments/'
PAYPAL_PAYMENT_HOST = 'https://www.sandbox.paypal.com/au/cgi-bin/webscr'
EMBEDDED_ENDPOINT = 'https://www.sandbox.paypal.com/webapps/adaptivepayment/flow/pay'
PAYPAL_APPLICATION_ID = 'APP-80W284485P519543T' # sandbox only
else:
PAYPAL_ENDPOINT = 'https://svcs.paypal.com/AdaptivePayments/' # production
PAYPAL_PAYMENT_HOST = 'https://www.paypal.com/webscr' # production
EMBEDDED_ENDPOINT = 'https://paypal.com/webapps/adaptivepayment/flow/pay'
PAYPAL_APPLICATION_ID = settings.PAYPAL_APPLICATION_ID
# These settings are required
PAYPAL_USERID = settings.PAYPAL_USERID
PAYPAL_PASSWORD = settings.PAYPAL_PASSWORD
PAYPAL_SIGNATURE = settings.PAYPAL_SIGNATURE
PAYPAL_EMAIL = settings.PAYPAL_EMAIL
USE_IPN = getattr(settings, 'PAYPAL_USE_IPN', True)
USE_DELAYED_UPDATES = getattr(settings, 'PAYPAL_USE_DELAYED_UPDATES', False)
DELAYED_UPDATE_COUNTDOWN = getattr(
settings, 'PAYPAL_DELAYED_UPDATE_COUNTDOWN', timedelta(minutes=60))
USE_CHAIN = getattr(settings, 'PAYPAL_USE_CHAIN', True)
USE_EMBEDDED = getattr(settings, 'PAYPAL_USE_EMBEDDED', True)
SHIPPING = getattr(settings, 'PAYPAL_USE_SHIPPING', False)
DEFAULT_CURRENCY = getattr(settings, 'DEFAULT_CURRENCY', 'USD')
set_default_currency(code=DEFAULT_CURRENCY)
DECIMAL_PLACES = getattr(settings, 'PAYPAL_DECIMAL_PLACES', 2)
MAX_DIGITS = getattr(settings, 'PAYPAL_MAX_DIGITS', 10)
# Should tests hit Paypaladaptive or not? Defaults to using mock responses
TEST_WITH_MOCK = getattr(settings, 'PAYPAL_TEST_WITH_MOCK', True)
| 40.5 | 89 | 0.795174 | from datetime import timedelta
from django.conf import settings
from money import set_default_currency
DEBUG = getattr(settings, "DEBUG", False)
if DEBUG:
# use sandboxes while in debug mode
PAYPAL_ENDPOINT = 'https://svcs.sandbox.paypal.com/AdaptivePayments/'
PAYPAL_PAYMENT_HOST = 'https://www.sandbox.paypal.com/au/cgi-bin/webscr'
EMBEDDED_ENDPOINT = 'https://www.sandbox.paypal.com/webapps/adaptivepayment/flow/pay'
PAYPAL_APPLICATION_ID = 'APP-80W284485P519543T' # sandbox only
else:
PAYPAL_ENDPOINT = 'https://svcs.paypal.com/AdaptivePayments/' # production
PAYPAL_PAYMENT_HOST = 'https://www.paypal.com/webscr' # production
EMBEDDED_ENDPOINT = 'https://paypal.com/webapps/adaptivepayment/flow/pay'
PAYPAL_APPLICATION_ID = settings.PAYPAL_APPLICATION_ID
# These settings are required
PAYPAL_USERID = settings.PAYPAL_USERID
PAYPAL_PASSWORD = settings.PAYPAL_PASSWORD
PAYPAL_SIGNATURE = settings.PAYPAL_SIGNATURE
PAYPAL_EMAIL = settings.PAYPAL_EMAIL
USE_IPN = getattr(settings, 'PAYPAL_USE_IPN', True)
USE_DELAYED_UPDATES = getattr(settings, 'PAYPAL_USE_DELAYED_UPDATES', False)
DELAYED_UPDATE_COUNTDOWN = getattr(
settings, 'PAYPAL_DELAYED_UPDATE_COUNTDOWN', timedelta(minutes=60))
USE_CHAIN = getattr(settings, 'PAYPAL_USE_CHAIN', True)
USE_EMBEDDED = getattr(settings, 'PAYPAL_USE_EMBEDDED', True)
SHIPPING = getattr(settings, 'PAYPAL_USE_SHIPPING', False)
DEFAULT_CURRENCY = getattr(settings, 'DEFAULT_CURRENCY', 'USD')
set_default_currency(code=DEFAULT_CURRENCY)
DECIMAL_PLACES = getattr(settings, 'PAYPAL_DECIMAL_PLACES', 2)
MAX_DIGITS = getattr(settings, 'PAYPAL_MAX_DIGITS', 10)
# Should tests hit Paypaladaptive or not? Defaults to using mock responses
TEST_WITH_MOCK = getattr(settings, 'PAYPAL_TEST_WITH_MOCK', True)
| 0 | 0 | 0 |
a6c3a422b10ad2352d84749db1185f4c78782d2e | 11,228 | py | Python | vcd2json.py | anders-code/vcd2json | 146384371f6b877b5a787c5bad2f9f171bad30e2 | [
"MIT"
] | 1 | 2022-01-29T23:32:40.000Z | 2022-01-29T23:32:40.000Z | vcd2json.py | anders-code/vcd2json | 146384371f6b877b5a787c5bad2f9f171bad30e2 | [
"MIT"
] | null | null | null | vcd2json.py | anders-code/vcd2json | 146384371f6b877b5a787c5bad2f9f171bad30e2 | [
"MIT"
] | null | null | null | """Create WaveJSON text string from VCD file."""
import sys
| 34.336391 | 77 | 0.483969 | """Create WaveJSON text string from VCD file."""
import sys
class _SignalDef:
    """Record describing one VCD variable."""
    def __init__(self, name, sid, length):
        self._name = name      # signal name (last component of the path)
        self._sid = sid        # short VCD identifier code from the $var line
        self._length = length  # bit width of the signal
        self._fmt = ''         # display format char; '' means default ('x')
class WaveExtractor:
    """Driver object: parses a VCD header, then samples the selected signals
    and writes them out as WaveJSON via execute()."""
    def __init__(self, vcd_file, json_file, path_list):
        """
        Extract signal values from VCD file and output in JSON format.
        Specify VCD filename, JSON filename, and signal path list.
        If <json_file> is an empty string, standard output is used.
        Use slashes to separate signal path hierarchies.
        The first signal of the list is regarded as clock.
        Other signals are sampled on the negative edge of the clock.
        """
        self._vcd_file = vcd_file
        self._json_file = json_file
        self._path_list = [path.strip('/') for path in path_list]
        self._wave_chunk = 20
        self._start_time = 0
        self._end_time = 0
        self._setup()
    @property
    def wave_chunk(self):
        """Number of wave samples per time group."""
        return self._wave_chunk
    @wave_chunk.setter
    def wave_chunk(self, value):
        self._wave_chunk = value
    @property
    def start_time(self):
        """Sampling start time."""
        return self._start_time
    @start_time.setter
    def start_time(self, value):
        self._start_time = value
    @property
    def end_time(self):
        """Sampling end time."""
        return self._end_time
    @end_time.setter
    def end_time(self, value):
        self._end_time = value
    def _setup(self):
        # Parse the VCD declaration section (up to $enddefinitions) and build
        # the path list/dict; the open file handle is kept for execute().
        def create_path_dict(fin):
            # Walk $scope/$var/$upscope records, building "a/b/name" paths.
            hier_list = []
            path_list = []
            path_dict = {}
            while True:
                line = fin.readline()
                if not line:
                    raise EOFError('Can\'t find word "$enddefinitions".')
                words = line.split()
                if words[0] == '$enddefinitions':
                    return path_list, path_dict
                if words[0] == '$scope':
                    hier_list.append(words[2])
                elif words[0] == '$var':
                    path = '/'.join(hier_list + [words[4]])
                    path_list.append(path)
                    path_dict[path] = _SignalDef(name=words[4],
                                                 sid=words[3],
                                                 length=int(words[2]))
                elif words[0] == '$upscope':
                    del hier_list[-1]
        def update_path_dict(path_list, path_dict):
            # Restrict the parsed dict to the user-requested paths, in order.
            new_path_dict = {}
            for path in path_list:
                signal_def = path_dict.get(path, None)
                if not signal_def:
                    raise ValueError('Can\'t find path "{0}".'.format(path))
                new_path_dict[path] = signal_def
            return new_path_dict
        fin = open(self._vcd_file, 'rt')
        path_list, path_dict = create_path_dict(fin)
        if self._path_list:
            path_dict = update_path_dict(self._path_list, path_dict)
        else:
            # Empty constructor list means "use every signal in the file".
            self._path_list = path_list
        self._path_dict = path_dict
        self._fin = fin
    def print_props(self):
        """
        Display the properties. If an empty path list is given to
        the constructor, display the list created from the VCD file.
        """
        print("vcd_file   = '" + self._vcd_file + "'")
        print("json_file  = '" + self._json_file + "'")
        print("path_list  = [", end='')
        for i, path in enumerate(self._path_list):
            if i != 0:
                print("              ", end='')
            print("'" + path + "'", end='')
            if i != len(self._path_list)-1:
                print(",")
            else:
                print("]")
        print("wave_chunk = " + str(self._wave_chunk))
        print("start_time = " + str(self._start_time))
        print("end_time   = " + str(self._end_time))
        return 0
    def wave_format(self, signal_path, fmt):
        """
        Set the display format of the multi-bit signal. <fmt> is
        one of the following characters. The default is 'x'.
        'b' - Binary.
        'd' - Signed decimal.
        'u' - Unsigned decimal.
        'x' - Hexa-decimal, lowercase is used.
        'X' - Hexa-decimal, uppercase is used.
        """
        if fmt not in ('b', 'd', 'u', 'x', 'X'):
            raise ValueError('"{0}": Invalid format character.'.format(fmt))
        self._path_dict[signal_path]._fmt = fmt
        return 0
    def execute(self):
        """Perform signal sampling and JSON generation."""
        fin = self._fin
        path_list = self._path_list
        path_dict = self._path_dict
        wave_chunk = self._wave_chunk
        start_time = self._start_time
        end_time = self._end_time
        sampler = _SignalSampler(wave_chunk, start_time, end_time)
        jsongen = _JsonGenerator(path_list, path_dict, wave_chunk)
        # The first path is the clock; all signals start as unknown 'x'.
        clock_id = path_dict[path_list[0]]._sid
        id_list = [path_dict[path]._sid for path in path_list]
        value_dict = {sid: 'x' for sid in id_list}
        sample_dict = {sid: [] for sid in id_list}
        if self._json_file == '':
            fout = sys.stdout
        else:
            self.print_props()
            print()
            print('Create WaveJSON file "{0}".'.format(self._json_file))
            fout = open(self._json_file, 'wt')
        fout.write(jsongen.create_header())
        # Emit one body group per chunk until the sampler runs out of data.
        while True:
            origin = sampler.run(fin, clock_id, value_dict, sample_dict)
            if len(sample_dict[clock_id]) == 0:
                break
            fout.write(",\n");
            fout.write(jsongen.create_body(origin, sample_dict))
        fout.write(jsongen.create_footer())
        fin.close()
        fout.close()
        return 0
class _SignalSampler():
    """State machine that reads VCD value-change records and samples every
    tracked signal on the clock's falling edge ('1' -> '0')."""
    def __init__(self, wave_chunk, start_time, end_time):
        self._wave_chunk = wave_chunk   # samples collected per run() call
        self._start_time = start_time   # ignore samples whose origin is earlier
        self._end_time = end_time       # 0 means "no end limit"
        self._now = 0                   # current VCD timestamp (string after first '#')
    def run(self, fin, clock_id, value_dict, sample_dict):
        # Fill sample_dict with up to wave_chunk samples per signal and
        # return `origin`, the timestamp of the chunk's first rising edge.
        # Stops early at EOF or when end_time is passed.
        origin = self._now
        clock_prev = value_dict[clock_id]
        for sid in sample_dict:
            del sample_dict[sid][:]
        data_count = 0
        while True:
            if self._end_time != 0 and self._end_time < int(self._now):
                return origin
            line = fin.readline()
            if not line:
                return origin
            words = line.split()
            if not words:
                continue
            char = words[0][0]
            if char == '$':
                # Declaration/dump keywords ($dumpvars, $end, ...) — skip.
                continue
            if char in ('0', '1', 'x', 'z'):
                # Scalar value change: "<value><id>".
                sid = words[0][1:]
                if sid in value_dict:
                    value_dict[sid] = char
                continue
            if char == 'b':
                # Vector value change: "b<bits> <id>".
                sid = words[1]
                if sid in value_dict:
                    value_dict[sid] = words[0][1:]
                continue
            if char == '#':
                # Timestamp: decide on edges using the values accumulated
                # since the previous '#'.
                next_now = words[0][1:]
                clock = value_dict[clock_id]
                if clock_prev == '0' and clock == '1':
                    # Rising edge before any sample: move the chunk origin.
                    if data_count == 0:
                        origin = self._now
                elif self._start_time <= int(origin) and \
                        clock_prev == '1' and clock == '0':
                    # Falling edge: record one sample of every signal.
                    for sid in sample_dict:
                        sample_dict[sid].append(value_dict[sid])
                    data_count += 1
                    if data_count == self._wave_chunk:
                        self._now = next_now
                        return origin
                self._now = next_now
                clock_prev = clock
                continue
            raise ValueError('"{0}": Unexpected character.'.format(char))
class _JsonGenerator():
    """Formats sampled signal values as WaveJSON text: a header with the
    clock row, one body group per chunk, and a closing footer."""
    def __init__(self, path_list, path_dict, wave_chunk):
        self._path_list = path_list
        self._path_dict = path_dict
        self._wave_chunk = wave_chunk
        # Clock is the first path; name column is padded to the widest name.
        self._clock_name = path_dict[path_list[0]]._name
        self._name_width = max([len(path_dict[path]._name)
                                for path in path_list])
    def create_header(self):
        # Clock row: 'p' (positive pulse) followed by '.' repeats.
        name = "\"{0}\"".format(self._clock_name).ljust(self._name_width + 2)
        wave = "\"{0}\"".format('p' + '.' * (self._wave_chunk - 1))
        json = ""
        json += "{ \"head\": {\"tock\":1},\n"
        json += "  \"signal\": [\n"
        json += "    { \"name\": "+name+", \"wave\": "+wave+" }"
        return json
    def create_body(self, origin, sample_dict):
        # One WaveJSON group labelled with the chunk's origin timestamp,
        # containing a row for every non-clock signal.
        def create_wave(samples):
            # Single-bit wave: repeat previous value as '.', else the value.
            prev = None
            wave = ""
            for value in samples:
                if value == prev:
                    wave += '.'
                else:
                    wave += value
                prev = value
            return "\""+wave+"\""
        def create_wave_data(samples, length, fmt):
            # Multi-bit wave: '=' with a data label for defined values,
            # 'z'/'x' for tri-state/unknown, '.' for repeats.
            prev = None
            wave = ""
            data = ""
            for value in samples:
                if value == prev:
                    wave += '.'
                elif all([c == '0' or c == '1' for c in value]):
                    wave += '='
                    data += ' ' + data_format(value, length, fmt)
                elif all([c == 'z' for c in value]):
                    wave += 'z'
                else:
                    wave += 'x'
                prev = value
            return "\""+wave+"\"", "\""+data[1:]+"\""
        def data_format(value, length, fmt):
            # Render a binary string per the signal's format character
            # (see WaveExtractor.wave_format); 'd' is two's-complement signed.
            value = int(value, 2)
            if fmt == 'b':
                fmt = '0' + str(length) + 'b'
            elif fmt == 'd':
                if value >= 2**(length-1):
                    value -= 2**length
            elif fmt == 'u':
                fmt = 'd'
            elif fmt == 'X':
                fmt = '0' + str((length+3)//4) + 'X'
            else:
                fmt = '0' + str((length+3)//4) + 'x'
            return format(value, fmt)
        group = "\"{0}\"".format(origin)
        json = ""
        json += "  {},\n"
        json += "  ["+group+",\n"
        # Skip path_list[0]: the clock row already lives in the header.
        for i, path in enumerate(self._path_list[1:]):
            name = self._path_dict[path]._name
            sid = self._path_dict[path]._sid
            length = self._path_dict[path]._length
            if length == 1:
                name = "\"{0}\"".format(name).ljust(self._name_width + 2)
                wave = create_wave(sample_dict[sid])
                json += "      { \"name\": "+name+", \"wave\": "+wave+" }"
            else:
                fmt = self._path_dict[path]._fmt
                name = "\"{0}\"".format(name).ljust(self._name_width + 2)
                wave, data = create_wave_data(sample_dict[sid], length, fmt)
                json += "      { \"name\": "+name+", \"wave\": "+wave+\
                        ", \"data\": "+data+" }"
            if i != len(self._path_list)-2:
                json += ",\n"
            else:
                json += "\n"
        json += "    ]"
        return json
    def create_footer(self):
        json = "\n"
        json += "  ]\n"
        json += "}\n"
        return json
| 7,002 | 3,882 | 280 |
c141fd6371c72cd5ede68d8b52c9dc5b2a9703e2 | 3,532 | py | Python | agent.py | lmbaeza/Laberinto-SI | c86b459f13c9d9a58a64e17fcf228fe486755df7 | [
"MIT"
] | null | null | null | agent.py | lmbaeza/Laberinto-SI | c86b459f13c9d9a58a64e17fcf228fe486755df7 | [
"MIT"
] | null | null | null | agent.py | lmbaeza/Laberinto-SI | c86b459f13c9d9a58a64e17fcf228fe486755df7 | [
"MIT"
] | null | null | null | import time
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.common.exceptions import TimeoutException
from screenshot import screen_component_by_id
from image_to_asciify import map_to_ascii
from image_map_processing import run_map_processing
from get_path import get_path
| 36.040816 | 110 | 0.583522 | import time
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.common.exceptions import TimeoutException
from screenshot import screen_component_by_id
from image_to_asciify import map_to_ascii
from image_map_processing import run_map_processing
from get_path import get_path
class Agent:
def __init__(self, driver, level):
self.driver = driver
# Ruta que debe tomar el Bot compuesta por caracteres {'L', 'R', 'D', 'U'}
self.path = ''
self.level = level
self.MAP_FILE_NAME = 'img/map-' + str(level) + '.png'
self.SERIALIZED_MAP_PATH = 'img/serialized-map.png'
self.MAP_ASCII = 'ascii/map.txt'
# Lista de Direcciones
self.DIRECTIONS = [Keys.UP, Keys.DOWN, Keys.LEFT, Keys.RIGHT]
def percibir(self):
screen_component_by_id(driver=self.driver, id_name="animation_container", filename=self.MAP_FILE_NAME)
run_map_processing(level=self.level)
def pensar(self):
self.path = get_path(level=self.level)
print("path:", self.path)
def actuar(self):
# milliseconds = 0.084
milliseconds = 0.0
eps_up = 0.0
eps_down = 0.0
eps_left = 0.0
eps_right = 0.0
if self.level == 1:
milliseconds = 0.18
eps_right = 0.04
eps_left = 0.04
eps_up = 0.0
eps_down = 0.0
elif self.level == 2:
milliseconds = 0.18
eps_right = 0.026
eps_left = 0.026
eps_up = 0.0
eps_down = 0.0
elif self.level == 3:
milliseconds = 0.18
eps_right = 0.03
eps_left = 0.03
eps_up = -0.04
eps_down = -0.04
for direction in self.path:
time.sleep(0.4)
if direction == 'U':
print("Press UP")
# Selecionar Tecla
ActionChains(self.driver).key_down(self.DIRECTIONS[0]).perform()
time.sleep(milliseconds-eps_up)
# Parar Seleción
ActionChains(self.driver).key_up(self.DIRECTIONS[0]).perform()
elif direction == 'D':
print("Press DOWN")
# Selecionar Tecla
ActionChains(self.driver).key_down(self.DIRECTIONS[1]).perform()
time.sleep(milliseconds-eps_down)
# Parar Seleción
ActionChains(self.driver).key_up(self.DIRECTIONS[1]).perform()
elif direction == 'L':
print("Press LEFT")
# Selecionar Tecla
ActionChains(self.driver).key_down(self.DIRECTIONS[2]).perform()
time.sleep(milliseconds-eps_left)
# Parar Seleción
ActionChains(self.driver).key_up(self.DIRECTIONS[2]).perform()
elif direction == 'R':
print("Press RIGHT")
# Selecionar Tecla
ActionChains(self.driver).key_down(self.DIRECTIONS[3]).perform()
time.sleep(milliseconds-eps_right)
# Parar Seleción
ActionChains(self.driver).key_up(self.DIRECTIONS[3]).perform()
def close(self):
self.driver.close()
| 2,818 | -9 | 174 |
5ad1a42fbcf99a0df17e4be175d2b9c068c6de4d | 593 | py | Python | fish_core/scrapy/run_crawler.py | SylvanasSun/FishFishJump | 696212d242d8d572f3f1b43925f3d8ab8acc6a2d | [
"MIT"
] | 60 | 2018-03-09T07:06:10.000Z | 2021-11-18T15:53:04.000Z | fish_core/scrapy/run_crawler.py | qiubaiying/FishFishJump | 696212d242d8d572f3f1b43925f3d8ab8acc6a2d | [
"MIT"
] | 1 | 2018-04-03T11:05:54.000Z | 2018-04-03T20:06:41.000Z | fish_core/scrapy/run_crawler.py | qiubaiying/FishFishJump | 696212d242d8d572f3f1b43925f3d8ab8acc6a2d | [
"MIT"
] | 8 | 2018-03-12T03:07:00.000Z | 2021-06-11T05:16:11.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sched
import time
"""
Perform crawling tasks on a regular basis,
this module default starts crawler 'fish_simple_crawler' on the everyday.
"""
scheduler = sched.scheduler(time.time, time.sleep)
if __name__ == '__main__':
scheduler.enter(0, 0, crawl_sched, ('fish_simple_crawler', 86400,))
scheduler.run()
| 21.962963 | 76 | 0.716695 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sched
import time
"""
Perform crawling tasks on a regular basis,
this module default starts crawler 'fish_simple_crawler' on the everyday.
"""
scheduler = sched.scheduler(time.time, time.sleep)
def crawl_tasks(spider_name):
os.system('scrapy crawl %s' % spider_name)
def crawl_sched(spider_name, interval):
scheduler.enter(interval, 0, crawl_sched, (interval,))
crawl_tasks(spider_name)
if __name__ == '__main__':
scheduler.enter(0, 0, crawl_sched, ('fish_simple_crawler', 86400,))
scheduler.run()
| 161 | 0 | 46 |
3c8a8feacb5678a703a405eb0fbe5a06f7f05dc1 | 437 | py | Python | model/roi_layers/nms.py | ZhangHanbo/Visual-Manipulation-Relationship-Network-Pytorch | 9dd24947db318f6e404918d4758f1d824eea3748 | [
"MIT"
] | 26 | 2019-10-31T08:21:46.000Z | 2022-03-11T13:58:43.000Z | model/roi_layers/nms.py | moli1026/regrad | f66c38c00405b22cb746cc3f5c38d2b49f77d854 | [
"MIT"
] | 12 | 2019-11-07T09:12:50.000Z | 2022-03-12T02:58:18.000Z | model/roi_layers/nms.py | moli1026/regrad | f66c38c00405b22cb746cc3f5c38d2b49f77d854 | [
"MIT"
] | 11 | 2019-10-30T08:44:47.000Z | 2022-03-11T13:58:48.000Z | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# from ._utils import _C
import torch
if torch.__version__.split(".")[0] == "1":
from torchvision.ops import nms
elif torch.__version__ == "0.4.0":
from model.nms.nms_wrapper import nms
else:
raise RuntimeError("unsupported torch version. Supported: 0.4.0 (recommended) and 1.x")
# nms.__doc__ = """
# This function performs Non-maximum suppresion"""
| 31.214286 | 91 | 0.713959 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# from ._utils import _C
import torch
if torch.__version__.split(".")[0] == "1":
from torchvision.ops import nms
elif torch.__version__ == "0.4.0":
from model.nms.nms_wrapper import nms
else:
raise RuntimeError("unsupported torch version. Supported: 0.4.0 (recommended) and 1.x")
# nms.__doc__ = """
# This function performs Non-maximum suppresion"""
| 0 | 0 | 0 |
2a873e86730c2b7e035acf29e0e6c9308282c3c7 | 1,834 | py | Python | motion_sensor.py | joelghill/catcam | 1b95b23d48bcaf42a028a90f728c0609b2ef9f79 | [
"MIT"
] | 1 | 2018-05-09T06:51:49.000Z | 2018-05-09T06:51:49.000Z | motion_sensor.py | joelghill/catcam | 1b95b23d48bcaf42a028a90f728c0609b2ef9f79 | [
"MIT"
] | null | null | null | motion_sensor.py | joelghill/catcam | 1b95b23d48bcaf42a028a90f728c0609b2ef9f79 | [
"MIT"
] | null | null | null | #!/usr/bin/python
import RPi.GPIO as GPIO
import time
#monitor = SonicDistanceMonitor(print_distance)
#monitor.start(0.2)
| 26.970588 | 78 | 0.605234 | #!/usr/bin/python
import RPi.GPIO as GPIO
import time
class MotionDetector() :
_input_pin = 11
_is_running = False
_on_motion_detected = None
def __init__(self, callback, input_pin=11) :
"""
Initializes a new instance of the SonicDistance class
tigger is the GPIO pin connected to the trigger sensor pin
echo is the GPIO pin connected to the echo sensor pin
"""
self._input_pin = input_pin
self._on_motion_detected = callback
def start(self, wait=0.5) :
"""
Begins monitoring for distance changes
offset - The amount of change in distance before callback is activated
wait - wait time in seconds before checking distance changes
"""
self._prepare()
self._is_running = True
print('Detecting motion...')
while self._is_running == True :
GPIO.wait_for_edge(self._input_pin, GPIO.RISING)
self._on_motion_detected()
def stop(self):
"""
Stops monitoring for distance. Cleans up GPIO
"""
self._is_running = false
GPIO.cleanup()
def _prepare(self):
"""
Prepares this instance for using the GPIO board
to interact with HC-SR04 distance sensor
"""
try:
GPIO.setmode(GPIO.BOARD)
GPIO.setup(self._input_pin, GPIO.IN, pull_up_down = GPIO.PUD_UP)
print "Waiting for sensor to settle"
time.sleep(5)
print("ready to detect motion")
except Exception as e:
print("prepare call failed: " + str(e))
self.print_config()
raise
def print_config(self):
print("Input Pin: " + str(self._input_pin))
#monitor = SonicDistanceMonitor(print_distance)
#monitor.start(0.2)
| 54 | 1,633 | 23 |
d7fa59b9d3ca261fe2244e4fe4242ca509271090 | 682 | py | Python | kfusiontables/kft/sync.py | kula1922/kfusiontables | 149ddaddb95319a237bb94525db17b1b3a5ac66f | [
"BSD-3-Clause"
] | 4 | 2016-04-10T10:27:36.000Z | 2018-10-12T13:45:25.000Z | kfusiontables/kft/sync.py | kula1922/kfusiontables | 149ddaddb95319a237bb94525db17b1b3a5ac66f | [
"BSD-3-Clause"
] | 2 | 2020-06-05T17:30:32.000Z | 2021-06-01T21:52:49.000Z | kfusiontables/kft/sync.py | kula1922/kfusiontables | 149ddaddb95319a237bb94525db17b1b3a5ac66f | [
"BSD-3-Clause"
] | null | null | null | import logging
from kfusiontables.kft import KFusionTables
logger = logging.getLogger(__name__)
| 27.28 | 73 | 0.617302 | import logging
from kfusiontables.kft import KFusionTables
logger = logging.getLogger(__name__)
class KFusionTablesSync(KFusionTables):
def sync_tables(self, table_name=None, table_names=None, sender=None,
senders=None, _all=None, force=None):
"""
Synchronize local tables to google fusiontables.
"""
pass
def sync_rows(self, table_id=None, table_ids=None,
table_name=None, table_names=None, sender=None,
senders=None, row_id=None, row_ids=None,
_all=None, force=None):
"""
Synchronize local rows to google fusiontables.
"""
pass
| 0 | 559 | 23 |
92f7c60c2ca087b46a3cac5a9312bc2c42f94484 | 11,399 | py | Python | pyqode/core/_forms/search_panel_ui.py | haesleinhuepf/pyqode.core | 88b9bab081fd580d4de86f3d926a9f0b19146d28 | [
"MIT"
] | null | null | null | pyqode/core/_forms/search_panel_ui.py | haesleinhuepf/pyqode.core | 88b9bab081fd580d4de86f3d926a9f0b19146d28 | [
"MIT"
] | null | null | null | pyqode/core/_forms/search_panel_ui.py | haesleinhuepf/pyqode.core | 88b9bab081fd580d4de86f3d926a9f0b19146d28 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/home/colin/dev/pyQode/pyqode.core/forms/search_panel.ui'
#
# Created by: PyQt5 UI code generator 5.5.1
#
# WARNING! All changes made in this file will be lost!
from qtpy import QtCore, QtGui, QtWidgets
from pyqode.core.widgets import PromptLineEdit
from . import pyqode_core_rc | 58.45641 | 115 | 0.737258 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/home/colin/dev/pyQode/pyqode.core/forms/search_panel.ui'
#
# Created by: PyQt5 UI code generator 5.5.1
#
# WARNING! All changes made in this file will be lost!
from qtpy import QtCore, QtGui, QtWidgets
class Ui_SearchPanel(object):
def setupUi(self, SearchPanel):
SearchPanel.setObjectName("SearchPanel")
SearchPanel.resize(884, 90)
SearchPanel.setStyleSheet("")
self.verticalLayout = QtWidgets.QVBoxLayout(SearchPanel)
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.verticalLayout.setSpacing(0)
self.verticalLayout.setObjectName("verticalLayout")
self.frame = QtWidgets.QFrame(SearchPanel)
self.frame.setFrameShape(QtWidgets.QFrame.NoFrame)
self.frame.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame.setObjectName("frame")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.frame)
self.verticalLayout_2.setContentsMargins(9, 9, 9, 9)
self.verticalLayout_2.setSpacing(9)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.widgetSearch = QtWidgets.QWidget(self.frame)
self.widgetSearch.setObjectName("widgetSearch")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.widgetSearch)
self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout.setObjectName("horizontalLayout")
self.labelSearch = QtWidgets.QLabel(self.widgetSearch)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.labelSearch.sizePolicy().hasHeightForWidth())
self.labelSearch.setSizePolicy(sizePolicy)
self.labelSearch.setMinimumSize(QtCore.QSize(0, 0))
self.labelSearch.setMaximumSize(QtCore.QSize(18, 18))
self.labelSearch.setText("")
self.labelSearch.setPixmap(QtGui.QPixmap(":/pycode-icons/rc/edit-find.png"))
self.labelSearch.setScaledContents(True)
self.labelSearch.setObjectName("labelSearch")
self.horizontalLayout.addWidget(self.labelSearch)
self.lineEditSearch = PromptLineEdit(self.widgetSearch)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lineEditSearch.sizePolicy().hasHeightForWidth())
self.lineEditSearch.setSizePolicy(sizePolicy)
self.lineEditSearch.setMinimumSize(QtCore.QSize(200, 0))
self.lineEditSearch.setObjectName("lineEditSearch")
self.horizontalLayout.addWidget(self.lineEditSearch)
self.toolButtonPrevious = QtWidgets.QToolButton(self.widgetSearch)
self.toolButtonPrevious.setText("")
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(":/pyqode_icons/rc/go-up.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.toolButtonPrevious.setIcon(icon)
self.toolButtonPrevious.setObjectName("toolButtonPrevious")
self.horizontalLayout.addWidget(self.toolButtonPrevious)
self.toolButtonNext = QtWidgets.QToolButton(self.widgetSearch)
self.toolButtonNext.setText("")
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap(":/pycode-icons/rc/go-down.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.toolButtonNext.setIcon(icon1)
self.toolButtonNext.setObjectName("toolButtonNext")
self.horizontalLayout.addWidget(self.toolButtonNext)
self.checkBoxRegex = QtWidgets.QCheckBox(self.widgetSearch)
self.checkBoxRegex.setObjectName("checkBoxRegex")
self.horizontalLayout.addWidget(self.checkBoxRegex)
self.checkBoxCase = QtWidgets.QCheckBox(self.widgetSearch)
self.checkBoxCase.setStyleSheet("")
self.checkBoxCase.setObjectName("checkBoxCase")
self.horizontalLayout.addWidget(self.checkBoxCase)
self.checkBoxWholeWords = QtWidgets.QCheckBox(self.widgetSearch)
self.checkBoxWholeWords.setObjectName("checkBoxWholeWords")
self.horizontalLayout.addWidget(self.checkBoxWholeWords)
self.checkBoxInSelection = QtWidgets.QCheckBox(self.widgetSearch)
self.checkBoxInSelection.setObjectName("checkBoxInSelection")
self.horizontalLayout.addWidget(self.checkBoxInSelection)
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem)
self.labelMatches = QtWidgets.QLabel(self.widgetSearch)
self.labelMatches.setObjectName("labelMatches")
self.horizontalLayout.addWidget(self.labelMatches)
self.toolButtonClose = QtWidgets.QToolButton(self.widgetSearch)
self.toolButtonClose.setText("")
icon2 = QtGui.QIcon()
icon2.addPixmap(QtGui.QPixmap(":/pycode-icons/rc/close.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.toolButtonClose.setIcon(icon2)
self.toolButtonClose.setObjectName("toolButtonClose")
self.horizontalLayout.addWidget(self.toolButtonClose)
self.verticalLayout_2.addWidget(self.widgetSearch)
self.widgetReplace = QtWidgets.QWidget(self.frame)
self.widgetReplace.setObjectName("widgetReplace")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.widgetReplace)
self.horizontalLayout_2.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.labelReplace = QtWidgets.QLabel(self.widgetReplace)
self.labelReplace.setMaximumSize(QtCore.QSize(18, 18))
self.labelReplace.setText("")
self.labelReplace.setPixmap(QtGui.QPixmap(":/pycode-icons/rc/edit-find-replace.png"))
self.labelReplace.setScaledContents(True)
self.labelReplace.setObjectName("labelReplace")
self.horizontalLayout_2.addWidget(self.labelReplace)
self.lineEditReplace = PromptLineEdit(self.widgetReplace)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lineEditReplace.sizePolicy().hasHeightForWidth())
self.lineEditReplace.setSizePolicy(sizePolicy)
self.lineEditReplace.setMinimumSize(QtCore.QSize(200, 0))
self.lineEditReplace.setObjectName("lineEditReplace")
self.horizontalLayout_2.addWidget(self.lineEditReplace)
self.toolButtonReplace = QtWidgets.QToolButton(self.widgetReplace)
self.toolButtonReplace.setObjectName("toolButtonReplace")
self.horizontalLayout_2.addWidget(self.toolButtonReplace)
self.toolButtonReplaceAll = QtWidgets.QToolButton(self.widgetReplace)
self.toolButtonReplaceAll.setObjectName("toolButtonReplaceAll")
self.horizontalLayout_2.addWidget(self.toolButtonReplaceAll)
spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(spacerItem1)
self.lineEditReplace.raise_()
self.toolButtonReplace.raise_()
self.toolButtonReplaceAll.raise_()
self.labelReplace.raise_()
self.verticalLayout_2.addWidget(self.widgetReplace)
self.verticalLayout.addWidget(self.frame)
self.actionSearch = QtWidgets.QAction(SearchPanel)
icon = QtGui.QIcon.fromTheme("edit-find")
self.actionSearch.setIcon(icon)
self.actionSearch.setIconVisibleInMenu(True)
self.actionSearch.setObjectName("actionSearch")
self.actionActionSearchAndReplace = QtWidgets.QAction(SearchPanel)
icon = QtGui.QIcon.fromTheme("edit-find-replace")
self.actionActionSearchAndReplace.setIcon(icon)
self.actionActionSearchAndReplace.setIconVisibleInMenu(True)
self.actionActionSearchAndReplace.setObjectName("actionActionSearchAndReplace")
self.actionFindNext = QtWidgets.QAction(SearchPanel)
icon = QtGui.QIcon.fromTheme("go-down")
self.actionFindNext.setIcon(icon)
self.actionFindNext.setIconVisibleInMenu(True)
self.actionFindNext.setObjectName("actionFindNext")
self.actionFindPrevious = QtWidgets.QAction(SearchPanel)
icon = QtGui.QIcon.fromTheme("go-up")
self.actionFindPrevious.setIcon(icon)
self.actionFindPrevious.setIconVisibleInMenu(True)
self.actionFindPrevious.setObjectName("actionFindPrevious")
self.retranslateUi(SearchPanel)
QtCore.QMetaObject.connectSlotsByName(SearchPanel)
SearchPanel.setTabOrder(self.lineEditSearch, self.lineEditReplace)
SearchPanel.setTabOrder(self.lineEditReplace, self.toolButtonPrevious)
SearchPanel.setTabOrder(self.toolButtonPrevious, self.toolButtonNext)
SearchPanel.setTabOrder(self.toolButtonNext, self.checkBoxCase)
SearchPanel.setTabOrder(self.checkBoxCase, self.checkBoxWholeWords)
SearchPanel.setTabOrder(self.checkBoxWholeWords, self.toolButtonReplace)
SearchPanel.setTabOrder(self.toolButtonReplace, self.toolButtonReplaceAll)
SearchPanel.setTabOrder(self.toolButtonReplaceAll, self.toolButtonClose)
def retranslateUi(self, SearchPanel):
SearchPanel.setWindowTitle(_("Form"))
self.lineEditSearch.setToolTip(_("Search term"))
self.toolButtonPrevious.setToolTip(_("Select previous occurence"))
self.toolButtonNext.setToolTip(_("Select next occurence"))
self.checkBoxRegex.setToolTip(_("Use a regular expression for search occurences"))
self.checkBoxRegex.setText(_("Regex"))
self.checkBoxCase.setToolTip(_("Enable case sensitive search"))
self.checkBoxCase.setText(_("Match case"))
self.checkBoxWholeWords.setToolTip(_("Search for whole words only"))
self.checkBoxWholeWords.setText(_("Whole words"))
self.checkBoxInSelection.setText(_("In Selection"))
self.labelMatches.setText(_("0 matches"))
self.lineEditReplace.setToolTip(_("Replacement text"))
self.toolButtonReplace.setToolTip(_("Replace current occurence"))
self.toolButtonReplace.setText(_("Replace"))
self.toolButtonReplaceAll.setToolTip(_("Replace all occurences"))
self.toolButtonReplaceAll.setText(_("Replace All"))
self.actionSearch.setText(_("Search"))
self.actionSearch.setToolTip(_("Show the search panel"))
self.actionSearch.setShortcut(_("Ctrl+F"))
self.actionActionSearchAndReplace.setText(_("Search and replace"))
self.actionActionSearchAndReplace.setToolTip(_("Show the search and replace panel"))
self.actionActionSearchAndReplace.setShortcut(_("Ctrl+R"))
self.actionFindNext.setText(_("Find next"))
self.actionFindNext.setToolTip(_("Find the next occurrence (downward)"))
self.actionFindNext.setShortcut(_("F3"))
self.actionFindPrevious.setText(_("Find previous"))
self.actionFindPrevious.setToolTip(_("Find previous occurrence (upward)"))
self.actionFindPrevious.setShortcut(_("Shift+F3"))
from pyqode.core.widgets import PromptLineEdit
from . import pyqode_core_rc | 10,956 | 8 | 76 |
b59b2caf90924f8b1f174e18235105b36f87f29b | 4,614 | py | Python | main.py | kodo-pp/hse-ws10 | 9dbad128d2cbaa65a7d7ae4418f3a03736df0211 | [
"Apache-2.0"
] | null | null | null | main.py | kodo-pp/hse-ws10 | 9dbad128d2cbaa65a7d7ae4418f3a03736df0211 | [
"Apache-2.0"
] | null | null | null | main.py | kodo-pp/hse-ws10 | 9dbad128d2cbaa65a7d7ae4418f3a03736df0211 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import json
import re
import sys
import time
from argparse import ArgumentParser
from queue import Queue
import requests as rq
from bs4 import BeautifulSoup
from loguru import logger
if __name__ == '__main__':
main()
| 26.982456 | 108 | 0.596012 | #!/usr/bin/env python
import json
import re
import sys
import time
from argparse import ArgumentParser
from queue import Queue
import requests as rq
from bs4 import BeautifulSoup
from loguru import logger
def parse_arguments():
ap = ArgumentParser(
description = 'Download an HTML page, find HTTPS links in it and save them into a JSON file',
)
ap.add_argument(
'--url',
'-u',
type = str,
help = 'The URL from which to download the HTML page',
required = True,
)
ap.add_argument(
'--output',
'-o',
type = str,
help = 'The name of the output file',
required = True,
)
ap.add_argument(
'--delay',
'-d',
type = float,
help = 'Delay before downloads (real number; defaults to 0)',
default = 0.0,
)
ap.add_argument(
'--max-iterations',
'-m',
type = int,
help = 'Max iterations',
default = 1000,
)
return ap.parse_args()
def download_html_page(url):
try:
response = rq.get(url)
except Exception as e:
# Error message provided by requests is too long and technical, so we'll just use a general message
#raise Exception(f'Unable to download the page: {e}') from e
raise Exception(f'Unable to download the web page') from e
if response.status_code != 200:
raise Exception(f'Server returned {response.status_code}')
return response.content
def find_text(element):
if isinstance(element, str):
return [element]
else:
return element.find_all(text=True)
def concat_lists(list_of_lists):
return sum(list_of_lists, [])
def find_links(html):
try:
bs = BeautifulSoup(markup=html, features='html.parser')
except Exception as e:
raise Exception(f'Unable to parse HTML: {e}') from e
for element in bs.find_all('a'):
try:
href = element['href']
except KeyError as e:
# Skip the link if it doesn't have a `href` attribute
continue
children = list(element.children)
text_elements = concat_lists(find_text(child) for child in children)
text = ' '.join(text_elements)
yield (text, href)
def is_internal(url):
return url.startswith('/wiki/')
def write_json(data, filename):
try:
with open(filename, 'w') as file:
json.dump(data, file)
except Exception as e:
raise Exception(f'Failed to write file: {e}') from e
def make_absolute(url, lang):
return f'https://{lang}.wikipedia.org' + url
def recursive_download_and_parse(url, lang, iteration_limit=1000, delay=0):
# Not actually recursive because downloading is DFS (Depth First Search) manner with the limit on
# the number on iterations instead of the recursion depth doesn't make much sense. Instead, the BFS-like
# algorithm is used
#
# Yields:
# Pairs of (text, href) of internal links
task_queue = Queue(iteration_limit + 10)
task_queue.put(url)
iterations = 0
while not task_queue.empty() and iterations < iteration_limit:
current_url = task_queue.get_nowait()
iterations += 1
logger.info('Iteration {}: get {}', iterations, current_url)
try:
html = download_html_page(current_url)
internal_links = (
(text, make_absolute(href, lang=lang))
for text, href in find_links(html)
if is_internal(href)
)
for text, href in internal_links:
yield text, href
if not task_queue.full():
task_queue.put_nowait(href)
time.sleep(delay)
except Exception as e:
logger.error(e)
continue
def parse_url(url):
return re.match(r'https?://([a-z]+)[.]wikipedia.org/', url)
def main():
try:
config = parse_arguments()
parsed_url = parse_url(config.url)
if parsed_url is None:
raise Exception('The program can only work with wikipedia urls: http(s)://<lang>.wikipedia.org')
lang = parsed_url.group(1)
result = dict(
recursive_download_and_parse(
config.url,
iteration_limit = config.max_iterations,
delay = config.delay,
lang = lang,
)
)
write_json(result, config.output)
except Exception as e:
logger.error('Error: {}', e)
sys.exit(1)
if __name__ == '__main__':
main()
| 4,103 | 0 | 253 |
2732bd4889b921a247d93555b57cc7fd26b0b357 | 24,762 | py | Python | load_data.py | vincekurtz/gracenet | 787ed3c559cd540bbbb53380d5b21879857fe254 | [
"MIT"
] | null | null | null | load_data.py | vincekurtz/gracenet | 787ed3c559cd540bbbb53380d5b21879857fe254 | [
"MIT"
] | null | null | null | load_data.py | vincekurtz/gracenet | 787ed3c559cd540bbbb53380d5b21879857fe254 | [
"MIT"
] | 1 | 2018-09-19T06:43:19.000Z | 2018-09-19T06:43:19.000Z | #!/usr/bin/env python3
##
#
# GraceNET v0.0
#
# Predict future anomolies based soley on the past 24 months of
# GRACE anomolies. This file generates training and testing data
# saving both to json files
#
##
import random
import csv
import glob
import json
import datetime
import numpy as np
import matplotlib.pyplot as plt
import time
import re
data_dir = "/home/vince/Groundwater/NeuralNet/data/"
grace_data = None
irrigation_data = None
population_data = None
precipitation_data = None
temperature_data = None
vegetation_data = None
def load_all_data():
"""
Load data from files as global variables.
Note that this requires a significant amount (~4.5GB)
of RAM.
"""
global grace_data
global irrigation_data
global population_data
global precipitation_data
global temperature_data
global vegetation_data
print("===> Loading GRACE data to memory")
grace_data = get_data_dict('grace/GRC*', 'grace')
#print("===> Loading IRRIGATION data to memory")
#irrigation_data = get_data_dict('irrigation/irrigation*', 'irr')
#print("===> Loading POPULATION data to memory")
#population_data = get_data_dict('population/population*', 'pop')
print("===> Loading PRECIPITATION data to memory")
precipitation_data = get_data_dict('precipitation/precipitation*', 'precip')
print("===> Loading TEMPERATURE data to memory")
temperature_data = get_data_dict('temperature/MOD11C3_LST*', 'temp')
print("===> Loading VEGETATION data to memory")
vegetation_data = get_data_dict('vegetation/MOD13C2_EVI_*', 'veg')
def get_regional_data():
"""
Get training/testing data from plaintext files.
Only use data from the conententla US (ish)
Return X, y, where y is the GRACE anomoly and
X is the data we'll use to derive the anomoly.
"""
X = []
y = []
print("===> Getting valid pixels")
dates = valid_date_list()
pixels = valid_pixel_list(dates)
print("===> Generating dataset")
for date in dates:
anomolies = []
precips = [] # store precipitation, temperature, and vegetation data
temps = [] # we'll collapse these into 1d after we get all the pixels
vegs = []
for pixel in pixels:
lat = pixel[1]
lon = pixel[0]
# grace anomoly --> output
grace = grace_data[date][pixel]
# other varialbes --> input
precip = precipitation_data[date][pixel]
temp = temperature_data[date][pixel]
veg = vegetation_data[date][pixel]
# Add to the datasets!
anomolies.append(grace)
precips.append(precip)
precips.append(temp)
vegs.append(veg)
#print(len(anomolies))
#print(len(precips+temps+vegs)/3)
print(str(len(X)) + " datapoints")
print("input dimensions: " + str(len(X[0])))
return (X, y)
def valid_date_list():
"""
Return a list of dates that have data for grace, precipitation,
temperature, and vegetation.
"""
dates = []
for date in grace_data:
if (date in precipitation_data and date in temperature_data and date in vegetation_data):
dates.append(date)
return dates
def valid_pixel_list(date_list):
"""
Return a list of pixels in the contental US with precipitation, temperature,
vegetation, and grace data for all the given dates.
"""
possiblepixels = set()
badpixels = set()
# get all grace pixels in the continental US
for pixel in grace_data[(2002, 4)]:
lon = pixel[0]
lat = pixel[1]
inbounds = True #(lat > 26 and lat < 49 and lon > -125 and lon < -67)
if inbounds:
possiblepixels.add(pixel)
# now go back and filter out pixels that aren't in all the places
for date in date_list:
for pixel in possiblepixels:
in_all_sets = (pixel in grace_data[date])
if not in_all_sets:
badpixels.add(pixel)
valid_pixels = possiblepixels - badpixels # pixels that are in possible but not in bad
print(len(possiblepixels))
print(len(badpixels))
print(len(valid_pixels))
return list(valid_pixels)
def get_data():
"""
Get training/testing data from plaintext files.
Return X, y, where y is the GRACE slope and
X is the data we'll use to derive the GRACE data.
"""
X = []
y = []
max_n=200000
print("===> Generating dataset")
i = 0 # number of iterations
for date in grace_data:
for pixel in grace_data[(2004,1)]: # use a consistent list of pixels
lat = pixel[1]
lon = pixel[0]
# restrict to lower asia ish region
try:
# grace slope --> output
grace = get_trend(pixel, date, grace_data)
# other varialbes --> input
# include both trend and average (over ~ 2 yrs)
precip = get_trend(pixel, date, precipitation_data)
temp = get_trend(pixel, date, temperature_data)
veg = get_trend(pixel, date, vegetation_data)
precipavg = get_average(pixel, date, precipitation_data)
vegavg = get_average(pixel, date, vegetation_data)
tempavg = get_average(pixel, date, temperature_data)
if grace: # it's useless to include data without an output!
# add to the master arrays of data
X.append([precip, precipavg, temp, tempavg, veg, vegavg, lat])
y.append(grace)
except KeyError:
# sometimes we won't have enough corresponding data on some of the
# extra variables. We'll just ignore that pixel/date pair in that case.
pass
n = len(X)
print("Date %s / %s | Sample %s / %s " % (i, len(grace_data), n, max_n))
if n > max_n: # quit when we have enough samples
break
i+=1 # iteration counter
print(str(len(X)) + " datapoints")
print("input dimensions: " + str(len(X[0])))
return (X, y)
def double_data(x_row, y_row):
    """
    Create and return an artificial dataset by adding
    gaussian noise to the given real data.

    NOTE(review): not implemented -- currently a no-op that returns None.
    """
    pass
def nearby_valid_date(desired_date, dictionary):
    """
    Find a date key in `dictionary` that falls in the same year and month
    as `desired_date`.

    Dates are (year, month, ...) tuples; only the first two elements are
    compared. Returns the first matching key (dict iteration order), or
    None when no key shares the year/month.
    """
    target = desired_date[:2]
    # Scan the dictionary's keys and stop at the first same-month match.
    return next((candidate for candidate in dictionary if candidate[:2] == target), None)
def get_prev_entry(year, month, day):
    """
    Given the date of one GRACE data file, return the (year, month, day)
    encoded in the file that precedes it chronologically, or None when
    the given date does not correspond to any GRACE file on disk.
    """
    # Alphabetical order is chronological order for these filenames.
    files = sorted(glob.glob(data_dir + "grace/GRCTellus.JPL*"))
    this_name = data_dir + "grace/GRCTellus.JPL.%04d%02d%02d.LND.RL05_1.DSTvSCS1411.txt" % (year, month, day)
    for idx, path in enumerate(files):
        if path == this_name:
            prev = files[idx - 1]
            # Slice the yyyymmdd stamp counting from the end of the path so
            # a different data_dir prefix cannot shift the offsets.
            stamp = prev[-35:-27]
            return (int(stamp[0:4]), int(stamp[4:6]), int(stamp[6:8]))
    return None
def get_data_dict(fpattern, fformat):
    """
    Load the data from a given file pattern into a dictionary.
    This dictionary will hold all the data for a given variable.
    Example input for vegetation: fpattern="vegetation/MOD13C2_EVI*", fformat="veg"
    The fformat is used to differentiate between different file naming conventions.
    Returns: {
    DATE: {PIXEL: DATA, PIXEL1: DATA1, ...},
    DATE2: {PIXEL: DATA, PIXEL1: DATA1, ...},
    }
    where DATE is a (year, month) tuple and PIXEL a (lon, lat) tuple.
    """
    d = {} # the dictionary that will hold all of our data
    files = glob.glob(data_dir + fpattern)
    files.sort() # sorting alphabetically puts files in chronological order
    # Pick a filename -> (year, month) parser based on the naming convention.
    # NOTE(review): in this copy of the file every branch below contained
    # only a comment (a SyntaxError); the per-format get_date helpers have
    # been restored.
    if fformat == 'veg':
        # vegetation: year + Julian day of year in the filename
        def get_date(fname):
            regex = r'MOD13C2_EVI_([0-9]*)_([0-9]*)_monthly.csv'
            m = re.search(regex, fname)
            year = int(m.group(1))
            day_of_year = int(m.group(2))
            date = datetime.datetime(year, 1, 1) + datetime.timedelta(day_of_year - 1)
            return (date.year, date.month) # monthly data: keep only year and month
    elif fformat == 'temp':
        # temperature: year + day of year in the filename
        def get_date(fname):
            regex = r'MOD11C3_LST_Day_CMG_([0-9]*)_([0-9]*)_monthly.csv'
            m = re.search(regex, fname)
            year = int(m.group(1))
            day_of_year = int(m.group(2))
            date = datetime.datetime(year, 1, 1) + datetime.timedelta(day_of_year - 1)
            return (date.year, date.month)
    elif fformat == 'precip':
        # precipitation: decimal-year date in the filename
        def get_date(fname):
            regex = r'precipitation_([\.0-9]+)'
            m = re.search(regex, fname)
            decidate = float(m.group(1))
            year = int(decidate)
            rem = decidate - year
            base = datetime.datetime(year, 1, 1)
            # Convert the fractional year into an offset in seconds.
            date = base + datetime.timedelta(seconds=(base.replace(year=base.year + 1) - base).total_seconds() * rem)
            return (date.year, date.month)
    elif fformat == 'pop':
        # population: yearly data only
        def get_date(fname):
            regex = r'population_density_([0-9]*)_regridded.txt'
            m = re.search(regex, fname)
            year = int(m.group(1))
            return (year, 1) # we only have population data on the year
    elif fformat == 'irr':
        # irrigation: yearly data only
        def get_date(fname):
            regex = r'irrigation_pct_([0-9]*).csv'
            m = re.search(regex, fname)
            year = int(m.group(1))
            return (year, 1)
    elif fformat == 'grace':
        # grace anomaly data: yyyymmdd stamp in the filename
        def get_date(fname):
            regex = r'GRCTellus.JPL.([0-9]*).LND.RL05_1.DSTvSCS1411.txt'
            m = re.search(regex, fname)
            datestring = m.group(1)
            year = int(datestring[0:4])
            month = int(datestring[4:6])
            day = int(datestring[6:8])
            return (year, month)
    else:
        print("ERROR: unrecognized file format %s" % fformat)
        return None
    for fname in files:
        date = get_date(fname) # (year, month)
        data = get_pixel_data(fname) # {(lon, lat): val, ...}
        # add an entry to the dictionary
        d[date] = data
    return d
def get_pixel_data(fname):
    """
    Parse a space-delimited data file into {(lon, lat): measurement}.

    Rows whose first column is "HDR" are header lines and are skipped.
    Columns 0, 1, and 2 are longitude, latitude, and the measurement.
    """
    pixels = {}
    with open(fname, 'r') as handle:
        for row in csv.reader(handle, delimiter=" "):
            if row[0] == "HDR":  # skip header rows
                continue
            key = (float(row[0]), float(row[1]))
            pixels[key] = float(row[2])
    return pixels
def get_veg_trend(pixel, year, month, day):
    """
    Return the 2 year vegetation trend for a given pixel
    and date. The trend should be over the N months before
    the given date.
    pixel should be a (lon, lat) touple.
    year, month, and day should be strings.

    Returns the least-squares slope of the EVI readings, or None when
    fewer than 10 monthly readings exist for the pixel.
    """
    N = 24  # number of months included in the trend
    day_of_year = datetime.datetime(int(year), int(month), int(day)).strftime("%j")
    files = glob.glob(data_dir + "vegetation/MOD13C2_EVI*")
    files.sort() # sorting alphabetically is enough b/c nice naming scheme!
    # find the vegetation data closest to the requested date
    testf = data_dir + "vegetation/MOD13C2_EVI_%s_%s_monthly.csv" % (year, day_of_year) # data for the day we'd really like
    if (testf in files):
        # this date is already exactly included!
        startf = testf
    else:
        # we need to look back a bit to find the entry closest to but before
        # the given date
        # NOTE(review): if testf sorts before every existing file this picks
        # lst[-1] (the newest file); presumably dates before the archive
        # starts are never requested -- confirm with callers.
        lst = files + [testf]
        lst.sort()
        for i in range(len(lst)):
            if lst[i] == testf:
                startf = lst[i-1]
    # get data files for the previous N months
    fnames = []
    for i in range(len(files)):
        if files[i] == startf:
            start = i
            for j in range(N):
                fnames.append(files[start-j])
    # get data for this pixel from these previous months
    lon = str(pixel[0])
    lat = str(pixel[1])
    evi = []  # EVI readings found for this pixel
    months = []  # month index (0 = most recent file) for each reading
    n = 0
    for fname in fnames:
        found = False
        with open(fname, 'r') as fh:
            reader = csv.reader(fh, delimiter=" ")
            for row in reader:
                if (row[0] == lon and row[1] == lat): # check for matching pixel
                    evi.append(float(row[2]))
                    found = True
        if found:
            months.append(n)
        n+=1
    if len(evi) < 10:
        print("no EVI data avilible for this pixel")
        return None
    # now fit a linear regression of the form y = mx+b
    # (lstsq without rcond emits a FutureWarning on modern NumPy)
    x = np.array(months)
    y = np.array(evi)
    A = np.vstack([x, np.ones(len(x))]).T
    slope, y_int = np.linalg.lstsq(A, y)[0]
    return(slope)
def get_anomoly(pixel, year, month, day):
    """
    Look up the GRACE anomaly for `pixel` on the given date.

    pixel is a (lon, lat) tuple; year, month, and day are formatted into
    the GRACE filename. Returns the anomaly as a float, or None when the
    pixel is not present in that file.
    """
    fname = data_dir + "grace/GRCTellus.JPL.%04d%02d%02d.LND.RL05_1.DSTvSCS1411.txt" % (year, month, day)
    lon, lat = str(pixel[0]), str(pixel[1])
    with open(fname, 'r') as fh:
        for row in csv.reader(fh, delimiter=" "):
            if row[0] == lon and row[1] == lat:
                # Found the matching pixel row.
                return float(row[2])
    return None
def get_irrigation_level(pixel):
    """
    Return the 2013 percentage of land equipped for irrigation at `pixel`.

    Pixels absent from the (sparse) irrigation file are treated as 0.0
    rather than missing, so that they are not pruned from the dataset.
    """
    fname = data_dir + "irrigation/irrigation_pct_2013.csv"
    lon, lat = str(pixel[0]), str(pixel[1])
    with open(fname, 'r') as fh:
        for row in csv.reader(fh, delimiter=" "):
            if row[0] == lon and row[1] == lat:
                print("irrigation found!")
                return float(row[2])
    # No entry for this pixel: assume zero irrigation.
    return 0.0
def get_precip_trend(pixel, year, month, day):
    """
    Return the precipitation trend for a given pixel
    and date. The trend should be over the N months before
    the given date.
    pixel should be a (lon, lat) touple.
    year, month, and day should be strings.

    Returns the least-squares slope of the precipitation readings, or
    None when fewer than 10 monthly readings exist for the pixel.
    """
    N = 24  # number of months included in the trend
    # NOTE(review): toYearFraction does not appear anywhere in this copy of
    # the file, so this call would raise NameError here -- confirm the
    # helper is in scope before use.
    decidate = str(toYearFraction(datetime.datetime(int(year), int(month), int(day))))[0:8]
    files = glob.glob(data_dir + "precipitation/precipitation_20*")
    files.sort() # sorting alphabetically is enough b/c nice naming scheme!
    # find the precipitation data closest to the requested date
    testf = data_dir + "precipitation/precipitation_%s" % (decidate) # data for the day we'd really like
    if (testf in files):
        # this date is already exactly included!
        startf = testf
    else:
        # we need to look back a bit to find the entry closest to but before
        # the given date
        lst = files + [testf]
        lst.sort()
        for i in range(len(lst)):
            if lst[i] == testf:
                startf = lst[i-1]
    # get data files for the previous N months
    fnames = []
    for i in range(len(files)):
        if files[i] == startf:
            start = i
            for j in range(N):
                fnames.append(files[start-j])
    # get data for this pixel from these previous months
    lon = str(pixel[0])
    lat = str(pixel[1])
    precip_pct = []  # readings found for this pixel
    months = []  # month index (0 = most recent file) for each reading
    n = 0
    for fname in fnames:
        found = False
        with open(fname, 'r') as fh:
            reader = csv.reader(fh, delimiter=" ")
            for row in reader:
                if (row[0] == lon and row[1] == lat): # check for matching pixel
                    precip_pct.append(float(row[2]))
                    found = True
        if found:
            months.append(n)
        n+=1
    if len(precip_pct) < 10:
        print("no precipitation data avilible for this pixel")
        return None
    # now fit a linear regression of the form y = mx+b
    # (lstsq without rcond emits a FutureWarning on modern NumPy)
    x = np.array(months)
    y = np.array(precip_pct)
    A = np.vstack([x, np.ones(len(x))]).T
    slope, y_int = np.linalg.lstsq(A, y)[0]
    return(slope)
def get_trend(pixel, date, dataset):
    """
    Return the N-month linear trend (slope) of `dataset` at `pixel`.

    dataset maps (year, month) -> {(lon, lat): value}. Starting at `date`
    and walking backwards one month at a time, up to N=24 readings are
    collected; months with no reading for this pixel are skipped. If more
    than 15 of the 24 months are missing, 0 is returned instead of
    fitting a meaningless line.
    """
    N = 24
    vals = []    # readings found for this pixel
    months = []  # month index (0 = `date`, increasing into the past)
    n = 0
    bad_cnt = 0
    # generate lists of month numbers and values
    for i in range(N):
        try:
            vals.append(dataset[date][pixel])
            months.append(n)
        except KeyError:
            bad_cnt += 1 # ignore months with no value for this pixel
        n+=1
        date = previous_month(date)
    if bad_cnt > 15:
        return 0 # too few datapoints for a meaningful trend
    # Fit y = m*x + b by least squares. rcond=None opts into the modern
    # NumPy cutoff for small singular values and silences the
    # FutureWarning emitted by the bare lstsq call.
    x = np.array(months)
    y = np.array(vals)
    A = np.vstack([x, np.ones(len(x))]).T
    slope, y_int = np.linalg.lstsq(A, y, rcond=None)[0]
    return slope
def get_average(pixel, date, dataset):
    """
    Return the mean of up to N=24 monthly `dataset` readings at `pixel`,
    walking backwards from `date`. Months without a reading for this
    pixel are skipped; if more than 15 of the 24 are missing, 0 is
    returned instead of an average over too few points.
    """
    N = 24
    samples = []
    missing = 0
    for _ in range(N):
        month_data = dataset.get(date)
        if month_data is not None and pixel in month_data:
            samples.append(month_data[pixel])
        else:
            missing += 1  # no reading for this pixel in this month
        date = previous_month(date)
    if missing > 15:
        return 0  # too sparse to average meaningfully
    return np.average(samples)
def previous_month(date):
    """
    Return the (year, month) tuple one month before `date`.

    `date` is a (year, month) tuple (extra trailing elements are
    ignored). January rolls back to December of the prior year.
    """
    year, month = date[0], date[1]
    if month == 1:
        # Wrap from January back to December of the previous year.
        return (year - 1, 12)
    return (year, month - 1)
def get_temperature_trend(pixel, date):
    """
    Placeholder for the 2-year temperature trend at `pixel`.

    pixel is a (lon, lat) tuple; date is a (year, month) tuple. The
    implementation was never finished: it prints the temperature for the
    given pixel/date and returns None. The regression code that followed
    the bare `return` was unreachable (and referenced the undefined names
    `months` and `temp`), so it has been removed.
    """
    this_temp = temperature_data[date][pixel]
    print(this_temp)
    return None
def random_valid_pixel(pixel_list):
    """
    Randomly select a pixel that will yield valid training data.
    This means that the given pixel
    1. Must exist for the given date
    2. Must exist in the previous 24 months
    3. Must not be in pixel_list
    Return a tuple of pixel, year, month, day
    """
    files = glob.glob(data_dir + "grace/GRCTellus.JPL*")
    files.sort() # sorting alphabetically is enough b/c nice naming scheme!
    files = files[24:] # remove the first 24 months since there won't be enough data before these
    startfile = files[random.randint(0,len(files)-1)] # choose a random month
    # choose a random pixel
    # Count the file's lines so a random data row can be chosen below.
    with open(startfile) as f:
        for i, l in enumerate(f):
            pass
        num_lines = i
    header_lines = 22
    pixel_line = random.randint(header_lines, num_lines)
    # get the value of that pixel
    with open(startfile) as f:
        reader = csv.reader(f, delimiter=" ")
        for i, row in enumerate(reader):
            if (i == pixel_line):
                pixel = (float(row[0]), float(row[1]))
    # make sure the pixel isn't already in our list
    # NOTE(review): rejection sampling via recursion -- a large pixel_list
    # relative to the pixel pool could exhaust Python's recursion limit;
    # confirm callers keep the list small.
    if pixel in pixel_list:
        # this pixel is already in our list
        #print("pixel already chosen. picking a new one")
        return random_valid_pixel(pixel_list)
    yyyymmdd = startfile[-35:-27] # looking backwards from the end in case data_dir changes
    year = int(yyyymmdd[0:4])
    month = int(yyyymmdd[4:6])
    day = int(yyyymmdd[6:8])
    # make sure that pixel exists for the previous 24 months
    y, m, d = (year, month, day)
    for i in range(24):
        y, m, d = get_prev_entry(y, m, d)
        if not exists(pixel, y, m, d):
            # one of the previous months doesn't have our given pixel
            # So do we give up? No. We try again
            #print("Found invalid pixel. Trying again")
            return random_valid_pixel(pixel_list)
    return (pixel, year, month, day)
def exists(pixel, year, month, day):
    """
    Check whether the GRACE file for the given date exists and contains
    the given (lon, lat) pixel. Returns True or False.
    """
    fname = data_dir + "grace/GRCTellus.JPL.%04d%02d%02d.LND.RL05_1.DSTvSCS1411.txt" % (year, month, day)
    lon = str(pixel[0])
    lat = str(pixel[1])
    try:
        with open(fname, 'r') as fh:
            reader = csv.reader(fh, delimiter=" ")
            for row in reader:
                if (row[0] == lon and row[1] == lat): # check for matching pixel
                    return True # found the pixel!
            return False
    except (OSError, IndexError):
        # A missing/unreadable file (probably a bad date) or a malformed
        # row maps to "does not exist". The original bare `except:` also
        # swallowed KeyboardInterrupt/SystemExit, which it should not.
        return False
def save_validation_data():
    """
    Save grace and input data in a csv file.
    format:
    LON LAT GRACESLOPE PRECIP TEMP VEG PRECIPAVG TEMPAVG VEGAVG

    Writes validation.csv (space-delimited, one "HDR" header row) for the
    fixed date (2016, 1), iterating the pixel list of the (2004, 1) GRACE
    grid.
    """
    X = [] # input vars
    y = [] # grace
    # NOTE(review): X and y are never filled below -- only the csv is
    # written; these look like leftovers from get_data().
    with open('validation.csv', 'w') as fh:
        writer = csv.writer(fh, delimiter=' ')
        writer.writerow(["HDR","long","lat","grace","precip","temp","veg","precipavg","tempavg","vegavg"])
        date = (2016,1)
        for pixel in grace_data[(2004,1)]: # use a consistent list of pixels
            lat = pixel[1]
            lon = pixel[0]
            try:
                # grace slope --> output
                grace = get_trend(pixel, date, grace_data)
                # other varialbes --> input
                # include both trend and average (over ~ 2 yrs)
                precip = get_trend(pixel, date, precipitation_data)
                temp = get_trend(pixel, date, temperature_data)
                veg = get_trend(pixel, date, vegetation_data)
                precipavg = get_average(pixel, date, precipitation_data)
                vegavg = get_average(pixel, date, vegetation_data)
                tempavg = get_average(pixel, date, temperature_data)
                writer.writerow([lon, lat, grace, precip, temp, veg, precipavg, tempavg, vegavg])
            except KeyError:
                # sometimes we won't have enough corresponding data on some of the
                # extra variables. We'll just ignore that pixel/date pair in that case.
                pass
if __name__=="__main__":
    # Entry point: populate the module-level *_data globals first, then
    # build and save the train/test datasets.
    # NOTE(review): main() is not defined in this copy of the file --
    # confirm it exists before running this module directly.
    load_all_data() # do this first since many functions reference global vars
    main()
| 32.368627 | 125 | 0.593288 | #!/usr/bin/env python3
##
#
# GraceNET v0.0
#
# Predict future anomolies based soley on the past 24 months of
# GRACE anomolies. This file generates training and testing data
# saving both to json files
#
##
import random
import csv
import glob
import json
import datetime
import numpy as np
import matplotlib.pyplot as plt
import time
import re
data_dir = "/home/vince/Groundwater/NeuralNet/data/"
grace_data = None
irrigation_data = None
population_data = None
precipitation_data = None
temperature_data = None
vegetation_data = None
def load_all_data():
    """
    Load data from files as global variables.
    Note that this requires a significant amount (~4.5GB)
    of RAM.
    """
    global grace_data
    global irrigation_data
    global population_data
    global precipitation_data
    global temperature_data
    global vegetation_data
    print("===> Loading GRACE data to memory")
    grace_data = get_data_dict('grace/GRC*', 'grace')
    # NOTE(review): irrigation and population loading is disabled below;
    # their globals stay None, so anything that reads irrigation_data or
    # population_data will fail -- confirm before relying on them.
    #print("===> Loading IRRIGATION data to memory")
    #irrigation_data = get_data_dict('irrigation/irrigation*', 'irr')
    #print("===> Loading POPULATION data to memory")
    #population_data = get_data_dict('population/population*', 'pop')
    print("===> Loading PRECIPITATION data to memory")
    precipitation_data = get_data_dict('precipitation/precipitation*', 'precip')
    print("===> Loading TEMPERATURE data to memory")
    temperature_data = get_data_dict('temperature/MOD11C3_LST*', 'temp')
    print("===> Loading VEGETATION data to memory")
    vegetation_data = get_data_dict('vegetation/MOD13C2_EVI_*', 'veg')
def get_regional_data():
    """
    Get training/testing data from plaintext files.
    Only use data from the conententla US (ish)
    Return X, y, where y is the GRACE anomoly and
    X is the data we'll use to derive the anomoly.

    One sample per valid date: X holds the concatenated per-pixel
    precipitation, temperature, and vegetation values; y holds the
    matching per-pixel GRACE anomalies.

    NOTE(review): the original appended temperatures into `precips`
    (leaving `temps` forever empty) and never filled X/y, so the final
    len(X[0]) always raised IndexError; both are fixed here following the
    evident intent of the commented-out diagnostics.
    """
    X = []
    y = []
    print("===> Getting valid pixels")
    dates = valid_date_list()
    pixels = valid_pixel_list(dates)
    print("===> Generating dataset")
    for date in dates:
        anomolies = []
        precips = []  # per-pixel precipitation for this date
        temps = []    # per-pixel temperature for this date
        vegs = []     # per-pixel vegetation for this date
        for pixel in pixels:
            # grace anomoly --> output
            anomolies.append(grace_data[date][pixel])
            # other variables --> input
            precips.append(precipitation_data[date][pixel])
            temps.append(temperature_data[date][pixel])  # was precips.append(temp)
            vegs.append(vegetation_data[date][pixel])
        # Collapse the per-pixel lists into one input row per date.
        X.append(precips + temps + vegs)
        y.append(anomolies)
    print(str(len(X)) + " datapoints")
    print("input dimensions: " + str(len(X[0])))
    return (X, y)
def valid_date_list():
    """
    Return every date (year, month) for which GRACE, precipitation,
    temperature, and vegetation data are all loaded.
    """
    return [
        date
        for date in grace_data
        if date in precipitation_data
        and date in temperature_data
        and date in vegetation_data
    ]
def valid_pixel_list(date_list):
    """
    Return a list of pixels in the contental US with precipitation, temperature,
    vegetation, and grace data for all the given dates.

    NOTE(review): despite the docstring, only membership in grace_data is
    actually checked per date, and the US bounding-box filter is disabled
    (inbounds is hard-coded True) -- confirm both are intended.
    """
    possiblepixels = set()
    badpixels = set()
    # get all grace pixels in the continental US
    for pixel in grace_data[(2002, 4)]:
        lon = pixel[0]
        lat = pixel[1]
        inbounds = True #(lat > 26 and lat < 49 and lon > -125 and lon < -67)
        if inbounds:
            possiblepixels.add(pixel)
    # now go back and filter out pixels that aren't in all the places
    for date in date_list:
        for pixel in possiblepixels:
            in_all_sets = (pixel in grace_data[date])
            if not in_all_sets:
                badpixels.add(pixel)
    valid_pixels = possiblepixels - badpixels # pixels that are in possible but not in bad
    print(len(possiblepixels))
    print(len(badpixels))
    print(len(valid_pixels))
    return list(valid_pixels)
def get_data():
    """
    Get training/testing data from plaintext files.
    Return X, y, where y is the GRACE slope and
    X is the data we'll use to derive the GRACE data.

    Reads the module-level *_data dictionaries (populated by load_all_data)
    and builds one sample per (date, pixel) pair: inputs are 24-month
    trends and averages of precipitation, temperature and vegetation plus
    latitude; the target is the 24-month GRACE trend. Collection stops
    once max_n samples have been gathered.
    """
    X = []
    y = []
    max_n=200000  # hard cap on the number of collected samples
    print("===> Generating dataset")
    i = 0 # number of iterations
    for date in grace_data:
        for pixel in grace_data[(2004,1)]: # use a consistent list of pixels
            lat = pixel[1]
            lon = pixel[0]
            # restrict to lower asia ish region (no restriction actually applied here)
            try:
                # grace slope --> output
                grace = get_trend(pixel, date, grace_data)
                # other varialbes --> input
                # include both trend and average (over ~ 2 yrs)
                precip = get_trend(pixel, date, precipitation_data)
                temp = get_trend(pixel, date, temperature_data)
                veg = get_trend(pixel, date, vegetation_data)
                precipavg = get_average(pixel, date, precipitation_data)
                vegavg = get_average(pixel, date, vegetation_data)
                tempavg = get_average(pixel, date, temperature_data)
                if grace: # it's useless to include data without an output!
                    # NOTE(review): this truthiness test also drops samples
                    # whose trend is exactly 0 (get_trend's sparse-data
                    # sentinel) -- confirm that is intended.
                    # add to the master arrays of data
                    X.append([precip, precipavg, temp, tempavg, veg, vegavg, lat])
                    y.append(grace)
            except KeyError:
                # sometimes we won't have enough corresponding data on some of the
                # extra variables. We'll just ignore that pixel/date pair in that case.
                pass
        n = len(X)
        print("Date %s / %s | Sample %s / %s " % (i, len(grace_data), n, max_n))
        if n > max_n: # quit when we have enough samples
            break
        i+=1 # iteration counter
    print(str(len(X)) + " datapoints")
    print("input dimensions: " + str(len(X[0])))
    return (X, y)
def double_data(x_row, y_row):
    """
    Create and return an artificial dataset by adding
    gaussian noise to the given real data.

    NOTE(review): not implemented -- currently a no-op that returns None.
    """
    pass
def nearby_valid_date(desired_date, dictionary):
    """
    Find a date key in `dictionary` that falls in the same year and month
    as `desired_date`.

    Dates are (year, month, ...) tuples; only the first two elements are
    compared. Returns the first matching key (dict iteration order), or
    None when no key shares the year/month.
    """
    target = desired_date[:2]
    # Scan the dictionary's keys and stop at the first same-month match.
    return next((candidate for candidate in dictionary if candidate[:2] == target), None)
def get_prev_entry(year, month, day):
    """
    Given the date of one GRACE data file, return the (year, month, day)
    encoded in the file that precedes it chronologically, or None when
    the given date does not correspond to any GRACE file on disk.
    """
    # Alphabetical order is chronological order for these filenames.
    files = sorted(glob.glob(data_dir + "grace/GRCTellus.JPL*"))
    this_name = data_dir + "grace/GRCTellus.JPL.%04d%02d%02d.LND.RL05_1.DSTvSCS1411.txt" % (year, month, day)
    for idx, path in enumerate(files):
        if path == this_name:
            prev = files[idx - 1]
            # Slice the yyyymmdd stamp counting from the end of the path so
            # a different data_dir prefix cannot shift the offsets.
            stamp = prev[-35:-27]
            return (int(stamp[0:4]), int(stamp[4:6]), int(stamp[6:8]))
    return None
def get_data_dict(fpattern, fformat):
    """
    Load the data from a given file pattern into a dictionary.
    This dictionary will hold all the data for a given variable.
    Example input for vegetation: fpattern="vegetation/MOD13C2_EVI*", fformat="veg"
    The fformat is used to differentiate between different file naming conventions.
    Returns: {
    DATE: {PIXEL: DATA, PIXEL1: DATA1, ...},
    DATE2: {PIXEL: DATA, PIXEL1: DATA1, ...},
    }
    where DATE is a (year, month) tuple and PIXEL a (lon, lat) tuple.
    """
    d = {} # the dictionary that will hold all of our data
    files = glob.glob(data_dir + fpattern)
    files.sort() # sorting alphabetically puts files in chronological order
    # figure out the date from the filename.
    # This will depend on the naming conventions of the files, which we learn
    # from the fformat variable.
    if fformat == 'veg':
        # vegetation: year + Julian day of year in the filename
        def get_date(fname):
            regex = r'MOD13C2_EVI_([0-9]*)_([0-9]*)_monthly.csv' # year, julian day of year format
            m = re.search(regex, fname)
            year = int(m.group(1))
            day_of_year = int(m.group(2))
            date = datetime.datetime(year, 1, 1) + datetime.timedelta(day_of_year-1)
            return (date.year, date.month) # only use year and month since this is monthly data
    elif fformat == 'temp':
        # temperature: year + day of year in the filename
        def get_date(fname):
            regex = r'MOD11C3_LST_Day_CMG_([0-9]*)_([0-9]*)_monthly.csv' # year, day of year format
            m = re.search(regex, fname)
            year = int(m.group(1))
            day_of_year = int(m.group(2))
            date = datetime.datetime(year, 1, 1) + datetime.timedelta(day_of_year-1)
            return (date.year, date.month)
    elif fformat == 'precip':
        # precipitation: decimal-year date in the filename
        def get_date(fname):
            regex = r'precipitation_([\.0-9]+)' # decimal date format
            m = re.search(regex, fname)
            decidate = float(m.group(1))
            year = int(decidate)
            rem = decidate - year
            base = datetime.datetime(year, 1, 1)
            # Convert the fractional year into an offset in seconds.
            date = base + datetime.timedelta(seconds=(base.replace(year=base.year + 1) - base).total_seconds() * rem)
            return (date.year, date.month)
    elif fformat == 'pop':
        # population: yearly data only
        def get_date(fname):
            regex = r'population_density_([0-9]*)_regridded.txt'
            m = re.search(regex, fname)
            year = int(m.group(1))
            return (year, 1) # we only have population data on the year
    elif fformat == 'irr':
        # irrigation (original comment said "irritation"): yearly data only
        def get_date(fname):
            regex = r'irrigation_pct_([0-9]*).csv'
            m = re.search(regex, fname)
            year = int(m.group(1))
            return (year, 1)
    elif fformat == 'grace':
        # grace anomoly data: yyyymmdd stamp in the filename
        def get_date(fname):
            regex = r'GRCTellus.JPL.([0-9]*).LND.RL05_1.DSTvSCS1411.txt'
            m = re.search(regex, fname)
            datestring = m.group(1)
            year = int(datestring[0:4])
            month = int(datestring[4:6])
            day = int(datestring[6:8])
            return (year, month)
    else:
        print("ERROR: unrecognized file format %s" % fformat)
        return None
    for fname in files:
        date = get_date(fname) # (year, month)
        data = get_pixel_data(fname) # {(lon, lat): val, ...}
        # add an entry to the dictionary
        d[date] = data
    return d
def get_pixel_data(fname):
    """
    Parse a space-delimited data file into {(lon, lat): measurement}.

    Rows whose first column is "HDR" are header lines and are skipped.
    Columns 0, 1, and 2 are longitude, latitude, and the measurement.
    """
    pixels = {}
    with open(fname, 'r') as handle:
        for row in csv.reader(handle, delimiter=" "):
            if row[0] == "HDR":  # skip header rows
                continue
            key = (float(row[0]), float(row[1]))
            pixels[key] = float(row[2])
    return pixels
def get_veg_trend(pixel, year, month, day):
    """
    Return the 2 year vegetation trend for a given pixel
    and date. The trend should be over the N months before
    the given date.
    pixel should be a (lon, lat) touple.
    year, month, and day should be strings.

    Returns the least-squares slope of the EVI readings, or None when
    fewer than 10 monthly readings exist for the pixel.
    """
    N = 24  # number of months included in the trend
    day_of_year = datetime.datetime(int(year), int(month), int(day)).strftime("%j")
    files = glob.glob(data_dir + "vegetation/MOD13C2_EVI*")
    files.sort() # sorting alphabetically is enough b/c nice naming scheme!
    # find the vegetation data closest to the requested date
    testf = data_dir + "vegetation/MOD13C2_EVI_%s_%s_monthly.csv" % (year, day_of_year) # data for the day we'd really like
    if (testf in files):
        # this date is already exactly included!
        startf = testf
    else:
        # we need to look back a bit to find the entry closest to but before
        # the given date
        # NOTE(review): if testf sorts before every existing file this picks
        # lst[-1] (the newest file); presumably dates before the archive
        # starts are never requested -- confirm with callers.
        lst = files + [testf]
        lst.sort()
        for i in range(len(lst)):
            if lst[i] == testf:
                startf = lst[i-1]
    # get data files for the previous N months
    fnames = []
    for i in range(len(files)):
        if files[i] == startf:
            start = i
            for j in range(N):
                fnames.append(files[start-j])
    # get data for this pixel from these previous months
    lon = str(pixel[0])
    lat = str(pixel[1])
    evi = []  # EVI readings found for this pixel
    months = []  # month index (0 = most recent file) for each reading
    n = 0
    for fname in fnames:
        found = False
        with open(fname, 'r') as fh:
            reader = csv.reader(fh, delimiter=" ")
            for row in reader:
                if (row[0] == lon and row[1] == lat): # check for matching pixel
                    evi.append(float(row[2]))
                    found = True
        if found:
            months.append(n)
        n+=1
    if len(evi) < 10:
        print("no EVI data avilible for this pixel")
        return None
    # now fit a linear regression of the form y = mx+b
    # (lstsq without rcond emits a FutureWarning on modern NumPy)
    x = np.array(months)
    y = np.array(evi)
    A = np.vstack([x, np.ones(len(x))]).T
    slope, y_int = np.linalg.lstsq(A, y)[0]
    return(slope)
def get_anomoly(pixel, year, month, day):
    """
    Look up the GRACE anomaly for `pixel` on the given date.

    pixel is a (lon, lat) tuple; year, month, and day are formatted into
    the GRACE filename. Returns the anomaly as a float, or None when the
    pixel is not present in that file.
    """
    fname = data_dir + "grace/GRCTellus.JPL.%04d%02d%02d.LND.RL05_1.DSTvSCS1411.txt" % (year, month, day)
    lon, lat = str(pixel[0]), str(pixel[1])
    with open(fname, 'r') as fh:
        for row in csv.reader(fh, delimiter=" "):
            if row[0] == lon and row[1] == lat:
                # Found the matching pixel row.
                return float(row[2])
    return None
def get_irrigation_level(pixel):
    """
    Return the 2013 percentage of land equipped for irrigation at `pixel`.

    Pixels absent from the (sparse) irrigation file are treated as 0.0
    rather than missing, so that they are not pruned from the dataset.
    """
    fname = data_dir + "irrigation/irrigation_pct_2013.csv"
    lon, lat = str(pixel[0]), str(pixel[1])
    with open(fname, 'r') as fh:
        for row in csv.reader(fh, delimiter=" "):
            if row[0] == lon and row[1] == lat:
                print("irrigation found!")
                return float(row[2])
    # No entry for this pixel: assume zero irrigation.
    return 0.0
def get_precip_trend(pixel, year, month, day):
    """
    Return the precipitation trend for a given pixel
    and date. The trend should be over the N months before
    the given date.
    pixel should be a (lon, lat) touple.
    year, month, and day should be strings.

    Returns the least-squares slope of the precipitation readings, or
    None when fewer than 10 monthly readings exist for the pixel.
    """
    N = 24  # number of months included in the trend
    # Truncate the decimal year to match the precipitation filename stamps.
    decidate = str(toYearFraction(datetime.datetime(int(year), int(month), int(day))))[0:8]
    files = glob.glob(data_dir + "precipitation/precipitation_20*")
    files.sort() # sorting alphabetically is enough b/c nice naming scheme!
    # find the precipitation data closest to the requested date
    testf = data_dir + "precipitation/precipitation_%s" % (decidate) # data for the day we'd really like
    if (testf in files):
        # this date is already exactly included!
        startf = testf
    else:
        # we need to look back a bit to find the entry closest to but before
        # the given date
        lst = files + [testf]
        lst.sort()
        for i in range(len(lst)):
            if lst[i] == testf:
                startf = lst[i-1]
    # get data files for the previous N months
    fnames = []
    for i in range(len(files)):
        if files[i] == startf:
            start = i
            for j in range(N):
                fnames.append(files[start-j])
    # get data for this pixel from these previous months
    lon = str(pixel[0])
    lat = str(pixel[1])
    precip_pct = []  # readings found for this pixel
    months = []  # month index (0 = most recent file) for each reading
    n = 0
    for fname in fnames:
        found = False
        with open(fname, 'r') as fh:
            reader = csv.reader(fh, delimiter=" ")
            for row in reader:
                if (row[0] == lon and row[1] == lat): # check for matching pixel
                    precip_pct.append(float(row[2]))
                    found = True
        if found:
            months.append(n)
        n+=1
    if len(precip_pct) < 10:
        print("no precipitation data avilible for this pixel")
        return None
    # now fit a linear regression of the form y = mx+b
    # (lstsq without rcond emits a FutureWarning on modern NumPy)
    x = np.array(months)
    y = np.array(precip_pct)
    A = np.vstack([x, np.ones(len(x))]).T
    slope, y_int = np.linalg.lstsq(A, y)[0]
    return(slope)
def get_trend(pixel, date, dataset):
    """
    Return the N-month linear trend (slope) of `dataset` at `pixel`.

    dataset maps (year, month) -> {(lon, lat): value}. Starting at `date`
    and walking backwards one month at a time, up to N=24 readings are
    collected; months with no reading for this pixel are skipped. If more
    than 15 of the 24 months are missing, 0 is returned instead of
    fitting a meaningless line.
    """
    N = 24
    vals = []    # readings found for this pixel
    months = []  # month index (0 = `date`, increasing into the past)
    n = 0
    bad_cnt = 0
    # generate lists of month numbers and values
    for i in range(N):
        try:
            vals.append(dataset[date][pixel])
            months.append(n)
        except KeyError:
            bad_cnt += 1 # ignore months with no value for this pixel
        n+=1
        date = previous_month(date)
    if bad_cnt > 15:
        return 0 # too few datapoints for a meaningful trend
    # Fit y = m*x + b by least squares. rcond=None opts into the modern
    # NumPy cutoff for small singular values and silences the
    # FutureWarning emitted by the bare lstsq call.
    x = np.array(months)
    y = np.array(vals)
    A = np.vstack([x, np.ones(len(x))]).T
    slope, y_int = np.linalg.lstsq(A, y, rcond=None)[0]
    return slope
def get_average(pixel, date, dataset):
    """
    Return the mean of up to N=24 monthly `dataset` readings at `pixel`,
    walking backwards from `date`. Months without a reading for this
    pixel are skipped; if more than 15 of the 24 are missing, 0 is
    returned instead of an average over too few points.
    """
    N = 24
    samples = []
    missing = 0
    for _ in range(N):
        month_data = dataset.get(date)
        if month_data is not None and pixel in month_data:
            samples.append(month_data[pixel])
        else:
            missing += 1  # no reading for this pixel in this month
        date = previous_month(date)
    if missing > 15:
        return 0  # too sparse to average meaningfully
    return np.average(samples)
def previous_month(date):
    """
    Return the (year, month) tuple one month before `date`.

    `date` is a (year, month) tuple (extra trailing elements are
    ignored). January rolls back to December of the prior year.
    """
    year, month = date[0], date[1]
    if month == 1:
        # Wrap from January back to December of the previous year.
        return (year - 1, 12)
    return (year, month - 1)
def get_temperature_trend(pixel, date):
    """
    Placeholder for the 2-year temperature trend at `pixel`.

    pixel is a (lon, lat) tuple; date is a (year, month) tuple. The
    implementation was never finished: it prints the temperature for the
    given pixel/date and returns None. The regression code that followed
    the bare `return` was unreachable (and referenced the undefined names
    `months` and `temp`), so it has been removed.
    """
    this_temp = temperature_data[date][pixel]
    print(this_temp)
    return None
def random_valid_pixel(pixel_list):
    """
    Randomly select a pixel that will yield valid training data.
    This means that the given pixel
    1. Must exist for the given date
    2. Must exist in the previous 24 months
    3. Must not be in pixel_list
    Return a tuple of pixel, year, month, day
    """
    files = glob.glob(data_dir + "grace/GRCTellus.JPL*")
    files.sort() # sorting alphabetically is enough b/c nice naming scheme!
    files = files[24:] # remove the first 24 months since there won't be enough data before these
    startfile = files[random.randint(0,len(files)-1)] # choose a random month
    # choose a random pixel
    # Count the file's lines so a random data row can be chosen below.
    with open(startfile) as f:
        for i, l in enumerate(f):
            pass
        num_lines = i
    header_lines = 22
    pixel_line = random.randint(header_lines, num_lines)
    # get the value of that pixel
    with open(startfile) as f:
        reader = csv.reader(f, delimiter=" ")
        for i, row in enumerate(reader):
            if (i == pixel_line):
                pixel = (float(row[0]), float(row[1]))
    # make sure the pixel isn't already in our list
    # NOTE(review): rejection sampling via recursion -- a large pixel_list
    # relative to the pixel pool could exhaust Python's recursion limit;
    # confirm callers keep the list small.
    if pixel in pixel_list:
        # this pixel is already in our list
        #print("pixel already chosen. picking a new one")
        return random_valid_pixel(pixel_list)
    yyyymmdd = startfile[-35:-27] # looking backwards from the end in case data_dir changes
    year = int(yyyymmdd[0:4])
    month = int(yyyymmdd[4:6])
    day = int(yyyymmdd[6:8])
    # make sure that pixel exists for the previous 24 months
    y, m, d = (year, month, day)
    for i in range(24):
        y, m, d = get_prev_entry(y, m, d)
        if not exists(pixel, y, m, d):
            # one of the previous months doesn't have our given pixel
            # So do we give up? No. We try again
            #print("Found invalid pixel. Trying again")
            return random_valid_pixel(pixel_list)
    return (pixel, year, month, day)
def exists(pixel, year, month, day):
    """
    Check whether the GRACE file for the given date exists and contains
    the given (lon, lat) pixel. Returns True or False.
    """
    fname = data_dir + "grace/GRCTellus.JPL.%04d%02d%02d.LND.RL05_1.DSTvSCS1411.txt" % (year, month, day)
    lon = str(pixel[0])
    lat = str(pixel[1])
    try:
        with open(fname, 'r') as fh:
            reader = csv.reader(fh, delimiter=" ")
            for row in reader:
                if (row[0] == lon and row[1] == lat): # check for matching pixel
                    return True # found the pixel!
            return False
    except (OSError, IndexError):
        # A missing/unreadable file (probably a bad date) or a malformed
        # row maps to "does not exist". The original bare `except:` also
        # swallowed KeyboardInterrupt/SystemExit, which it should not.
        return False
def toYearFraction(date):
    """
    Convert a datetime into a decimal year (e.g. 2004-07-02 -> ~2004.5).

    The fraction is the share of the year's seconds that have elapsed
    before `date`, measured via time.mktime (i.e. in local time, matching
    how the precipitation filenames were generated).
    """
    def seconds_since_epoch(moment):
        return time.mktime(moment.timetuple())

    year_start = datetime.datetime(date.year, 1, 1)
    next_year_start = datetime.datetime(date.year + 1, 1, 1)
    elapsed = seconds_since_epoch(date) - seconds_since_epoch(year_start)
    duration = seconds_since_epoch(next_year_start) - seconds_since_epoch(year_start)
    return date.year + elapsed / duration
def save_validation_data():
    """
    Save grace and input data in a csv file.
    format:
    LON LAT GRACESLOPE PRECIP TEMP VEG PRECIPAVG TEMPAVG VEGAVG

    Writes validation.csv (space-delimited, one "HDR" header row) for the
    fixed date (2016, 1), iterating the pixel list of the (2004, 1) GRACE
    grid.
    """
    X = [] # input vars
    y = [] # grace
    # NOTE(review): X and y are never filled below -- only the csv is
    # written; these look like leftovers from get_data().
    with open('validation.csv', 'w') as fh:
        writer = csv.writer(fh, delimiter=' ')
        writer.writerow(["HDR","long","lat","grace","precip","temp","veg","precipavg","tempavg","vegavg"])
        date = (2016,1)
        for pixel in grace_data[(2004,1)]: # use a consistent list of pixels
            lat = pixel[1]
            lon = pixel[0]
            try:
                # grace slope --> output
                grace = get_trend(pixel, date, grace_data)
                # other varialbes --> input
                # include both trend and average (over ~ 2 yrs)
                precip = get_trend(pixel, date, precipitation_data)
                temp = get_trend(pixel, date, temperature_data)
                veg = get_trend(pixel, date, vegetation_data)
                precipavg = get_average(pixel, date, precipitation_data)
                vegavg = get_average(pixel, date, vegetation_data)
                tempavg = get_average(pixel, date, temperature_data)
                writer.writerow([lon, lat, grace, precip, temp, veg, precipavg, tempavg, vegavg])
            except KeyError:
                # sometimes we won't have enough corresponding data on some of the
                # extra variables. We'll just ignore that pixel/date pair in that case.
                pass
def main():
    """Split the dataset 90/10 into train/test sets and dump both to JSON."""
    X, y = get_data()
    # Hold out the last 10% of the samples for testing.  Compute the split
    # index explicitly: the previous X[0:-n_test] form breaks when
    # n_test == 0 (fewer than 10 samples), because X[0:-0] is empty and
    # X[-0:] is the whole list.
    n_test = int(len(y)*0.10)
    split = len(y) - n_test
    X_train = X[:split]
    y_train = y[:split]
    X_test = X[split:]
    y_test = y[split:]
    print("\n===> Saving Data to json")
    # save training data in json format
    train_dct = {"y":y_train, "X":X_train}
    with open('training_data.json', 'w') as f:
        json.dump(train_dct, f, indent=2)
    # save testing data in json format
    test_dct = {"y":y_test, "X":X_test}
    with open('testing_data.json', 'w') as f:
        json.dump(test_dct, f, indent=2)
# Script entry point: populate the module-level datasets, then build the
# train/test JSON files.
if __name__=="__main__":
    load_all_data() # do this first since many functions reference global vars
    main()
| 2,895 | 0 | 228 |
8b51079e178a0d8d86e85b0424d64cfac8f46bb3 | 1,414 | py | Python | template/config/train.py | penguinmenac3/deeptech-template | 63df98f9ff69ab0dbbb0e38287810928c4173b11 | [
"MIT"
] | null | null | null | template/config/train.py | penguinmenac3/deeptech-template | 63df98f9ff69ab0dbbb0e38287810928c4173b11 | [
"MIT"
] | null | null | null | template/config/train.py | penguinmenac3/deeptech-template | 63df98f9ff69ab0dbbb0e38287810928c4173b11 | [
"MIT"
] | null | null | null | """doc
# Train Config
This is the main configuration file used for training the approach.
"""
import os
from deeptech.core import Config, cli
from deeptech.model.module_from_json import Module
from deeptech.training.trainers import SupervisedTrainer
from deeptech.training.optimizers import smart_optimizer
from torch.optim import SGD
from ..data.dataset import FashionMNISTDataset
from ..training.loss import SparseCrossEntropyLossFromLogits
# Run with parameters parsed from commandline.
# python -m deeptech.examples.mnist_simple --mode=train --input=Datasets --output=Results
if __name__ == "__main__":
cli.run(FashionMNISTConfig)
| 36.25641 | 113 | 0.755304 | """doc
# Train Config
This is the main configuration file used for training the approach.
"""
import os
from deeptech.core import Config, cli
from deeptech.model.module_from_json import Module
from deeptech.training.trainers import SupervisedTrainer
from deeptech.training.optimizers import smart_optimizer
from torch.optim import SGD
from ..data.dataset import FashionMNISTDataset
from ..training.loss import SparseCrossEntropyLossFromLogits
class FashionMNISTConfig(Config):
    """Deeptech training configuration for the FashionMNIST example."""

    def __init__(self, training_name, data_path, training_results_path):
        super().__init__(training_name, data_path, training_results_path)

        # Dataset used for training/validation.
        self.data_dataset = FashionMNISTDataset

        # Model: instantiated lazily from the JSON architecture file that
        # lives next to this package (../model/mnist_model.json).
        model_def = os.path.join(os.path.dirname(__file__), "..", "model", "mnist_model.json")
        self.model_model = lambda: Module.create_from_file(model_def, "MNISTModel", num_classes=10, logits=True)

        # Training setup: loss, optimizer, trainer and schedule.
        self.training_loss = SparseCrossEntropyLossFromLogits
        self.training_optimizer = smart_optimizer(SGD)
        self.training_trainer = SupervisedTrainer
        self.training_epochs = 10
        self.training_batch_size = 32
# Run with parameters parsed from commandline.
# python -m deeptech.examples.mnist_simple --mode=train --input=Datasets --output=Results
if __name__ == "__main__":
cli.run(FashionMNISTConfig)
| 709 | 12 | 49 |
0d6c2a22e6d4c282df65ea5887d039eeeed9275c | 12,871 | py | Python | .tox/scenario/lib/python2.7/site-packages/psutil/_psbsd.py | bdrich/neutron-lbaas | b4711abfe0207c4fdd5d7fb7ecbf017e753abbfd | [
"Apache-2.0"
] | null | null | null | .tox/scenario/lib/python2.7/site-packages/psutil/_psbsd.py | bdrich/neutron-lbaas | b4711abfe0207c4fdd5d7fb7ecbf017e753abbfd | [
"Apache-2.0"
] | null | null | null | .tox/scenario/lib/python2.7/site-packages/psutil/_psbsd.py | bdrich/neutron-lbaas | b4711abfe0207c4fdd5d7fb7ecbf017e753abbfd | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""FreeBSD platform implementation."""
import errno
import os
import sys
import warnings
import _psutil_bsd
import _psutil_posix
from psutil import _psposix
from psutil._common import *
from psutil._compat import namedtuple, wraps
from psutil._error import AccessDenied, NoSuchProcess, TimeoutExpired
__extra__all__ = []
# --- constants
# Since these constants get determined at import time we do not want to
# crash immediately; instead we'll set them to None and most likely
# we'll crash later as they're used for determining process CPU stats
# and creation_time
try:
NUM_CPUS = _psutil_bsd.get_num_cpus()
except Exception:
NUM_CPUS = None
warnings.warn("couldn't determine platform's NUM_CPUS", RuntimeWarning)
try:
TOTAL_PHYMEM = _psutil_bsd.get_virtual_mem()[0]
except Exception:
TOTAL_PHYMEM = None
warnings.warn("couldn't determine platform's TOTAL_PHYMEM", RuntimeWarning)
try:
BOOT_TIME = _psutil_bsd.get_system_boot_time()
except Exception:
BOOT_TIME = None
warnings.warn("couldn't determine platform's BOOT_TIME", RuntimeWarning)
PROC_STATUSES = {
_psutil_bsd.SSTOP: STATUS_STOPPED,
_psutil_bsd.SSLEEP: STATUS_SLEEPING,
_psutil_bsd.SRUN: STATUS_RUNNING,
_psutil_bsd.SIDL: STATUS_IDLE,
_psutil_bsd.SWAIT: STATUS_WAITING,
_psutil_bsd.SLOCK: STATUS_LOCKED,
_psutil_bsd.SZOMB: STATUS_ZOMBIE,
}
TCP_STATUSES = {
_psutil_bsd.TCPS_ESTABLISHED: CONN_ESTABLISHED,
_psutil_bsd.TCPS_SYN_SENT: CONN_SYN_SENT,
_psutil_bsd.TCPS_SYN_RECEIVED: CONN_SYN_RECV,
_psutil_bsd.TCPS_FIN_WAIT_1: CONN_FIN_WAIT1,
_psutil_bsd.TCPS_FIN_WAIT_2: CONN_FIN_WAIT2,
_psutil_bsd.TCPS_TIME_WAIT: CONN_TIME_WAIT,
_psutil_bsd.TCPS_CLOSED: CONN_CLOSE,
_psutil_bsd.TCPS_CLOSE_WAIT: CONN_CLOSE_WAIT,
_psutil_bsd.TCPS_LAST_ACK: CONN_LAST_ACK,
_psutil_bsd.TCPS_LISTEN: CONN_LISTEN,
_psutil_bsd.TCPS_CLOSING: CONN_CLOSING,
_psutil_bsd.PSUTIL_CONN_NONE: CONN_NONE,
}
PAGESIZE = os.sysconf("SC_PAGE_SIZE")
nt_virtmem_info = namedtuple('vmem', ' '.join([
# all platforms
'total', 'available', 'percent', 'used', 'free',
# FreeBSD specific
'active',
'inactive',
'buffers',
'cached',
'shared',
'wired']))
def virtual_memory():
"""System virtual memory as a namedutple."""
mem = _psutil_bsd.get_virtual_mem()
total, free, active, inactive, wired, cached, buffers, shared = mem
avail = inactive + cached + free
used = active + wired + cached
percent = usage_percent((total - avail), total, _round=1)
return nt_virtmem_info(total, avail, percent, used, free,
active, inactive, buffers, cached, shared, wired)
def swap_memory():
"""System swap memory as (total, used, free, sin, sout) namedtuple."""
total, used, free, sin, sout = \
[x * PAGESIZE for x in _psutil_bsd.get_swap_mem()]
percent = usage_percent(used, total, _round=1)
return nt_swapmeminfo(total, used, free, percent, sin, sout)
_cputimes_ntuple = namedtuple('cputimes', 'user nice system idle irq')
def get_system_cpu_times():
"""Return system per-CPU times as a named tuple"""
user, nice, system, idle, irq = _psutil_bsd.get_system_cpu_times()
return _cputimes_ntuple(user, nice, system, idle, irq)
def get_system_per_cpu_times():
"""Return system CPU times as a named tuple"""
ret = []
for cpu_t in _psutil_bsd.get_system_per_cpu_times():
user, nice, system, idle, irq = cpu_t
item = _cputimes_ntuple(user, nice, system, idle, irq)
ret.append(item)
return ret
# XXX
# Ok, this is very dirty.
# On FreeBSD < 8 we cannot gather per-cpu information, see:
# http://code.google.com/p/psutil/issues/detail?id=226
# If NUM_CPUS > 1, on first call we return single cpu times to avoid a
# crash at psutil import time.
# Next calls will fail with NotImplementedError
if not hasattr(_psutil_bsd, "get_system_per_cpu_times"):
get_system_per_cpu_times.__called__ = False
get_pid_list = _psutil_bsd.get_pid_list
pid_exists = _psposix.pid_exists
get_disk_usage = _psposix.get_disk_usage
net_io_counters = _psutil_bsd.get_net_io_counters
disk_io_counters = _psutil_bsd.get_disk_io_counters
# not public; it's here because we need to test it from test_memory_leask.py
get_num_cpus = _psutil_bsd.get_num_cpus()
get_system_boot_time = _psutil_bsd.get_system_boot_time
def wrap_exceptions(fun):
"""Decorator which translates bare OSError exceptions into
NoSuchProcess and AccessDenied.
"""
@wraps(fun)
return wrapper
class Process(object):
"""Wrapper class around underlying C implementation."""
__slots__ = ["pid", "_process_name"]
@wrap_exceptions
def get_process_name(self):
"""Return process name as a string of limited len (15)."""
return _psutil_bsd.get_process_name(self.pid)
@wrap_exceptions
def get_process_exe(self):
"""Return process executable pathname."""
return _psutil_bsd.get_process_exe(self.pid)
@wrap_exceptions
def get_process_cmdline(self):
"""Return process cmdline as a list of arguments."""
return _psutil_bsd.get_process_cmdline(self.pid)
@wrap_exceptions
@wrap_exceptions
def get_process_ppid(self):
"""Return process parent pid."""
return _psutil_bsd.get_process_ppid(self.pid)
# XXX - available on FreeBSD >= 8 only
if hasattr(_psutil_bsd, "get_process_cwd"):
@wrap_exceptions
def get_process_cwd(self):
"""Return process current working directory."""
# sometimes we get an empty string, in which case we turn
# it into None
return _psutil_bsd.get_process_cwd(self.pid) or None
@wrap_exceptions
def get_process_uids(self):
"""Return real, effective and saved user ids."""
real, effective, saved = _psutil_bsd.get_process_uids(self.pid)
return nt_uids(real, effective, saved)
@wrap_exceptions
def get_process_gids(self):
"""Return real, effective and saved group ids."""
real, effective, saved = _psutil_bsd.get_process_gids(self.pid)
return nt_gids(real, effective, saved)
@wrap_exceptions
def get_cpu_times(self):
"""return a tuple containing process user/kernel time."""
user, system = _psutil_bsd.get_process_cpu_times(self.pid)
return nt_cputimes(user, system)
@wrap_exceptions
def get_memory_info(self):
"""Return a tuple with the process' RSS and VMS size."""
rss, vms = _psutil_bsd.get_process_memory_info(self.pid)[:2]
return nt_meminfo(rss, vms)
_nt_ext_mem = namedtuple('meminfo', 'rss vms text data stack')
@wrap_exceptions
@wrap_exceptions
def get_process_create_time(self):
"""Return the start time of the process as a number of seconds since
the epoch."""
return _psutil_bsd.get_process_create_time(self.pid)
@wrap_exceptions
def get_process_num_threads(self):
"""Return the number of threads belonging to the process."""
return _psutil_bsd.get_process_num_threads(self.pid)
@wrap_exceptions
@wrap_exceptions
def get_num_fds(self):
"""Return the number of file descriptors opened by this process."""
return _psutil_bsd.get_process_num_fds(self.pid)
@wrap_exceptions
def get_process_threads(self):
"""Return the number of threads belonging to the process."""
rawlist = _psutil_bsd.get_process_threads(self.pid)
retlist = []
for thread_id, utime, stime in rawlist:
ntuple = nt_thread(thread_id, utime, stime)
retlist.append(ntuple)
return retlist
@wrap_exceptions
def get_open_files(self):
"""Return files opened by process as a list of namedtuples."""
# XXX - C implementation available on FreeBSD >= 8 only
# else fallback on lsof parser
if hasattr(_psutil_bsd, "get_process_open_files"):
rawlist = _psutil_bsd.get_process_open_files(self.pid)
return [nt_openfile(path, fd) for path, fd in rawlist]
else:
lsof = _psposix.LsofParser(self.pid, self._process_name)
return lsof.get_process_open_files()
@wrap_exceptions
def get_connections(self, kind='inet'):
"""Return etwork connections opened by a process as a list of
namedtuples.
"""
if kind not in conn_tmap:
raise ValueError("invalid %r kind argument; choose between %s"
% (kind, ', '.join([repr(x) for x in conn_tmap])))
families, types = conn_tmap[kind]
rawlist = _psutil_bsd.get_process_connections(self.pid, families, types)
ret = []
for item in rawlist:
fd, fam, type, laddr, raddr, status = item
status = TCP_STATUSES[status]
nt = nt_connection(fd, fam, type, laddr, raddr, status)
ret.append(nt)
return ret
@wrap_exceptions
@wrap_exceptions
@wrap_exceptions
@wrap_exceptions
@wrap_exceptions
nt_mmap_grouped = namedtuple(
'mmap', 'path rss, private, ref_count, shadow_count')
nt_mmap_ext = namedtuple(
'mmap', 'addr, perms path rss, private, ref_count, shadow_count')
@wrap_exceptions
# FreeBSD < 8 does not support kinfo_getfile() and kinfo_getvmmap()
if not hasattr(_psutil_bsd, 'get_process_open_files'):
get_open_files = _not_implemented
get_process_cwd = _not_implemented
get_memory_maps = _not_implemented
get_num_fds = _not_implemented
| 34.050265 | 80 | 0.681688 | #!/usr/bin/env python
# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""FreeBSD platform implementation."""
import errno
import os
import sys
import warnings
import _psutil_bsd
import _psutil_posix
from psutil import _psposix
from psutil._common import *
from psutil._compat import namedtuple, wraps
from psutil._error import AccessDenied, NoSuchProcess, TimeoutExpired
__extra__all__ = []
# --- constants
# Since these constants get determined at import time we do not want to
# crash immediately; instead we'll set them to None and most likely
# we'll crash later as they're used for determining process CPU stats
# and creation_time
try:
NUM_CPUS = _psutil_bsd.get_num_cpus()
except Exception:
NUM_CPUS = None
warnings.warn("couldn't determine platform's NUM_CPUS", RuntimeWarning)
try:
TOTAL_PHYMEM = _psutil_bsd.get_virtual_mem()[0]
except Exception:
TOTAL_PHYMEM = None
warnings.warn("couldn't determine platform's TOTAL_PHYMEM", RuntimeWarning)
try:
BOOT_TIME = _psutil_bsd.get_system_boot_time()
except Exception:
BOOT_TIME = None
warnings.warn("couldn't determine platform's BOOT_TIME", RuntimeWarning)
PROC_STATUSES = {
_psutil_bsd.SSTOP: STATUS_STOPPED,
_psutil_bsd.SSLEEP: STATUS_SLEEPING,
_psutil_bsd.SRUN: STATUS_RUNNING,
_psutil_bsd.SIDL: STATUS_IDLE,
_psutil_bsd.SWAIT: STATUS_WAITING,
_psutil_bsd.SLOCK: STATUS_LOCKED,
_psutil_bsd.SZOMB: STATUS_ZOMBIE,
}
TCP_STATUSES = {
_psutil_bsd.TCPS_ESTABLISHED: CONN_ESTABLISHED,
_psutil_bsd.TCPS_SYN_SENT: CONN_SYN_SENT,
_psutil_bsd.TCPS_SYN_RECEIVED: CONN_SYN_RECV,
_psutil_bsd.TCPS_FIN_WAIT_1: CONN_FIN_WAIT1,
_psutil_bsd.TCPS_FIN_WAIT_2: CONN_FIN_WAIT2,
_psutil_bsd.TCPS_TIME_WAIT: CONN_TIME_WAIT,
_psutil_bsd.TCPS_CLOSED: CONN_CLOSE,
_psutil_bsd.TCPS_CLOSE_WAIT: CONN_CLOSE_WAIT,
_psutil_bsd.TCPS_LAST_ACK: CONN_LAST_ACK,
_psutil_bsd.TCPS_LISTEN: CONN_LISTEN,
_psutil_bsd.TCPS_CLOSING: CONN_CLOSING,
_psutil_bsd.PSUTIL_CONN_NONE: CONN_NONE,
}
PAGESIZE = os.sysconf("SC_PAGE_SIZE")
nt_virtmem_info = namedtuple('vmem', ' '.join([
# all platforms
'total', 'available', 'percent', 'used', 'free',
# FreeBSD specific
'active',
'inactive',
'buffers',
'cached',
'shared',
'wired']))
def virtual_memory():
    """Return system virtual memory stats as a namedtuple."""
    total, free, active, inactive, wired, cached, buffers, shared = \
        _psutil_bsd.get_virtual_mem()
    # "available" = memory reclaimable without swapping
    avail = free + inactive + cached
    used = active + wired + cached
    percent = usage_percent((total - avail), total, _round=1)
    return nt_virtmem_info(total, avail, percent, used, free,
                           active, inactive, buffers, cached, shared, wired)
def swap_memory():
    """Return swap stats as a (total, used, free, percent, sin, sout) namedtuple."""
    # the C layer reports values in pages; convert to bytes
    total, used, free, sin, sout = \
        [n * PAGESIZE for n in _psutil_bsd.get_swap_mem()]
    percent = usage_percent(used, total, _round=1)
    return nt_swapmeminfo(total, used, free, percent, sin, sout)
_cputimes_ntuple = namedtuple('cputimes', 'user nice system idle irq')
def get_system_cpu_times():
    """Return aggregate system CPU times as a named tuple."""
    return _cputimes_ntuple(*_psutil_bsd.get_system_cpu_times())
def get_system_per_cpu_times():
    """Return a list of named tuples with per-CPU times."""
    return [_cputimes_ntuple(user, nice, system, idle, irq)
            for user, nice, system, idle, irq
            in _psutil_bsd.get_system_per_cpu_times()]
# XXX
# Ok, this is very dirty.
# On FreeBSD < 8 we cannot gather per-cpu information, see:
# http://code.google.com/p/psutil/issues/detail?id=226
# If NUM_CPUS > 1, on first call we return single cpu times to avoid a
# crash at psutil import time.
# Next calls will fail with NotImplementedError
if not hasattr(_psutil_bsd, "get_system_per_cpu_times"):
    def get_system_per_cpu_times():
        """Fallback for FreeBSD < 8, where per-CPU info is unavailable."""
        if NUM_CPUS == 1:
            # With a single CPU the aggregate times ARE the per-CPU times.
            # BUG FIX: call the function; the previous code returned the
            # function object itself instead of its result.
            return [get_system_cpu_times()]
        if get_system_per_cpu_times.__called__:
            raise NotImplementedError("supported only starting from FreeBSD 8")
        # First call: return aggregate times once so importing psutil does
        # not crash; every later call raises NotImplementedError.
        get_system_per_cpu_times.__called__ = True
        return [get_system_cpu_times()]
    get_system_per_cpu_times.__called__ = False
def disk_partitions(all=False):
    """Return mounted disk partitions as a list of namedtuples.

    When *all* is False, entries whose device is not an absolute,
    existing path (pseudo/virtual filesystems) are skipped.
    (NB: parameter name 'all' shadows the builtin but is part of the
    public psutil interface and cannot be renamed.)
    """
    retlist = []
    for device, mountpoint, fstype, opts in _psutil_bsd.get_disk_partitions():
        if device == 'none':
            device = ''
        if not all and (not os.path.isabs(device) or not os.path.exists(device)):
            continue
        retlist.append(nt_partition(device, mountpoint, fstype, opts))
    return retlist
def get_system_users():
    """Return currently logged-in users as a list of namedtuples."""
    retlist = []
    for user, tty, hostname, tstamp in _psutil_bsd.get_system_users():
        # a '~' tty marks a reboot/shutdown pseudo entry; skip it
        if tty == '~':
            continue
        retlist.append(nt_user(user, tty or None, hostname, tstamp))
    return retlist
get_pid_list = _psutil_bsd.get_pid_list
pid_exists = _psposix.pid_exists
get_disk_usage = _psposix.get_disk_usage
net_io_counters = _psutil_bsd.get_net_io_counters
disk_io_counters = _psutil_bsd.get_disk_io_counters
# not public; it's here because we need to test it from test_memory_leask.py
get_num_cpus = _psutil_bsd.get_num_cpus()
get_system_boot_time = _psutil_bsd.get_system_boot_time
def wrap_exceptions(fun):
    """Decorator which translates bare OSError exceptions into
    NoSuchProcess and AccessDenied; any other OSError is re-raised.
    """
    @wraps(fun)
    def inner(self, *args, **kwargs):
        try:
            return fun(self, *args, **kwargs)
        except OSError:
            # sys.exc_info() keeps this compatible with both py2 and py3
            err = sys.exc_info()[1]
            if err.errno == errno.ESRCH:
                raise NoSuchProcess(self.pid, self._process_name)
            if err.errno in (errno.EPERM, errno.EACCES):
                raise AccessDenied(self.pid, self._process_name)
            raise
    return inner
class Process(object):
    """Wrapper class around underlying C implementation."""
    # slots: one instance per inspected process; avoids a per-instance dict
    __slots__ = ["pid", "_process_name"]
    def __init__(self, pid):
        """Bind this wrapper to *pid*.

        _process_name stays None here; it is only read by wrap_exceptions
        when building NoSuchProcess/AccessDenied messages, and is
        presumably filled in by the higher-level psutil wrapper.
        """
        self.pid = pid
        self._process_name = None
    @wrap_exceptions
    def get_process_name(self):
        """Return process name as a string of limited len (15)."""
        return _psutil_bsd.get_process_name(self.pid)
    @wrap_exceptions
    def get_process_exe(self):
        """Return process executable pathname."""
        return _psutil_bsd.get_process_exe(self.pid)
    @wrap_exceptions
    def get_process_cmdline(self):
        """Return process cmdline as a list of arguments."""
        return _psutil_bsd.get_process_cmdline(self.pid)
    @wrap_exceptions
    def get_process_terminal(self):
        """Return the terminal device associated with the process,
        or None if the tty number has no mapping."""
        tty_nr = _psutil_bsd.get_process_tty_nr(self.pid)
        tmap = _psposix._get_terminal_map()
        try:
            return tmap[tty_nr]
        except KeyError:
            return None
    @wrap_exceptions
    def get_process_ppid(self):
        """Return process parent pid."""
        return _psutil_bsd.get_process_ppid(self.pid)
    # XXX - available on FreeBSD >= 8 only
    if hasattr(_psutil_bsd, "get_process_cwd"):
        @wrap_exceptions
        def get_process_cwd(self):
            """Return process current working directory."""
            # sometimes we get an empty string, in which case we turn
            # it into None
            return _psutil_bsd.get_process_cwd(self.pid) or None
    @wrap_exceptions
    def get_process_uids(self):
        """Return real, effective and saved user ids."""
        real, effective, saved = _psutil_bsd.get_process_uids(self.pid)
        return nt_uids(real, effective, saved)
    @wrap_exceptions
    def get_process_gids(self):
        """Return real, effective and saved group ids."""
        real, effective, saved = _psutil_bsd.get_process_gids(self.pid)
        return nt_gids(real, effective, saved)
    @wrap_exceptions
    def get_cpu_times(self):
        """Return a tuple containing process user/kernel time."""
        user, system = _psutil_bsd.get_process_cpu_times(self.pid)
        return nt_cputimes(user, system)
    @wrap_exceptions
    def get_memory_info(self):
        """Return a tuple with the process' RSS and VMS size."""
        rss, vms = _psutil_bsd.get_process_memory_info(self.pid)[:2]
        return nt_meminfo(rss, vms)
    # extended memory layout returned by get_ext_memory_info()
    _nt_ext_mem = namedtuple('meminfo', 'rss vms text data stack')
    @wrap_exceptions
    def get_ext_memory_info(self):
        """Return extended memory info:
        (rss, vms, text, data, stack) namedtuple."""
        return self._nt_ext_mem(*_psutil_bsd.get_process_memory_info(self.pid))
    @wrap_exceptions
    def get_process_create_time(self):
        """Return the start time of the process as a number of seconds since
        the epoch."""
        return _psutil_bsd.get_process_create_time(self.pid)
    @wrap_exceptions
    def get_process_num_threads(self):
        """Return the number of threads belonging to the process."""
        return _psutil_bsd.get_process_num_threads(self.pid)
    @wrap_exceptions
    def get_num_ctx_switches(self):
        """Return the context switches performed by the process
        as a namedtuple."""
        return nt_ctxsw(*_psutil_bsd.get_process_num_ctx_switches(self.pid))
    @wrap_exceptions
    def get_num_fds(self):
        """Return the number of file descriptors opened by this process."""
        return _psutil_bsd.get_process_num_fds(self.pid)
    @wrap_exceptions
    def get_process_threads(self):
        """Return the threads belonging to the process as a list of
        (thread_id, utime, stime) namedtuples."""
        rawlist = _psutil_bsd.get_process_threads(self.pid)
        retlist = []
        for thread_id, utime, stime in rawlist:
            ntuple = nt_thread(thread_id, utime, stime)
            retlist.append(ntuple)
        return retlist
    @wrap_exceptions
    def get_open_files(self):
        """Return files opened by process as a list of namedtuples."""
        # XXX - C implementation available on FreeBSD >= 8 only
        # else fallback on lsof parser
        if hasattr(_psutil_bsd, "get_process_open_files"):
            rawlist = _psutil_bsd.get_process_open_files(self.pid)
            return [nt_openfile(path, fd) for path, fd in rawlist]
        else:
            lsof = _psposix.LsofParser(self.pid, self._process_name)
            return lsof.get_process_open_files()
    @wrap_exceptions
    def get_connections(self, kind='inet'):
        """Return network connections opened by a process as a list of
        namedtuples.
        """
        if kind not in conn_tmap:
            raise ValueError("invalid %r kind argument; choose between %s"
                             % (kind, ', '.join([repr(x) for x in conn_tmap])))
        families, types = conn_tmap[kind]
        rawlist = _psutil_bsd.get_process_connections(self.pid, families, types)
        ret = []
        for item in rawlist:
            fd, fam, type, laddr, raddr, status = item
            # translate the raw TCP state code into its string constant
            status = TCP_STATUSES[status]
            nt = nt_connection(fd, fam, type, laddr, raddr, status)
            ret.append(nt)
        return ret
    @wrap_exceptions
    def process_wait(self, timeout=None):
        """Wait for process termination; on timeout re-raise
        TimeoutExpired with the pid/name attached."""
        try:
            return _psposix.wait_pid(self.pid, timeout)
        except TimeoutExpired:
            raise TimeoutExpired(self.pid, self._process_name)
    @wrap_exceptions
    def get_process_nice(self):
        """Return the process niceness (priority)."""
        return _psutil_posix.getpriority(self.pid)
    @wrap_exceptions
    def set_process_nice(self, value):
        """Set the process niceness (priority)."""
        return _psutil_posix.setpriority(self.pid, value)
    @wrap_exceptions
    def get_process_status(self):
        """Return the process status as one of the PROC_STATUSES strings,
        or "?" for an unrecognized code."""
        code = _psutil_bsd.get_process_status(self.pid)
        if code in PROC_STATUSES:
            return PROC_STATUSES[code]
        # XXX is this legit? will we even ever get here?
        return "?"
    @wrap_exceptions
    def get_process_io_counters(self):
        """Return IO counters:
        (read count, write count, read bytes, write bytes)."""
        rc, wc, rb, wb = _psutil_bsd.get_process_io_counters(self.pid)
        return nt_io(rc, wc, rb, wb)
    # memory-map row layouts (grouped vs extended form)
    nt_mmap_grouped = namedtuple(
        'mmap', 'path rss, private, ref_count, shadow_count')
    nt_mmap_ext = namedtuple(
        'mmap', 'addr, perms path rss, private, ref_count, shadow_count')
    @wrap_exceptions
    def get_memory_maps(self):
        """Return the process memory maps as reported by the C layer."""
        return _psutil_bsd.get_process_memory_maps(self.pid)
    # FreeBSD < 8 does not support kinfo_getfile() and kinfo_getvmmap()
    if not hasattr(_psutil_bsd, 'get_process_open_files'):
        def _not_implemented(self):
            """Placeholder for APIs missing on FreeBSD < 8."""
            raise NotImplementedError("supported only starting from FreeBSD 8")
        get_open_files = _not_implemented
        get_process_cwd = _not_implemented
        get_memory_maps = _not_implemented
        get_num_fds = _not_implemented
| 2,660 | 0 | 389 |
ff394a1171c293e681f64072878f8148762d365e | 2,100 | py | Python | tap_linkedin_marketing/executor.py | Radico/tap-linkedin-marketing | 4ccd48bfdfa109955d4eb1c9ae5d81ff0c1f40bb | [
"Apache-2.0"
] | null | null | null | tap_linkedin_marketing/executor.py | Radico/tap-linkedin-marketing | 4ccd48bfdfa109955d4eb1c9ae5d81ff0c1f40bb | [
"Apache-2.0"
] | null | null | null | tap_linkedin_marketing/executor.py | Radico/tap-linkedin-marketing | 4ccd48bfdfa109955d4eb1c9ae5d81ff0c1f40bb | [
"Apache-2.0"
] | 1 | 2020-10-08T16:49:59.000Z | 2020-10-08T16:49:59.000Z | import singer
from tap_kit import TapExecutor
from tap_kit.utils import (transform_write_and_count)
LOGGER = singer.get_logger()
| 27.272727 | 71 | 0.543333 | import singer
from tap_kit import TapExecutor
from tap_kit.utils import (transform_write_and_count)
LOGGER = singer.get_logger()
class LinkedInExecutor(TapExecutor):
    """Tap executor that pulls LinkedIn ad analytics, one request per pivot."""

    # pivots requested from the adAnalyticsV2 endpoint
    PIVOTS = (
        "CAMPAIGN",
        "CREATIVE",
        "CAMPAIGN_GROUP",
        "CONVERSION",
    )

    def __init__(self, streams, args, client):
        """
        Args:
            streams (arr[Stream])
            args (dict)
            client (BaseClient)
        """
        super(LinkedInExecutor, self).__init__(streams, args, client)
        self.url = 'https://api.linkedin.com/v2/adAnalyticsV2'
        self.access_token = self.client.config['access_token']

    def call_full_stream(self, stream):
        """
        Method to call all fully synced streams
        """
        for pivot in self.PIVOTS:
            request_config = {
                'url': self.url,
                'headers': self.build_headers(),
                'params': self.build_params(pivot),
                'run': True
            }
            LOGGER.info("Extracting {s} ".format(s=stream))
            self.call_stream(stream, request_config)

    def call_stream(self, stream, request_config):
        """Issue one request and write the resulting records."""
        response = self.client.make_request(request_config)
        payload = response.json()
        if not payload:
            records = []
        elif isinstance(payload, list):
            records = payload
        else:
            # downstream helpers expect a list of records
            records = [payload]
        transform_write_and_count(stream, records)

    def build_params(self, pivot):
        """Query parameters for the adAnalyticsV2 endpoint."""
        return {
            "q": "analytics",
            "pivot": pivot,
            "dateRange.start.day": 1,
            "dateRange.start.month": 12,
            "dateRange.start.year": 2019,
            "timeGranularity": "DAILY",
            "accounts[0]": "urn:li:sponsoredAccount:507638420"
        }

    def build_headers(self):
        """
        Included in all API calls
        """
        return {
            "Authorization": "Bearer {}".format(self.access_token),
            "Accept": "application/json;charset=UTF-8"
        }
| 660 | 1,285 | 23 |
3c5b2df441b90277fa6a7b32261e00006b20d450 | 566 | py | Python | products/migrations/0009_auto_20200310_1903.py | JayPeaa/msproject5 | 89ee3e52cbefc686104389f91770581b88349020 | [
"MIT"
] | null | null | null | products/migrations/0009_auto_20200310_1903.py | JayPeaa/msproject5 | 89ee3e52cbefc686104389f91770581b88349020 | [
"MIT"
] | 12 | 2020-02-12T02:53:42.000Z | 2022-03-12T00:17:00.000Z | products/migrations/0009_auto_20200310_1903.py | JayPeaa/msproject5 | 89ee3e52cbefc686104389f91770581b88349020 | [
"MIT"
] | 1 | 2020-04-11T12:31:12.000Z | 2020-04-11T12:31:12.000Z | # Generated by Django 2.1.14 on 2020-03-10 19:03
from django.db import migrations, models
| 23.583333 | 70 | 0.591873 | # Generated by Django 2.1.14 on 2020-03-10 19:03
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('products', '0008_auto_20200307_1352'),
]
operations = [
migrations.RemoveField(
model_name='product',
name='product_image',
),
migrations.AddField(
model_name='product',
name='product_image_name',
field=models.TextField(default='picture', max_length=200),
preserve_default=False,
),
]
| 0 | 451 | 23 |
73a63e4efac203c167f98bcd9a2ff16d6821760c | 4,158 | py | Python | core/src/models.py | fall2021-csc510-group40/filmfan | da42fdcc713f2b22debc1da9a09dc7a82aa5b66b | [
"MIT"
] | 1 | 2021-09-20T00:34:27.000Z | 2021-09-20T00:34:27.000Z | core/src/models.py | fall2021-csc510-group40/filmfan | da42fdcc713f2b22debc1da9a09dc7a82aa5b66b | [
"MIT"
] | 36 | 2021-10-29T19:02:23.000Z | 2021-11-16T03:06:01.000Z | core/src/models.py | pncnmnp/SE21-project | da42fdcc713f2b22debc1da9a09dc7a82aa5b66b | [
"MIT"
] | 1 | 2022-02-25T03:07:26.000Z | 2022-02-25T03:07:26.000Z | """
This package defines database models and relations used
"""
from . import db
from flask_login import UserMixin
from werkzeug.security import generate_password_hash, check_password_hash
class MovieHandle(db.Model):
"""
MovieHandle class provides a representation of a movie id for the database
"""
id = db.Column(db.Integer, primary_key=True)
class TvShowHandle(db.Model):
"""
TvShowHandle class provides a representation of a TV show id for the database
"""
id = db.Column(db.Integer, primary_key=True)
movie_favorites = db.Table(
'movie_favorites',
db.Column('user_id', db.Integer, db.ForeignKey('user.id'), primary_key=True),
db.Column('movie_id', db.Integer, db.ForeignKey(f'{MovieHandle.__tablename__}.id'), primary_key=True),
)
tv_favorites = db.Table(
'tv_favorites',
db.Column('user_id', db.Integer, db.ForeignKey('user.id'), primary_key=True),
db.Column('tv_id', db.Integer, db.ForeignKey(f'{TvShowHandle.__tablename__}.id'), primary_key=True),
)
class User(db.Model, UserMixin):
"""
User class is a model for a user in the database
"""
id = db.Column(
db.Integer,
primary_key=True
)
name = db.Column(
db.String(100),
nullable=False,
unique=False
)
email = db.Column(
db.String(40),
unique=True,
nullable=False
)
password = db.Column(
db.String(200),
primary_key=False,
unique=False,
nullable=False
)
movie_favorites = db.relationship('MovieHandle', secondary=movie_favorites, lazy='dynamic')
tv_favorites = db.relationship('TvShowHandle', secondary=tv_favorites, lazy='dynamic')
def set_password(self, password):
"""Create hashed password."""
self.password = generate_password_hash(
password,
method='sha256'
)
def check_password(self, password):
"""Check hashed password."""
return check_password_hash(self.password, password)
def has_favorite(self, movie_id, movie_type):
"""
Checks if user has an item as their favorite
:param self: User object
:param movie_id: Item id
:param movie_type: Item type
:return: ``True`` if user has the item as their favorite, ``False`` otherwise
"""
if movie_type == "movie":
return self.movie_favorites.filter_by(id=movie_id).first() is not None
else:
return self.tv_favorites.filter_by(id=movie_id).first() is not None
def add_favorite(self, movie_id, movie_type):
"""
Add a favorite for the user
:param self: User object
:param movie_id: Item id
:param movie_type: Item type
:return: ``None``
"""
if not self.has_favorite(movie_id, movie_type):
if movie_type == "movie":
handle = db.session.get(MovieHandle, movie_id) or MovieHandle(id=movie_id)
db.session.add(handle)
self.movie_favorites.append(handle)
db.session.add(self)
else:
handle = db.session.get(TvShowHandle, movie_id) or TvShowHandle(id=movie_id)
db.session.add(handle)
self.tv_favorites.append(handle)
db.session.add(self)
db.session.commit()
def remove_favorite(self, movie_id, movie_type):
"""
Remove a favorite for the user
:param self: User object
:param movie_id: Item id
:param movie_type: Item type
:return: ``None``
"""
if self.has_favorite(movie_id, movie_type):
if movie_type == "movie":
handle = db.session.get(MovieHandle, movie_id) or MovieHandle(id=movie_id)
self.movie_favorites.remove(handle)
else:
handle = db.session.get(TvShowHandle, movie_id) or TvShowHandle(id=movie_id)
self.tv_favorites.remove(handle)
db.session.add(self)
db.session.commit()
| 31.740458 | 106 | 0.61544 | """
This package defines database models and relations used
"""
from . import db
from flask_login import UserMixin
from werkzeug.security import generate_password_hash, check_password_hash
class MovieHandle(db.Model):
"""
MovieHandle class provides a representation of a movie id for the database
"""
id = db.Column(db.Integer, primary_key=True)
class TvShowHandle(db.Model):
"""
TvShowHandle class provides a representation of a TV show id for the database
"""
id = db.Column(db.Integer, primary_key=True)
movie_favorites = db.Table(
'movie_favorites',
db.Column('user_id', db.Integer, db.ForeignKey('user.id'), primary_key=True),
db.Column('movie_id', db.Integer, db.ForeignKey(f'{MovieHandle.__tablename__}.id'), primary_key=True),
)
tv_favorites = db.Table(
'tv_favorites',
db.Column('user_id', db.Integer, db.ForeignKey('user.id'), primary_key=True),
db.Column('tv_id', db.Integer, db.ForeignKey(f'{TvShowHandle.__tablename__}.id'), primary_key=True),
)
class User(db.Model, UserMixin):
    """
    Model for an application user.

    Carries identity/credential columns plus two many-to-many favorite
    collections (movies and TV shows) realised through the
    ``movie_favorites`` / ``tv_favorites`` association tables.
    """
    id = db.Column(
        db.Integer,
        primary_key=True
    )
    # Display name; explicitly not unique.
    name = db.Column(
        db.String(100),
        nullable=False,
        unique=False
    )
    # Login identifier; must be unique.
    email = db.Column(
        db.String(40),
        unique=True,
        nullable=False
    )
    # Password hash (never plain text); written via set_password().
    password = db.Column(
        db.String(200),
        primary_key=False,
        unique=False,
        nullable=False
    )
    # ``lazy='dynamic'`` so favorites can be queried/filtered without
    # loading every row (see has_favorite()).
    movie_favorites = db.relationship('MovieHandle', secondary=movie_favorites, lazy='dynamic')
    tv_favorites = db.relationship('TvShowHandle', secondary=tv_favorites, lazy='dynamic')

    def set_password(self, password):
        """Hash *password* and store the hash on the user."""
        # NOTE(review): the plain 'sha256' method shorthand was removed in
        # Werkzeug 3.0 (newer versions need e.g. 'pbkdf2:sha256') — confirm
        # the pinned Werkzeug version before upgrading.
        self.password = generate_password_hash(
            password,
            method='sha256'
        )

    def check_password(self, password):
        """Return ``True`` if *password* matches the stored hash."""
        return check_password_hash(self.password, password)

    def has_favorite(self, movie_id, movie_type):
        """
        Check whether the user has an item as a favorite.
        :param movie_id: Item id
        :param movie_type: Item type ("movie" selects movies, anything else TV)
        :return: ``True`` if the item is a favorite, ``False`` otherwise
        """
        if movie_type == "movie":
            return self.movie_favorites.filter_by(id=movie_id).first() is not None
        else:
            return self.tv_favorites.filter_by(id=movie_id).first() is not None

    def add_favorite(self, movie_id, movie_type):
        """
        Add a favorite for the user; a no-op if it is already a favorite.
        :param movie_id: Item id
        :param movie_type: Item type
        :return: ``None``
        """
        if not self.has_favorite(movie_id, movie_type):
            if movie_type == "movie":
                # Reuse the existing handle row if there is one, otherwise
                # create it lazily.
                handle = db.session.get(MovieHandle, movie_id) or MovieHandle(id=movie_id)
                db.session.add(handle)
                self.movie_favorites.append(handle)
                db.session.add(self)
            else:
                handle = db.session.get(TvShowHandle, movie_id) or TvShowHandle(id=movie_id)
                db.session.add(handle)
                self.tv_favorites.append(handle)
                db.session.add(self)
            db.session.commit()

    def remove_favorite(self, movie_id, movie_type):
        """
        Remove a favorite for the user; a no-op if it is not a favorite.
        :param movie_id: Item id
        :param movie_type: Item type
        :return: ``None``
        """
        if self.has_favorite(movie_id, movie_type):
            if movie_type == "movie":
                handle = db.session.get(MovieHandle, movie_id) or MovieHandle(id=movie_id)
                self.movie_favorites.remove(handle)
            else:
                handle = db.session.get(TvShowHandle, movie_id) or TvShowHandle(id=movie_id)
                self.tv_favorites.remove(handle)
            db.session.add(self)
            db.session.commit()

    def __repr__(self):
        # Bug fix: the model has no ``username`` attribute — the old code
        # raised AttributeError. ``name`` is the actual column.
        return '<User {}>'.format(self.name)
| 47 | 0 | 27 |
282aaaeff385e415d0b0f70d80c9a913e92b0bfc | 3,092 | py | Python | python/itertools_combinations_2.py | Hamng/python-sources | 0cc5a5d9e576440d95f496edcfd921ae37fcd05a | [
"Unlicense"
] | null | null | null | python/itertools_combinations_2.py | Hamng/python-sources | 0cc5a5d9e576440d95f496edcfd921ae37fcd05a | [
"Unlicense"
] | 1 | 2019-02-23T18:30:51.000Z | 2019-02-23T18:30:51.000Z | python/itertools_combinations_2.py | Hamng/python-sources | 0cc5a5d9e576440d95f496edcfd921ae37fcd05a | [
"Unlicense"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Tue Mar 5 00:39:20 2019
@author: Ham
HackerRank Challenge: Iterables and Iterators
The itertools module standardizes a core set of fast, memory efficient tools
that are useful by themselves or in combination.
Together, they form an iterator algebra making it possible to construct
specialized tools succinctly and efficiently in pure Python.
To read more about the functions in this module, check out their documentation here.
You are given a list of N lowercase English letters.
For a given integer k, you can select any k indices (assume 1-based indexing)
with a uniform probability from the list.
Find the probability that at least one of the K indices selected will contain the letter: 'a'.
Input Format
The input consists of three lines.
The first line contains the integer N, denoting the length of the list.
The next line consists of N space-separated lowercase English letters,
denoting the elements of the list.
The third and the last line of input contains the integer k,
denoting the number of indices to be selected.
Output Format
Output a single line consisting of the probability
that at least one of the indices selected contains the letter:'a'.
Note: The answer must be correct up to 3 decimal places.
Constraints
All the letters in the list are lowercase English letters.
Sample Input
4
a a c d
2
Sample Output
0.8333
Explanation
All possible unordered tuples of length 2 comprising of indices from 1 to 4 are:
(1, 2), (1, 3), (1, 4), (2, 3), (2, 4), and (3, 4)
Out of these 6 combinations, 5 of them contain either
index 1 or index 2 which are the indices that contain the letter 'a'.
Hence, the answer is 5/6.
"""
import itertools
def prob_at_least_one_a(letters, k):
    """Return the probability that a uniformly chosen k-combination of
    *letters* contains at least one 'a'.

    A single pass over ``itertools.combinations`` counts both the total
    number of combinations and those containing 'a'.  Returns 0.0 when
    there are no k-combinations at all (k > len(letters)); the previous
    inline code left its counter unbound in that case and crashed with a
    NameError at the final division.
    """
    total = 0
    hits = 0
    for combo in itertools.combinations(letters, k):
        total += 1
        if 'a' in combo:
            hits += 1
    return float(hits) / float(total) if total else 0.0


if __name__ == '__main__':
    # First input line is the list length; it is consumed but not needed
    # because the letters themselves arrive on the next line.
    n = int(input().strip())
    letters = input().strip().split()
    k = int(input().strip())
    # The answer only needs 3 decimal places; 12 are printed for safety.
    print("%.12f" % prob_at_least_one_a(letters, k))
| 29.730769 | 95 | 0.651682 | # -*- coding: utf-8 -*-
"""
Created on Tue Mar 5 00:39:20 2019
@author: Ham
HackerRank Challenge: Iterables and Iterators
The itertools module standardizes a core set of fast, memory efficient tools
that are useful by themselves or in combination.
Together, they form an iterator algebra making it possible to construct
specialized tools succinctly and efficiently in pure Python.
To read more about the functions in this module, check out their documentation here.
You are given a list of N lowercase English letters.
For a given integer k, you can select any k indices (assume 1-based indexing)
with a uniform probability from the list.
Find the probability that at least one of the K indices selected will contain the letter: 'a'.
Input Format
The input consists of three lines.
The first line contains the integer N, denoting the length of the list.
The next line consists of N space-separated lowercase English letters,
denoting the elements of the list.
The third and the last line of input contains the integer k,
denoting the number of indices to be selected.
Output Format
Output a single line consisting of the probability
that at least one of the indices selected contains the letter:'a'.
Note: The answer must be correct up to 3 decimal places.
Constraints
All the letters in the list are lowercase English letters.
Sample Input
4
a a c d
2
Sample Output
0.8333
Explanation
All possible unordered tuples of length 2 comprising of indices from 1 to 4 are:
(1, 2), (1, 3), (1, 4), (2, 3), (2, 4), and (3, 4)
Out of these 6 combinations, 5 of them contain either
index 1 or index 2 which are the indices that contain the letter 'a'.
Hence, the answer is 5/6.
"""
import itertools
if __name__ == '__main__':
    # First input line: length of the list (read to consume the line; the
    # letters themselves are read later).
    n = int(input().strip())
    #w = [p for p, l in enumerate(input().strip().split(), 1) if l == 'a']
    #print(w)
    #k = int(input().strip())
    #a = 0
    #for c, t in enumerate(itertools.combinations(range(1, n + 1), k), 1):
    #    for i in t:
    #        if i in w:
    #            a += 1
    #            #print(c, t, a)
    #            break
    #
    # Above is my original, and working submission
    # Below is a revision after reading the Discussion forum
    # I optimized to iterate thru the combo(w, k) only once.
    # Other solution might iterate thru 3 times: 1st to make it a list;
    # 2nd to iterate thru the list; then 3rd to calculate len of the list.
    # The c, t in enumerate(iterable, 1) is such that at the end,
    # c will be the length of the iterable.
    # Caution: if someone tries to convert the "for" loop to a list comp,
    # then (for Python 3), both "c" and "t" are NOT be defined
    # after the list comprehension!
    #
    w = input().strip().split()
    k = int(input().strip())
    #print(k, w)
    a = 0
    for c, t in enumerate(itertools.combinations(w, k), 1):
        #print(c, t)
        if 'a' in t:
            a += 1
    #print(a, c)
    # NOTE(review): if k > len(w) there are no combinations, so "c" is never
    # bound and the line below raises NameError — confirm the challenge
    # guarantees k <= len(w).
    print("%.12f" % (float(a) / float(c)))
| 0 | 0 | 0 |
1249771607a589243d9d0012abde96e177516f07 | 236 | py | Python | setup.py | pallogu/numerai | 6f5d6b31e86d27030b041f8591e0122894128e59 | [
"FTL"
] | null | null | null | setup.py | pallogu/numerai | 6f5d6b31e86d27030b041f8591e0122894128e59 | [
"FTL"
] | null | null | null | setup.py | pallogu/numerai | 6f5d6b31e86d27030b041f8591e0122894128e59 | [
"FTL"
] | null | null | null | from setuptools import find_packages, setup
setup(
    # Distribution name of the project.
    name='src',
    # Auto-discover every package (directory with an __init__.py).
    packages=find_packages(),
    version='0.1.0',
    description='Fun project to explore numer.ai modelling of market trends',
    author='Arvpau',
    license='',  # NOTE(review): license left empty — confirm intended licensing.
)
| 21.454545 | 77 | 0.677966 | from setuptools import find_packages, setup
setup(
    # Distribution name of the project.
    name='src',
    # Auto-discover every package (directory with an __init__.py).
    packages=find_packages(),
    version='0.1.0',
    description='Fun project to explore numer.ai modelling of market trends',
    author='Arvpau',
    license='',  # NOTE(review): license left empty — confirm intended licensing.
)
| 0 | 0 | 0 |
54db4693e1cf294e42ce5591b68d9cdb70403b91 | 4,356 | py | Python | OCR.py | Sunil7545/OCRProjectTeamTwo | 6124b0440421acfd28524988171d0061507d53dd | [
"MIT"
] | null | null | null | OCR.py | Sunil7545/OCRProjectTeamTwo | 6124b0440421acfd28524988171d0061507d53dd | [
"MIT"
] | 2 | 2022-01-13T02:02:43.000Z | 2022-03-12T00:11:03.000Z | OCR.py | Sunil7545/OCRProjectTeamTwo | 6124b0440421acfd28524988171d0061507d53dd | [
"MIT"
] | 2 | 2019-12-27T19:07:27.000Z | 2020-01-17T15:06:13.000Z | '''
This program will convert PDFs into images and read text from those images
and print the text over the screen.
This can also extract text directly from images and print it out.
'''
import os
# try is used to keep a check over the import. If there is an error, it will not close
# the program, but instead execute the except statement, similar to if & else.
try:
from PIL import Image, ImageChops, ImageDraw
except ImportError:
import Image, ImageChops, ImageDraw
# extracts text from images
import pytesseract
# convert pdf into images
from pdf2image import convert_from_path
# image processing library
import cv2 as cv
pytesseract.pytesseract.tesseract_cmd = r"C:\\Program Files\\Tesseract-OCR\\tesseract.exe"
class OCR:
    '''
    OCR class to process PDFs and images to extract text from them.

    An instance is bound to a single file (``filename``): use
    ``read_text`` for images and ``split_pdf_and_convert_to_images``
    for PDFs.
    '''
    def __init__(self, filename):
        '''
        Store the path of the file this instance will process.
        :param filename: string containing the path and name of the file.
        '''
        self.filename = filename

    def split_pdf_and_convert_to_images(self):
        '''
        Split the PDF into one PNG per page, binarize each page and print
        the text extracted from it.  All intermediate images (and the
        working directory) are removed afterwards.
        :return: ``None`` (the per-page text is printed, not returned).
        '''
        # Working directory named after the file (without extension).
        # NOTE(review): assumes a backslash-separated path with exactly one
        # directory component (e.g. 'Files\\cert.pdf'); os.path.basename/
        # splitext would be portable — TODO confirm Windows-only usage.
        dirName = self.filename.split("\\")[1].split(".")[0]
        # Tolerate a pre-existing directory but do not hide other OS errors
        # (the previous bare ``except: pass`` swallowed everything,
        # including permission problems).
        os.makedirs(dirName, exist_ok=True)
        dirPath = "{}\\".format(dirName)
        # One randomly named PNG per PDF page, written into dirPath.
        convert_from_path(self.filename, output_folder=dirPath, fmt="png")
        # os.walk yields (root, dirnames, filenames); only filenames matter.
        (_, _, imageNames) = next(os.walk(dirPath))
        for imageName in imageNames:
            imagePath = dirPath + imageName
            page = cv.imread(imagePath)
            grayImage = cv.cvtColor(page, cv.COLOR_BGR2GRAY)
            # Binarize: OCR works best on pure black-and-white input.  The
            # threshold value returned by cv.threshold is not needed.
            (_, blackAndWhiteImage) = cv.threshold(grayImage, 127, 255, cv.THRESH_BINARY)
            # Fixed temp name is fine because pages are handled sequentially.
            name_2 = dirPath + "a.png"
            cv.imwrite(name_2, blackAndWhiteImage)
            # OCR the binarized page and print its text.
            print(self.read_text(filename=name_2))
            os.unlink(name_2)     # remove the binarized temp image
            os.unlink(imagePath)  # remove the page image
        # The directory is empty again; remove it.
        os.rmdir(dirName)

    def read_text(self, filename=None):
        """
        Run Tesseract OCR on an image.
        :param filename: path of an image; defaults to ``self.filename``.
        :return: text extracted from the image.
        """
        if filename is None:
            filename = self.filename
        # Pillow opens the image; pytesseract performs the recognition.
        return pytesseract.image_to_string(Image.open(filename))
# processing an individual image
filename = 'Images\\wordsworthwordle1.jpg'
file_text = OCR(filename)
print(file_text.read_text())
# or
# processing a PDF file
filename = 'Files\\cert.pdf'
file_text = OCR(filename)
# NOTE(review): split_pdf_and_convert_to_images() prints each page's text
# itself and returns None, so this outer print() additionally outputs "None".
print(file_text.split_pdf_and_convert_to_images())
| 36.3 | 96 | 0.665289 | '''
This program will convert PDFs into images and read text from those images
and print the text over the screen.
This can also extract text directly from images and print it out.
'''
import os
# try is used to keep a check over the import. If there is an error, it will not close
# the program, but instead execute the except statement, similar to if & else.
try:
from PIL import Image, ImageChops, ImageDraw
except ImportError:
import Image, ImageChops, ImageDraw
# extracts text from images
import pytesseract
# convert pdf into images
from pdf2image import convert_from_path
# image processing library
import cv2 as cv
pytesseract.pytesseract.tesseract_cmd = r"C:\\Program Files\\Tesseract-OCR\\tesseract.exe"
class OCR:
    '''
    OCR class to process PDFs and images to extract text from them.
    '''
    def __init__(self, filename):
        '''
        Initializes the memory of the object as the object is created using the parent class.
        :param filename: string parameter to save the path and name of the file.
        '''
        self.filename = filename
    def split_pdf_and_convert_to_images(self):
        '''
        A method of OCR class that takes pdf file and path as the input parameter
        and split the pdf into multiple images. After splitting the pdf,
        it takes every image, convert into binary color format, i.e., black and white,
        and extracts text from the images using the read_text function.
        :param: filename as string containing path of a PDF file.
        :return: text extracted from the PDF file.
        '''
        # saving filename as dirName to create a directory of the same name as of the file
        # NOTE(review): assumes a single-level, backslash-separated (Windows) path.
        dirName = self.filename.split("\\")[1].split(".")[0]
        # create a directory with name similar to filename and do nothing if an error is raised.
        # NOTE(review): the bare except also hides real failures (permissions,
        # invalid path), not only "directory already exists".
        try:
            os.mkdir(dirName)
        except:
            pass
        dirPath = "{}\\".format(dirName)
        # create images by random names of every page of the PDF within the created directory.
        convert_from_path(self.filename, output_folder=dirPath, fmt="png")
        # next method is used to iterate files within the directory, os.walk is used to scan
        # for files within a directory as we are only storing the filenames as imageNames,
        # the earlier underscores stores the root directory name and child directory names.
        # This will give us imageNames as a list of files inside the directory.
        (_, _, imageNames) = next(os.walk(dirPath))
        for i in imageNames:
            i = dirPath + i
            # creating an openCV object of the image to perform image processing operations
            a = cv.imread(i)
            # changing image from coloured to gray
            grayImage = cv.cvtColor(a, cv.COLOR_BGR2GRAY)
            # changing images threshold to convert the image to black and white only.
            (thresh, blackAndWhiteImage) = cv.threshold(grayImage, 127, 255, cv.THRESH_BINARY)
            # NOTE(review): fixed temp name — pages must be processed sequentially.
            name_2 = dirPath + "a.png"
            # creating black and white image on path
            cv.imwrite(name_2, blackAndWhiteImage)
            # fetching the text from the image using read_text function
            text = self.read_text(filename=name_2)
            # printing text of single image
            print(text)
            # Deleting b&w image from the directory
            os.unlink(name_2)
            # deleting gray image from the directory
            os.unlink(i)
        # removing the directory
        os.rmdir(dirName)
    def read_text(self, filename=None):
        """
        This function will handle the core OCR processing of images.
        :param: filename as string containing path of an image.
        :return: text extracted from the image.
        """
        # NOTE(review): PEP 8 prefers "is None" over "== None".
        if filename == None:
            filename = self.filename
        text = pytesseract.image_to_string(Image.open(filename))
        # We'll use Pillow's Image class to open the image and
        # pytesseract to detect the string in the image
        return text
# processing an individual image
filename = 'Images\\wordsworthwordle1.jpg'
file_text = OCR(filename)
print(file_text.read_text())
# or
# processing a PDF file
filename = 'Files\\cert.pdf'
file_text = OCR(filename)
# NOTE(review): split_pdf_and_convert_to_images() prints each page's text
# itself and returns None, so this outer print() additionally outputs "None".
print(file_text.split_pdf_and_convert_to_images())
| 0 | 0 | 0 |
61a82ac910dabe7ebb8ace667d6eced2cc315462 | 1,671 | py | Python | tests/ros_comm/test_asserts.py | ros-testing/rospbt | db708ba9c326920b222ef5662b0326db9397d718 | [
"Apache-2.0"
] | null | null | null | tests/ros_comm/test_asserts.py | ros-testing/rospbt | db708ba9c326920b222ef5662b0326db9397d718 | [
"Apache-2.0"
] | 11 | 2018-05-11T15:37:20.000Z | 2018-07-30T19:10:47.000Z | tests/ros_comm/test_asserts.py | ros-testing/rospbt | db708ba9c326920b222ef5662b0326db9397d718 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import pytest
from rostestplus.ros_comm.asserts import (
AssertException,
assert_node_pingable,
assert_node_listed,
assert_node_listed_on_machine,
assert_service_response_success_true,
)
| 25.707692 | 86 | 0.775583 | # -*- coding: utf-8 -*-
import pytest
from rostestplus.ros_comm.asserts import (
AssertException,
assert_node_pingable,
assert_node_listed,
assert_node_listed_on_machine,
assert_service_response_success_true,
)
def test_assert_node_pingable_doesnt_raise_exception_for_existing_node():
    # Happy path: a successful `rosnode ping` transcript for /rosout must be
    # accepted without raising.
    fake_stdout = """
    rosnode: node is [/rosout]
    pinging /rosout with a timeout of 3.0s
    xmlrpc reply from http://ann:46635/ time=1.195908ms
    ping average: 1.150429ms
    """
    assert_node_pingable(fake_stdout, 'rosout')
def test_assert_node_listed_raises_no_exception_for_existing_node():
    # A `rosnode list` transcript containing /talker must be accepted.
    fake_stdout = """
    /rosout
    /talker
    /listener
    """
    assert_node_listed(fake_stdout, 'talker')
def test_assert_node_on_machine_listed_raises_exception_for_non_existing_node():
    # A node that does not appear in the machine-qualified listing must
    # trigger AssertException.
    fake_stdout = """
    /talker-ninja.local-72266-125792
    /rosout
    /listener-ninja.local-72615-125792
    """
    with pytest.raises(AssertException):
        # NOTE(review): arguments here are (stdout, node, machine) while the
        # positive-case test in this file passes (stdout, machine, node) —
        # one of the two is likely swapped; verify against the
        # assert_node_listed_on_machine signature.
        assert_node_listed_on_machine(fake_stdout, 'non_existing_node', 'ninja.local')
def test_assert_node_on_machine_listed_raises_no_exception_for_existing_node():
    # /talker runs on ninja.local in this listing, so no exception expected.
    fake_stdout = """
    /talker-ninja.local-72266-125792
    /rosout
    /listener-ninja.local-72615-125792
    """
    # NOTE(review): arguments here are (stdout, machine, node) while the
    # negative-case test in this file passes (stdout, node, machine) — one of
    # the two is likely swapped; verify against the function's signature.
    assert_node_listed_on_machine(fake_stdout, 'ninja.local', 'talker')
def test_assert_service_response_success_true_raises_no_exception_if_true():
    # "success: True" in the service response must be accepted.
    fake_stdout = """
    success: True
    """
    assert_service_response_success_true(fake_stdout)
# NOTE(review): "falsse" in the function name is a typo for "false"; pytest
# discovers it regardless, so renaming is a cosmetic follow-up.
def test_assert_service_response_success_true_raises_exception_if_falsse():
    # "success: False" in the service response must raise AssertException.
    fake_stdout = """
    success: False
    """
    with pytest.raises(AssertException):
        assert_service_response_success_true(fake_stdout)
| 1,286 | 0 | 138 |
3f63d509b9f70233de4928b087d43a1d0bf024cc | 3,229 | py | Python | model-optimizer/mo/ops/pad_test.py | JOCh1958/openvino | 070201feeec5550b7cf8ec5a0ffd72dc879750be | [
"Apache-2.0"
] | 1 | 2021-04-06T03:32:12.000Z | 2021-04-06T03:32:12.000Z | model-optimizer/mo/ops/pad_test.py | JOCh1958/openvino | 070201feeec5550b7cf8ec5a0ffd72dc879750be | [
"Apache-2.0"
] | 28 | 2021-09-24T09:29:02.000Z | 2022-03-28T13:20:46.000Z | model-optimizer/mo/ops/pad_test.py | JOCh1958/openvino | 070201feeec5550b7cf8ec5a0ffd72dc879750be | [
"Apache-2.0"
] | 1 | 2020-08-30T11:48:03.000Z | 2020-08-30T11:48:03.000Z | # Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import unittest
import numpy as np
from mo.graph.graph import Node
from mo.ops.pad import Pad, AttributedPad
from mo.utils.unittest.graph import build_graph
| 33.635417 | 120 | 0.539486 | # Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import unittest
import numpy as np
from mo.graph.graph import Node
from mo.ops.pad import Pad, AttributedPad
from mo.utils.unittest.graph import build_graph
class TestPadOps(unittest.TestCase):
    """Unit tests for shape and value inference of the Pad operations."""
    # Graph-node templates shared by all tests: a 4D input tensor, the two
    # 1D pad vectors (begin/end), the Pad op node and its output placeholder.
    node_attrs = {
        'data_in': {
            'kind': 'data',
            'shape': np.array([1, 3, 100, 200])
        },
        'pads_begin': {
            'kind': 'data',
            'value': np.array([0, 0, 1, 2], dtype=np.int64),
            'shape': np.array([4], dtype=np.int64)
        },
        'pads_end': {
            'kind': 'data',
            'value': np.array([0, 0, 3, 4], dtype=np.int64),
            'shape': np.array([4], dtype=np.int64)
        },
        'pad': {
            'op': 'Pad',
            'kind': 'op',
            'pads': None,
        },
        'data_out': {
            'kind': 'data',
            'shape': None,
            'value': None,
        }
    }
    # Minimal edge set: input -> pad -> output; tests extend it as needed.
    edge_attrs = [
        ('data_in', 'pad'),
        ('pad', 'data_out')
    ]
    def test_attribute_pad_no_infer(self):
        """AttributedPad exposes no ``infer``; calling it must raise AttributeError."""
        graph = build_graph(
            self.node_attrs,
            self.edge_attrs,
            {'pad': {'pads': np.array([[0, 0], [0, 0], [1, 3], [2, 4]], dtype=np.int64)}},
            nodes_with_edges_only=True,
        )
        pad_node = Node(graph, 'pad')
        with self.assertRaisesRegex(AttributeError, ".*has no attribute 'infer'.*"):
            AttributedPad.infer(pad_node)
    def test_two_inputs(self):
        """Pad.infer with pads_begin/pads_end inputs yields the padded shape."""
        graph = build_graph(
            self.node_attrs,
            self.edge_attrs + [('pads_begin', 'pad'), ('pads_end', 'pad')],
            nodes_with_edges_only=True,
        )
        pad_node = Node(graph, 'pad')
        Pad.infer(pad_node)
        # Expected: each spatial dim grows by begin + end padding.
        self.assertTrue(np.array_equal(Node(graph, 'data_out').shape, np.array([1, 3, 100 + 1 + 3, 200 + 2 + 4])))
    def test_not_enough_inputs(self):
        """Pad.infer requires 3 or 4 inputs; only pads_begin must fail."""
        graph = build_graph(
            self.node_attrs,
            self.edge_attrs + [('pads_begin', 'pad')],
            nodes_with_edges_only=True,
        )
        pad_node = Node(graph, 'pad')
        with self.assertRaisesRegex(AssertionError, ".*must have 3 or 4 inputs.*"):
            Pad.infer(pad_node)
    def test_two_inputs_value_infer(self):
        """With a known input value, Pad.infer must match an np.pad reference."""
        in_value = np.random.rand(*self.node_attrs['data_in']['shape']).astype(np.float32)
        graph = build_graph(
            self.node_attrs,
            self.edge_attrs + [('pads_begin', 'pad'), ('pads_end', 'pad')],
            {'data_in': {'value': in_value}},
            nodes_with_edges_only=True,
        )
        # Interleave begin/end pads into the (rank, 2) layout np.pad expects.
        pads = np.insert(self.node_attrs['pads_end']['value'],
                         np.arange(len(self.node_attrs['pads_begin']['value'])), self.node_attrs['pads_begin']['value'])
        pads = np.reshape(pads, (len(self.node_attrs['pads_begin']['value']), 2))
        ref_value = np.pad(in_value, pads, constant_values=0, mode='constant')
        pad_node = Node(graph, 'pad')
        Pad.infer(pad_node)
        self.assertTrue(np.array_equal(Node(graph, 'data_out').shape, np.array([1, 3, 100 + 1 + 3, 200 + 2 + 4])))
        self.assertTrue(np.array_equal(Node(graph, 'data_out').value, ref_value))
| 2,058 | 905 | 23 |
b3fa2afde1dc0c13806286ecc1f5bd2388803a59 | 11,997 | py | Python | tensorflow/script/network_hrnet.py | christinazavou/ANNFASS_Structure | f7b6d3e44d2466ed15009a3335e757def62adfa6 | [
"MIT"
] | null | null | null | tensorflow/script/network_hrnet.py | christinazavou/ANNFASS_Structure | f7b6d3e44d2466ed15009a3335e757def62adfa6 | [
"MIT"
] | null | null | null | tensorflow/script/network_hrnet.py | christinazavou/ANNFASS_Structure | f7b6d3e44d2466ed15009a3335e757def62adfa6 | [
"MIT"
] | null | null | null | from ocnn import *
| 46.863281 | 120 | 0.581895 | from ocnn import *
class OctreeUpsample:
    """Callable upsampling helper: 'nearest' tiles octant features,
    any other mode uses bilinear interpolation from depth d to d + 1."""
    def __init__(self, upsample='nearest'):
        self.upsample = upsample
    def __call__(self, data, octree, d, mask=None):
        if self.upsample != 'nearest':
            return octree_bilinear(data, octree, d, d + 1, mask)
        return octree_tile(data, octree, d)
def branch(data, octree, depth, channel, block_num, training, dynamic_bottleneck=False):
    """Stack ``block_num`` octree residual blocks at a fixed depth.

    Returns the transformed tensor and a dict of intermediate tensors keyed
    by variable-scope name (for debugging).
    """
    debug_checks = {}
    # if depth > 5: block_num = block_num // 2 # !!! whether should we add this !!!
    # The bottleneck ratio does not depend on the block index, so compute it
    # once: either derived from the channel count, or the fixed 4/8 scheme
    # used in the original code (everything >= 256 gets 8).
    if dynamic_bottleneck:
        bottleneck = channel // 32.0
    else:
        bottleneck = 4 if channel < 256 else 8
    for block_idx in range(block_num):
        with tf.variable_scope('resblock_d%d_%d' % (depth, block_idx)):
            data = octree_resblock(data, octree, depth, channel, 1, training, bottleneck)
            debug_checks['{}/data'.format(tf.get_variable_scope().name)] = data
    return data, debug_checks
def branch_channels(channel, i):
    """Channel count of branch ``i``: the base width doubled once per level."""
    scale = 2 ** i
    return scale * channel
def branches(data, octree, depth, channel, block_num, training, threshold):
    """Run a residual-block stack on every branch (one branch per depth level).

    ``data`` is updated in place; the returned debug dict is the one produced
    by the last (coarsest) branch only.
    """
    for idx, _ in enumerate(data):
        branch_depth = depth - idx
        branch_channel = branch_channels(channel, idx)
        # threshold == 0 disables clipping of the per-branch channel count.
        if threshold > 0 and branch_channel > threshold:
            branch_channel = threshold
        with tf.variable_scope('branch_%d' % branch_depth):
            data[idx], dc = branch(data[idx], octree, branch_depth, branch_channel, block_num, training)
    return data, dc
def trans_func(data_in, octree, d0, d1, training, upsample, threshold):
    """Resample features from octree depth ``d0`` to depth ``d1``.

    d0 > d1: repeated max-pooling followed by a 1x1 conv that widens the
    channels; d0 < d1: a 1x1 conv (at the first step only) followed by
    repeated upsampling; d0 == d1: the input is returned unchanged.

    ``threshold`` > 0 caps the target channel count; 0 disables the cap.
    """
    data = data_in
    channel0 = int(data.shape[1])
    # Target channels scale by 2 per depth step down; for d0 < d1 the factor
    # 2 ** (d0 - d1) is fractional, so channel1 becomes a float.
    channel1 = channel0 * (2 ** (d0 - d1))
    if threshold > 0: # threshold=0 => do not apply clipping
        if channel1 > threshold: channel1 = threshold # !!! clip the channel to threshold
    # no relu for the last feature map
    with tf.variable_scope('trans_%d_%d' % (d0, d1)):
        if d0 > d1: # downsample, transitioning to smaller depth
            for d in range(d0, d1, -1):
                with tf.variable_scope('down_%d' % d): # transfer features from depth d to d1
                    data, _ = octree_max_pool(data, octree, d)
            with tf.variable_scope('conv1x1_%d' % (d1)):
                data = octree_conv1x1_bn(data, channel1, training) # get channels to wanted size
        elif d0 < d1: # upsample, transitioning to bigger depth
            for d in range(d0, d1, 1):
                with tf.variable_scope('up_%d' % d):
                    if d == d0:
                        data = octree_conv1x1_bn(data, channel1, training)
                    data = OctreeUpsample(upsample)(data, octree, d)
        else: # do nothing, return data_in without any changes
            pass
    return data
def transitions(data, octree, depth, training, threshold, upsample='neareast'):
    """Exchange features between all branches and spawn one coarser branch.

    Builds a (num+1) x num grid of resampled features via ``trans_func`` and
    fuses each output level with an element-wise sum followed by ReLU.
    Returns the fused per-level outputs (one level more than the input) and
    a debug dict of intermediate tensors.
    """
    # NOTE(review): the default 'neareast' is a typo for 'nearest'; since
    # OctreeUpsample compares against 'nearest', the default silently selects
    # the bilinear path. HRNet.backbone always passes upsample explicitly, so
    # only external default-arg callers would be affected by fixing it.
    debug_checks = {}
    num = len(data)
    features = [[0] * num for _ in range(num + 1)]
    for i in range(num):
        for j in range(num + 1):
            d0, d1 = depth - i, depth - j
            features[j][i] = trans_func(data[i], octree, d0, d1, training, upsample, threshold)
            debug_checks["{}/features_{}_{}".format(tf.get_variable_scope().name, j, i)] = features[j][i]
    outputs = [None] * (num + 1)
    for j in range(num + 1):
        with tf.variable_scope('fuse_%d' % (depth - j)):
            outputs[j] = tf.nn.relu(tf.add_n(features[j]))
            debug_checks["{}/outputs_{}".format(tf.get_variable_scope().name, j)] = outputs[j]
    return outputs, debug_checks
def front_layer_channeld(channel, d, d1):
    """Channel width for the front layer at octree depth ``d``.

    Halves the base ``channel`` once per remaining step down to ``d1``, plus
    one. Note: true division, so the result is a float.
    """
    divisor = 2 ** (d - d1 + 1)
    return channel / divisor
class HRNet:
def __init__(self, flags):
self.tensors = dict()
self.flags = flags
def network_seg(self, octree, training, reuse=False, pts=None, mask=None):
debug_checks = {}
with tf.variable_scope('ocnn_hrnet', reuse=reuse):
## backbone
convs, dc = self.backbone(octree, training)
debug_checks.update(dc)
self.tensors['convs'] = convs
## header
with tf.variable_scope('seg_header'):
if pts is None:
logit, dc = self.seg_header(convs, octree, self.flags.nout, mask, training)
else:
logit, dc = self.seg_header_pts(convs, octree, self.flags.nout, pts, training)
debug_checks.update(dc)
self.tensors['logit_seg'] = logit
return logit, debug_checks
def seg_header(self, inputs, octree, nout, mask, training):
debug_checks = {}
feature = self.points_feat(inputs, octree) # d5-128,d4-256,d3-516 = 896 feats
factor = self.flags.factor
if self.flags.with_d0:
feature = OctreeUpsample('linear')(feature, octree, self.flags.depth - 1, mask)
debug_checks['{}/feature(linear_ups)'.format(tf.get_variable_scope().name)] = feature
convd0 = self.tensors['front/convd0'] # (1, C, H, 1)
if mask is not None:
convd0 = tf.boolean_mask(convd0, mask, axis=2)
feature = tf.concat([feature, convd0], axis=1) # append input depth features, d6-32 =>928 feats
debug_checks['{}/convd0'.format(tf.get_variable_scope().name)] = convd0
debug_checks['{}/feature(concat)'.format(tf.get_variable_scope().name)] = feature
else:
if mask is not None:
feature = tf.boolean_mask(feature, mask, axis=2)
with tf.variable_scope('predict_%d_with%s_convd0' % (self.flags.depth, "" if self.flags.with_d0 else "out")):
logit = predict_module(feature, nout, 128 * factor, training) # 2-FC
logit = tf.transpose(tf.squeeze(logit, [0, 3])) # (1, C, H, 1) -> (H, C)
return logit, debug_checks
def seg_header_pts(self, inputs, octree, nout, pts, training):
debug_checks = {}
feature = self.points_feat(inputs, octree) # The resolution is 5-depth
# d5-128,d4-256,d3-516 = 896 feats
depth, factor = self.flags.depth, self.flags.factor
xyz, ids = tf.split(pts, [3, 1], axis=1) # get xyz and octree id in current batch
xyz = xyz + 1.0 # [0, 2]
ptsd1 = tf.concat([xyz * (2.0 ** (depth - 2)), ids], axis=1) # [0, 32], d6 resolution
debug_checks["{}pts/ptsd1".format(tf.get_variable_scope().name)] = ptsd1
feature = octree_bilinear_v3(ptsd1, feature, octree,
depth=depth - 1) # transfer octree features to pts
debug_checks["{}pts/feature(bilinear)".format(tf.get_variable_scope().name)] = feature
if self.flags.with_d0:
convd0 = self.tensors['front/convd0'] # The resolution is 6-depth
ptsd0 = tf.concat([xyz * (2 ** (depth - 1)), ids], axis=1) # [0, 64]
debug_checks["{}pts/ptsd0".format(tf.get_variable_scope().name)] = ptsd0
convd0 = octree_nearest_interp(ptsd0, convd0, octree, depth=depth)
debug_checks["{}pts/convd0(nearinterp)".format(tf.get_variable_scope().name)] = convd0
feature = tf.concat([feature, convd0], axis=1)
debug_checks["{}pts/feature(concat)".format(tf.get_variable_scope().name)] = feature
with tf.variable_scope('predict_%d_with%s_convd0' % (self.flags.depth, "" if self.flags.with_d0 else "out")):
logit = predict_module(feature, nout, 128 * factor, training) # 2-FC
logit = tf.transpose(tf.squeeze(logit, [0, 3])) # (1, C, H, 1) -> (H, C)
return logit, debug_checks
def points_feat(self, inputs, octree):
data = [t for t in inputs]
depth, factor, num = self.flags.depth - 1, self.flags.factor, len(inputs)
assert (self.flags.depth >= depth)
for i in range(1, num):
with tf.variable_scope('up_%d' % i):
for j in range(i):
d = depth - i + j
data[i] = OctreeUpsample(self.flags.upsample)(data[i], octree, d)
feature = tf.concat(data, axis=1) # the resolution is depth-5
return feature
def cls_header(self, inputs, octree, nout, training):
data = [t for t in inputs]
channel = [int(t.shape[1]) for t in inputs]
depth, factor, num = self.flags.depth, self.flags.factor, len(inputs)
assert (self.flags.depth >= depth)
for i in range(num):
conv = data[i]
d = depth - i
with tf.variable_scope('down_%d' % d):
for j in range(2 - i):
with tf.variable_scope('down_%d' % (d - j)):
conv, _ = octree_max_pool(conv, octree, d - j)
data[i] = conv
features = tf.concat(data, axis=1)
# with tf.variable_scope("fc0"):
# conv = octree_conv1x1_bn_relu(features, 256, training)
# with tf.variable_scope("fc1"):
# conv = octree_conv1x1_bn_relu(conv, 512 * factor, training)
with tf.variable_scope("fc1"):
conv = octree_conv1x1_bn_relu(features, 512 * factor, training)
fc1 = octree_global_pool(conv, octree, depth=3)
self.tensors['fc1'] = fc1
if self.flags.dropout[0]:
fc1 = tf.layers.dropout(fc1, rate=0.5, training=training)
with tf.variable_scope("fc2"):
# with tf.variable_scope('fc2_pre'):
# fc1 = fc_bn_relu(fc1, 512, training=training)
logit = dense(fc1, nout, use_bias=True)
self.tensors['fc2'] = logit
return logit
def backbone(self, octree, training):
debug_checks = {}
flags = self.flags
depth = flags.depth
with tf.variable_scope('signal'):
data = octree_property(octree, property_name='feature', dtype=tf.float32,
depth=depth, channel=flags.channel)
data = tf.reshape(data, [1, flags.channel, -1, 1]) # [1,channels,no. octants,1]
if flags.signal_abs: data = tf.abs(data)
debug_checks['{}/data(feature)'.format(tf.get_variable_scope().name)] = data
# front
convs = [None]
channel, d1 = 64 * flags.factor, depth - 1 # chosen resolution, main working depth (depth-1)
convs[0] = self.front_layer(data, octree, depth, d1, channel, training)
# stages, how many depths to consider in HRNet architecture
stage_num = flags.stages
for stage in range(1, stage_num + 1):
with tf.variable_scope('stage_%d' % stage):
convs, dc = branches(convs, octree, d1, channel, flags.resblock_num, training,
self.flags.feature_threshold)
debug_checks.update(dc)
if stage == stage_num: break
# move to shallower depth
convs, dc = transitions(convs, octree, depth=d1, training=training, upsample=flags.upsample,
threshold=self.flags.feature_threshold)
debug_checks.update(dc)
return convs, debug_checks
def front_layer(self, data, octree, d0, d1, channel, training):
    """Front convolutions: conv + max-pool from depth d0 down to d1,
    then one final conv at d1 with the target channel count.

    `front_layer_channeld` picks the intermediate channel count per depth.
    Returns the feature tensor at depth d1.
    """
    conv = data
    with tf.variable_scope('front'):
        for d in range(d0, d1, -1):
            with tf.variable_scope('depth_%d' % d):
                channeld = front_layer_channeld(channel, d, d1)
                conv = octree_conv_bn_relu(conv, octree, d, channeld, training)
                self.tensors['front/convd0'] = conv # TODO: add a resblock here?
                conv, _ = octree_max_pool(conv, octree, d)
        with tf.variable_scope('depth_%d' % d1):
            conv = octree_conv_bn_relu(conv, octree, d1, channel, training)
            self.tensors['front/convd1'] = conv
    return conv
| 11,527 | -9 | 452 |
cf2a2de27769f700c32eae12f1013f8529c4d8cf | 835 | py | Python | backend/wod_board/models/__init__.py | GuillaumeOj/P13-WOD-Board | 36df7979e63c354507edb56eabdfc548b1964d08 | [
"MIT"
] | null | null | null | backend/wod_board/models/__init__.py | GuillaumeOj/P13-WOD-Board | 36df7979e63c354507edb56eabdfc548b1964d08 | [
"MIT"
] | 82 | 2021-01-17T18:12:23.000Z | 2021-06-12T21:46:49.000Z | backend/wod_board/models/__init__.py | GuillaumeOj/WodBoard | 1ac12404f6094909c9bf116bcaf6ccd60e85bc00 | [
"MIT"
] | null | null | null | import sqlalchemy
import sqlalchemy.orm
from wod_board import config
Base = sqlalchemy.orm.declarative_base()
engine = sqlalchemy.create_engine(config.DATABASE_URL)
Session = sqlalchemy.orm.sessionmaker(bind=engine, class_=sqlalchemy.orm.Session)
# Import each model fo Alembic
from wod_board.models.equipment import * # noqa
from wod_board.models.goal import * # noqa
from wod_board.models.movement import * # noqa
from wod_board.models.unit import * # noqa
from wod_board.models.user import * # noqa
from wod_board.models.wod import * # noqa
from wod_board.models.wod_round import * # noqa
| 21.973684 | 81 | 0.732934 | import sqlalchemy
import sqlalchemy.orm
from wod_board import config
Base = sqlalchemy.orm.declarative_base()
engine = sqlalchemy.create_engine(config.DATABASE_URL)
Session = sqlalchemy.orm.sessionmaker(bind=engine, class_=sqlalchemy.orm.Session)
def get_db():
    """Yield a fresh database session and guarantee it is closed afterwards.

    Intended for use as a per-request dependency: the caller receives the
    session, and cleanup runs once the generator is exhausted.
    """
    session = Session()
    try:
        yield session
    finally:
        session.close()
# Import each model fo Alembic
from wod_board.models.equipment import * # noqa
from wod_board.models.goal import * # noqa
from wod_board.models.movement import * # noqa
from wod_board.models.unit import * # noqa
from wod_board.models.user import * # noqa
from wod_board.models.wod import * # noqa
from wod_board.models.wod_round import * # noqa
def create_all() -> None:
    """Create every table registered on the declarative Base using the configured engine."""
    Base.metadata.create_all(engine)
def drop_all() -> None:
    """Drop every table registered on the declarative Base from the configured engine."""
    Base.metadata.drop_all(engine)
| 157 | 0 | 69 |
99e693da437969ae877a9c639beb2e1c016d3c3c | 1,727 | py | Python | qatrack/parts/migrations/0014_auto_20201230_0955.py | crcrewso/qatrackplus | b9da3bc542d9e3eca8b7291bb631d1c7255d528e | [
"MIT"
] | 20 | 2021-03-11T18:37:32.000Z | 2022-03-23T19:38:07.000Z | qatrack/parts/migrations/0014_auto_20201230_0955.py | crcrewso/qatrackplus | b9da3bc542d9e3eca8b7291bb631d1c7255d528e | [
"MIT"
] | 75 | 2021-02-12T02:37:33.000Z | 2022-03-29T20:56:16.000Z | qatrack/parts/migrations/0014_auto_20201230_0955.py | crcrewso/qatrackplus | b9da3bc542d9e3eca8b7291bb631d1c7255d528e | [
"MIT"
] | 5 | 2021-04-07T15:46:53.000Z | 2021-09-18T16:55:00.000Z | # Generated by Django 2.1.15 on 2020-12-30 14:55
import os
from django.conf import settings
from django.db import migrations
from django.db.migrations.recorder import MigrationRecorder
| 31.4 | 107 | 0.659525 | # Generated by Django 2.1.15 on 2020-12-30 14:55
import os
from django.conf import settings
from django.db import migrations
from django.db.migrations.recorder import MigrationRecorder
def alter_unique(apps, schema):
    """SQL Server-only migration step: drop the auto-named unique constraint on
    parts_partsuppliercollection and rebuild it as a filtered unique index over
    (part_id, supplier_id, part_number) that ignores rows with NULLs.

    @param apps    Django migration app registry (historical models)
    @param schema  the active SchemaEditor
    """
    from django.db import connection, transaction  # NOTE(review): `transaction` is unused here
    cursor = connection.cursor()
    # Look up the server-generated name of the existing unique constraint;
    # it cannot be hard-coded because SQL Server auto-names it.
    cursor.execute("""
    SELECT top 1
    TC.Constraint_Name
    FROM information_schema.table_constraints TC
    INNER JOIN information_schema.constraint_column_usage CC on TC.Constraint_Name = CC.Constraint_Name
    WHERE
    TC.constraint_type = 'Unique'
    AND
    TC.Constraint_Name LIKE 'parts_partsuppliercollection_part_id_supplier%'
    ORDER BY TC.Constraint_Name"""
    )
    try:
        constraint_name = cursor.fetchone()[0]
        cursor.execute("ALTER TABLE parts_partsuppliercollection drop constraint %s" % constraint_name)
    except TypeError:
        # fetchone() returned None -> no matching constraint exists; nothing to drop.
        pass
    columns = ['part_id', 'supplier_id', 'part_number']
    # SQL Server treats NULLs as equal inside unique indexes, so restrict
    # uniqueness to fully-populated rows via a filtered-index condition.
    condition = ' AND '.join(["[%s] IS NOT NULL" % col for col in columns])
    PartSupplierCollection = apps.get_model("parts", "PartSupplierCollection")
    # NOTE(review): _create_unique_sql is a private SchemaEditor API and its
    # return value (the built SQL statement) is discarded here — verify the
    # unique index is actually created, or execute the returned statement.
    schema._create_unique_sql(PartSupplierCollection, columns, condition=condition)
class Migration(migrations.Migration):
    """Enforce uniqueness of (part, supplier, part_number) on PartSupplierCollection.

    The operations list is chosen at import time based on the configured
    database backend: SQL Server needs the hand-written constraint rebuild
    (see alter_unique); every other backend uses AlterUniqueTogether.
    """
    dependencies = [
        ('parts', '0013_auto_20201229_1302'),
    ]
    if "sql_server" in settings.DATABASES['default']['ENGINE']:
        operations = [
            # NOTE(review): no reverse_code supplied — this branch is irreversible.
            migrations.RunPython(alter_unique),
        ]
    else:
        operations = [
            migrations.AlterUniqueTogether(
                name='partsuppliercollection',
                unique_together={('part', 'supplier', 'part_number')},
            ),
        ]
| 1,035 | 457 | 46 |
00d47b939f359f3dcf59c98e277e76e10ecbf25e | 5,108 | py | Python | app/main.py | prav10194/automated-twitter-reddit-app | 7c44dbb998d4124589e7c8d74fa0b6e09c2aea40 | [
"MIT"
] | null | null | null | app/main.py | prav10194/automated-twitter-reddit-app | 7c44dbb998d4124589e7c8d74fa0b6e09c2aea40 | [
"MIT"
] | null | null | null | app/main.py | prav10194/automated-twitter-reddit-app | 7c44dbb998d4124589e7c8d74fa0b6e09c2aea40 | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
from flask import Flask, render_template, request
from flask_cors import CORS, cross_origin
import requests
import dropbox
app = Flask(__name__)
cors = CORS(app)
app.config['CORS_HEADERS'] = 'Content-Type'
import praw
import requests
import youtube_dl
import random
import time
import os
dbx = dropbox.Dropbox(os.environ.get('DROPBOX_ACCESS_TOKEN'))
reddit = praw.Reddit(
client_id=os.environ.get('REDDIT_CLIENT_ID'),
client_secret=os.environ.get('REDDIT_CLIENT_SECRET'),
user_agent=os.environ.get('REDDIT_USER_AGENT'),
username=os.environ.get('REDDIT_USERNAME'),
password=os.environ.get('REDDIT_PASSWORD')
)
print(reddit.read_only)
from twython import Twython
twitter = Twython(os.environ.get('TWITTER_APP_KEY'), os.environ.get('TWITTER_APP_SECRET'),
os.environ.get('TWITTER_OAUTH_TOKEN'), os.environ.get('TWITTER_OAUTH_TOKEN_SECRET'))
@app.route("/")
@app.route("/postreddit")
| 45.607143 | 166 | 0.57224 | from __future__ import unicode_literals
from flask import Flask, render_template, request
from flask_cors import CORS, cross_origin
import requests
import dropbox
app = Flask(__name__)
cors = CORS(app)
app.config['CORS_HEADERS'] = 'Content-Type'
import praw
import requests
import youtube_dl
import random
import time
import os
dbx = dropbox.Dropbox(os.environ.get('DROPBOX_ACCESS_TOKEN'))
reddit = praw.Reddit(
client_id=os.environ.get('REDDIT_CLIENT_ID'),
client_secret=os.environ.get('REDDIT_CLIENT_SECRET'),
user_agent=os.environ.get('REDDIT_USER_AGENT'),
username=os.environ.get('REDDIT_USERNAME'),
password=os.environ.get('REDDIT_PASSWORD')
)
print(reddit.read_only)
from twython import Twython
twitter = Twython(os.environ.get('TWITTER_APP_KEY'), os.environ.get('TWITTER_APP_SECRET'),
os.environ.get('TWITTER_OAUTH_TOKEN'), os.environ.get('TWITTER_OAUTH_TOKEN_SECRET'))
@app.route("/")
def home_view():
    """Serve the static landing page."""
    page = render_template('frontpage.html')
    return page
@app.route("/postreddit")
def post_reddit():
    """Tweet a random top image/video post from a fixed set of subreddits.

    Access is gated by the 'frensandfamilycode' query parameter. Posted
    submission ids are tracked in a Dropbox-backed 'ids' file so the same
    post is never tweeted twice.
    """
    # Refresh the local de-duplication file from Dropbox.
    os.remove('./ids')  # NOTE(review): raises FileNotFoundError if './ids' is absent locally — TODO confirm it always exists
    dbx.files_download_to_file("./ids", '/Reddit-Twitter/ids')
    print("VAR:", os.environ.get('VAR'))
    if request.args.get('frensandfamilycode') == os.environ.get('SUPER_SECRET_TOKEN'):
        print("Access granted")
        subreddits_list = ["aww","earthporn","cattaps","tippytaps","masterreturns","dogpictures","RarePuppers","DogsWithJobs"]
        random_subbreddit = random.choice(subreddits_list)
        subreddit = reddit.subreddit(random_subbreddit)
        # Each entry is "<time filter>:<number of top posts to scan>".
        time_filters_counts = ["year:100", "month:20", "week:5"]
        time_filter_count = random.choice(time_filters_counts)
        alreadyPosted = False
        reddit_post = {"url": "", "id": "", "title": "", "postlink": ""}
        for submission in subreddit.top(time_filter=time_filter_count.split(":")[0],limit=int(time_filter_count.split(":")[1])):
            try:
                readfile = open("ids", "r")
                isUnique = submission.id not in readfile.read()
                readfile.close()
            except:
                # ids file missing/unreadable: treat the post as unseen and (re)create the file.
                isUnique = True
                open("ids",'w').close()
            if isUnique and not alreadyPosted: #check if id does not exists in file:
                alreadyPosted = True
                try:
                    appendfile = open("ids", "a")
                    appendfile.write("\n" + submission.id)
                    appendfile.close()
                    reddit_post["postlink"] = "http://reddit.com" + submission.permalink
                    reddit_post["url"] = submission.url
                    reddit_post["id"] = submission.id
                    reddit_post["title"] = submission.title
                    reddit_post["author"] = submission.author
                    # print("reddit_link: " + reddit_link)
                except:
                    alreadyPosted = False
                    print("Checking the next post")
        # NOTE(review): if no unique submission was found, reddit_post["url"]
        # is "" and the request below will fail — TODO handle this path.
        r = requests.get(reddit_post["url"], allow_redirects=True)
        print(r.headers.get('content-type'))
        print("running code now for: " + reddit_post["id"])
        ydl_opts = {'outtmpl': reddit_post["id"] + '.%(ext)s'}
        print(r.headers.get('content-type'))
        # Static image (or imgur-style page without charset): tweet as a photo.
        if r.headers.get('content-type') == "image/jpeg" or r.headers.get('content-type') == "text/html":
            # NOTE(review): these file handles are never closed — consider `with`.
            open(reddit_post["id"] + '.jpg', 'wb').write(r.content)
            photo = open(reddit_post["id"] + '.jpg', 'rb')
            tweet = reddit_post["title"] + ' \nr/' + str(random_subbreddit) + '\nu/' + str(reddit_post["author"]) + '\n\n[' + reddit_post["postlink"] + ']'
            response = twitter.upload_media(media=photo)
            twitter.update_status(status=tweet, media_ids=[response['media_id']])
            os.remove(reddit_post["id"] + '.jpg')
        # HTML page with an explicit charset: assume a video post, fetch via youtube-dl.
        if r.headers.get('content-type') == "text/html; charset=utf-8" or r.headers.get('content-type') == "text/html;charset=UTF-8":
            with youtube_dl.YoutubeDL(ydl_opts) as ydl:
                ydl.download([reddit_post["url"]])
            tweet = reddit_post["title"] + '\nr/' + str(random_subbreddit) + '\nu/' + str(reddit_post["author"]) + '\n\n[' + reddit_post["postlink"] + ']'
            print(os.listdir("./"))
            video = open(reddit_post["id"] + '.mp4', 'rb')
            response = twitter.upload_video(media=video, media_category='tweet_video', media_type='video/mp4', check_progress=True)
            twitter.update_status(status=tweet, media_ids=[response['media_id']])
            os.remove(reddit_post["id"] + '.mp4')
        # Push the updated de-duplication file back to Dropbox (delete + re-upload).
        dbx.files_delete_v2('/Reddit-Twitter/ids', parent_rev=None)
        with open("./ids", "rb") as f:
            dbx.files_upload(f.read(), '/Reddit-Twitter/ids', mute = True)
        return {"message": "Posted successfully"}
    # NOTE(review): no explicit return when the access code is wrong — Flask
    # errors on the implicit None. TODO confirm intended behavior.
| 4,112 | 0 | 44 |
072864afc42d7a2bd9cbf5cb71e2e0a705e39792 | 3,651 | py | Python | dgaintel/predict.py | ffontaine/dgaintel | 6b2ed1023c73fd3449571380eca34e17f919114b | [
"MIT"
] | null | null | null | dgaintel/predict.py | ffontaine/dgaintel | 6b2ed1023c73fd3449571380eca34e17f919114b | [
"MIT"
] | null | null | null | dgaintel/predict.py | ffontaine/dgaintel | 6b2ed1023c73fd3449571380eca34e17f919114b | [
"MIT"
] | null | null | null | '''
Main prediction module for dgaintel package
'''
import os
import numpy as np
from tensorflow.keras.models import load_model
DIR_PATH = os.path.dirname(os.path.abspath(__file__))
SAVED_MODEL_PATH = os.path.join(DIR_PATH, 'domain_classifier_model.h5')
MODEL = load_model(SAVED_MODEL_PATH)
CHAR2IDX = {'-': 0, '.': 1, '0': 2, '1': 3, '2': 4, '3': 5,
'4': 6, '5': 7, '6': 8, '7': 9, '8': 10, '9': 11,
'_': 12, 'a': 13, 'b': 14, 'c': 15, 'd': 16, 'e': 17,
'f': 18, 'g': 19, 'h': 20, 'i': 21, 'j': 22, 'k': 23,
'l': 24, 'm': 25, 'n': 26, 'o': 27, 'p': 28, 'q': 29,
'r': 30, 's': 31, 't': 32, 'u': 33, 'v': 34, 'w': 35,
'x': 36, 'y': 37, 'z': 38}
def get_prob(domains, raw=False, internal=False):
    '''
    Core inference function; calls model on vectorized batch of domain names.
    Input: list of domains (list)
    Output: len(domains) == 1: single probability value
            raw=False: list of tuples of format (domain_name, probability)
            raw=True: np.ndarray of probabilities
    '''
    if not isinstance(domains, list):
        domains = _inputs(domains)  # helper defined elsewhere: normalizes str/.txt input
    # Encode each domain as a fixed-length (82-slot) vector of character
    # indices; characters outside the vocabulary map to -1, unused slots stay 0.
    vec = np.zeros((len(domains), 82))
    for i, domain in enumerate(domains):
        for j, char in enumerate(domain):
            vec[i, j] = CHAR2IDX[char] if char in CHAR2IDX else -1
    prob = MODEL(vec).numpy()
    prob = prob.transpose()[0]  # flatten (N,1) model output to shape (N,)
    if not internal:
        if prob.shape[0] == 1:
            return prob.sum()  # single domain -> scalar probability
        if raw:
            return prob
    return list(zip(domains, list(prob)))
def get_prediction(domains, to_file=None, show=True):
    '''
    Wrapper for printing out/writing full predictions on a domain or set of domains
    Input: domain (str), list of domains (list), domains in .txt file (FileObj)
    Output: show to stdout
            show=False: list of prediction strings (list)
            to_file=<filename>.txt: writes new file at <filename>.txt with predictions
    '''
    if not isinstance(domains, list):
        domains = _inputs(domains)  # helper defined elsewhere
    raw_probs = get_prob(domains, internal=True)  # [(domain, probability), ...]
    preds = [_get_prediction(domain, prob=prob) for domain, prob in raw_probs]
    if to_file:
        assert os.path.splitext(to_file)[1] == ".txt"  # only .txt output is supported
        with open(os.path.join(os.getcwd(), to_file), 'w') as outfile:
            outfile.writelines(preds)
        return None
    if show:
        for pred in preds:
            print(pred.strip('\n'))
        return None
    return preds
def main():
    '''
    Main function for testing purposes.
    '''
    # Mix of genuine domains and DGA-like gibberish to sanity-check the model.
    get_prediction(['microsoft.com',
                    'squarespace.com',
                    'hsfkjdshfjasdhfk.com',
                    'fdkhakshfda.com',
                    'foilfencersarebad.com',
                    'discojjfdsf.com',
                    'fasddafhkj.com',
                    'wikipedai.com'])

if __name__ == '__main__':
    main()
| 30.680672 | 83 | 0.575185 | '''
Main prediction module for dgaintel package
'''
import os
import numpy as np
from tensorflow.keras.models import load_model
DIR_PATH = os.path.dirname(os.path.abspath(__file__))
SAVED_MODEL_PATH = os.path.join(DIR_PATH, 'domain_classifier_model.h5')
MODEL = load_model(SAVED_MODEL_PATH)
CHAR2IDX = {'-': 0, '.': 1, '0': 2, '1': 3, '2': 4, '3': 5,
'4': 6, '5': 7, '6': 8, '7': 9, '8': 10, '9': 11,
'_': 12, 'a': 13, 'b': 14, 'c': 15, 'd': 16, 'e': 17,
'f': 18, 'g': 19, 'h': 20, 'i': 21, 'j': 22, 'k': 23,
'l': 24, 'm': 25, 'n': 26, 'o': 27, 'p': 28, 'q': 29,
'r': 30, 's': 31, 't': 32, 'u': 33, 'v': 34, 'w': 35,
'x': 36, 'y': 37, 'z': 38}
def _inputs(domains):
lpath = os.path.splitext(domains)
if lpath[1] == ".txt":
path = os.path.join(os.getcwd(), domains)
with open(path, 'r') as dfile:
domain_list = dfile.readlines()
domain_list = [domain.strip('\n').lower() for domain in domain_list]
return domain_list
if isinstance(domains, list):
return [domain.lower() for domain in domains]
return [domains.lower()]
def _get_prediction(domain_name, prob=None):
if not prob:
prob = get_prob([domain_name], raw=True)
if prob >= 0.5:
return '{} is DGA with probability {}\n'.format(domain_name, prob)
return '{} is genuine with probability {}\n'.format(domain_name, prob)
def get_prob(domains, raw=False, internal=False):
    '''
    Core inference function; calls model on vectorized batch of domain names.
    Input: list of domains (list)
    Output: len(domains) == 1: single probability value
            raw=False: list of tuples of format (domain_name, probability)
            raw=True: np.ndarray of probabilities
    '''
    if not isinstance(domains, list):
        domains = _inputs(domains)
    # Encode each name as a fixed-length (82) vector of character indices;
    # out-of-vocabulary characters map to -1, unused slots stay 0.
    encoded = np.zeros((len(domains), 82))
    for row, name in enumerate(domains):
        for col, ch in enumerate(name):
            encoded[row, col] = CHAR2IDX.get(ch, -1)
    scores = MODEL(encoded).numpy().transpose()[0]  # (N,1) -> (N,)
    if internal:
        return list(zip(domains, list(scores)))
    if scores.shape[0] == 1:
        return scores.sum()  # single domain -> scalar
    if raw:
        return scores
    return list(zip(domains, list(scores)))
def get_prediction(domains, to_file=None, show=True):
    '''
    Wrapper for printing out/writing full predictions on a domain or set of domains
    Input: domain (str), list of domains (list), domains in .txt file (FileObj)
    Output: show to stdout
            show=False: list of prediction strings (list)
            to_file=<filename>.txt: writes new file at <filename>.txt with predictions
    '''
    if not isinstance(domains, list):
        domains = _inputs(domains)
    scored = get_prob(domains, internal=True)  # [(domain, probability), ...]
    report_lines = [_get_prediction(name, prob=score) for name, score in scored]
    if to_file:
        # Only .txt output is supported.
        assert os.path.splitext(to_file)[1] == ".txt"
        target_path = os.path.join(os.getcwd(), to_file)
        with open(target_path, 'w') as outfile:
            outfile.writelines(report_lines)
        return None
    if show:
        for line in report_lines:
            print(line.strip('\n'))
        return None
    return report_lines
def main():
    '''
    Main function for testing purposes.
    '''
    # Mix of genuine domains and DGA-like gibberish to sanity-check the model.
    sample_domains = [
        'microsoft.com',
        'squarespace.com',
        'hsfkjdshfjasdhfk.com',
        'fdkhakshfda.com',
        'foilfencersarebad.com',
        'discojjfdsf.com',
        'fasddafhkj.com',
        'wikipedai.com',
    ]
    get_prediction(sample_domains)

if __name__ == '__main__':
    main()
| 689 | 0 | 46 |
4521ed0e587cc3e49439300b7abac40ba3de6383 | 17,394 | py | Python | python/deepLearningPlotter.py | cms-ttbarAC/CyMiniAna | 405b1ac6639f8a93297e847180b5a6ab58f9a06c | [
"MIT"
] | null | null | null | python/deepLearningPlotter.py | cms-ttbarAC/CyMiniAna | 405b1ac6639f8a93297e847180b5a6ab58f9a06c | [
"MIT"
] | 31 | 2017-10-26T16:11:32.000Z | 2018-08-13T14:39:56.000Z | python/deepLearningPlotter.py | cms-ttbarAC/cheetah | 76457d3cb3936dac5c78957b66b3b8aa213ca2b7 | [
"MIT"
] | 1 | 2018-07-24T20:32:35.000Z | 2018-07-24T20:32:35.000Z | """
Created: 11 November 2016
Last Updated: 16 February 2018
Dan Marley
daniel.edison.marley@cernSPAMNOT.ch
Texas A&M University
-----
Base class for plotting deep learning
Designed for running on desktop at TAMU
with specific set of software installed
--> not guaranteed to work in CMSSW environment!
Does not use ROOT!
Instead, uses matplotlib to generate figures
"""
import os
import sys
import json
import util
from datetime import date
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import rc
rc('font', family='sans-serif')
from keras.utils.vis_utils import plot_model as keras_plot
from sklearn.metrics import roc_curve, auc
import hepPlotter.hepPlotterLabels as hpl
import hepPlotter.hepPlotterTools as hpt
from hepPlotter.hepPlotter import HepPlotter
class Target(object):
"""Class to contain information for targets used in training"""
class DeepLearningPlotter(object):
"""Plotting utilities for deep learning"""
def __init__(self):
    """Give default values to member variables"""
    self.date = date.today().strftime('%d%b%Y')           # e.g. '16Feb2018'; appended to output filenames
    self.betterColors = hpt.betterColors()['linecolors']  # color palette cycled over targets
    self.sample_labels = hpl.sample_labels()              # name -> display label for samples
    self.variable_labels = hpl.variable_labels()          # feature -> (label, binning)
    self.msg_svc = util.VERBOSE()                         # logging helper
    self.filename = ""                                    # key into sample_labels for the process label
    self.output_dir = ''                                  # directory for all saved figures/CSVs
    self.image_format = 'png'
    self.process_label = '' # if a single process is used for all training, set this
    self.classification = False # 'binary','multi',False
    self.regression = False # True or False
    self.df = None                                        # dataframe set by initialize()
    self.targets = []                                     # list of Target objects set by initialize()
    self.CMSlabelStatus = "Internal"                      # CMS plot label status text
def initialize(self,dataframe,target_names=[],target_values=[]):
    """
    Set parameters of class to make plots

    @param dataframe The dataframe that contains physics information for training/testing
    @param target_names names of the targets (e.g. ['signal','background'])
    @param target_values numeric values in the 'target' column for each name

    NOTE(review): mutable default arguments ([]) are shared across calls.
    """
    self.df = dataframe
    try:
        self.processlabel = self.sample_labels[self.filename].label # process used in each plot
    except KeyError:
        self.processlabel = ''
    if self.classification:
        # One Target per (name, value) pair; rows selected by the 'target' column.
        for i,(n,v) in enumerate(zip(target_names,target_values)):
            tmp = Target(n)
            tmp.df = self.df.loc[self.df['target']==v]
            tmp.target_value = v
            tmp.label = self.sample_labels[n].label
            tmp.color = self.betterColors[i]
            self.targets.append(tmp)
    else: # regression
        # Accept either list-like or scalar target_names/target_values.
        try:
            tmp = Target(target_names[0])
            tmp.df = self.df.loc[self.df['target']==target_values[0]]
            tmp.target_value = target_values[0]
        except TypeError:
            tmp = Target(target_names)
            tmp.df = self.df.loc[self.df['target']==target_values]
            tmp.target_value = target_values
        tmp.label = self.sample_labels[tmp.name].label
        # NOTE(review): 'i' is never defined on this branch -> NameError at
        # runtime for the regression path; confirm and fix (e.g. index 0).
        tmp.color = self.betterColors[i]
        self.targets.append(tmp)
    return
def features(self):
    """
    Plot the features
    For classification, compare different targets
    For regression, just plot the features <- should do data/mc plots instead!

    NOTE(review): only the first two targets are compared (binary case).
    """
    self.msg_svc.INFO("DL : Plotting features.")
    target0 = self.targets[0] # hard-coded for binary comparisons
    target1 = self.targets[1]
    plt_features = self.df.keys()
    for hi,feature in enumerate(plt_features):
        if feature=='target': continue  # skip the label column itself
        binning = self.variable_labels[feature].binning
        hist = HepPlotter("histogram",1)
        hist.normed = True
        hist.stacked = False
        hist.logplot = {"y":False,"x":False,"data":False}
        hist.binning = binning
        hist.x_label = self.variable_labels[feature].label
        hist.y_label = "Events"
        hist.format = self.image_format
        hist.saveAs = self.output_dir+"/hist_"+feature+"_"+self.date
        hist.ratio_plot = True
        hist.ratio_type = 'ratio'
        hist.y_ratio_label = '{0}/{1}'.format(target0.label,target1.label)
        hist.CMSlabel = 'top left'
        hist.CMSlabelStatus = self.CMSlabelStatus
        hist.numLegendColumns = 1
        # Add some extra text to the plot
        if self.processlabel: hist.extra_text.Add(self.processlabel,coords=[0.03,0.80]) # physics process that produces these features
        hist.initialize()
        hist.Add(target0.df[feature], name=target0.name, draw='step',
                 linecolor=target0.color, label=target0.label,
                 ratio_num=True,ratio_den=False,ratio_partner=target1.name)
        hist.Add(target1.df[feature], name=target1.name, draw='step',
                 linecolor=target1.color, label=target1.label,
                 ratio_num=False,ratio_den=True,ratio_partner=target0.name)
        if self.classification=='binary':
            # Annotate the plot with the separation between the two shapes.
            t0,_ = np.histogram(target0.df[feature],bins=binning,normed=True)
            t1,_ = np.histogram(target1.df[feature],bins=binning,normed=True)
            separation = util.getSeparation(t0,t1)
            hist.extra_text.Add("Separation = {0:.4f}".format(separation),coords=[0.03,0.73])
        p = hist.execute()
        hist.savefig()
    return
def feature_correlations(self):
    """Plot correlations between features of the NN

    One heatmap per target; the correlation matrix is also written to CSV
    next to the image.
    """
    ## Correlation Matrices of Features (top/antitop) ##
    fontProperties = {'family':'sans-serif'}
    opts = {'cmap': plt.get_cmap("bwr"), 'vmin': -1, 'vmax': +1}
    for c,target in enumerate(self.targets):
        saveAs = "{0}/correlations_{1}_{2}".format(self.output_dir,target.name,self.date)
        # Use every column except the label column.
        allkeys = target.df.keys()
        keys = []
        for key in allkeys:
            if key!='target': keys.append(key)
        t_ = target.df[keys]
        corrmat = t_.corr()
        # Save correlation matrix to CSV file
        corrmat.to_csv("{0}.csv".format(saveAs))
        # Use matplotlib directly
        fig,ax = plt.subplots()
        heatmap1 = ax.pcolor(corrmat, **opts)
        cbar = plt.colorbar(heatmap1, ax=ax)
        cbar.ax.set_yticklabels( [i.get_text().strip('$') for i in cbar.ax.get_yticklabels()], **fontProperties )
        labels = corrmat.columns.values
        labels = [i.replace('_','\_') for i in labels]  # escape underscores for TeX rendering
        # shift location of ticks to center of the bins
        ax.set_xticks(np.arange(len(labels))+0.5, minor=False)
        ax.set_yticks(np.arange(len(labels))+0.5, minor=False)
        ax.set_xticklabels(labels, fontProperties, fontsize=18, minor=False, ha='right', rotation=70)
        ax.set_yticklabels(labels, fontProperties, fontsize=18, minor=False)
        ## CMS/COM Energy Label + Signal name
        cms_stamp = hpl.CMSStamp(self.CMSlabelStatus)
        cms_stamp.coords = [0.02,1.00]
        cms_stamp.fontsize = 16
        cms_stamp.va = 'bottom'
        ax.text(0.02,1.00,cms_stamp.text,fontsize=cms_stamp.fontsize,
                ha=cms_stamp.ha,va=cms_stamp.va,transform=ax.transAxes)
        energy_stamp = hpl.EnergyStamp()
        energy_stamp.ha = 'right'
        energy_stamp.coords = [0.99,1.00]
        energy_stamp.fontsize = 16
        energy_stamp.va = 'bottom'
        ax.text(energy_stamp.coords[0],energy_stamp.coords[1],energy_stamp.text,
                fontsize=energy_stamp.fontsize,ha=energy_stamp.ha, va=energy_stamp.va, transform=ax.transAxes)
        ax.text(0.03,0.93,target.label,fontsize=16,ha='left',va='bottom',transform=ax.transAxes)
        plt.savefig("{0}.{1}".format(saveAs,self.image_format),
                    format=self.image_format,dpi=300,bbox_inches='tight')
        plt.close()
    return
def prediction(self,train_data={},test_data={}):
    """Plot the training and testing predictions

    @param train_data dict with 'X' (predictions) and 'Y' (labels), one entry per k-fold
    @param test_data  same structure for the testing sample

    NOTE(review): mutable default arguments; also the parameter names
    train_data/test_data are re-bound to lists inside the loop (shadowing) —
    safe only because the zip iterator is created before the loop runs.
    """
    self.msg_svc.INFO("DL : Plotting DNN prediction. ")
    # Plot all k-fold cross-validation results
    for i,(train,trainY,test,testY) in enumerate(zip(train_data['X'],train_data['Y'],test_data['X'],test_data['Y'])):
        hist = HepPlotter("histogram",1)
        hist.ratio_plot = True
        hist.ratio_type = "ratio"
        hist.y_ratio_label = "Test/Train"
        hist.label_size = 14
        hist.normed = True # compare shape differences (likely don't have the same event yield)
        hist.format = self.image_format
        hist.saveAs = "{0}/hist_DNN_prediction_kfold{1}_{2}".format(self.output_dir,i,self.date)
        hist.binning = [bb/10. for bb in range(11)]  # 10 bins over [0,1]
        hist.stacked = False
        hist.logplot = {"y":False,"x":False,"data":False}
        hist.x_label = "Prediction"
        hist.y_label = "Arb. Units"
        hist.CMSlabel = 'top left'
        hist.CMSlabelStatus = self.CMSlabelStatus
        hist.numLegendColumns = 1
        if self.processlabel: hist.extra_text.Add(self.processlabel,coords=[0.03,0.80],fontsize=14)
        hist.initialize()
        test_data = []
        train_data = []
        json_data = {}
        for t,target in enumerate(self.targets):
            ## Training
            target_value = target.target_value
            hist.Add(train[ trainY==target_value ],
                     name=target.name+'_train', linecolor=target.color,
                     linewidth=2, draw='step', label=target.label+" Train",
                     ratio_den=True,ratio_num=False,ratio_partner=target.name+'_test')
            ## Testing
            hist.Add(test[ testY==target_value ],
                     name=target.name+'_test', linecolor=target.color, color=target.color,
                     linewidth=0, draw='stepfilled', label=target.label+" Test", alpha=0.5,
                     ratio_den=False,ratio_num=True,ratio_partner=target.name+'_train')
            ## Save data to JSON file
            json_data[target.name+"_train"] = {}
            json_data[target.name+"_test"] = {}
            d_tr,b_tr = np.histogram(train[trainY==target_value],bins=hist.binning)
            d_te,b_te = np.histogram(test[testY==target_value], bins=hist.binning)
            json_data[target.name+"_train"]["binning"] = b_tr.tolist()
            json_data[target.name+"_train"]["content"] = d_tr.tolist()
            json_data[target.name+"_test"]["binning"] = b_te.tolist()
            json_data[target.name+"_test"]["content"] = d_te.tolist()
            test_data.append(d_te.tolist())
            train_data.append(d_tr.tolist())
        # Separation between the first two targets' test distributions.
        separation = util.getSeparation(test_data[0],test_data[1])
        hist.extra_text.Add("Test Separation = {0:.4f}".format(separation),coords=[0.03,0.72])
        p = hist.execute()
        hist.savefig()
        # save results to JSON file (just histogram values & bins) to re-make plots
        with open("{0}.json".format(hist.saveAs), 'w') as outfile:
            json.dump(json_data, outfile)
    return
def ROC(self,fprs=[],tprs=[],accuracy={}):
    """Plot the ROC curve & save to text file

    @param fprs list of false-positive-rate arrays, one per k-fold
    @param tprs list of true-positive-rate arrays, one per k-fold
    @param accuracy optional dict with 'mean' and 'std' for annotation

    NOTE(review): mutable default arguments ([]/{}) are shared across calls.
    """
    self.msg_svc.INFO("DL : Plotting ROC curve.")
    saveAs = "{0}/roc_curve_{1}".format(self.output_dir,self.date)
    ## Use matplotlib directly
    fig,ax = plt.subplots()
    # Draw all of the ROC curves from the K-fold cross-validation
    ax.plot([0, 1], [0, 1], ls='--',label='No Discrimination',lw=2,c='gray')
    ax.axhline(y=1,lw=1,c='lightgray',ls='--')
    for ft,(fpr,tpr) in enumerate(zip(fprs,tprs)):
        roc_auc = auc(fpr,tpr)
        ax.plot(fpr,tpr,label='K-fold {0} (AUC = {1:.2f})'.format(ft,roc_auc),lw=2)
        # save ROC curve to CSV file (to plot later)
        outfile_name = "{0}_{1}.csv".format(saveAs,ft)
        csv = [ "{0},{1}".format(fp,tp) for fp,tp in zip(fpr,tpr) ]
        util.to_csv(outfile_name,csv)
    ax.set_xlim([0.0, 1.0])
    ax.set_ylim([0.0, 1.5])  # headroom above 1 for the legend/labels
    ax.set_xlabel(r'$\epsilon$(anti-top)',fontsize=22,ha='right',va='top',position=(1,0))
    ax.set_xticklabels(["{0:.1f}".format(i) for i in ax.get_xticks()],fontsize=22)
    ax.set_ylabel(r'$\epsilon$(top)',fontsize=22,ha='right',va='bottom',position=(0,1))
    ax.set_yticklabels(['']+["{0:.1f}".format(i) for i in ax.get_yticks()[1:-1]]+[''],fontsize=22)
    ## CMS/COM Energy Label
    cms_stamp = hpl.CMSStamp(self.CMSlabelStatus)
    cms_stamp.coords = [0.03,0.97]
    cms_stamp.fontsize = 16
    ax.text(cms_stamp.coords[0],cms_stamp.coords[1],cms_stamp.text,fontsize=cms_stamp.fontsize,
            ha=cms_stamp.ha,va=cms_stamp.va,transform=ax.transAxes)
    energy_stamp = hpl.EnergyStamp()
    energy_stamp.coords = [0.03,0.90]
    energy_stamp.fontsize = 16
    ax.text(energy_stamp.coords[0],energy_stamp.coords[1],energy_stamp.text,
            fontsize=energy_stamp.fontsize,ha=energy_stamp.ha, va=energy_stamp.va, transform=ax.transAxes)
    text_args = {'ha':'left','va':'top','fontsize':18,'transform':ax.transAxes}
    if self.processlabel: ax.text(0.03,0.82,self.processlabel,**text_args)
    if accuracy: ax.text(0.03,0.75,r"Accuracy = {0:.2f}$\pm${1:.2f}".format(accuracy['mean'],accuracy['std']),**text_args)
    leg = ax.legend(loc=4,numpoints=1,fontsize=12,ncol=1,columnspacing=0.3)
    leg.draw_frame(False)
    plt.savefig('{0}.{1}'.format(saveAs,self.image_format),
                format=self.image_format,bbox_inches='tight',dpi=300)
    plt.close()
    return
def plot_loss_history(self, history, ax=None, index=-1):
    """Draw one model's training-loss curve on *ax* and return the data
    as a list of 'epoch,loss' CSV rows.

    *index* >= 0 tags the legend label with the k-fold number.
    """
    loss_values = history.history['loss']
    epochs = range(1, len(loss_values) + 1)
    curve_label = 'Loss' if index < 0 else 'Loss {0}'.format(index)
    ax.plot(epochs, loss_values, label=curve_label)
    return ["{0},{1}".format(epoch, value) for epoch, value in zip(epochs, loss_values)]
def loss_history(self,history,kfold=0,val_loss=0.0):
    """Plot loss as a function of epoch for model

    @param history  a Keras History object, or a list of them (one per k-fold)
    @param kfold    NOTE(review): parameter is unused in this method
    @param val_loss validation loss value printed on the figure
    """
    self.msg_svc.INFO("DL : Plotting loss as a function of epoch number.")
    saveAs = "{0}/loss_epochs_{1}".format(self.output_dir,self.date)
    all_histories = type(history)==list
    # draw the loss curve
    fig,ax = plt.subplots()
    # also save the data to a CSV file
    if all_histories:
        for i,h in enumerate(history):
            csv = self.plot_loss_history(h,ax=ax,index=i)
            filename = "{0}_{1}.csv".format(saveAs,i)
            util.to_csv(filename,csv)
    else:
        csv = self.plot_loss_history(history,ax=ax)
        filename = "{0}.csv".format(saveAs)
        util.to_csv(filename,csv)
    ax.set_xlabel('Epoch',fontsize=22,ha='right',va='top',position=(1,0))
    ax.set_xticklabels(["{0:.1f}".format(i) for i in ax.get_xticks()],fontsize=22)
    ax.set_ylabel('Loss',fontsize=22,ha='right',va='bottom',position=(0,1))
    ax.set_yticklabels(['']+["{0:.1f}".format(i) for i in ax.get_yticks()[1:-1]]+[''],fontsize=22)
    ## CMS/COM Energy Label
    cms_stamp = hpl.CMSStamp(self.CMSlabelStatus)
    cms_stamp.coords = [0.03,0.97]
    cms_stamp.fontsize = 18
    ax.text(cms_stamp.coords[0],cms_stamp.coords[1],cms_stamp.text,fontsize=cms_stamp.fontsize,
            ha=cms_stamp.ha,va=cms_stamp.va,transform=ax.transAxes)
    energy_stamp = hpl.EnergyStamp()
    energy_stamp.coords = [0.03,0.90]
    energy_stamp.fontsize = 18
    ax.text(energy_stamp.coords[0],energy_stamp.coords[1],energy_stamp.text,
            fontsize=energy_stamp.fontsize,ha=energy_stamp.ha, va=energy_stamp.va, transform=ax.transAxes)
    text_args = {'ha':'left','va':'top','fontsize':18,'transform':ax.transAxes}
    text = "Validation Loss = {0}; {1} K-folds".format(val_loss,len(history)) if all_histories else "Validation Loss = {0}".format(val_loss)
    ax.text(0.03,0.76,text,**text_args)
    leg = ax.legend(loc=1,numpoints=1,fontsize=12,ncol=1,columnspacing=0.3)
    leg.draw_frame(False)
    # Strip trailing zeros from x tick labels (e.g. '2.0' -> '2').
    f = lambda x,pos: str(x).rstrip('0').rstrip('.')
    ax.xaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(f))
    plt.savefig('{0}.{1}'.format(saveAs,self.image_format),
                format=self.image_format,bbox_inches='tight',dpi=200)
    plt.close()
    return
def model(self,model,name):
    """Plot the model architecture to view later

    Writes an EPS diagram of the Keras model (with layer shapes) to
    <output_dir>/<name>_model.eps.  Note the parameter 'model' shadows
    the method name.
    """
    keras_plot(model,to_file='{0}/{1}_model.eps'.format(self.output_dir,name),show_shapes=True)
    return
## THE END ##
| 39.531818 | 144 | 0.593825 | """
Created: 11 November 2016
Last Updated: 16 February 2018
Dan Marley
daniel.edison.marley@cernSPAMNOT.ch
Texas A&M University
-----
Base class for plotting deep learning
Designed for running on desktop at TAMU
with specific set of software installed
--> not guaranteed to work in CMSSW environment!
Does not use ROOT!
Instead, uses matplotlib to generate figures
"""
import os
import sys
import json
import util
from datetime import date
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import rc
rc('font', family='sans-serif')
from keras.utils.vis_utils import plot_model as keras_plot
from sklearn.metrics import roc_curve, auc
import hepPlotter.hepPlotterLabels as hpl
import hepPlotter.hepPlotterTools as hpt
from hepPlotter.hepPlotter import HepPlotter
class Target(object):
    """Class to contain information for targets used in training"""
    def __init__(self,name=""):
        self.name = name # Name of this target, e.g., 'signal'
        self.df = None # dataframe of this target's features
        self.color = 'k'         # matplotlib line/marker color
        self.label = ''          # display label used in legends
        self.target_value = -999 # numeric value in the dataframe's 'target' column
        self.binning = 1         # histogram binning for this target
class DeepLearningPlotter(object):
"""Plotting utilities for deep learning"""
def __init__(self):
    """Give default values to member variables"""
    self.date = date.today().strftime('%d%b%Y')           # e.g. '16Feb2018'; appended to output filenames
    self.betterColors = hpt.betterColors()['linecolors']  # color palette cycled over targets
    self.sample_labels = hpl.sample_labels()              # name -> display label for samples
    self.variable_labels = hpl.variable_labels()          # feature -> (label, binning)
    self.msg_svc = util.VERBOSE()                         # logging helper
    self.filename = ""                                    # key into sample_labels for the process label
    self.output_dir = ''                                  # directory for all saved figures/CSVs
    self.image_format = 'png'
    self.process_label = '' # if a single process is used for all training, set this
    self.classification = False # 'binary','multi',False
    self.regression = False # True or False
    self.df = None                                        # dataframe set by initialize()
    self.targets = []                                     # list of Target objects set by initialize()
    self.CMSlabelStatus = "Internal"                      # CMS plot label status text
def initialize(self,dataframe,target_names=[],target_values=[]):
"""
Set parameters of class to make plots
@param dataframe The dataframe that contains physics information for training/testing
"""
self.df = dataframe
try:
self.processlabel = self.sample_labels[self.filename].label # process used in each plot
except KeyError:
self.processlabel = ''
if self.classification:
for i,(n,v) in enumerate(zip(target_names,target_values)):
tmp = Target(n)
tmp.df = self.df.loc[self.df['target']==v]
tmp.target_value = v
tmp.label = self.sample_labels[n].label
tmp.color = self.betterColors[i]
self.targets.append(tmp)
else: # regression
try:
tmp = Target(target_names[0])
tmp.df = self.df.loc[self.df['target']==target_values[0]]
tmp.target_value = target_values[0]
except TypeError:
tmp = Target(target_names)
tmp.df = self.df.loc[self.df['target']==target_values]
tmp.target_value = target_values
tmp.label = self.sample_labels[tmp.name].label
tmp.color = self.betterColors[i]
self.targets.append(tmp)
return
    def features(self):
        """
        Plot the features
        For classification, compare different targets
        For regression, just plot the features <- should do data/mc plots instead!
        """
        self.msg_svc.INFO("DL : Plotting features.")

        # NOTE(review): assumes exactly (at least) two targets were registered
        # by initialize() -- hard-coded binary comparison.
        target0 = self.targets[0] # hard-coded for binary comparisons
        target1 = self.targets[1]

        plt_features = self.df.keys()
        for hi,feature in enumerate(plt_features):

            if feature=='target': continue  # skip the label column itself

            binning = self.variable_labels[feature].binning

            # One normalized, unstacked histogram per feature, with a
            # target0/target1 ratio panel underneath.
            hist = HepPlotter("histogram",1)

            hist.normed  = True
            hist.stacked = False
            hist.logplot = {"y":False,"x":False,"data":False}
            hist.binning = binning
            hist.x_label = self.variable_labels[feature].label
            hist.y_label = "Events"
            hist.format  = self.image_format
            hist.saveAs  = self.output_dir+"/hist_"+feature+"_"+self.date
            hist.ratio_plot  = True
            hist.ratio_type  = 'ratio'
            hist.y_ratio_label = '{0}/{1}'.format(target0.label,target1.label)
            hist.CMSlabel = 'top left'
            hist.CMSlabelStatus = self.CMSlabelStatus
            hist.numLegendColumns = 1

            # Add some extra text to the plot
            if self.processlabel: hist.extra_text.Add(self.processlabel,coords=[0.03,0.80]) # physics process that produces these features

            hist.initialize()

            # target0 drawn as outline (ratio numerator), target1 filled (denominator)
            hist.Add(target0.df[feature], name=target0.name, draw='step',
                     linecolor=target0.color, label=target0.label,
                     ratio_num=True,ratio_den=False,ratio_partner=target1.name)
            hist.Add(target1.df[feature], name=target1.name, draw='step',
                     linecolor=target1.color, label=target1.label,
                     ratio_num=False,ratio_den=True,ratio_partner=target0.name)

            if self.classification=='binary':
                # quantify how well this feature separates the two classes
                t0,_ = np.histogram(target0.df[feature],bins=binning,normed=True)
                t1,_ = np.histogram(target1.df[feature],bins=binning,normed=True)
                separation = util.getSeparation(t0,t1)
                hist.extra_text.Add("Separation = {0:.4f}".format(separation),coords=[0.03,0.73])

            p = hist.execute()
            hist.savefig()

        return
    def feature_correlations(self):
        """Plot correlations between features of the NN"""
        ## Correlation Matrices of Features (top/antitop) ##
        fontProperties = {'family':'sans-serif'}
        # diverging colormap centered at zero correlation
        opts = {'cmap': plt.get_cmap("bwr"), 'vmin': -1, 'vmax': +1}

        # one correlation matrix (plot + CSV) per target
        for c,target in enumerate(self.targets):

            saveAs = "{0}/correlations_{1}_{2}".format(self.output_dir,target.name,self.date)

            # drop the label column; correlate only the features
            allkeys = target.df.keys()
            keys = []
            for key in allkeys:
                if key!='target': keys.append(key)

            t_ = target.df[keys]
            corrmat = t_.corr()

            # Save correlation matrix to CSV file
            corrmat.to_csv("{0}.csv".format(saveAs))

            # Use matplotlib directly
            fig,ax = plt.subplots()

            heatmap1 = ax.pcolor(corrmat, **opts)
            cbar = plt.colorbar(heatmap1, ax=ax)

            # strip TeX '$' wrappers from the colorbar tick labels
            cbar.ax.set_yticklabels( [i.get_text().strip('$') for i in cbar.ax.get_yticklabels()], **fontProperties )

            labels = corrmat.columns.values
            labels = [i.replace('_','\_') for i in labels]  # escape underscores for TeX

            # shift location of ticks to center of the bins
            ax.set_xticks(np.arange(len(labels))+0.5, minor=False)
            ax.set_yticks(np.arange(len(labels))+0.5, minor=False)

            ax.set_xticklabels(labels, fontProperties, fontsize=18, minor=False, ha='right', rotation=70)
            ax.set_yticklabels(labels, fontProperties, fontsize=18, minor=False)

            ## CMS/COM Energy Label + Signal name
            cms_stamp = hpl.CMSStamp(self.CMSlabelStatus)
            cms_stamp.coords = [0.02,1.00]
            cms_stamp.fontsize = 16
            cms_stamp.va = 'bottom'
            ax.text(0.02,1.00,cms_stamp.text,fontsize=cms_stamp.fontsize,
                    ha=cms_stamp.ha,va=cms_stamp.va,transform=ax.transAxes)

            energy_stamp = hpl.EnergyStamp()
            energy_stamp.ha = 'right'
            energy_stamp.coords = [0.99,1.00]
            energy_stamp.fontsize = 16
            energy_stamp.va = 'bottom'
            ax.text(energy_stamp.coords[0],energy_stamp.coords[1],energy_stamp.text,
                    fontsize=energy_stamp.fontsize,ha=energy_stamp.ha, va=energy_stamp.va, transform=ax.transAxes)

            ax.text(0.03,0.93,target.label,fontsize=16,ha='left',va='bottom',transform=ax.transAxes)

            plt.savefig("{0}.{1}".format(saveAs,self.image_format),
                        format=self.image_format,dpi=300,bbox_inches='tight')
            plt.close()

        return
    def prediction(self,train_data={},test_data={}):
        """Plot the training and testing predictions"""
        # NOTE(review): train_data/test_data are mutable default arguments and
        # are re-bound to plain lists inside the loop below, shadowing the
        # parameters. The zip() is evaluated once up front so iteration is
        # unaffected, but the reuse of names is confusing -- worth renaming.
        self.msg_svc.INFO("DL : Plotting DNN prediction. ")

        # Plot all k-fold cross-validation results
        for i,(train,trainY,test,testY) in enumerate(zip(train_data['X'],train_data['Y'],test_data['X'],test_data['Y'])):
            hist = HepPlotter("histogram",1)

            hist.ratio_plot = True
            hist.ratio_type = "ratio"
            hist.y_ratio_label = "Test/Train"
            hist.label_size = 14
            hist.normed = True # compare shape differences (likely don't have the same event yield)
            hist.format = self.image_format
            hist.saveAs = "{0}/hist_DNN_prediction_kfold{1}_{2}".format(self.output_dir,i,self.date)
            hist.binning = [bb/10. for bb in range(11)]  # 10 uniform bins in [0,1]
            hist.stacked = False
            hist.logplot = {"y":False,"x":False,"data":False}
            hist.x_label = "Prediction"
            hist.y_label = "Arb. Units"
            hist.CMSlabel = 'top left'
            hist.CMSlabelStatus = self.CMSlabelStatus
            hist.numLegendColumns = 1

            if self.processlabel: hist.extra_text.Add(self.processlabel,coords=[0.03,0.80],fontsize=14)

            hist.initialize()

            # re-bound here: per-fold histogram contents (see NOTE above)
            test_data = []
            train_data = []
            json_data = {}
            for t,target in enumerate(self.targets):

                ## Training: outline (ratio denominator vs. its own test curve)
                target_value = target.target_value
                hist.Add(train[ trainY==target_value ],
                         name=target.name+'_train', linecolor=target.color,
                         linewidth=2, draw='step', label=target.label+" Train",
                         ratio_den=True,ratio_num=False,ratio_partner=target.name+'_test')
                ## Testing: translucent filled (ratio numerator)
                hist.Add(test[ testY==target_value ],
                         name=target.name+'_test', linecolor=target.color, color=target.color,
                         linewidth=0, draw='stepfilled', label=target.label+" Test", alpha=0.5,
                         ratio_den=False,ratio_num=True,ratio_partner=target.name+'_train')

                ## Save data to JSON file
                json_data[target.name+"_train"] = {}
                json_data[target.name+"_test"] = {}

                d_tr,b_tr = np.histogram(train[trainY==target_value],bins=hist.binning)
                d_te,b_te = np.histogram(test[testY==target_value], bins=hist.binning)

                json_data[target.name+"_train"]["binning"] = b_tr.tolist()
                json_data[target.name+"_train"]["content"] = d_tr.tolist()
                json_data[target.name+"_test"]["binning"] = b_te.tolist()
                json_data[target.name+"_test"]["content"] = d_te.tolist()

                test_data.append(d_te.tolist())
                train_data.append(d_tr.tolist())

            # NOTE(review): assumes exactly two targets (indices 0 and 1)
            separation = util.getSeparation(test_data[0],test_data[1])
            hist.extra_text.Add("Test Separation = {0:.4f}".format(separation),coords=[0.03,0.72])

            p = hist.execute()
            hist.savefig()

            # save results to JSON file (just histogram values & bins) to re-make plots
            with open("{0}.json".format(hist.saveAs), 'w') as outfile:
                json.dump(json_data, outfile)

        return
    def ROC(self,fprs=[],tprs=[],accuracy={}):
        """Plot the ROC curve & save to text file"""
        self.msg_svc.INFO("DL : Plotting ROC curve.")
        saveAs = "{0}/roc_curve_{1}".format(self.output_dir,self.date)

        ## Use matplotlib directly
        fig,ax = plt.subplots()

        # Draw all of the ROC curves from the K-fold cross-validation
        ax.plot([0, 1], [0, 1], ls='--',label='No Discrimination',lw=2,c='gray')
        ax.axhline(y=1,lw=1,c='lightgray',ls='--')
        for ft,(fpr,tpr) in enumerate(zip(fprs,tprs)):
            roc_auc = auc(fpr,tpr)
            ax.plot(fpr,tpr,label='K-fold {0} (AUC = {1:.2f})'.format(ft,roc_auc),lw=2)

            # save ROC curve to CSV file (to plot later)
            outfile_name = "{0}_{1}.csv".format(saveAs,ft)
            csv = [ "{0},{1}".format(fp,tp) for fp,tp in zip(fpr,tpr) ]
            util.to_csv(outfile_name,csv)

        # y-axis extends to 1.5 to leave room for the legend above the curves
        ax.set_xlim([0.0, 1.0])
        ax.set_ylim([0.0, 1.5])
        ax.set_xlabel(r'$\epsilon$(anti-top)',fontsize=22,ha='right',va='top',position=(1,0))
        ax.set_xticklabels(["{0:.1f}".format(i) for i in ax.get_xticks()],fontsize=22)
        ax.set_ylabel(r'$\epsilon$(top)',fontsize=22,ha='right',va='bottom',position=(0,1))
        ax.set_yticklabels(['']+["{0:.1f}".format(i) for i in ax.get_yticks()[1:-1]]+[''],fontsize=22)

        ## CMS/COM Energy Label
        cms_stamp = hpl.CMSStamp(self.CMSlabelStatus)
        cms_stamp.coords = [0.03,0.97]
        cms_stamp.fontsize = 16
        ax.text(cms_stamp.coords[0],cms_stamp.coords[1],cms_stamp.text,fontsize=cms_stamp.fontsize,
                ha=cms_stamp.ha,va=cms_stamp.va,transform=ax.transAxes)

        energy_stamp = hpl.EnergyStamp()
        energy_stamp.coords = [0.03,0.90]
        energy_stamp.fontsize = 16
        ax.text(energy_stamp.coords[0],energy_stamp.coords[1],energy_stamp.text,
                fontsize=energy_stamp.fontsize,ha=energy_stamp.ha, va=energy_stamp.va, transform=ax.transAxes)

        text_args = {'ha':'left','va':'top','fontsize':18,'transform':ax.transAxes}
        if self.processlabel: ax.text(0.03,0.82,self.processlabel,**text_args)
        if accuracy: ax.text(0.03,0.75,r"Accuracy = {0:.2f}$\pm${1:.2f}".format(accuracy['mean'],accuracy['std']),**text_args)

        leg = ax.legend(loc=4,numpoints=1,fontsize=12,ncol=1,columnspacing=0.3)
        leg.draw_frame(False)

        plt.savefig('{0}.{1}'.format(saveAs,self.image_format),
                    format=self.image_format,bbox_inches='tight',dpi=300)
        plt.close()

        return
def plot_loss_history(self,history,ax=None,index=-1):
"""Draw history of model"""
loss = history.history['loss']
x = range(1,len(loss)+1)
label = 'Loss {0}'.format(index) if index>=0 else 'Loss'
ax.plot(x,loss,label=label)
csv = [ "{0},{1}".format(i,j) for i,j in zip(x,loss) ]
return csv
    def loss_history(self,history,kfold=0,val_loss=0.0):
        """Plot loss as a function of epoch for model"""
        self.msg_svc.INFO("DL : Plotting loss as a function of epoch number.")
        saveAs = "{0}/loss_epochs_{1}".format(self.output_dir,self.date)

        # a list of histories means one curve per k-fold
        all_histories = type(history)==list

        # draw the loss curve
        fig,ax = plt.subplots()

        # also save the data to a CSV file
        if all_histories:
            for i,h in enumerate(history):
                csv = self.plot_loss_history(h,ax=ax,index=i)
                filename = "{0}_{1}.csv".format(saveAs,i)
                util.to_csv(filename,csv)
        else:
            csv = self.plot_loss_history(history,ax=ax)
            filename = "{0}.csv".format(saveAs)
            util.to_csv(filename,csv)

        ax.set_xlabel('Epoch',fontsize=22,ha='right',va='top',position=(1,0))
        ax.set_xticklabels(["{0:.1f}".format(i) for i in ax.get_xticks()],fontsize=22)
        ax.set_ylabel('Loss',fontsize=22,ha='right',va='bottom',position=(0,1))
        ax.set_yticklabels(['']+["{0:.1f}".format(i) for i in ax.get_yticks()[1:-1]]+[''],fontsize=22)

        ## CMS/COM Energy Label
        cms_stamp = hpl.CMSStamp(self.CMSlabelStatus)
        cms_stamp.coords = [0.03,0.97]
        cms_stamp.fontsize = 18
        ax.text(cms_stamp.coords[0],cms_stamp.coords[1],cms_stamp.text,fontsize=cms_stamp.fontsize,
                ha=cms_stamp.ha,va=cms_stamp.va,transform=ax.transAxes)

        energy_stamp = hpl.EnergyStamp()
        energy_stamp.coords = [0.03,0.90]
        energy_stamp.fontsize = 18
        ax.text(energy_stamp.coords[0],energy_stamp.coords[1],energy_stamp.text,
                fontsize=energy_stamp.fontsize,ha=energy_stamp.ha, va=energy_stamp.va, transform=ax.transAxes)

        text_args = {'ha':'left','va':'top','fontsize':18,'transform':ax.transAxes}
        text = "Validation Loss = {0}; {1} K-folds".format(val_loss,len(history)) if all_histories else "Validation Loss = {0}".format(val_loss)
        ax.text(0.03,0.76,text,**text_args)

        leg = ax.legend(loc=1,numpoints=1,fontsize=12,ncol=1,columnspacing=0.3)
        leg.draw_frame(False)

        # strip trailing zeros / dots from x tick labels (integer epochs)
        f = lambda x,pos: str(x).rstrip('0').rstrip('.')
        ax.xaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(f))

        plt.savefig('{0}.{1}'.format(saveAs,self.image_format),
                    format=self.image_format,bbox_inches='tight',dpi=200)
        plt.close()

        return
    def model(self,model,name):
        """Plot the model architecture to view later"""
        # Delegates to keras.utils.vis_utils.plot_model; writes
        # "<output_dir>/<name>_model.eps" with layer shapes displayed.
        keras_plot(model,to_file='{0}/{1}_model.eps'.format(self.output_dir,name),show_shapes=True)
        return
## THE END ##
| 249 | 0 | 26 |
834408ee97e14fc2967196b73b32178bcfe126ec | 5,078 | py | Python | parametric_problems/lasso.py | BerkeleyAutomation/rlqp_benchmarks | 5c79e870c4bd697383f66f5dff26aea29dc1ebfa | [
"Apache-2.0"
] | 49 | 2017-11-18T11:16:44.000Z | 2021-05-05T12:48:33.000Z | parametric_problems/lasso.py | leiyubiao/osqp_benchmarks | 5c79e870c4bd697383f66f5dff26aea29dc1ebfa | [
"Apache-2.0"
] | 5 | 2017-11-18T20:10:25.000Z | 2020-09-27T09:06:58.000Z | parametric_problems/lasso.py | leiyubiao/osqp_benchmarks | 5c79e870c4bd697383f66f5dff26aea29dc1ebfa | [
"Apache-2.0"
] | 19 | 2017-11-18T20:13:31.000Z | 2021-05-06T01:27:31.000Z | """
Solve Lasso problem as parametric QP by updating iteratively lambda
"""
import numpy as np
import pandas as pd
import os
from solvers.solvers import SOLVER_MAP # AVOID CIRCULAR DEPENDENCY
from problem_classes.lasso import LassoExample
from utils.general import make_sure_path_exists
# import osqppurepy as osqp
import osqp
| 32.343949 | 83 | 0.521268 | """
Solve Lasso problem as parametric QP by updating iteratively lambda
"""
import numpy as np
import pandas as pd
import os
from solvers.solvers import SOLVER_MAP # AVOID CIRCULAR DEPENDENCY
from problem_classes.lasso import LassoExample
from utils.general import make_sure_path_exists
# import osqppurepy as osqp
import osqp
class LassoParametric(object):
    def __init__(self,
                 osqp_settings,
                 dimension,
                 minimum_lambda_over_max=0.01,
                 n_problems=100):
        """
        Generate Parametric Lasso object

        Args:
            osqp_settings: osqp solver settings (dict passed to OSQP.setup)
            dimension: leading dimension for the problem
            minimum_lambda_over_max: min ratio between lambda and lambda_max
            n_problems: number of lasso problems to solve
        """
        self.osqp_settings = osqp_settings
        self.dimension = dimension
        self.minimum_lambda_over_max = minimum_lambda_over_max
        self.n_problems = n_problems
    def solve(self):
        """
        Solve Lasso problem

        Solves the same Lasso QP for a decreasing sequence of lambda values,
        once from scratch per lambda ("no warmstart") and once reusing a
        single OSQP workspace ("warmstart"). Per-lambda status/run_time/iter
        results are written as CSV files; runs are skipped if the CSV for
        this dimension already exists.
        """
        print("Solve Lasso problem for dimension %i" % self.dimension)

        # Create example instance
        instance = LassoExample(self.dimension)
        qp = instance.qp_problem

        # Create lambda array: log-spaced from lambda_max down to
        # minimum_lambda_over_max * lambda_max
        lambda_array = np.logspace(np.log10(self.minimum_lambda_over_max *
                                            instance.lambda_max),
                                   np.log10(instance.lambda_max),
                                   self.n_problems)[::-1]   # From max to min

        '''
        Solve problem without warm start
        '''
        #  print("Solving without warm start")

        # Solution directory
        no_ws_path = os.path.join('.', 'results', 'parametric_problems',
                                  'OSQP no warmstart',
                                  'Lasso',
                                  )

        # Create directory for the results
        make_sure_path_exists(no_ws_path)

        # Check if solution already exists
        n_file_name = os.path.join(no_ws_path, 'n%i.csv' % self.dimension)

        if not os.path.isfile(n_file_name):
            res_list_no_ws = []  # Initialize results
            for lambda_val in lambda_array:
                # Update lambda
                instance.update_lambda(lambda_val)

                # Solve problem: a fresh OSQP workspace per lambda (no reuse)
                m = osqp.OSQP()
                m.setup(qp['P'], qp['q'], qp['A'], qp['l'], qp['u'],
                        **self.osqp_settings)
                r = m.solve()

                # DEBUG
                #  print("Lambda = %.4e,\t niter = %d" % (lambda_val, r.info.iter))

                if r.info.status != "solved":
                    print("OSQP no warmstart did not solve the problem")

                solution_dict = {'status': [r.info.status],
                                 'run_time': [r.info.run_time],
                                 'iter': [r.info.iter]}

                res_list_no_ws.append(pd.DataFrame(solution_dict))

            # Get full warm-start
            res_no_ws = pd.concat(res_list_no_ws)

            # Store file
            res_no_ws.to_csv(n_file_name, index=False)

        '''
        Solve problem with warm start
        '''
        #  print("Solving with warm start")

        # Solution directory
        ws_path = os.path.join('.', 'results', 'parametric_problems',
                               'OSQP warmstart',
                               'Lasso',
                               )

        # Create directory for the results
        make_sure_path_exists(ws_path)

        # Check if solution already exists
        n_file_name = os.path.join(ws_path, 'n%i.csv' % self.dimension)

        # Reset problem to first instance
        instance.update_lambda(lambda_array[0])

        # Setup solver once; only the linear cost q changes per lambda
        qp = instance.qp_problem
        m = osqp.OSQP()
        m.setup(qp['P'], qp['q'], qp['A'], qp['l'], qp['u'],
                **self.osqp_settings)

        if not os.path.isfile(n_file_name):
            res_list_ws = []  # Initialize results
            for lambda_val in lambda_array:
                # Update lambda
                instance.update_lambda(lambda_val)
                m.update(q=qp['q'])

                # Solve problem
                r = m.solve()

                # DEBUG
                #  print("Lambda = %.4e,\t niter = %d" % (lambda_val, r.info.iter))

                if r.info.status != "solved":
                    print("OSQP warmstart did not solve the problem")

                # Get results
                solution_dict = {'status': [r.info.status],
                                 'run_time': [r.info.run_time],
                                 'iter': [r.info.iter]}

                res_list_ws.append(pd.DataFrame(solution_dict))

            # Get full warm-start
            res_ws = pd.concat(res_list_ws)

            # Store file
            res_ws.to_csv(n_file_name, index=False)
        else:
            res_ws = pd.read_csv(n_file_name)
| 0 | 4,726 | 23 |
e1bd1d8d7ce5622633bf24fc68ec450788976faa | 74,865 | py | Python | src/datalad_installer.py | datalad/datalad-installer | 93a4c7a032aef42af59fc889e61d9e4c78f0f1bb | [
"MIT"
] | 2 | 2021-07-06T11:51:44.000Z | 2022-03-01T08:03:01.000Z | src/datalad_installer.py | datalad/datalad-installer | 93a4c7a032aef42af59fc889e61d9e4c78f0f1bb | [
"MIT"
] | 88 | 2020-12-15T16:12:58.000Z | 2022-03-25T20:48:31.000Z | src/datalad_installer.py | datalad/datalad-installer | 93a4c7a032aef42af59fc889e61d9e4c78f0f1bb | [
"MIT"
] | 2 | 2020-12-24T03:03:29.000Z | 2022-01-06T01:28:36.000Z | #!/usr/bin/env python3
"""
Installation script for Datalad and related components
``datalad-installer`` is a script for installing Datalad_, git-annex_, and
related components all in a single invocation. It requires no third-party
Python libraries, though it does make heavy use of external packaging commands.
.. _Datalad: https://www.datalad.org
.. _git-annex: https://git-annex.branchable.com
Visit <https://github.com/datalad/datalad-installer> for more information.
"""
__version__ = "0.5.4"
__author__ = "The DataLad Team and Contributors"
__author_email__ = "team@datalad.org"
__license__ = "MIT"
__url__ = "https://github.com/datalad/datalad-installer"
from abc import ABC, abstractmethod
from contextlib import contextmanager
import ctypes
from enum import Enum
from functools import total_ordering
from getopt import GetoptError, getopt
import json
import logging
import os
import os.path
from pathlib import Path
import platform
from random import randrange
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
import textwrap
from time import sleep
from typing import (
Any,
Callable,
ClassVar,
Dict,
Iterator,
List,
NamedTuple,
Optional,
Tuple,
Type,
Union,
)
from urllib.request import Request, urlopen
from zipfile import ZipFile
log = logging.getLogger("datalad_installer")
SYSTEM = platform.system()
ON_LINUX = SYSTEM == "Linux"
ON_MACOS = SYSTEM == "Darwin"
ON_WINDOWS = SYSTEM == "Windows"
ON_POSIX = ON_LINUX or ON_MACOS
def parse_log_level(level: str) -> int:
    """
    Convert a log level name (case-insensitive) or number to its numeric value

    :param level: either a decimal string (returned as an int) or one of the
        standard level names CRITICAL/ERROR/WARNING/INFO/DEBUG, in any case
    :raises UsageError: if the value is neither numeric nor a known level name
    """
    try:
        return int(level)
    except ValueError:
        pass
    name = level.upper()
    if name not in {"CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG"}:
        raise UsageError(f"Invalid log level: {level!r}")
    value = getattr(logging, name)
    assert isinstance(value, int)
    return value
class Immediate:
    """
    Superclass for constructs returned by the argument-parsing code
    representing options that are handled "immediately" (i.e., --version and
    --help)
    """

    # marker base class; carries no behavior of its own
    pass
class VersionRequest(Immediate):
    """`Immediate` representing a ``--version`` option"""
class HelpRequest(Immediate):
    """`Immediate` representing a ``--help`` option"""
SHORT_RGX = re.compile(r"-[^-]")
LONG_RGX = re.compile(r"--[^-].*")
OPTION_COLUMN_WIDTH = 30
OPTION_HELP_COLUMN_WIDTH = 40
HELP_GUTTER = 2
HELP_INDENT = 2
HELP_WIDTH = 75
class UsageError(Exception):
    """
    Raised when an error occurs while processing command-line options

    BUGFIX: the original applied ``@functools.total_ordering`` to this class;
    total_ordering requires ``__eq__`` plus at least one ordering method and
    raises ``ValueError`` at class-creation time otherwise, so the decorator
    is removed.

    :param message: human-readable description of the problem
    :param component: name of the component whose options were being parsed,
        or `None` for a global-option error (callers read ``e.component``,
        e.g. ``DataladInstaller.main()``)
    """

    def __init__(self, message: str, component: "Optional[str]" = None) -> None:
        super().__init__(message)
        # component name for contextual help output (may be None)
        self.component = component
class ParsedArgs(NamedTuple):
    """
    A pair of global options and `ComponentRequest`\\s parsed from command-line
    arguments
    """

    # global option name -> parsed value
    global_opts: Dict[str, Any]
    # one request per component named on the command line, in order
    components: List["ComponentRequest"]
class ComponentRequest:
    """
    A request for a component parsed from command-line arguments

    Stores the component ``name`` plus arbitrary per-component options in
    ``kwargs``; constructed as ``ComponentRequest(name=..., **kwargs)`` and
    consumed via ``cr.name`` / ``cr.kwargs`` (see ``DataladInstaller.main()``).

    BUGFIX: the class body was missing a constructor, so the call sites in
    this file (``ComponentRequest(name=cr.name, **kwargs)``) would fail;
    the constructor and value semantics are restored here.
    """

    def __init__(self, name: str, **kwargs: Any) -> None:
        # component name, e.g. "datalad"
        self.name = name
        # parsed per-component options
        self.kwargs = kwargs

    def __eq__(self, other: Any) -> bool:
        if isinstance(other, ComponentRequest):
            return self.name == other.name and self.kwargs == other.kwargs
        return NotImplemented

    def __repr__(self) -> str:
        attrs = [f"name={self.name!r}"]
        attrs.extend(f"{k}={v!r}" for k, v in self.kwargs.items())
        return "{}({})".format(type(self).__name__, ", ".join(attrs))
class CondaInstance(NamedTuple):
    """A Conda installation or environment"""

    #: The root of the Conda installation
    basepath: Path

    #: The name of the environment (`None` for the base environment)
    name: Optional[str]

    @property
    def conda_exe(self) -> Path:
        """The path to the Conda executable"""
        # Windows keeps executables under "Scripts\"; POSIX layouts use "bin/"
        if ON_WINDOWS:
            return self.basepath / "Scripts" / "conda.exe"
        else:
            return self.basepath / "bin" / "conda"

    @property
    def bindir(self) -> Path:
        """
        The directory in which command-line programs provided by packages are
        installed
        """
        dirname = "Scripts" if ON_WINDOWS else "bin"
        if self.name is None:
            # base environment: programs live directly under the install root
            return self.basepath / dirname
        else:
            # named environment: programs live under envs/<name>/
            return self.basepath / "envs" / self.name / dirname
#: A list of command names and the paths at which they are located
CommandList = List[Tuple[str, Path]]
class DataladInstaller:
"""The script's primary class, a manager & runner of components"""
COMPONENTS: ClassVar[Dict[str, Type["Component"]]] = {}
OPTION_PARSER = OptionParser(
help="Installation script for Datalad and related components",
options=[
Option(
"-V",
"--version",
is_flag=True,
immediate=VersionRequest(),
help="Show program version and exit",
),
Option(
"-l",
"--log-level",
converter=parse_log_level,
metavar="LEVEL",
help="Set logging level [default: INFO]",
),
Option(
"-E",
"--env-write-file",
converter=Path,
multiple=True,
help=(
"Append PATH modifications and other shell commands to the"
" given file; can be given multiple times"
),
),
Option(
"--sudo",
choices=[v.value for v in SudoConfirm],
converter=SudoConfirm,
help="How to handle sudo commands [default: ask]",
),
],
)
@classmethod
def register_component(
cls, name: str
) -> Callable[[Type["Component"]], Type["Component"]]:
"""A decorator for registering concrete `Component` subclasses"""
return decorator
    def ensure_env_write_file(self) -> None:
        """If there are no env write files registered, add one"""
        if not self.env_write_files:
            # create a persistent temp file; close the low-level fd right away
            # because later writes reopen the file by path (see addenv())
            fd, fpath = tempfile.mkstemp(prefix="dl-env-", suffix=".sh")
            os.close(fd)
            log.info("Writing environment modifications to %s", fpath)
            self.env_write_files.append(Path(fpath))
    @classmethod
    def parse_args(cls, args: List[str]) -> Union[Immediate, ParsedArgs]:
        """
        Parse all command-line arguments.

        Global options are parsed first; the remaining words are consumed as
        ``component[=version]`` names, each followed by that component's own
        options. Returns an `Immediate` as soon as --version/--help is seen.

        :param List[str] args: command-line arguments without ``sys.argv[0]``
        :raises UsageError: on an unknown component, empty name/version, or a
            version given for an unversioned component
        """
        r = cls.OPTION_PARSER.parse_args(args)
        if isinstance(r, Immediate):
            return r
        global_opts, leftovers = r
        components: List[ComponentRequest] = []
        while leftovers:
            c = leftovers.pop(0)
            name, eq, version = c.partition("=")
            if not name:
                raise UsageError("Component name must be nonempty")
            try:
                component = cls.COMPONENTS[name]
            except KeyError:
                raise UsageError(f"Unknown component: {name!r}")
            cparser = component.OPTION_PARSER
            if version and not cparser.versioned:
                raise UsageError(f"{name} component does not take a version", name)
            if eq and not version:
                raise UsageError("Version must be nonempty", name)
            # let the component's own parser consume its options from leftovers
            cr = cparser.parse_args(leftovers)
            if isinstance(cr, Immediate):
                return cr
            kwargs, leftovers = cr
            if version:
                kwargs["version"] = version
            components.append(ComponentRequest(name=name, **kwargs))
        return ParsedArgs(global_opts, components)
    def main(self, argv: Optional[List[str]] = None) -> int:
        """
        Parse command-line arguments and perform the requested actions.
        Returns 0 if everything was OK, nonzero otherwise.

        :param List[str] argv: command-line arguments, including
            ``sys.argv[0]`` (defaults to ``sys.argv``)
        """
        if argv is None:
            argv = sys.argv
        progname, *args = argv
        if not progname:
            progname = "datalad-installer"
        else:
            progname = Path(progname).name
        try:
            r = self.parse_args(args)
        except UsageError as e:
            # show component-specific (or global) short help plus the error
            print(self.short_help(progname, e.component), file=sys.stderr)
            print(file=sys.stderr)
            print(str(e), file=sys.stderr)
            return 2
        if isinstance(r, VersionRequest):
            print("datalad-installer", __version__)
            return 0
        elif isinstance(r, HelpRequest):
            print(self.long_help(progname, r.component))
            return 0
        else:
            assert isinstance(r, ParsedArgs)
            global_opts, components = r
        if not components:
            # default action when no component is named
            components = [ComponentRequest("datalad")]
        logging.basicConfig(
            format="%(asctime)s [%(levelname)-8s] %(name)s %(message)s",
            datefmt="%Y-%m-%dT%H:%M:%S%z",
            level=global_opts.pop("log_level", logging.INFO),
        )
        if global_opts.get("env_write_file"):
            self.env_write_files.extend(global_opts["env_write_file"])
        self.ensure_env_write_file()
        if global_opts.get("sudo"):
            self.sudo_confirm = global_opts["sudo"]
        # provision every requested component in order
        for cr in components:
            self.addcomponent(name=cr.name, **cr.kwargs)
        # sanity-check each newly installed command: exists, executable,
        # and `<cmd> --help` exits successfully
        ok = True
        for name, path in self.new_commands:
            log.info("%s is now installed at %s", name, path)
            if not os.path.exists(path):
                log.error("%s does not exist!", path)
                ok = False
            elif not ON_WINDOWS and not os.access(path, os.X_OK):
                log.error("%s is not executable!", path)
                ok = False
            else:
                try:
                    sr = subprocess.run(
                        [str(path), "--help"], stdout=subprocess.DEVNULL
                    )
                except Exception as e:
                    log.error("Failed to run `%s --help`: %s", path, e)
                    ok = False
                else:
                    if sr.returncode != 0:
                        log.error("`%s --help` command failed!", path)
                        ok = False
        return 0 if ok else 1
    def addenv(self, line: str) -> None:
        """Write a line to the env write files"""
        log.debug("Adding line %r to env_write_files", line)
        # append the same line to every registered env-write file
        for p in self.env_write_files:
            with p.open("a") as fp:
                print(line, file=fp)
    def addpath(self, p: Union[str, os.PathLike], last: bool = False) -> None:
        """
        Add a line to the env write files that prepends (or appends, if
        ``last`` is true) a given path to ``PATH``
        """
        # resolve to an absolute path so the exported PATH entry is stable
        path = Path(p).resolve()
        if not last:
            line = f'export PATH={shlex.quote(str(path))}:"$PATH"'
        else:
            line = f'export PATH="$PATH":{shlex.quote(str(path))}'
        self.addenv(line)
def addcomponent(self, name: str, **kwargs: Any) -> None:
"""Provision the given component"""
try:
component = self.COMPONENTS[name]
except AttributeError:
raise ValueError(f"Unknown component: {name}")
component(self).provide(**kwargs)
    def get_conda(self) -> CondaInstance:
        """
        Return the most-recently created Conda installation or environment. If
        there is no such instance, return an instance for an
        externally-installed Conda installation, raising an error if none is
        found.

        :raises RuntimeError: if no Conda was created by this run and no
            ``conda`` executable is found on PATH
        """
        if self.conda_stack:
            return self.conda_stack[-1]
        else:
            conda_path = shutil.which("conda")
            if conda_path is not None:
                # ask conda itself for its install root
                basepath = Path(readcmd(conda_path, "info", "--base").strip())
                return CondaInstance(basepath=basepath, name=None)
            else:
                raise RuntimeError("conda not installed")
@classmethod
@classmethod
class Component(ABC):
"""
An abstract base class for a component that can be specified on the command
line and provisioned
"""
OPTION_PARSER: ClassVar[OptionParser]
@abstractmethod
@DataladInstaller.register_component("venv")
class VenvComponent(Component):
"""Creates a Python virtual environment using ``python -m venv``"""
OPTION_PARSER = OptionParser(
"venv",
versioned=False,
help="Create a Python virtual environment",
options=[
Option(
"--path",
converter=Path,
metavar="PATH",
help="Create the venv at the given path",
),
Option(
"-e",
"--extra-args",
converter=shlex.split,
help="Extra arguments to pass to the venv command",
),
# For use in testing against the dev version of pip:
Option(
"--dev-pip",
is_flag=True,
help="Install the development version of pip from GitHub",
),
],
)
@DataladInstaller.register_component("miniconda")
class MinicondaComponent(Component):
"""Installs Miniconda"""
OPTION_PARSER = OptionParser(
"miniconda",
versioned=False,
help="Install Miniconda",
options=[
Option(
"--path",
converter=Path,
metavar="PATH",
help="Install Miniconda at the given path",
),
Option("--batch", is_flag=True, help="Run in batch (noninteractive) mode"),
Option(
"--spec",
converter=str.split,
help=(
"Space-separated list of package specifiers to install in"
" the Miniconda environment"
),
),
Option(
"-e",
"--extra-args",
converter=shlex.split,
help="Extra arguments to pass to the install command",
),
],
)
@DataladInstaller.register_component("conda-env")
class CondaEnvComponent(Component):
"""Creates a Conda environment"""
OPTION_PARSER = OptionParser(
"conda-env",
versioned=False,
help="Create a Conda environment",
options=[
Option(
"-n",
"--name",
"envname",
metavar="NAME",
help="Name of the environment",
),
Option(
"--spec",
converter=str.split,
help="Space-separated list of package specifiers to install in the environment",
),
Option(
"-e",
"--extra-args",
converter=shlex.split,
help="Extra arguments to pass to the `conda create` command",
),
],
)
@DataladInstaller.register_component("neurodebian")
class NeurodebianComponent(Component):
"""Installs & configures NeuroDebian"""
OPTION_PARSER = OptionParser(
"neurodebian",
versioned=False,
help="Install & configure NeuroDebian",
options=[
Option(
"-e",
"--extra-args",
converter=shlex.split,
help="Extra arguments to pass to the nd-configurerepo command",
)
],
)
KEY_FINGERPRINT = "0xA5D32F012649A5A9"
KEY_URL = "http://neuro.debian.net/_static/neuro.debian.net.asc"
DOWNLOAD_SERVER = "us-nh"
class InstallableComponent(Component):
"""
Superclass for components that install packages via installation methods
"""
NAME: ClassVar[str]
INSTALLERS: ClassVar[Dict[str, Type["Installer"]]] = {}
    @classmethod
    def register_installer(cls, installer: Type["Installer"]) -> Type["Installer"]:
        """A decorator for registering concrete `Installer` subclasses"""
        cls.INSTALLERS[installer.NAME] = installer
        # advertise the installer as a valid --method choice on this component
        methods = cls.OPTION_PARSER.options["--method"].choices
        assert methods is not None
        methods.append(installer.NAME)
        # expose the installer's own options through the component's parser
        for opt in installer.OPTIONS:
            cls.OPTION_PARSER.add_option(opt)
        return installer
    def get_installer(self, name: str) -> "Installer":
        """Retrieve & instantiate the installer with the given name

        :raises ValueError: if no installer is registered under ``name``
        """
        try:
            installer_cls = self.INSTALLERS[name]
        except KeyError:
            raise ValueError(f"Unknown installation method: {name}")
        return installer_cls(self.manager)
@DataladInstaller.register_component("git-annex")
class GitAnnexComponent(InstallableComponent):
"""Installs git-annex"""
NAME = "git-annex"
OPTION_PARSER = OptionParser(
"git-annex",
versioned=True,
help="Install git-annex",
options=[
Option(
"-m",
"--method",
choices=["auto"],
help="Select the installation method to use",
),
],
)
@DataladInstaller.register_component("datalad")
class DataladComponent(InstallableComponent):
"""Installs Datalad"""
NAME = "datalad"
OPTION_PARSER = OptionParser(
"datalad",
versioned=True,
help="Install Datalad",
options=[
Option(
"-m",
"--method",
choices=["auto"],
help="Select the installation method to use",
),
],
)
class Installer(ABC):
"""An abstract base class for installation methods for packages"""
NAME: ClassVar[str]
OPTIONS: ClassVar[List[Option]]
#: Mapping from supported installable component names to
#: (installer-specific package IDs, list of installed programs) pairs
PACKAGES: ClassVar[Dict[str, Tuple[str, List[str]]]]
    def install(self, component: str, **kwargs: Any) -> CommandList:
        """
        Installs a given component.  Raises `MethodNotSupportedError` if the
        installation method is not supported on the system or the method does
        not support installing the given component.  Returns a list of
        (command, Path) pairs for each installed program.
        """
        self.assert_supported_system()
        try:
            # PACKAGES maps component name -> (package id, provided commands)
            package, commands = self.PACKAGES[component]
        except KeyError:
            raise MethodNotSupportedError(
                f"{self.NAME} does not know how to install {component}"
            )
        bindir = self.install_package(package, **kwargs)
        bins = []
        for cmd in commands:
            p = bindir / cmd
            # Windows executables carry an .exe suffix
            if ON_WINDOWS and p.suffix == "":
                p = p.with_suffix(".exe")
            bins.append((cmd, p))
        return bins
@abstractmethod
def install_package(self, package: str, **kwargs: Any) -> Path:
"""
Installs a given package. Returns the installation directory for the
package's programs.
"""
...
@abstractmethod
def assert_supported_system(self) -> None:
"""
If the installation method is not supported by the current system,
raises `MethodNotSupportedError`; otherwise, does nothing.
"""
...
EXTRA_ARGS_OPTION = Option(
"-e",
"--extra-args",
converter=shlex.split,
help="Extra arguments to pass to the install command",
)
@GitAnnexComponent.register_installer
@DataladComponent.register_installer
class AptInstaller(Installer):
    """Installs via apt-get"""
    NAME = "apt"
    # Installer-specific command-line options
    OPTIONS = [
        Option(
            "--build-dep", is_flag=True, help="Install build-dep instead of the package"
        ),
        EXTRA_ARGS_OPTION,
    ]
    # Component name -> (apt package name, programs the package provides)
    PACKAGES = {
        "datalad": ("datalad", ["datalad"]),
        "git-annex": ("git-annex", ["git-annex"]),
    }
@DataladComponent.register_installer
@GitAnnexComponent.register_installer
class HomebrewInstaller(Installer):
    """Installs via brew (Homebrew)"""
    NAME = "brew"
    # Installer-specific command-line options
    OPTIONS = [
        EXTRA_ARGS_OPTION,
    ]
    # Component name -> (Homebrew formula name, programs the formula provides)
    PACKAGES = {
        "datalad": ("datalad", ["datalad"]),
        "git-annex": ("git-annex", ["git-annex"]),
    }
@DataladComponent.register_installer
class PipInstaller(Installer):
    """
    Installs via pip, either at the system level or into a given virtual
    environment
    """
    NAME = "pip"
    # Installer-specific command-line options
    OPTIONS = [
        Option("--devel", is_flag=True, help="Install from GitHub repository"),
        Option("-E", "--extras", metavar="EXTRAS", help="Install package extras"),
        EXTRA_ARGS_OPTION,
    ]
    # Component name -> (pip requirement name, programs provided)
    PACKAGES = {
        "datalad": ("datalad", ["datalad"]),
    }
    # URL specifiers used in place of the PACKAGES entries when --devel is
    # given
    DEVEL_PACKAGES = {
        "datalad": "git+https://github.com/datalad/datalad.git",
    }
    # BUGFIX: a stray `@property` decorator used to follow this class; it
    # would have wrapped the next registered installer class in a `property`
    # object, making it uncallable at runtime.  It has been removed.
    # NOTE(review): VenvComponent accesses `PipInstaller(...).python`, so a
    # `python` property appears to be missing from this file — confirm and
    # restore it if needed.
@GitAnnexComponent.register_installer
class NeurodebianInstaller(AptInstaller):
    """Installs via apt-get and the NeuroDebian repositories"""
    NAME = "neurodebian"
    # Replaces the mapping inherited from AptInstaller: only git-annex is
    # installed via this method, using NeuroDebian's "git-annex-standalone"
    # package.
    PACKAGES = {
        "git-annex": ("git-annex-standalone", ["git-annex"]),
    }
@GitAnnexComponent.register_installer
@DataladComponent.register_installer
class DebURLInstaller(Installer):
    """Installs a ``*.deb`` package by URL"""
    NAME = "deb-url"
    # Installer-specific command-line options
    OPTIONS = [
        Option("--url", metavar="URL", help="URL from which to download `*.deb` file"),
        Option(
            "--install-dir",
            converter=Path,
            metavar="DIR",
            help="Directory in which to unpack the `*.deb`",
        ),
        EXTRA_ARGS_OPTION,
    ]
    # Component name -> (package name, programs the package provides)
    PACKAGES = {
        "git-annex": ("git-annex", ["git-annex"]),
        "datalad": ("datalad", ["datalad"]),
    }
@GitAnnexComponent.register_installer
class AutobuildInstaller(AutobuildSnapshotInstaller):
    """Installs the latest official build of git-annex from kitenet.net"""
    # Only the installation-method name differs from the base class; all
    # other behavior is inherited from AutobuildSnapshotInstaller.
    NAME = "autobuild"
@GitAnnexComponent.register_installer
class SnapshotInstaller(AutobuildSnapshotInstaller):
    """
    Installs the latest official snapshot build of git-annex from kitenet.net
    """
    # Only the installation-method name differs from the base class; all
    # other behavior is inherited from AutobuildSnapshotInstaller.
    NAME = "snapshot"
@GitAnnexComponent.register_installer
@DataladComponent.register_installer
class CondaInstaller(Installer):
    """Installs via conda"""
    NAME = "conda"
    # Installer-specific command-line options
    OPTIONS = [
        EXTRA_ARGS_OPTION,
    ]
    # Component name -> (conda package name, programs the package provides)
    PACKAGES = {
        "datalad": ("datalad", ["datalad"]),
        "git-annex": ("git-annex", ["git-annex"]),
    }
@GitAnnexComponent.register_installer
class DataladGitAnnexBuildInstaller(Installer):
    """
    Installs git-annex via the artifact from the latest successful build of
    datalad/git-annex
    """
    NAME = "datalad/git-annex:tested"
    # Installer-specific command-line options
    OPTIONS = [
        Option(
            "--install-dir",
            converter=Path,
            metavar="DIR",
            help="Directory in which to unpack the `*.deb`",
        ),
    ]
    # Component name -> (package name, programs the package provides)
    PACKAGES = {
        "git-annex": ("git-annex", ["git-annex"]),
    }
    @staticmethod
    def download(ostype: str, target_dir: Path) -> None:
        """
        Download & unzip the artifact from the latest successful build of
        datalad/git-annex for the given OS in the given directory

        :param ostype: OS identifier interpolated into the workflow filename
            (``build-{ostype}.yaml``)
        :param target_dir: directory in which the artifact is unpacked
        """
        GitHubArtifactDownloader().download_last_successful_artifact(
            target_dir, repo="datalad/git-annex", workflow=f"build-{ostype}.yaml"
        )
@GitAnnexComponent.register_installer
class DataladGitAnnexLatestBuildInstaller(DataladGitAnnexBuildInstaller):
    """
    Installs git-annex via the artifact from the latest artifact-producing
    build (successful or unsuccessful) of datalad/git-annex
    """
    NAME = "datalad/git-annex"
    # Overrides the base class's download() to take the latest artifact
    # regardless of build outcome, rather than the latest *successful* one.
    @staticmethod
    def download(ostype: str, target_dir: Path) -> None:
        """
        Download & unzip the artifact from the latest build of
        datalad/git-annex for the given OS in the given directory
        """
        GitHubArtifactDownloader().download_latest_artifact(
            target_dir, repo="datalad/git-annex", workflow=f"build-{ostype}.yaml"
        )
@GitAnnexComponent.register_installer
class DataladPackagesBuildInstaller(Installer):
    """
    Installs git-annex via artifacts uploaded to
    <https://datasets.datalad.org/?dir=/datalad/packages>
    """
    NAME = "datalad/packages"
    # This method takes no installer-specific options.
    OPTIONS: ClassVar[List[Option]] = []
    # Component name -> (package name, programs the package provides)
    PACKAGES = {
        "git-annex": ("git-annex", ["git-annex"]),
    }
@GitAnnexComponent.register_installer
class DMGInstaller(Installer):
    """Installs a local ``*.dmg`` file"""
    NAME = "dmg"
    # Installer-specific command-line options
    OPTIONS = [
        Option(
            "--path",
            converter=Path,
            metavar="PATH",
            help="Path to local `*.dmg` to install",
        ),
    ]
    # Component name -> (package name, programs the package provides)
    PACKAGES = {
        "git-annex": ("git-annex", ["git-annex"]),
    }
class MethodNotSupportedError(Exception):
    """
    Raised when an installer's `install()` method is invoked on a system the
    method does not support, or for a component it does not know how to
    install
    """
def download_file(
    url: str, path: Union[str, os.PathLike], headers: Optional[Dict[str, str]] = None
) -> None:
    """
    Fetch ``url`` and store the response body at ``path``.  Any ``headers``
    given are included in the HTTP request.
    """
    log.info("Downloading %s", url)
    req = Request(url, headers=headers if headers is not None else {})
    with urlopen(req) as response, open(path, "wb") as outfile:
        shutil.copyfileobj(response, outfile)
def compose_pip_requirement(
    package: str,
    version: Optional[str] = None,
    urlspec: Optional[str] = None,
    extras: Optional[str] = None,
) -> str:
    """
    Compose a PEP 508 requirement specifier.

    (The reference was previously given as PEP 503, but that PEP covers the
    simple repository API; requirement specifier syntax is PEP 508.)

    :param package: the base project name
    :param version: an exact version; rendered as ``==version``, or — when
        ``urlspec`` is given — appended to the URL as ``@version`` (e.g. a
        VCS ref)
    :param urlspec: a direct-reference URL (e.g. ``git+https://...``),
        rendered as ``package @ urlspec``
    :param extras: extras to request, rendered as ``package[extras]``
    """
    req = package
    if extras is not None:
        req += f"[{extras}]"
    if urlspec is None:
        if version is not None:
            req += f"=={version}"
    else:
        req += f" @ {urlspec}"
        if version is not None:
            req += f"@{version}"
    return req
def mktempdir(prefix: str) -> Path:
    """Create & return a fresh directory under ``$TMPDIR`` named with ``prefix``"""
    tmpdir = tempfile.mkdtemp(prefix=prefix)
    return Path(tmpdir)
def runcmd(*args: Any, **kwargs: Any) -> subprocess.CompletedProcess:
    """Log & execute a command; raise `CalledProcessError` if it fails."""
    argstrs = [str(arg) for arg in args]
    log.info("Running: %s", " ".join(shlex.quote(a) for a in argstrs))
    return subprocess.run(argstrs, check=True, **kwargs)
def readcmd(*args: Any) -> str:
    """Run a command and return its captured standard output"""
    r = runcmd(*args, stdout=subprocess.PIPE, universal_newlines=True)
    out = r.stdout
    assert isinstance(out, str)
    return out
def install_git_annex_dmg(
    dmgpath: Union[str, os.PathLike], manager: DataladInstaller
) -> Path:
    """
    Mount the disk image at ``dmgpath``, copy the git-annex app bundle into
    /Applications, and register its binary directory on the manager's PATH
    """
    annex_bin = Path("/Applications/git-annex.app/Contents/MacOS")
    runcmd("hdiutil", "attach", dmgpath)
    runcmd("rsync", "-a", "/Volumes/git-annex/git-annex.app", "/Applications/")
    runcmd("hdiutil", "detach", "/Volumes/git-annex/")
    manager.addpath(annex_bin)
    return annex_bin
def parse_header_links(links_header: str) -> Dict[str, Dict[str, str]]:
    """
    Parse an HTTP "Link" header into a `dict` mapping each link's ``rel``
    (or, failing that, its URL) to a `dict` of the link's parameters::
        {"next": {"url": "...", "rel": "next"}, "last": { ... }}
    """
    # Adapted from requests.utils.parse_header_links <https://git.io/JcYZi>
    STRIP = " '\""
    links: Dict[str, Dict[str, str]] = {}
    header = links_header.strip(STRIP)
    if not header:
        return links
    for entry in re.split(r", *<", header):
        # Split "<url>; param; param" into the URL part and the parameters;
        # partition() yields an empty params string when there is no ";".
        url, _, params = entry.partition(";")
        link: Dict[str, str] = {"url": url.strip("<> '\"")}
        for param in params.split(";"):
            try:
                name, val = param.split("=")
            except ValueError:
                # Malformed parameter (no "=" or more than one): stop
                # processing this link's parameters, as upstream does.
                break
            link[name.strip(STRIP)] = val.strip(STRIP)
        ident = link.get("rel") or link.get("url")
        assert ident is not None
        links[ident] = link
    return links
# Script entry point: delegate to main() (defined elsewhere in this file),
# using its return value as the process exit status.
if __name__ == "__main__":
    sys.exit(main(sys.argv))
| 34.263158 | 96 | 0.562773 | #!/usr/bin/env python3
"""
Installation script for Datalad and related components
``datalad-installer`` is a script for installing Datalad_, git-annex_, and
related components all in a single invocation. It requires no third-party
Python libraries, though it does make heavy use of external packaging commands.
.. _Datalad: https://www.datalad.org
.. _git-annex: https://git-annex.branchable.com
Visit <https://github.com/datalad/datalad-installer> for more information.
"""
__version__ = "0.5.4"
__author__ = "The DataLad Team and Contributors"
__author_email__ = "team@datalad.org"
__license__ = "MIT"
__url__ = "https://github.com/datalad/datalad-installer"
from abc import ABC, abstractmethod
from contextlib import contextmanager
import ctypes
from enum import Enum
from functools import total_ordering
from getopt import GetoptError, getopt
import json
import logging
import os
import os.path
from pathlib import Path
import platform
from random import randrange
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
import textwrap
from time import sleep
from typing import (
Any,
Callable,
ClassVar,
Dict,
Iterator,
List,
NamedTuple,
Optional,
Tuple,
Type,
Union,
)
from urllib.request import Request, urlopen
from zipfile import ZipFile
log = logging.getLogger("datalad_installer")
SYSTEM = platform.system()
ON_LINUX = SYSTEM == "Linux"
ON_MACOS = SYSTEM == "Darwin"
ON_WINDOWS = SYSTEM == "Windows"
ON_POSIX = ON_LINUX or ON_MACOS
class SudoConfirm(Enum):
ASK = "ask"
ERROR = "error"
OK = "ok"
def parse_log_level(level: str) -> int:
"""
Convert a log level name (case-insensitive) or number to its numeric value
"""
try:
lv = int(level)
except ValueError:
levelup = level.upper()
if levelup in {"CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG"}:
ll = getattr(logging, levelup)
assert isinstance(ll, int)
return ll
else:
raise UsageError(f"Invalid log level: {level!r}")
else:
return lv
class Immediate:
"""
Superclass for constructs returned by the argument-parsing code
representing options that are handled "immediately" (i.e., --version and
--help)
"""
pass
class VersionRequest(Immediate):
"""`Immediate` representing a ``--version`` option"""
def __eq__(self, other: Any) -> bool:
if type(self) is type(other):
return True
else:
return NotImplemented
class HelpRequest(Immediate):
"""`Immediate` representing a ``--help`` option"""
def __init__(self, component: Optional[str]) -> None:
#: The component for which help was requested, or `None` if the
#: ``--help`` option was given at the global level
self.component: Optional[str] = component
def __eq__(self, other: Any) -> bool:
if type(self) is type(other):
return bool(self.component == other.component)
else:
return NotImplemented
SHORT_RGX = re.compile(r"-[^-]")
LONG_RGX = re.compile(r"--[^-].*")
OPTION_COLUMN_WIDTH = 30
OPTION_HELP_COLUMN_WIDTH = 40
HELP_GUTTER = 2
HELP_INDENT = 2
HELP_WIDTH = 75
@total_ordering
class Option:
def __init__(
self,
*names: str,
is_flag: bool = False,
converter: Optional[Callable[[str], Any]] = None,
multiple: bool = False,
immediate: Optional[Immediate] = None,
metavar: Optional[str] = None,
choices: Optional[List[str]] = None,
help: Optional[str] = None,
) -> None:
#: List of individual option characters
self.shortopts: List[str] = []
#: List of long option names (sans leading "--")
self.longopts: List[str] = []
dest: Optional[str] = None
self.is_flag: bool = is_flag
self.converter: Optional[Callable[[str], Any]] = converter
self.multiple: bool = multiple
self.immediate: Optional[Immediate] = immediate
self.metavar: Optional[str] = metavar
self.choices: Optional[List[str]] = choices
self.help: Optional[str] = help
for n in names:
if n.startswith("-"):
if LONG_RGX.fullmatch(n):
self.longopts.append(n[2:])
elif SHORT_RGX.fullmatch(n):
self.shortopts.append(n[1])
else:
raise ValueError(f"Invalid option: {n!r}")
elif dest is not None:
raise ValueError("More than one option destination specified")
else:
dest = n
if not self.shortopts and not self.longopts:
raise ValueError("No options supplied to Option constructor")
self.dest: str
if dest is None:
self.dest = (self.longopts + self.shortopts)[0].replace("-", "_")
else:
self.dest = dest
def __eq__(self, other: Any) -> bool:
if type(self) is type(other):
return vars(self) == vars(other)
else:
return NotImplemented
def __lt__(self, other: Any) -> bool:
if type(self) is type(other):
return bool(self._cmp_key() < other._cmp_key())
else:
return NotImplemented
def _cmp_key(self) -> Tuple[int, str]:
name = self.option_name
if name == "--help":
return (2, "--help")
elif name == "--version":
return (1, "--version")
else:
return (0, name)
@property
def option_name(self) -> str:
"""Display name for the option"""
if self.longopts:
return f"--{self.longopts[0]}"
else:
assert self.shortopts
return f"-{self.shortopts[0]}"
def process(self, namespace: Dict[str, Any], argument: str) -> Optional[Immediate]:
if self.immediate is not None:
return self.immediate
if self.is_flag:
namespace[self.dest] = True
else:
if self.choices is not None and argument not in self.choices:
raise UsageError(
f"Invalid choice for {self.option_name} option: {argument!r}"
)
if self.converter is None:
value = argument
else:
value = self.converter(argument)
if self.multiple:
namespace.setdefault(self.dest, []).append(value)
else:
namespace[self.dest] = value
return None
def get_help(self) -> str:
options = []
for o in self.shortopts:
options.append(f"-{o}")
for o in self.longopts:
options.append(f"--{o}")
header = ", ".join(options)
if not self.is_flag:
if self.metavar is not None:
metavar = self.metavar
elif self.choices is not None:
metavar = f"[{'|'.join(self.choices)}]"
elif self.longopts:
metavar = self.longopts[0].upper().replace("-", "_")
else:
metavar = "ARG"
header += " " + metavar
if self.help is not None:
helplines = textwrap.wrap(self.help, OPTION_HELP_COLUMN_WIDTH)
else:
helplines = []
if len(header) > OPTION_COLUMN_WIDTH:
lines2 = [header]
remainder = helplines
elif helplines:
lines2 = [
header.ljust(OPTION_COLUMN_WIDTH) + " " * HELP_GUTTER + helplines[0]
]
remainder = helplines[1:]
else:
lines2 = [header]
remainder = []
for r in remainder:
lines2.append(" " * (OPTION_COLUMN_WIDTH + HELP_GUTTER) + r)
return textwrap.indent("\n".join(lines2), " " * HELP_INDENT)
class OptionParser:
def __init__(
self,
component: Optional[str] = None,
versioned: bool = False,
help: Optional[str] = None,
options: Optional[List[Option]] = None,
) -> None:
self.component: Optional[str] = component
self.versioned: bool = versioned
self.help: Optional[str] = help
#: Mapping from individual option characters to Option instances
self.short_options: Dict[str, Option] = {}
#: Mapping from long option names (sans leading "--") to Option
#: instances
self.long_options: Dict[str, Option] = {}
#: Mapping from option names (including leading hyphens) to Option
#: instances
self.options: Dict[str, Option] = {}
self.option_list: List[Option] = []
self.add_option(
Option(
"-h",
"--help",
is_flag=True,
immediate=HelpRequest(self.component),
help="Show this help information and exit",
)
)
if options is not None:
for opt in options:
self.add_option(opt)
def add_option(self, option: Option) -> None:
if self.options.get(option.option_name) == option:
return
for o in option.shortopts:
if o in self.short_options:
raise ValueError(f"Option -{o} registered more than once")
for o in option.longopts:
if o in self.long_options:
raise ValueError(f"Option --{o} registered more than once")
for o in option.shortopts:
self.short_options[o] = option
self.options[f"-{o}"] = option
for o in option.longopts:
self.long_options[o] = option
self.options[f"--{o}"] = option
self.option_list.append(option)
def parse_args(
self, args: List[str]
) -> Union[Immediate, Tuple[Dict[str, Any], List[str]]]:
"""
Parse command-line arguments, stopping when a non-option is reached.
Returns either an `Immediate` (if an immediate option is encountered)
or a tuple of the option values and remaining arguments.
:param List[str] args: command-line arguments without ``sys.argv[0]``
"""
shortspec = ""
for o, option in self.short_options.items():
if option.is_flag:
shortspec += o
else:
shortspec += f"{o}:"
longspec = []
for o, option in self.long_options.items():
if option.is_flag:
longspec.append(o)
else:
longspec.append(f"{o}=")
try:
optlist, leftovers = getopt(args, shortspec, longspec)
except GetoptError as e:
raise UsageError(str(e), self.component)
kwargs: Dict[str, Any] = {}
for (o, a) in optlist:
option = self.options[o]
try:
ret = option.process(kwargs, a)
except ValueError as e:
raise UsageError(f"{a!r}: {e}", self.component)
except UsageError as e:
e.component = self.component
raise e
else:
if ret is not None:
return ret
return (kwargs, leftovers)
def short_help(self, progname: str) -> str:
if self.component is None:
return (
f"Usage: {progname} [<options>] [COMPONENT[=VERSION] [<options>]] ..."
)
else:
cmd = f"Usage: {progname} [<options>] {self.component}"
if self.versioned:
cmd += "[=VERSION]"
cmd += " [<options>]"
return cmd
def long_help(self, progname: str) -> str:
lines = [self.short_help(progname)]
if self.help is not None:
lines.append("")
lines.extend(
" " * HELP_INDENT + ln for ln in textwrap.wrap(self.help, HELP_WIDTH)
)
if self.options:
lines.append("")
lines.append("Options:")
for option in sorted(self.option_list):
lines.extend(option.get_help().splitlines())
return "\n".join(lines)
class UsageError(Exception):
"""Raised when an error occurs while processing command-line options"""
def __init__(self, message: str, component: Optional[str] = None) -> None:
#: The error message
self.message: str = message
#: The component for which the error occurred, or `None` if the error
#: was at the global level
self.component: Optional[str] = component
def __str__(self) -> str:
return self.message
class ParsedArgs(NamedTuple):
"""
A pair of global options and `ComponentRequest`\\s parsed from command-line
arguments
"""
global_opts: Dict[str, Any]
components: List["ComponentRequest"]
class ComponentRequest:
"""A request for a component parsed from command-line arguments"""
def __init__(self, name: str, **kwargs: Any) -> None:
self.name: str = name
self.kwargs: Dict[str, Any] = kwargs
def __eq__(self, other: Any) -> bool:
if type(self) is type(other):
return bool(self.name == other.name and self.kwargs == other.kwargs)
else:
return NotImplemented
def __repr__(self) -> str:
attrs = [f"name={self.name!r}"]
for k, v in self.kwargs.items():
attrs.append(f"{k}={v!r}")
return "{0.__module__}.{0.__name__}({1})".format(
type(self),
", ".join(attrs),
)
class CondaInstance(NamedTuple):
"""A Conda installation or environment"""
#: The root of the Conda installation
basepath: Path
#: The name of the environment (`None` for the base environment)
name: Optional[str]
@property
def conda_exe(self) -> Path:
"""The path to the Conda executable"""
if ON_WINDOWS:
return self.basepath / "Scripts" / "conda.exe"
else:
return self.basepath / "bin" / "conda"
@property
def bindir(self) -> Path:
"""
The directory in which command-line programs provided by packages are
installed
"""
dirname = "Scripts" if ON_WINDOWS else "bin"
if self.name is None:
return self.basepath / dirname
else:
return self.basepath / "envs" / self.name / dirname
#: A list of command names and the paths at which they are located
CommandList = List[Tuple[str, Path]]
class DataladInstaller:
"""The script's primary class, a manager & runner of components"""
COMPONENTS: ClassVar[Dict[str, Type["Component"]]] = {}
OPTION_PARSER = OptionParser(
help="Installation script for Datalad and related components",
options=[
Option(
"-V",
"--version",
is_flag=True,
immediate=VersionRequest(),
help="Show program version and exit",
),
Option(
"-l",
"--log-level",
converter=parse_log_level,
metavar="LEVEL",
help="Set logging level [default: INFO]",
),
Option(
"-E",
"--env-write-file",
converter=Path,
multiple=True,
help=(
"Append PATH modifications and other shell commands to the"
" given file; can be given multiple times"
),
),
Option(
"--sudo",
choices=[v.value for v in SudoConfirm],
converter=SudoConfirm,
help="How to handle sudo commands [default: ask]",
),
],
)
def __init__(
self,
env_write_files: Optional[List[Union[str, os.PathLike]]] = None,
sudo_confirm: SudoConfirm = SudoConfirm.ASK,
) -> None:
#: A list of files to which to write ``PATH`` modifications and related
#: shell commands
self.env_write_files: List[Path]
if env_write_files is None:
self.env_write_files = []
else:
self.env_write_files = [Path(p) for p in env_write_files]
self.sudo_confirm: SudoConfirm = sudo_confirm
#: The default installers to fall back on for the "auto" installation
#: method
self.installer_stack: List["Installer"] = [
# Lowest priority first
DataladPackagesBuildInstaller(self),
AutobuildInstaller(self),
HomebrewInstaller(self),
NeurodebianInstaller(self),
AptInstaller(self),
CondaInstaller(self),
]
#: A stack of Conda installations & environments installed via the
#: instance
self.conda_stack: List[CondaInstance] = []
#: A list of commands installed via the instance
self.new_commands: CommandList = []
#: Whether "brew update" has been run
self.brew_updated: bool = False
@classmethod
def register_component(
cls, name: str
) -> Callable[[Type["Component"]], Type["Component"]]:
"""A decorator for registering concrete `Component` subclasses"""
def decorator(component: Type["Component"]) -> Type["Component"]:
cls.COMPONENTS[name] = component
return component
return decorator
def __enter__(self) -> "DataladInstaller":
return self
def __exit__(self, exc_type: Any, _exc_value: Any, _exc_tb: Any) -> None:
if exc_type is None:
# Ensure env write files at least exist
for p in self.env_write_files:
p.touch()
def ensure_env_write_file(self) -> None:
"""If there are no env write files registered, add one"""
if not self.env_write_files:
fd, fpath = tempfile.mkstemp(prefix="dl-env-", suffix=".sh")
os.close(fd)
log.info("Writing environment modifications to %s", fpath)
self.env_write_files.append(Path(fpath))
def sudo(self, *args: Any, **kwargs: Any) -> None:
arglist = [str(a) for a in args]
cmd = " ".join(map(shlex.quote, arglist))
if ON_WINDOWS:
# The OS will ask the user for confirmation anyway, so there's no
# need for us to ask anything.
log.info("Running as administrator: %s", " ".join(arglist))
ctypes.windll.shell32.ShellExecuteW( # type: ignore[attr-defined]
None, "runas", arglist[0], " ".join(arglist[1:]), None, 1
)
else:
if self.sudo_confirm is SudoConfirm.ERROR:
log.error("Not running sudo command: %s", cmd)
sys.exit(1)
elif self.sudo_confirm is SudoConfirm.ASK:
print("About to run the following command as an administrator:")
print(f" {cmd}")
yan = ask("Proceed?", ["y", "a", "n"])
if yan == "n":
sys.exit(0)
elif yan == "a":
self.sudo_confirm = SudoConfirm.OK
runcmd("sudo", *args, **kwargs)
def run_maybe_elevated(self, *args: Any, **kwargs: Any) -> None:
try:
runcmd(*args, **kwargs)
except OSError as e:
if e.winerror == 740: # type: ignore[attr-defined]
log.info("Operation requires elevation; rerunning as administrator")
self.sudo(*args, **kwargs)
else:
raise
@classmethod
def parse_args(cls, args: List[str]) -> Union[Immediate, ParsedArgs]:
"""
Parse all command-line arguments.
:param List[str] args: command-line arguments without ``sys.argv[0]``
"""
r = cls.OPTION_PARSER.parse_args(args)
if isinstance(r, Immediate):
return r
global_opts, leftovers = r
components: List[ComponentRequest] = []
while leftovers:
c = leftovers.pop(0)
name, eq, version = c.partition("=")
if not name:
raise UsageError("Component name must be nonempty")
try:
component = cls.COMPONENTS[name]
except KeyError:
raise UsageError(f"Unknown component: {name!r}")
cparser = component.OPTION_PARSER
if version and not cparser.versioned:
raise UsageError(f"{name} component does not take a version", name)
if eq and not version:
raise UsageError("Version must be nonempty", name)
cr = cparser.parse_args(leftovers)
if isinstance(cr, Immediate):
return cr
kwargs, leftovers = cr
if version:
kwargs["version"] = version
components.append(ComponentRequest(name=name, **kwargs))
return ParsedArgs(global_opts, components)
def main(self, argv: Optional[List[str]] = None) -> int:
"""
Parsed command-line arguments and perform the requested actions.
Returns 0 if everything was OK, nonzero otherwise.
:param List[str] argv: command-line arguments, including
``sys.argv[0]``
"""
if argv is None:
argv = sys.argv
progname, *args = argv
if not progname:
progname = "datalad-installer"
else:
progname = Path(progname).name
try:
r = self.parse_args(args)
except UsageError as e:
print(self.short_help(progname, e.component), file=sys.stderr)
print(file=sys.stderr)
print(str(e), file=sys.stderr)
return 2
if isinstance(r, VersionRequest):
print("datalad-installer", __version__)
return 0
elif isinstance(r, HelpRequest):
print(self.long_help(progname, r.component))
return 0
else:
assert isinstance(r, ParsedArgs)
global_opts, components = r
if not components:
components = [ComponentRequest("datalad")]
logging.basicConfig(
format="%(asctime)s [%(levelname)-8s] %(name)s %(message)s",
datefmt="%Y-%m-%dT%H:%M:%S%z",
level=global_opts.pop("log_level", logging.INFO),
)
if global_opts.get("env_write_file"):
self.env_write_files.extend(global_opts["env_write_file"])
self.ensure_env_write_file()
if global_opts.get("sudo"):
self.sudo_confirm = global_opts["sudo"]
for cr in components:
self.addcomponent(name=cr.name, **cr.kwargs)
ok = True
for name, path in self.new_commands:
log.info("%s is now installed at %s", name, path)
if not os.path.exists(path):
log.error("%s does not exist!", path)
ok = False
elif not ON_WINDOWS and not os.access(path, os.X_OK):
log.error("%s is not executable!", path)
ok = False
else:
try:
sr = subprocess.run(
[str(path), "--help"], stdout=subprocess.DEVNULL
)
except Exception as e:
log.error("Failed to run `%s --help`: %s", path, e)
ok = False
else:
if sr.returncode != 0:
log.error("`%s --help` command failed!", path)
ok = False
return 0 if ok else 1
def addenv(self, line: str) -> None:
"""Write a line to the env write files"""
log.debug("Adding line %r to env_write_files", line)
for p in self.env_write_files:
with p.open("a") as fp:
print(line, file=fp)
def addpath(self, p: Union[str, os.PathLike], last: bool = False) -> None:
"""
Add a line to the env write files that prepends (or appends, if
``last`` is true) a given path to ``PATH``
"""
path = Path(p).resolve()
if not last:
line = f'export PATH={shlex.quote(str(path))}:"$PATH"'
else:
line = f'export PATH="$PATH":{shlex.quote(str(path))}'
self.addenv(line)
def addcomponent(self, name: str, **kwargs: Any) -> None:
"""Provision the given component"""
try:
component = self.COMPONENTS[name]
except AttributeError:
raise ValueError(f"Unknown component: {name}")
component(self).provide(**kwargs)
def get_conda(self) -> CondaInstance:
"""
Return the most-recently created Conda installation or environment. If
there is no such instance, return an instance for an
externally-installed Conda installation, raising an error if none is
found.
"""
if self.conda_stack:
return self.conda_stack[-1]
else:
conda_path = shutil.which("conda")
if conda_path is not None:
basepath = Path(readcmd(conda_path, "info", "--base").strip())
return CondaInstance(basepath=basepath, name=None)
else:
raise RuntimeError("conda not installed")
@classmethod
def short_help(cls, progname: str, component: Optional[str] = None) -> str:
if component is None:
return cls.OPTION_PARSER.short_help(progname)
else:
return cls.COMPONENTS[component].OPTION_PARSER.short_help(progname)
@classmethod
def long_help(cls, progname: str, component: Optional[str] = None) -> str:
if component is None:
s = cls.OPTION_PARSER.long_help(progname)
s += "\n\nComponents:"
width = max(map(len, cls.COMPONENTS.keys()))
for name, cmpnt in sorted(cls.COMPONENTS.items()):
if cmpnt.OPTION_PARSER.help is not None:
chelp = cmpnt.OPTION_PARSER.help
else:
chelp = ""
s += (
f"\n{' ' * HELP_INDENT}{name:{width}}{' ' * HELP_GUTTER}"
+ textwrap.shorten(chelp, HELP_WIDTH - width - HELP_GUTTER)
)
return s
else:
return cls.COMPONENTS[component].OPTION_PARSER.long_help(progname)
class Component(ABC):
"""
An abstract base class for a component that can be specified on the command
line and provisioned
"""
OPTION_PARSER: ClassVar[OptionParser]
def __init__(self, manager: DataladInstaller) -> None:
self.manager = manager
@abstractmethod
def provide(self, **kwargs: Any) -> None:
...
@DataladInstaller.register_component("venv")
class VenvComponent(Component):
"""Creates a Python virtual environment using ``python -m venv``"""
OPTION_PARSER = OptionParser(
"venv",
versioned=False,
help="Create a Python virtual environment",
options=[
Option(
"--path",
converter=Path,
metavar="PATH",
help="Create the venv at the given path",
),
Option(
"-e",
"--extra-args",
converter=shlex.split,
help="Extra arguments to pass to the venv command",
),
# For use in testing against the dev version of pip:
Option(
"--dev-pip",
is_flag=True,
help="Install the development version of pip from GitHub",
),
],
)
def provide(
self,
path: Optional[Path] = None,
extra_args: Optional[List[str]] = None,
dev_pip: bool = False,
**kwargs: Any,
) -> None:
log.info("Creating a virtual environment")
if path is None:
path = mktempdir("dl-venv-")
log.info("Path: %s", path)
log.info("Extra args: %s", extra_args)
if kwargs:
log.warning("Ignoring extra component arguments: %r", kwargs)
### TODO: Handle systems on which venv isn't installed
cmd = [sys.executable, "-m", "venv"]
if extra_args is not None:
cmd.extend(extra_args)
cmd.append(str(path))
runcmd(*cmd)
installer = PipInstaller(self.manager, path)
if dev_pip:
runcmd(
installer.python,
"-m",
"pip",
"install",
"pip @ git+https://github.com/pypa/pip",
)
self.manager.installer_stack.append(installer)
@DataladInstaller.register_component("miniconda")
class MinicondaComponent(Component):
"""Installs Miniconda"""
OPTION_PARSER = OptionParser(
"miniconda",
versioned=False,
help="Install Miniconda",
options=[
Option(
"--path",
converter=Path,
metavar="PATH",
help="Install Miniconda at the given path",
),
Option("--batch", is_flag=True, help="Run in batch (noninteractive) mode"),
Option(
"--spec",
converter=str.split,
help=(
"Space-separated list of package specifiers to install in"
" the Miniconda environment"
),
),
Option(
"-e",
"--extra-args",
converter=shlex.split,
help="Extra arguments to pass to the install command",
),
],
)
def provide(
self,
path: Optional[Path] = None,
batch: bool = False,
spec: Optional[List[str]] = None,
extra_args: Optional[List[str]] = None,
**kwargs: Any,
) -> None:
log.info("Installing Miniconda")
if "CONDA_PREFIX" in os.environ:
raise RuntimeError("Conda already active; not installing miniconda")
if path is None:
path = mktempdir("dl-miniconda-")
# The Miniconda installer requires that the given path not already
# exist (unless -u is given); hence, we need to delete the new
# directory before using it. (Yes, this is vulnerable to race
# conditions, but so is specifying a nonexistent directory on the
# command line.)
path.rmdir()
log.info("Path: %s", path)
if ON_WINDOWS:
log.info("Batch: True")
else:
log.info("Batch: %s", batch)
log.info("Spec: %s", spec)
log.info("Extra args: %s", extra_args)
if kwargs:
log.warning("Ignoring extra component arguments: %r", kwargs)
if ON_LINUX:
miniconda_script = "Miniconda3-latest-Linux-x86_64.sh"
elif ON_MACOS:
miniconda_script = "Miniconda3-latest-MacOSX-x86_64.sh"
elif ON_WINDOWS:
miniconda_script = "Miniconda3-latest-Windows-x86_64.exe"
else:
raise RuntimeError(f"E: Unsupported OS: {SYSTEM}")
log.info("Downloading and running miniconda installer")
with tempfile.TemporaryDirectory() as tmpdir:
script_path = os.path.join(tmpdir, miniconda_script)
download_file(
(
os.environ.get("ANACONDA_URL")
or "https://repo.anaconda.com/miniconda/"
).rstrip("/")
+ "/"
+ miniconda_script,
script_path,
)
log.info("Installing miniconda in %s", path)
if ON_WINDOWS:
# `path` needs to be absolute when passing it to the installer,
# but Path.resolve() is a no-op for non-existent files on
# Windows. Hence, we need to create the directory first.
path.mkdir(parents=True, exist_ok=True)
cmd = f'start /wait "" {script_path}'
if extra_args is not None:
cmd += " ".join(extra_args)
cmd += f" /S /D={path.resolve()}"
log.info("Running: %s", cmd)
subprocess.run(cmd, check=True, shell=True)
else:
args = ["-p", path, "-s"]
if batch:
args.append("-b")
if extra_args is not None:
args.extend(extra_args)
runcmd("bash", script_path, *args)
conda_instance = CondaInstance(basepath=path, name=None)
if spec is not None:
runcmd(conda_instance.conda_exe, "install", *spec)
self.manager.conda_stack.append(conda_instance)
self.manager.installer_stack.append(
CondaInstaller(self.manager, conda_instance)
)
self.manager.addenv(f"source {shlex.quote(str(path))}/etc/profile.d/conda.sh")
self.manager.addenv("conda activate base")
@DataladInstaller.register_component("conda-env")
class CondaEnvComponent(Component):
    """Creates a Conda environment"""

    OPTION_PARSER = OptionParser(
        "conda-env",
        versioned=False,
        help="Create a Conda environment",
        options=[
            Option(
                "-n",
                "--name",
                "envname",
                metavar="NAME",
                help="Name of the environment",
            ),
            Option(
                "--spec",
                converter=str.split,
                help="Space-separated list of package specifiers to install in the environment",
            ),
            Option(
                "-e",
                "--extra-args",
                converter=shlex.split,
                help="Extra arguments to pass to the `conda create` command",
            ),
        ],
    )

    def provide(
        self,
        envname: Optional[str] = None,
        spec: Optional[List[str]] = None,
        extra_args: Optional[List[str]] = None,
        **kwargs: Any,
    ) -> None:
        """Create the environment, register it, and make it the active one."""
        log.info("Creating Conda environment")
        if envname is None:
            # No name supplied: generate a random, unlikely-to-collide one.
            cname = "datalad-installer-{:03d}".format(randrange(1000))
        else:
            cname = envname
        log.info("Name: %s", cname)
        log.info("Spec: %s", spec)
        log.info("Extra args: %s", extra_args)
        if kwargs:
            log.warning("Ignoring extra component arguments: %r", kwargs)
        conda = self.manager.get_conda()
        create_cmd = [conda.conda_exe, "create", "--name", cname]
        create_cmd.extend(extra_args or [])
        create_cmd.extend(spec or [])
        runcmd(*create_cmd)
        env = CondaInstance(basepath=conda.basepath, name=cname)
        # Record the new environment so later components install into it.
        self.manager.conda_stack.append(env)
        self.manager.installer_stack.append(CondaInstaller(self.manager, env))
        self.manager.addenv(f"conda activate {shlex.quote(cname)}")
@DataladInstaller.register_component("neurodebian")
class NeurodebianComponent(Component):
    """Installs & configures NeuroDebian"""
    OPTION_PARSER = OptionParser(
        "neurodebian",
        versioned=False,
        help="Install & configure NeuroDebian",
        options=[
            Option(
                "-e",
                "--extra-args",
                converter=shlex.split,
                help="Extra arguments to pass to the nd-configurerepo command",
            )
        ],
    )
    # Fingerprint of the NeuroDebian archive signing key
    KEY_FINGERPRINT = "0xA5D32F012649A5A9"
    # Fallback URL for downloading the signing key directly
    KEY_URL = "http://neuro.debian.net/_static/neuro.debian.net.asc"
    # Mirror identifier used in the per-release sources.list filenames
    DOWNLOAD_SERVER = "us-nh"
    def provide(self, extra_args: Optional[List[str]] = None, **kwargs: Any) -> None:
        """
        Install the ``neurodebian`` package, first configuring the NeuroDebian
        APT repository & signing key if neither is available yet, then run
        ``nd-configurerepo`` with any extra arguments.
        """
        log.info("Installing & configuring NeuroDebian")
        log.info("Extra args: %s", extra_args)
        if kwargs:
            log.warning("Ignoring extra component arguments: %r", kwargs)
        # Is the `neurodebian` package already known to APT?
        r = subprocess.run(
            ["apt-cache", "show", "neurodebian"],
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
        )
        # Only configure the repository when the package is unavailable AND no
        # NeuroDebian origin is present in the APT policy output.
        if r.returncode != 0 and "o=NeuroDebian" not in readcmd("apt-cache", "policy"):
            log.info("NeuroDebian not available in APT and repository not configured")
            log.info("Configuring NeuroDebian APT repository")
            release = get_version_codename()
            log.debug("Detected version codename: %r", release)
            with tempfile.TemporaryDirectory() as tmpdir:
                # Fetch the sources.list for this release/mirror and install
                # it under /etc/apt/sources.list.d/:
                sources_file = os.path.join(tmpdir, "neurodebian.sources.list")
                download_file(
                    f"http://neuro.debian.net/lists/{release}.{self.DOWNLOAD_SERVER}.libre",
                    sources_file,
                )
                with open(sources_file) as fp:
                    log.info(
                        "Adding the following contents to sources.list.d:\n\n%s",
                        textwrap.indent(fp.read(), " " * 4),
                    )
                self.manager.sudo(
                    "cp",
                    "-i",
                    sources_file,
                    "/etc/apt/sources.list.d/neurodebian.sources.list",
                )
                try:
                    # NOTE(review): the sks-keyservers pool appears to be
                    # decommissioned, so the direct-download fallback below is
                    # likely the path normally taken — confirm.
                    self.manager.sudo(
                        "apt-key",
                        "adv",
                        "--recv-keys",
                        "--keyserver",
                        "hkp://pool.sks-keyservers.net:80",
                        self.KEY_FINGERPRINT,
                    )
                except subprocess.CalledProcessError:
                    log.info("apt-key command failed; downloading key directly")
                    keyfile = os.path.join(tmpdir, "neuro.debian.net.asc")
                    download_file(self.KEY_URL, keyfile)
                    self.manager.sudo("apt-key", "add", keyfile)
                self.manager.sudo("apt-get", "update")
        self.manager.sudo(
            "apt-get",
            "install",
            "-qy",
            "neurodebian",
            # Suppress interactive debconf prompts:
            env=dict(os.environ, DEBIAN_FRONTEND="noninteractive"),
        )
        runcmd("nd-configurerepo", *(extra_args or []))
class InstallableComponent(Component):
    """
    Superclass for components that install packages via installation methods
    """

    #: Name of the component/package to install
    NAME: ClassVar[str]

    #: Registered installation methods, keyed by installer name
    INSTALLERS: ClassVar[Dict[str, Type["Installer"]]] = {}

    @classmethod
    def register_installer(cls, installer: Type["Installer"]) -> Type["Installer"]:
        """A decorator for registering concrete `Installer` subclasses"""
        cls.INSTALLERS[installer.NAME] = installer
        # Make the installer selectable via --method and expose its extra
        # command-line options on this component's option parser.
        methods = cls.OPTION_PARSER.options["--method"].choices
        assert methods is not None
        methods.append(installer.NAME)
        for opt in installer.OPTIONS:
            cls.OPTION_PARSER.add_option(opt)
        return installer

    def get_installer(self, name: str) -> "Installer":
        """Retrieve & instantiate the installer with the given name"""
        try:
            installer_cls = self.INSTALLERS[name]
        except KeyError:
            raise ValueError(f"Unknown installation method: {name}")
        return installer_cls(self.manager)

    def provide(self, method: Optional[str] = None, **kwargs: Any) -> None:
        """
        Install the component, either via the explicitly requested *method* or
        by trying each installer on the manager's stack (most recent first)
        until one supports this system & component.
        """
        if method is not None and method != "auto":
            bins = self.get_installer(method).install(self.NAME, **kwargs)
        else:
            for installer in reversed(self.manager.installer_stack):
                try:
                    log.debug("Attempting to install via %s", installer.NAME)
                    bins = installer.install(self.NAME, **kwargs)
                except MethodNotSupportedError as e:
                    # Not fatal: fall through to the next installer.
                    log.debug("Installation method not supported: %s", e)
                else:
                    break
            else:
                raise RuntimeError(f"No viable installation method for {self.NAME}")
        self.manager.new_commands.extend(bins)
@DataladInstaller.register_component("git-annex")
class GitAnnexComponent(InstallableComponent):
    """Installs git-annex"""
    # Package name looked up in each installer's PACKAGES mapping
    NAME = "git-annex"
    OPTION_PARSER = OptionParser(
        "git-annex",
        versioned=True,
        help="Install git-annex",
        options=[
            Option(
                "-m",
                "--method",
                # Concrete method names are appended by register_installer().
                choices=["auto"],
                help="Select the installation method to use",
            ),
        ],
    )
@DataladInstaller.register_component("datalad")
class DataladComponent(InstallableComponent):
    """Installs Datalad"""
    # Package name looked up in each installer's PACKAGES mapping
    NAME = "datalad"
    OPTION_PARSER = OptionParser(
        "datalad",
        versioned=True,
        help="Install Datalad",
        options=[
            Option(
                "-m",
                "--method",
                # Concrete method names are appended by register_installer().
                choices=["auto"],
                help="Select the installation method to use",
            ),
        ],
    )
class Installer(ABC):
    """An abstract base class for installation methods for packages"""

    NAME: ClassVar[str]

    OPTIONS: ClassVar[List[Option]]

    #: Mapping from supported installable component names to
    #: (installer-specific package IDs, list of installed programs) pairs
    PACKAGES: ClassVar[Dict[str, Tuple[str, List[str]]]]

    def __init__(self, manager: DataladInstaller) -> None:
        self.manager = manager

    def install(self, component: str, **kwargs: Any) -> CommandList:
        """
        Installs a given component.  Raises `MethodNotSupportedError` if the
        installation method is not supported on the system or the method does
        not support installing the given component.  Returns a list of
        (command, Path) pairs for each installed program.
        """
        self.assert_supported_system()
        try:
            package, commands = self.PACKAGES[component]
        except KeyError:
            raise MethodNotSupportedError(
                f"{self.NAME} does not know how to install {component}"
            )
        bindir = self.install_package(package, **kwargs)

        def locate(name: str) -> Path:
            # On Windows, bare program names refer to ".exe" files.
            exe = bindir / name
            if ON_WINDOWS and exe.suffix == "":
                exe = exe.with_suffix(".exe")
            return exe

        return [(name, locate(name)) for name in commands]

    @abstractmethod
    def install_package(self, package: str, **kwargs: Any) -> Path:
        """
        Installs a given package.  Returns the installation directory for the
        package's programs.
        """
        ...

    @abstractmethod
    def assert_supported_system(self) -> None:
        """
        If the installation method is not supported by the current system,
        raises `MethodNotSupportedError`; otherwise, does nothing.
        """
        ...
#: Option shared by several installers: free-form extra arguments forwarded
#: to the underlying install command, split with shell quoting rules
EXTRA_ARGS_OPTION = Option(
    "-e",
    "--extra-args",
    converter=shlex.split,
    help="Extra arguments to pass to the install command",
)
@GitAnnexComponent.register_installer
@DataladComponent.register_installer
class AptInstaller(Installer):
    """Installs via apt-get"""

    NAME = "apt"

    OPTIONS = [
        Option(
            "--build-dep", is_flag=True, help="Install build-dep instead of the package"
        ),
        EXTRA_ARGS_OPTION,
    ]

    PACKAGES = {
        "datalad": ("datalad", ["datalad"]),
        "git-annex": ("git-annex", ["git-annex"]),
    }

    def install_package(
        self,
        package: str,
        version: Optional[str] = None,
        build_dep: bool = False,
        extra_args: Optional[List[str]] = None,
        **kwargs: Any,
    ) -> Path:
        """Install *package* (optionally pinned to *version*) via ``apt-get``."""
        log.info("Installing %s via %s", package, self.NAME)
        log.info("Version: %s", version)
        log.info("Build dep: %s", build_dep)
        log.info("Extra args: %s", extra_args)
        if kwargs:
            log.warning("Ignoring extra installer arguments: %r", kwargs)
        subcmd = "build-dep" if build_dep else "install"
        target = package if version is None else f"{package}={version}"
        cmd = ["apt-get", subcmd, *(extra_args or []), target]
        self.manager.sudo(*cmd)
        log.debug("Installed program directory: /usr/bin")
        return Path("/usr/bin")

    def assert_supported_system(self) -> None:
        """Require the ``apt-get`` executable on PATH."""
        if shutil.which("apt-get") is None:
            raise MethodNotSupportedError("apt-get command not found")
@DataladComponent.register_installer
@GitAnnexComponent.register_installer
class HomebrewInstaller(Installer):
    """Installs via brew (Homebrew)"""

    NAME = "brew"

    OPTIONS = [
        EXTRA_ARGS_OPTION,
    ]

    PACKAGES = {
        "datalad": ("datalad", ["datalad"]),
        "git-annex": ("git-annex", ["git-annex"]),
    }

    def install_package(
        self,
        package: str,
        extra_args: Optional[List[str]] = None,
        **kwargs: Any,
    ) -> Path:
        """Install *package* with ``brew install``."""
        log.info("Installing %s via brew", package)
        log.info("Extra args: %s", extra_args)
        if kwargs:
            log.warning("Ignoring extra installer arguments: %r", kwargs)
        # Only run `brew update` once per program run.
        if not self.manager.brew_updated:
            runcmd("brew", "update")
            self.manager.brew_updated = True
        try:
            runcmd("brew", "install", *(extra_args or []), package)
        except subprocess.CalledProcessError:
            # Dump diagnostics to make failure reports actionable.
            log.error(
                "brew command failed; printing diagnostic output for reporting issue"
            )
            runcmd("brew", "config")
            runcmd("brew", "doctor")
            raise
        ### TODO: Handle variations in this path (Is it "$(brew --prefix)/bin"?)
        log.debug("Installed program directory: /usr/local/bin")
        return Path("/usr/local/bin")

    def assert_supported_system(self) -> None:
        """Require the ``brew`` executable on PATH."""
        if shutil.which("brew") is None:
            raise MethodNotSupportedError("brew command not found")
@DataladComponent.register_installer
class PipInstaller(Installer):
    """
    Installs via pip, either at the system level or into a given virtual
    environment
    """
    NAME = "pip"
    OPTIONS = [
        Option("--devel", is_flag=True, help="Install from GitHub repository"),
        Option("-E", "--extras", metavar="EXTRAS", help="Install package extras"),
        EXTRA_ARGS_OPTION,
    ]
    PACKAGES = {
        "datalad": ("datalad", ["datalad"]),
    }
    #: Source-repository URLs used when --devel is given
    DEVEL_PACKAGES = {
        "datalad": "git+https://github.com/datalad/datalad.git",
    }
    def __init__(
        self, manager: DataladInstaller, venv_path: Optional[Path] = None
    ) -> None:
        super().__init__(manager)
        #: The path to the virtual environment in which to install, or `None`
        #: if installation should be done at the system level
        self.venv_path: Optional[Path] = venv_path
    @property
    def python(self) -> Union[str, Path]:
        """Python interpreter to run pip with (venv's, or the current one)."""
        if self.venv_path is None:
            return sys.executable
        elif ON_WINDOWS:
            return self.venv_path / "Scripts" / "python.exe"
        else:
            return self.venv_path / "bin" / "python"
    def install_package(
        self,
        package: str,
        version: Optional[str] = None,
        devel: bool = False,
        extras: Optional[str] = None,
        extra_args: Optional[List[str]] = None,
        **kwargs: Any,
    ) -> Path:
        """
        Install *package* via ``pip install``, then query pip's install scheme
        to determine the scripts directory, which is returned.
        """
        log.info("Installing %s via pip", package)
        log.info("Venv path: %s", self.venv_path)
        log.info("Version: %s", version)
        log.info("Devel: %s", devel)
        log.info("Extras: %s", extras)
        log.info("Extra args: %s", extra_args)
        if kwargs:
            log.warning("Ignoring extra installer arguments: %r", kwargs)
        urlspec: Optional[str]
        if devel:
            try:
                urlspec = self.DEVEL_PACKAGES[package]
            except KeyError:
                raise ValueError(f"No source repository known for {package}")
        else:
            urlspec = None
        cmd = [self.python, "-m", "pip", "install"]
        if extra_args is not None:
            cmd.extend(extra_args)
        cmd.append(
            compose_pip_requirement(
                package, version=version, urlspec=urlspec, extras=extras
            )
        )
        runcmd(*cmd)
        # A --user install places scripts in a different scheme:
        user = extra_args is not None and "--user" in extra_args
        with tempfile.NamedTemporaryFile("w+", delete=False) as script:
            # Passing this code to Python with `input` doesn't work for some
            # reason, so we need to save it as a script instead.
            # (get_scheme() is the modern pip API; distutils_scheme() is the
            # fallback for older pip versions.)
            print(
                "try:\n"
                "    from pip._internal.locations import get_scheme\n"
                f"    path = get_scheme({package!r}, user={user!r}).scripts\n"
                "except ImportError:\n"
                "    from pip._internal.locations import distutils_scheme\n"
                f"    path = distutils_scheme({package!r}, user={user!r})['scripts']\n"
                "print(path, end='')\n",
                file=script,
                flush=True,
            )
            # We need to close before passing to Python for Windows
            # compatibility
            script.close()
            binpath = Path(readcmd(self.python, script.name))
            os.unlink(script.name)
        log.debug("Installed program directory: %s", binpath)
        return binpath
    def assert_supported_system(self) -> None:
        ### TODO: Detect whether pip is installed in the current Python,
        ### preferably without importing it
        pass
@GitAnnexComponent.register_installer
class NeurodebianInstaller(AptInstaller):
    """Installs via apt-get and the NeuroDebian repositories"""

    NAME = "neurodebian"

    PACKAGES = {
        "git-annex": ("git-annex-standalone", ["git-annex"]),
    }

    def assert_supported_system(self) -> None:
        """Require apt-get plus a configured NeuroDebian APT source."""
        super().assert_supported_system()
        policy = readcmd("apt-cache", "policy")
        if "l=NeuroDebian" not in policy:
            raise MethodNotSupportedError("Neurodebian not configured")
@GitAnnexComponent.register_installer
@DataladComponent.register_installer
class DebURLInstaller(Installer):
    """Installs a ``*.deb`` package by URL"""
    NAME = "deb-url"
    OPTIONS = [
        Option("--url", metavar="URL", help="URL from which to download `*.deb` file"),
        Option(
            "--install-dir",
            converter=Path,
            metavar="DIR",
            help="Directory in which to unpack the `*.deb`",
        ),
        EXTRA_ARGS_OPTION,
    ]
    PACKAGES = {
        "git-annex": ("git-annex", ["git-annex"]),
        "datalad": ("datalad", ["datalad"]),
    }
    def install_package(
        self,
        package: str,
        url: Optional[str] = None,
        install_dir: Optional[Path] = None,
        extra_args: Optional[List[str]] = None,
        **kwargs: Any,
    ) -> Path:
        """
        Download the ``*.deb`` at *url* and install it, either system-wide or
        (git-annex only) unpacked under *install_dir*.
        """
        log.info("Installing %s via deb-url", package)
        if url is None:
            raise RuntimeError("deb-url method requires URL")
        log.info("URL: %s", url)
        if install_dir is not None:
            if package != "git-annex":
                raise RuntimeError("--install-dir is only supported for git-annex")
            # Expand any "{tmpdir}" placeholder to a fresh temp directory:
            install_dir = untmppath(install_dir)
            log.info("Install dir: %s", install_dir)
        log.info("Extra args: %s", extra_args)
        if kwargs:
            log.warning("Ignoring extra installer arguments: %r", kwargs)
        with tempfile.TemporaryDirectory() as tmpdir:
            debpath = os.path.join(tmpdir, f"{package}.deb")
            download_file(url, debpath)
            # A "{version}" placeholder in install_dir is filled in with the
            # actual version of the downloaded package:
            if install_dir is not None and "{version}" in str(install_dir):
                deb_version = readcmd(
                    "dpkg-deb", "--showformat", "${Version}", "-W", debpath
                )
                install_dir = Path(str(install_dir).format(version=deb_version))
                log.info("Expanded install dir to %s", install_dir)
            binpath = install_deb(
                debpath,
                self.manager,
                Path("usr/bin"),
                install_dir=install_dir,
                extra_args=extra_args,
            )
        log.debug("Installed program directory: %s", binpath)
        return binpath
    def assert_supported_system(self) -> None:
        """Require the ``dpkg`` executable on PATH."""
        if shutil.which("dpkg") is None:
            raise MethodNotSupportedError("dpkg command not found")
class AutobuildSnapshotInstaller(Installer):
    """
    Base class for installers that download prebuilt git-annex images from
    downloads.kitenet.net
    """
    OPTIONS: ClassVar[List[Option]] = []
    PACKAGES = {
        "git-annex": ("git-annex", ["git-annex"]),
    }
    def _install_linux(self, path: str) -> Path:
        """Download & unpack the standalone Linux tarball found under *path*."""
        # Not a TemporaryDirectory: the extracted tree is added to PATH and
        # must outlive this process.
        tmpdir = mktempdir("dl-build-")
        annex_bin = tmpdir / "git-annex.linux"
        log.info("Downloading and extracting under %s", annex_bin)
        gzfile = tmpdir / "git-annex-standalone-amd64.tar.gz"
        download_file(
            f"https://downloads.kitenet.net/git-annex/{path}"
            "/git-annex-standalone-amd64.tar.gz",
            gzfile,
        )
        runcmd("tar", "-C", tmpdir, "-xzf", gzfile)
        self.manager.addpath(annex_bin)
        return annex_bin
    def _install_macos(self, path: str) -> Path:
        """Download the macOS ``*.dmg`` found under *path* and install it."""
        with tempfile.TemporaryDirectory() as tmpdir:
            dmgpath = os.path.join(tmpdir, "git-annex.dmg")
            download_file(
                f"https://downloads.kitenet.net/git-annex/{path}/git-annex.dmg",
                dmgpath,
            )
            return install_git_annex_dmg(dmgpath, self.manager)
    def assert_supported_system(self) -> None:
        """Only supported on POSIX (Linux & macOS) systems."""
        if not ON_POSIX:
            raise MethodNotSupportedError(f"{SYSTEM} OS not supported")
@GitAnnexComponent.register_installer
class AutobuildInstaller(AutobuildSnapshotInstaller):
    """Installs the latest official build of git-annex from kitenet.net"""

    NAME = "autobuild"

    def install_package(self, package: str, **kwargs: Any) -> Path:
        """Fetch & install the current autobuild for this platform."""
        log.info("Installing %s via autobuild", package)
        if kwargs:
            log.warning("Ignoring extra installer arguments: %r", kwargs)
        assert package == "git-annex"
        # ON_MACOS and ON_LINUX are mutually exclusive, so order is free.
        if ON_MACOS:
            binpath = self._install_macos("autobuild/x86_64-apple-yosemite")
        elif ON_LINUX:
            binpath = self._install_linux("autobuild/amd64")
        else:
            raise AssertionError("Method should not be called on unsupported platforms")
        log.debug("Installed program directory: %s", binpath)
        return binpath
@GitAnnexComponent.register_installer
class SnapshotInstaller(AutobuildSnapshotInstaller):
    """
    Installs the latest official snapshot build of git-annex from kitenet.net
    """

    NAME = "snapshot"

    def install_package(self, package: str, **kwargs: Any) -> Path:
        """Fetch & install the current snapshot build for this platform."""
        log.info("Installing %s via snapshot", package)
        if kwargs:
            log.warning("Ignoring extra installer arguments: %r", kwargs)
        assert package == "git-annex"
        # ON_MACOS and ON_LINUX are mutually exclusive, so order is free.
        if ON_MACOS:
            binpath = self._install_macos("OSX/current/10.15_Catalina")
        elif ON_LINUX:
            binpath = self._install_linux("linux/current")
        else:
            raise AssertionError("Method should not be called on unsupported platforms")
        log.debug("Installed program directory: %s", binpath)
        return binpath
@GitAnnexComponent.register_installer
@DataladComponent.register_installer
class CondaInstaller(Installer):
    """Installs via conda"""

    NAME = "conda"

    OPTIONS = [
        EXTRA_ARGS_OPTION,
    ]

    PACKAGES = {
        "datalad": ("datalad", ["datalad"]),
        "git-annex": ("git-annex", ["git-annex"]),
    }

    def __init__(
        self, manager: DataladInstaller, conda_instance: Optional[CondaInstance] = None
    ) -> None:
        super().__init__(manager)
        #: Specific Conda installation to install into, or `None` to use the
        #: manager's most recently provisioned one
        self.conda_instance: Optional[CondaInstance] = conda_instance

    def install_package(
        self,
        package: str,
        version: Optional[str] = None,
        extra_args: Optional[List[str]] = None,
        **kwargs: Any,
    ) -> Path:
        """Install *package* from conda-forge, retrying transient failures."""
        if package == "git-annex" and not ON_LINUX:
            raise MethodNotSupportedError(
                "Conda only supports installing git-annex on Linux"
            )
        log.info("Installing %s via conda", package)
        if self.conda_instance is not None:
            conda = self.conda_instance
        else:
            conda = self.manager.get_conda()
        log.info("Environment: %s", conda.name)
        log.info("Version: %s", version)
        log.info("Extra args: %s", extra_args)
        if kwargs:
            log.warning("Ignoring extra installer arguments: %r", kwargs)
        cmd = [conda.conda_exe, "install"]
        if conda.name is not None:
            cmd += ["--name", conda.name]
        cmd += ["-q", "-c", "conda-forge", "-y"]
        cmd.extend(extra_args or [])
        cmd.append(package if version is None else f"{package}={version}")
        # Up to four attempts, sleeping five seconds between retries:
        for retries_left in range(3, -1, -1):
            try:
                runcmd(*cmd)
            except subprocess.CalledProcessError as e:
                if retries_left == 0:
                    raise
                log.error(
                    "Command failed with exit status %d; sleeping and retrying",
                    e.returncode,
                )
                sleep(5)
            else:
                break
        binpath = conda.bindir
        log.debug("Installed program directory: %s", binpath)
        return binpath

    def assert_supported_system(self) -> None:
        """Require a provisioned Conda instance or ``conda`` on PATH."""
        if not self.manager.conda_stack and shutil.which("conda") is None:
            raise MethodNotSupportedError("Conda installation not found")
@GitAnnexComponent.register_installer
class DataladGitAnnexBuildInstaller(Installer):
    """
    Installs git-annex via the artifact from the latest successful build of
    datalad/git-annex
    """
    NAME = "datalad/git-annex:tested"
    OPTIONS = [
        Option(
            "--install-dir",
            converter=Path,
            metavar="DIR",
            help="Directory in which to unpack the `*.deb`",
        ),
    ]
    PACKAGES = {
        "git-annex": ("git-annex", ["git-annex"]),
    }
    def install_package(
        self, package: str, install_dir: Optional[Path] = None, **kwargs: Any
    ) -> Path:
        """
        Download the workflow artifact for the current platform and install
        the ``*.deb``/``*.dmg``/``*.exe`` it contains.
        """
        log.info("Installing %s via %s", package, self.NAME)
        if install_dir is not None:
            if not ON_LINUX:
                raise RuntimeError("--install-dir is only supported on Linux")
            # Expand any "{tmpdir}" placeholder to a fresh temp directory:
            install_dir = untmppath(install_dir)
            log.info("Install dir: %s", install_dir)
        if kwargs:
            log.warning("Ignoring extra installer arguments: %r", kwargs)
        assert package == "git-annex"
        with tempfile.TemporaryDirectory() as tmpdir_:
            tmpdir = Path(tmpdir_)
            if ON_LINUX:
                self.download("ubuntu", tmpdir)
                # Each artifact is expected to contain exactly one installer
                # file; the 1-tuple unpacking asserts that.
                (debpath,) = tmpdir.glob("*.deb")
                binpath = install_deb(
                    debpath,
                    self.manager,
                    Path("usr", "bin"),
                    install_dir=install_dir,
                )
            elif ON_MACOS:
                self.download("macos", tmpdir)
                (dmgpath,) = tmpdir.glob("*.dmg")
                binpath = install_git_annex_dmg(dmgpath, self.manager)
            elif ON_WINDOWS:
                self.download("windows", tmpdir)
                (exepath,) = tmpdir.glob("*.exe")
                # "/S" requests a silent (unattended) install.
                self.manager.run_maybe_elevated(exepath, "/S")
                binpath = Path("C:/Program Files", "Git", "usr", "bin")
                self.manager.addpath(binpath)
            else:
                raise AssertionError(
                    "Method should not be called on unsupported platforms"
                )
        log.debug("Installed program directory: %s", binpath)
        return binpath
    def assert_supported_system(self) -> None:
        """Supported on Linux, macOS, & Windows."""
        if not (ON_LINUX or ON_MACOS or ON_WINDOWS):
            raise MethodNotSupportedError(f"{SYSTEM} OS not supported")
    @staticmethod
    def download(ostype: str, target_dir: Path) -> None:
        """
        Download & unzip the artifact from the latest successful build of
        datalad/git-annex for the given OS in the given directory
        """
        GitHubArtifactDownloader().download_last_successful_artifact(
            target_dir, repo="datalad/git-annex", workflow=f"build-{ostype}.yaml"
        )
@GitAnnexComponent.register_installer
class DataladGitAnnexLatestBuildInstaller(DataladGitAnnexBuildInstaller):
    """
    Installs git-annex via the artifact from the latest artifact-producing
    build (successful or unsuccessful) of datalad/git-annex
    """

    NAME = "datalad/git-annex"

    @staticmethod
    def download(ostype: str, target_dir: Path) -> None:
        """
        Download & unzip the artifact from the latest build of
        datalad/git-annex for the given OS in the given directory
        """
        downloader = GitHubArtifactDownloader()
        downloader.download_latest_artifact(
            target_dir, repo="datalad/git-annex", workflow=f"build-{ostype}.yaml"
        )
class GitHubArtifactDownloader:
    """
    Fetches & unpacks build artifacts from GitHub Actions workflow runs,
    authenticating with a GitHub OAuth token
    """

    def __init__(self) -> None:
        # The token comes from the GITHUB_TOKEN environment variable or, as a
        # fallback, the `hub.oauthtoken` Git config option.
        token = os.environ.get("GITHUB_TOKEN")
        if not token:
            r = subprocess.run(
                ["git", "config", "hub.oauthtoken"],
                stdout=subprocess.PIPE,
                universal_newlines=True,
            )
            if r.returncode != 0 or not r.stdout.strip():
                raise RuntimeError(
                    "GitHub OAuth token not set. Set via GITHUB_TOKEN"
                    " environment variable or hub.oauthtoken Git config option."
                )
            token = r.stdout.strip()
        #: OAuth token sent as a Bearer credential on every request
        self.token: str = token

    @contextmanager
    def get(self, url: str) -> Iterator[Any]:
        """Perform an authenticated GET request; yields the open response."""
        log.debug("HTTP request: GET %s", url)
        req = Request(url, headers={"Authorization": f"Bearer {self.token}"})
        with urlopen(req) as r:
            yield r

    def getjson(self, url: str) -> Any:
        """Perform an authenticated GET request; return the decoded JSON body."""
        with self.get(url) as r:
            return json.load(r)

    def get_workflow_runs(self, url: str) -> Iterator[dict]:
        """Yield all workflow runs from ``url``, following pagination links."""
        while True:
            with self.get(url) as r:
                data = json.load(r)
            for run in data["workflow_runs"]:
                assert isinstance(run, dict)
                yield run
            # BUG FIX: headers.get("Link") returns None when the results fit
            # in a single page, which would crash parse_header_links();
            # default to an empty string instead.
            links = parse_header_links(r.headers.get("Link", ""))
            url2 = links.get("next", {}).get("url")
            if url2 is None:
                break
            url = url2

    def get_archive_download_url(self, artifacts_url: str) -> Optional[str]:
        """
        Given a workflow run's ``artifacts_url``, returns the
        ``archive_download_url`` for the one & only artifact.  If there are no
        artifacts, `None` is returned.  If there is more than one artifact, a
        `RuntimeError` is raised.
        """
        log.info("Getting archive download URL from %s", artifacts_url)
        artifacts = self.getjson(artifacts_url)
        if artifacts["total_count"] < 1:
            log.debug("No artifacts found")
            return None
        elif artifacts["total_count"] > 1:
            raise RuntimeError("Too many artifacts found!")
        else:
            url = artifacts["artifacts"][0]["archive_download_url"]
            assert isinstance(url, str)
            return url

    def download_archive(self, target_dir: Path, archive_download_url: str) -> None:
        """
        Downloads the workflow build artifact zip from ``archive_download_url``
        and expands it in ``target_dir``
        """
        log.info("Downloading artifact package from %s", archive_download_url)
        target_dir.mkdir(parents=True, exist_ok=True)
        artifact_path = target_dir / ".artifact.zip"
        download_file(
            archive_download_url,
            artifact_path,
            headers={"Authorization": f"Bearer {self.token}"},
        )
        with ZipFile(str(artifact_path)) as zipf:
            zipf.extractall(str(target_dir))
        artifact_path.unlink()

    def download_latest_artifact(
        self, target_dir: Path, repo: str, workflow: str, branch: str = "master"
    ) -> None:
        """
        Downloads the most recent artifact built by ``workflow`` on ``branch``
        in ``repo`` to ``target_dir``
        """
        runs_url = (
            f"https://api.github.com/repos/{repo}/actions/workflows/{workflow}"
            f"/runs?branch={branch}"
        )
        log.info("Getting artifacts_url from %s", runs_url)
        for run in self.get_workflow_runs(runs_url):
            artifacts_url = run["artifacts_url"]
            archive_download_url = self.get_archive_download_url(artifacts_url)
            if archive_download_url is not None:
                self.download_archive(target_dir, archive_download_url)
                return
        else:
            raise RuntimeError("No workflow runs with artifacts found!")

    def download_last_successful_artifact(
        self, target_dir: Path, repo: str, workflow: str, branch: str = "master"
    ) -> None:
        """
        Downloads the most recent artifact built by a successful run of
        ``workflow`` on ``branch`` in ``repo`` to ``target_dir``
        """
        runs_url = (
            f"https://api.github.com/repos/{repo}/actions/workflows/{workflow}"
            f"/runs?status=success&branch={branch}"
        )
        log.info("Getting artifacts_url from %s", runs_url)
        for run in self.get_workflow_runs(runs_url):
            artifacts_url = run["artifacts_url"]
            archive_download_url = self.get_archive_download_url(artifacts_url)
            if archive_download_url is not None:
                self.download_archive(target_dir, archive_download_url)
                return
        else:
            raise RuntimeError("No workflow runs with artifacts found!")
@GitAnnexComponent.register_installer
class DataladPackagesBuildInstaller(Installer):
    """
    Installs git-annex via artifacts uploaded to
    <https://datasets.datalad.org/?dir=/datalad/packages>
    """
    NAME = "datalad/packages"
    OPTIONS: ClassVar[List[Option]] = []
    PACKAGES = {
        "git-annex": ("git-annex", ["git-annex"]),
    }
    def install_package(
        self, package: str, version: Optional[str] = None, **kwargs: Any
    ) -> Path:
        """
        Download & run the git-annex Windows installer, either the latest
        snapshot (default) or the given *version*.
        """
        log.info("Installing %s via datalad/packages", package)
        log.info("Version: %s", version)
        if kwargs:
            log.warning("Ignoring extra installer arguments: %r", kwargs)
        assert package == "git-annex"
        # Installing under a tempfile.TemporaryDirectory() leads to an error
        # when Python tries to clean up the directory, so we'll just leave the
        # .exe file alone.
        tmpdir = mktempdir("dl-datalad-package-")
        if ON_WINDOWS:
            if version is None:
                exefile = "git-annex-installer_latest-snapshot_x64.exe"
            else:
                exefile = f"git-annex-installer_{version}_x64.exe"
            exepath = tmpdir / exefile
            download_file(
                f"https://datasets.datalad.org/datalad/packages/windows/{exefile}",
                exepath,
            )
            # "/S" requests a silent (unattended) install.
            self.manager.run_maybe_elevated(exepath, "/S")
            binpath = Path("C:/Program Files", "Git", "usr", "bin")
            self.manager.addpath(binpath)
        else:
            raise AssertionError("Method should not be called on unsupported platforms")
        log.debug("Installed program directory: %s", binpath)
        return binpath
    def assert_supported_system(self) -> None:
        """Only supported on Windows."""
        if not ON_WINDOWS:
            raise MethodNotSupportedError(f"{SYSTEM} OS not supported")
@GitAnnexComponent.register_installer
class DMGInstaller(Installer):
    """Installs a local ``*.dmg`` file"""

    NAME = "dmg"

    OPTIONS = [
        Option(
            "--path",
            converter=Path,
            metavar="PATH",
            help="Path to local `*.dmg` to install",
        ),
    ]

    PACKAGES = {
        "git-annex": ("git-annex", ["git-annex"]),
    }

    def install_package(
        self,
        package: str,
        path: Optional[Path] = None,
        **kwargs: Any,
    ) -> Path:
        """Attach & install the given local ``*.dmg`` image."""
        log.info("Installing %s via dmg", package)
        if path is None:
            raise RuntimeError("dmg method requires path")
        log.info("Path: %s", path)
        if kwargs:
            log.warning("Ignoring extra installer arguments: %r", kwargs)
        binpath = install_git_annex_dmg(path, self.manager)
        log.debug("Installed program directory: %s", binpath)
        return binpath

    def assert_supported_system(self) -> None:
        """Only supported on macOS."""
        if not ON_MACOS:
            raise MethodNotSupportedError(f"{SYSTEM} OS not supported")
class MethodNotSupportedError(Exception):
    """
    Raised when an installer's `install()` method is called on an unsupported
    system or with an unsupported component
    """
    # The docstring serves as the class body; the former `pass` was redundant.
def download_file(
    url: str, path: Union[str, os.PathLike], headers: Optional[Dict[str, str]] = None
) -> None:
    """
    Download a file from ``url``, saving it at ``path``.  Optional ``headers``
    are sent in the HTTP request.
    """
    log.info("Downloading %s", url)
    req = Request(url, headers=headers or {})
    with urlopen(req) as response, open(path, "wb") as outfile:
        # Stream the response body to disk in chunks.
        shutil.copyfileobj(response, outfile)
def compose_pip_requirement(
    package: str,
    version: Optional[str] = None,
    urlspec: Optional[str] = None,
    extras: Optional[str] = None,
) -> str:
    """
    Compose a PEP 508 requirement specifier.  (The previous comment cited PEP
    503, which defines the simple repository API; requirement specifier
    syntax is defined by PEP 508.)

    :param package: the project name
    :param version: exact version to pin with ``==``, or — when ``urlspec``
        is given — a VCS revision appended as ``@version``
    :param urlspec: a direct-reference URL, e.g. ``git+https://...``
    :param extras: comma-separated extras, e.g. ``"full,tests"``
    """
    req = package
    if extras is not None:
        req += f"[{extras}]"
    if urlspec is None:
        if version is not None:
            req += f"=={version}"
    else:
        req += f" @ {urlspec}"
        if version is not None:
            req += f"@{version}"
    return req
def mktempdir(prefix: str) -> Path:
    """Create & return a fresh directory under ``$TMPDIR`` named with *prefix*."""
    tmpdir = tempfile.mkdtemp(prefix=prefix)
    return Path(tmpdir)
def runcmd(*args: Any, **kwargs: Any) -> subprocess.CompletedProcess:
    """Run (and log) a given command.  Raise an error if it fails."""
    argstrs = [str(arg) for arg in args]
    log.info("Running: %s", " ".join(shlex.quote(a) for a in argstrs))
    return subprocess.run(argstrs, check=True, **kwargs)
def readcmd(*args: Any) -> str:
    """Run a command, capturing & returning its stdout"""
    r = runcmd(*args, stdout=subprocess.PIPE, universal_newlines=True)
    output = r.stdout
    assert isinstance(output, str)
    return output
def install_git_annex_dmg(
    dmgpath: Union[str, os.PathLike], manager: DataladInstaller
) -> Path:
    """Install git-annex from a DMG file at ``dmgpath``"""
    # Mount the image, copy the app bundle into /Applications, and unmount.
    runcmd("hdiutil", "attach", dmgpath)
    runcmd("rsync", "-a", "/Volumes/git-annex/git-annex.app", "/Applications/")
    runcmd("hdiutil", "detach", "/Volumes/git-annex/")
    annex_bin = Path("/Applications/git-annex.app/Contents/MacOS")
    # Make the bundled executables available on the user's PATH.
    manager.addpath(annex_bin)
    return annex_bin
def install_deb(
    debpath: Union[str, os.PathLike],
    manager: DataladInstaller,
    bin_path: Path,
    install_dir: Optional[Path] = None,
    extra_args: Optional[List[str]] = None,
) -> Path:
    """
    Install a ``*.deb`` file, either system-wide via ``dpkg -i`` (the
    default) or — when *install_dir* is given — by unpacking its data archive
    under that directory.  Returns the directory containing the package's
    programs (``bin_path`` relative to the installation root).
    """
    if install_dir is None:
        cmd: List[Union[str, os.PathLike]] = ["dpkg"]
        if extra_args is not None:
            cmd.extend(extra_args)
        cmd.append("-i")
        cmd.append(debpath)
        manager.sudo(*cmd)
        return Path("/usr/bin")
    else:
        if extra_args:
            log.warning("Not using dpkg; ignoring --extra-args")
        # `ar` runs with the tempdir as CWD, so debpath must stay resolvable:
        assert os.path.isabs(debpath)
        install_dir.mkdir(parents=True, exist_ok=True)
        install_dir = install_dir.resolve()
        with tempfile.TemporaryDirectory() as tmpdir:
            oldpwd = os.getcwd()
            os.chdir(tmpdir)
            try:
                # Unpack the deb archive and extract its data tarball.  The
                # data member may be compressed with gzip, xz, or zstd
                # depending on how the package was built, so glob for it and
                # let `tar -xf` autodetect the compression (the previous
                # hard-coded "data.tar.gz" failed on non-gzip debs).
                runcmd("ar", "-x", debpath)
                (data_tar,) = Path(tmpdir).glob("data.tar.*")
                runcmd("tar", "-C", install_dir, "-xf", data_tar)
            finally:
                # Always restore the previous working directory, even when
                # extraction fails.
                os.chdir(oldpwd)
        manager.addpath(install_dir / bin_path)
        return install_dir / bin_path
def ask(prompt: str, choices: List[str]) -> str:
    """Prompt on stdin until the user enters one of *choices*; return it."""
    menu = "/".join(choices)
    while True:
        answer = input(f"{prompt} [{menu}] ")
        if answer in choices:
            return answer
def get_version_codename() -> str:
    """
    Return the Debian/Ubuntu release codename, taken from
    ``VERSION_CODENAME`` in ``/etc/os-release`` or, failing that, from
    ``/etc/debian_version``.
    """
    with open("/etc/os-release") as fp:
        for line in fp:
            # Match `VERSION_CODENAME=value` with or without double quotes;
            # the `(?(1)"|)` conditional requires a closing quote only when
            # an opening quote was captured.
            m = re.fullmatch(
                r'VERSION_CODENAME=(")?(?P<value>[^"]+)(?(1)"|)', line.strip()
            )
            if m:
                return m["value"]
    # If VERSION_CODENAME is not set in /etc/os-release, then the contents of
    # /etc/debian_version should be of the form "$VERSION/sid".
    with open("/etc/debian_version") as fp:
        return fp.read().partition("/")[0]
def parse_header_links(links_header: Optional[str]) -> Dict[str, Dict[str, str]]:
    """
    Parse a "Link" header from an HTTP response into a `dict` of the form::

        {"next": {"url": "...", "rel": "next"}, "last": { ... }}

    A `None` or empty header yields an empty `dict`, so the result of
    ``headers.get("Link")`` can be passed directly.
    """
    # <https://git.io/JcYZi>
    links: Dict[str, Dict[str, str]] = {}
    # Generalized to accept None (previously this crashed with an
    # AttributeError when the header was absent):
    if not links_header:
        return links
    replace_chars = " '\""
    value = links_header.strip(replace_chars)
    if not value:
        return links
    for val in re.split(r", *<", value):
        try:
            url, params = val.split(";", 1)
        except ValueError:
            url, params = val, ""
        link: Dict[str, str] = {"url": url.strip("<> '\"")}
        for param in params.split(";"):
            try:
                # Renamed from `key, value` to avoid shadowing the outer
                # `value` variable.
                pname, pvalue = param.split("=")
            except ValueError:
                break
            link[pname.strip(replace_chars)] = pvalue.strip(replace_chars)
        key = link.get("rel") or link.get("url")
        assert key is not None
        links[key] = link
    return links
def untmppath(path: Path) -> Path:
    """Expand a ``{tmpdir}`` placeholder in *path* to a fresh temp directory."""
    if "{tmpdir}" not in str(path):
        return path
    return Path(str(path).format(tmpdir=mktempdir("dl-")))
def main(argv: Optional[List[str]] = None) -> int:
    """CLI entry point: run a DataladInstaller session and return its exit status."""
    # The context manager guarantees installer cleanup even on error.
    with DataladInstaller() as manager:
        return manager.main(argv)


if __name__ == "__main__":
    sys.exit(main(sys.argv))
| 39,771 | 5,767 | 1,471 |
0ad7f61aed9b153afd213f5a01cf465a50844018 | 9,205 | py | Python | moabb/paradigms/ssvep.py | plcrodrigues/moabb | aa4274fe7905631864e854c121c92e1927061f29 | [
"BSD-3-Clause"
] | 321 | 2017-06-03T16:14:45.000Z | 2022-03-28T17:43:59.000Z | moabb/paradigms/ssvep.py | plcrodrigues/moabb | aa4274fe7905631864e854c121c92e1927061f29 | [
"BSD-3-Clause"
] | 223 | 2017-06-03T17:41:57.000Z | 2022-03-29T09:07:44.000Z | moabb/paradigms/ssvep.py | girafe-ai/moabb | 78bbb48a2a0058b0725ebeba1ba1e3203f0eacd5 | [
"BSD-3-Clause"
] | 118 | 2017-06-03T18:36:35.000Z | 2022-03-16T06:22:02.000Z | """Steady-State Visually Evoked Potentials Paradigms"""
import logging
from moabb.datasets import utils
from moabb.datasets.fake import FakeDataset
from moabb.paradigms.base import BaseParadigm
log = logging.getLogger(__name__)
class BaseSSVEP(BaseParadigm):
"""Base SSVEP Paradigm
Parameters
----------
filters: list of list | None (default [7, 45])
Bank of bandpass filter to apply.
events: list of str | None (default None)
List of stimulation frequencies. If None, use all stimulus
found in the dataset.
n_classes: int or None (default None)
Number of classes each dataset must have. All dataset classes if None.
tmin: float (default 0.0)
Start time (in second) of the epoch, relative to the dataset specific
task interval e.g. tmin = 1 would mean the epoch will start 1 second
after the begining of the task as defined by the dataset.
tmax: float | None, (default None)
End time (in second) of the epoch, relative to the begining of the
dataset specific task interval. tmax = 5 would mean the epoch will end
5 second after the begining of the task as defined in the dataset. If
None, use the dataset value.
baseline: None | tuple of length 2
The time interval to consider as “baseline” when applying baseline
correction. If None, do not apply baseline correction.
If a tuple (a, b), the interval is between a and b (in seconds),
including the endpoints.
Correction is applied by computing the mean of the baseline period
and subtracting it from the data (see mne.Epochs)
channels: list of str | None (default None)
List of channel to select. If None, use all EEG channels available in
the dataset.
resample: float | None (default None)
If not None, resample the eeg data with the sampling rate provided.
"""
@property
@property
class SSVEP(BaseSSVEP):
"""Single bandpass filter SSVEP
SSVEP paradigm with only one bandpass filter (default 7 to 45 Hz)
Metric is 'roc-auc' if 2 classes and 'accuracy' if more
Parameters
----------
fmin: float (default 7)
cutoff frequency (Hz) for the high pass filter
fmax: float (default 45)
cutoff frequency (Hz) for the low pass filter
events: list of str | None (default None)
List of stimulation frequencies. If None, use all stimulus
found in the dataset.
n_classes: int or None (default None)
Number of classes each dataset must have. All dataset classes if None
tmin: float (default 0.0)
Start time (in second) of the epoch, relative to the dataset specific
task interval e.g. tmin = 1 would mean the epoch will start 1 second
after the begining of the task as defined by the dataset.
tmax: float | None, (default None)
End time (in second) of the epoch, relative to the begining of the
dataset specific task interval. tmax = 5 would mean the epoch will end
5 second after the begining of the task as defined in the dataset. If
None, use the dataset value.
baseline: None | tuple of length 2
The time interval to consider as “baseline” when applying baseline
correction. If None, do not apply baseline correction.
If a tuple (a, b), the interval is between a and b (in seconds),
including the endpoints.
Correction is applied by computing the mean of the baseline period
and subtracting it from the data (see mne.Epochs)
channels: list of str | None (default None)
List of channel to select. If None, use all EEG channels available in
the dataset.
resample: float | None (default None)
If not None, resample the eeg data with the sampling rate provided.
"""
class FilterBankSSVEP(BaseSSVEP):
"""Filtered bank n-class SSVEP paradigm
SSVEP paradigm with multiple narrow bandpass filters, centered around the
frequencies of considered events.
Metric is 'roc-auc' if 2 classes and 'accuracy' if more.
Parameters
-----------
filters: list of list | None (default None)
If None, bandpass set around freqs of events with [f_n-0.5, f_n+0.5]
events: List of str,
List of stimulation frequencies. If None, use all stimulus
found in the dataset.
n_classes: int or None (default 2)
Number of classes each dataset must have. All dataset classes if None
tmin: float (default 0.0)
Start time (in second) of the epoch, relative to the dataset specific
task interval e.g. tmin = 1 would mean the epoch will start 1 second
after the begining of the task as defined by the dataset.
tmax: float | None, (default None)
End time (in second) of the epoch, relative to the begining of the
dataset specific task interval. tmax = 5 would mean the epoch will end
5 second after the begining of the task as defined in the dataset. If
None, use the dataset value.
baseline: None | tuple of length 2
The time interval to consider as “baseline” when applying baseline
correction. If None, do not apply baseline correction.
If a tuple (a, b), the interval is between a and b (in seconds),
including the endpoints.
Correction is applied by computing the mean of the baseline period
and subtracting it from the data (see mne.Epochs)
channels: list of str | None (default None)
List of channel to select. If None, use all EEG channels available in
the dataset.
resample: float | None (default None)
If not None, resample the eeg data with the sampling rate provided.
"""
class FakeSSVEPParadigm(BaseSSVEP):
"""Fake SSVEP classification."""
@property
| 34.475655 | 86 | 0.610864 | """Steady-State Visually Evoked Potentials Paradigms"""
import logging
from moabb.datasets import utils
from moabb.datasets.fake import FakeDataset
from moabb.paradigms.base import BaseParadigm
log = logging.getLogger(__name__)
class BaseSSVEP(BaseParadigm):
    """Base SSVEP Paradigm

    Parameters
    ----------
    filters: list of list | None (default [7, 45])
        Bank of bandpass filter to apply.
    events: list of str | None (default None)
        List of stimulation frequencies. If None, use all stimulus
        found in the dataset.
    n_classes: int or None (default None)
        Number of classes each dataset must have. All dataset classes if None.
    tmin: float (default 0.0)
        Start time (in second) of the epoch, relative to the dataset specific
        task interval e.g. tmin = 1 would mean the epoch will start 1 second
        after the begining of the task as defined by the dataset.
    tmax: float | None, (default None)
        End time (in second) of the epoch, relative to the begining of the
        dataset specific task interval. tmax = 5 would mean the epoch will end
        5 second after the begining of the task as defined in the dataset. If
        None, use the dataset value.
    baseline: None | tuple of length 2
        The time interval to consider as "baseline" when applying baseline
        correction. If None, do not apply baseline correction.
        If a tuple (a, b), the interval is between a and b (in seconds),
        including the endpoints.
        Correction is applied by computing the mean of the baseline period
        and subtracting it from the data (see mne.Epochs)
    channels: list of str | None (default None)
        List of channel to select. If None, use all EEG channels available in
        the dataset.
    resample: float | None (default None)
        If not None, resample the eeg data with the sampling rate provided.
    """

    def __init__(
        self,
        filters=((7, 45),),
        events=None,
        n_classes=None,
        tmin=0.0,
        tmax=None,
        baseline=None,
        channels=None,
        resample=None,
    ):
        super().__init__()
        self.filters = filters
        self.events = events
        self.n_classes = n_classes
        self.baseline = baseline
        self.channels = channels
        self.resample = resample
        if tmax is not None and tmin >= tmax:
            raise ValueError("tmax must be greater than tmin")
        self.tmin = tmin
        self.tmax = tmax
        if self.events is None:
            log.warning(
                "Choosing the first "
                + str(n_classes)
                + " classes"
                + " from all possible events"
            )
        elif n_classes is not None:
            # BUGFIX: the original asserted unconditionally, so passing
            # `events` while leaving n_classes=None raised
            # "TypeError: '<=' not supported between 'NoneType' and 'int'".
            assert n_classes <= len(self.events), "More classes than events specified"

    def is_valid(self, dataset):
        """Return True if *dataset* is an SSVEP dataset holding all requested events."""
        ret = True
        if not (dataset.paradigm == "ssvep"):
            ret = False
        # check if dataset has required events
        if self.events:
            if not set(self.events) <= set(dataset.event_id.keys()):
                ret = False
        return ret

    def used_events(self, dataset):
        """Return the {event_name: event_id} mapping used for *dataset*.

        Raises
        ------
        ValueError
            If fewer than ``n_classes`` requested events are available.
        """
        out = {}
        if self.events is None:
            # Take dataset events in declaration order, up to n_classes.
            for k, v in dataset.event_id.items():
                out[k] = v
                if self.n_classes and len(out) == self.n_classes:
                    break
        else:
            for event in self.events:
                if event in dataset.event_id.keys():
                    out[event] = dataset.event_id[event]
                if self.n_classes and len(out) == self.n_classes:
                    break
        if self.n_classes and len(out) < self.n_classes:
            raise ValueError(
                f"Dataset {dataset.code} did not have enough "
                f"freqs in {self.events} to run analysis"
            )
        return out

    def prepare_process(self, dataset):
        """Derive the filter bank from event frequencies when none was given."""
        event_id = self.used_events(dataset)
        # get filters: one narrow band around each numeric event name
        if self.filters is None:
            self.filters = [
                [float(f) - 0.5, float(f) + 0.5]
                for f in event_id.keys()
                if f.replace(".", "", 1).isnumeric()
            ]

    @property
    def datasets(self):
        """All known SSVEP datasets compatible with this paradigm's settings."""
        if self.tmax is None:
            interval = None
        else:
            interval = self.tmax - self.tmin
        return utils.dataset_search(
            paradigm="ssvep",
            events=self.events,
            # total_classes=self.n_classes,
            interval=interval,
            has_all_events=True,
        )

    @property
    def scoring(self):
        """'roc_auc' for binary problems, 'accuracy' otherwise."""
        if self.n_classes == 2:
            return "roc_auc"
        else:
            return "accuracy"
class SSVEP(BaseSSVEP):
    """SSVEP paradigm with a single bandpass filter.

    The signal is filtered once between ``fmin`` and ``fmax``
    (defaults: 7-45 Hz). Scoring is 'roc_auc' for two classes and
    'accuracy' otherwise.

    Parameters
    ----------
    fmin: float (default 7)
        Cutoff frequency (Hz) of the high-pass filter.
    fmax: float (default 45)
        Cutoff frequency (Hz) of the low-pass filter.
    **kwargs:
        Remaining options (events, n_classes, tmin, tmax, baseline,
        channels, resample) are forwarded unchanged to `BaseSSVEP`.
        Passing ``filters`` is rejected: the single band is built from
        ``fmin``/``fmax``.
    """

    def __init__(self, fmin=7, fmax=45, **kwargs):
        # The unique band comes from fmin/fmax, so an explicit filter
        # bank would conflict with it.
        if "filters" in kwargs:
            raise ValueError("SSVEP does not take argument filters")
        super().__init__(filters=[(fmin, fmax)], **kwargs)
class FilterBankSSVEP(BaseSSVEP):
    """SSVEP paradigm using a bank of narrow bandpass filters.

    One narrow band is applied per stimulation frequency. When ``filters``
    is left as None, ``BaseSSVEP.prepare_process`` builds a band
    [f - 0.5, f + 0.5] around each numeric event name. Scoring is
    'roc_auc' for two classes and 'accuracy' otherwise.

    Parameters
    ----------
    filters: list of list | None (default None)
        Explicit filter bank; if None, it is derived from the event
        frequencies as described above.
    **kwargs:
        Forwarded unchanged to `BaseSSVEP` (events, n_classes, tmin,
        tmax, baseline, channels, resample).
    """

    def __init__(self, filters=None, **kwargs):
        super().__init__(filters=filters, **kwargs)
class FakeSSVEPParadigm(BaseSSVEP):
    """Fake SSVEP classification."""

    @property
    def datasets(self):
        # Fixed two-frequency synthetic dataset, used for unit tests.
        return [FakeDataset(event_list=["13", "15"], paradigm="ssvep")]
| 3,019 | 0 | 240 |
3bce5d801f2b7e46030a48b54ef5fc9e31dec0f2 | 394 | py | Python | .ipython/profile_default/startup/startup_file.py | mmphego/dot-files | 0563646cd9e9d627c08c710000afcc038a55fa2c | [
"MIT"
] | 29 | 2019-03-03T17:54:46.000Z | 2021-12-05T00:06:30.000Z | .ipython/profile_default/startup/startup_file.py | deltakapa/dot-files | bb43088d2bcea15e892dfa45bff934b8e7399e17 | [
"MIT"
] | 1 | 2019-03-04T05:41:14.000Z | 2019-03-04T05:41:14.000Z | .ipython/profile_default/startup/startup_file.py | deltakapa/dot-files | bb43088d2bcea15e892dfa45bff934b8e7399e17 | [
"MIT"
] | 6 | 2019-03-03T17:50:34.000Z | 2021-01-18T13:12:45.000Z | import os
import subprocess
import sys
import time
modulenames = ", ".join(list(set(sys.modules) & set(globals())))
msg = "---> Automagically imported these packages (if available): {}".format(modulenames)
formatted_msg = Style.LINE + Style.BOLD + Style.RED + msg + Style.END
print(formatted_msg)
| 21.888889 | 89 | 0.659898 | import os
import subprocess
import sys
import time
class Style:
    """ANSI escape sequences used to colorize the IPython startup banner."""
    BOLD = "\033[1m"   # bold text
    END = "\033[0m\n"  # reset all attributes, then a trailing newline
    RED = "\033[91m"   # bright red foreground
    LINE = "\n"        # leading blank line
# Names that are both loaded modules and bound in the global namespace,
# i.e. the packages this startup file managed to auto-import.
modulenames = ", ".join(list(set(sys.modules) & set(globals())))
msg = "---> Automagically imported these packages (if available): {}".format(modulenames)
formatted_msg = Style.LINE + Style.BOLD + Style.RED + msg + Style.END
print(formatted_msg)
| 0 | 71 | 23 |
3bdcdb5f7e2a731cc8ba3d1d150a2d7eaeb46753 | 1,602 | py | Python | tests/test_parser_helpers.py | zagaran/instant-census | 62dd5bbc62939f43776a10708ef663722ead98af | [
"MIT"
] | 1 | 2021-06-01T17:03:47.000Z | 2021-06-01T17:03:47.000Z | tests/test_parser_helpers.py | zagaran/instant-census | 62dd5bbc62939f43776a10708ef663722ead98af | [
"MIT"
] | null | null | null | tests/test_parser_helpers.py | zagaran/instant-census | 62dd5bbc62939f43776a10708ef663722ead98af | [
"MIT"
] | null | null | null | from tests.common import InstantCensusTestCase
from utils.parser_helpers import split_standard_separators
# from parsers.number_parser import text2int
from string import whitespace as WHITESPACE_CHARS
# text2int_tests = {
# "twenty-two" : 22,
# "ninety seven" : 97,
# "one hundred thirty seven" : 137,
# "one million" : 1000000,
# "fiftieth" : 50,
# "four-hundred and forty-fourth" : 444,
# "eighty" : 80,
# "ten thousand and one" : 10001,
# }
# def test_text2int():
# with Test() as test:
# for test_inp, result in text2int_tests.iteritems():
# ret = text2int(test_inp)
# test.assertTrue(ret == result, str(test_inp) + " parsed incorrectly: " +
# str(ret) + " != " + str(result))
| 31.411765 | 86 | 0.519975 | from tests.common import InstantCensusTestCase
from utils.parser_helpers import split_standard_separators
# from parsers.number_parser import text2int
from string import whitespace as WHITESPACE_CHARS
class TestParserHelpers(InstantCensusTestCase):
    """Unit tests for utils.parser_helpers.split_standard_separators."""

    # Maps raw input strings to the token list the splitter must return.
    SPLIT_STRING_TESTS = {
        # simple splitting cases
        'a,b': ['a', 'b'],
        'a,b,c': ['a', 'b', 'c'],
        'a,bc': ['a', 'bc'],
        'a b c': ['a', 'b', 'c'],
        # multiple separators in a row
        "a, b, c": ['a', 'b', 'c'],
        # individual splitter character tests
        "-": [],
        ",": [],
        ".": [],
        "/": [],
        ";": [],
        ":": [],
        "|": [],
        WHITESPACE_CHARS: [],
    }

    def test_split_string(self):
        """Each sample input must split into exactly the expected tokens."""
        # BUGFIX: .items() instead of the Python-2-only .iteritems(), so the
        # test also runs under Python 3 (behavior is identical on Python 2).
        for test_inp, result in self.SPLIT_STRING_TESTS.items():
            ret = split_standard_separators(test_inp)
            self.assertTrue(ret == result, "'%s' parsed incorrectly: %s != %s" %
                            (test_inp, ret, result))
# text2int_tests = {
# "twenty-two" : 22,
# "ninety seven" : 97,
# "one hundred thirty seven" : 137,
# "one million" : 1000000,
# "fiftieth" : 50,
# "four-hundred and forty-fourth" : 444,
# "eighty" : 80,
# "ten thousand and one" : 10001,
# }
# def test_text2int():
# with Test() as test:
# for test_inp, result in text2int_tests.iteritems():
# ret = text2int(test_inp)
# test.assertTrue(ret == result, str(test_inp) + " parsed incorrectly: " +
# str(ret) + " != " + str(result))
| 264 | 523 | 23 |
1f48ee108a1a017bfe9393336c3a466100542c05 | 206 | py | Python | millisecond1/millisecond1_1.py | Walop/AdventOfCode2017 | 32786e46d8fdfb5c824b72403cbc1a8858bac2bb | [
"MIT"
] | null | null | null | millisecond1/millisecond1_1.py | Walop/AdventOfCode2017 | 32786e46d8fdfb5c824b72403cbc1a8858bac2bb | [
"MIT"
] | null | null | null | millisecond1/millisecond1_1.py | Walop/AdventOfCode2017 | 32786e46d8fdfb5c824b72403cbc1a8858bac2bb | [
"MIT"
] | null | null | null | with open("input", "r") as file:
input = file.read()
nums = list(input)
sum = 0
for i in range(0, len(nums)):
if nums[i] == nums[i-1]:
sum += int(nums[i])
print(sum)
| 22.888889 | 33 | 0.490291 | with open("input", "r") as file:
input = file.read()
nums = list(input)
sum = 0
for i in range(0, len(nums)):
if nums[i] == nums[i-1]:
sum += int(nums[i])
print(sum)
| 0 | 0 | 0 |
a6b98cf550cbc9c04e82fd726c98a3cd54cc8498 | 15,173 | py | Python | framework-nucleus-segmentation/mrcnn/samples/cell/cell.py | CBIIT/nci-hitif | 2f825cbcba92ff2fdffac60de56604578f31e937 | [
"MIT"
] | null | null | null | framework-nucleus-segmentation/mrcnn/samples/cell/cell.py | CBIIT/nci-hitif | 2f825cbcba92ff2fdffac60de56604578f31e937 | [
"MIT"
] | 8 | 2020-04-13T18:52:30.000Z | 2022-02-10T01:18:21.000Z | mrcnn/samples/cell/cell.py | usnistgov/WIPP-fpn-inference-plugin | a3356305dcf2f3196833690c56f6bf5599de3d08 | [
"MIT"
] | 3 | 2018-07-10T15:19:54.000Z | 2021-02-16T17:10:01.000Z | import tensorflow as tf
tf_version = int((tf.__version__).split('.')[0])
if tf_version >= 2:
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import os
import sys
import random
import numpy as np
import cv2
import skimage.io
import warnings; warnings.simplefilter('ignore')
import time
import h5py
# Root directory of the project
ROOT_DIR = os.path.abspath("../../")
print(ROOT_DIR)
# Import Mask RCNN
#sys.path.append(ROOT_DIR) # To find local version of the library
from mrcnn.config import Config
from mrcnn import utils
import mrcnn.model as modellib
from mrcnn.model import log
from skimage import measure
####################################################################
# CONFIGURATION
####################################################################
####################################################################
# DATASET
####################################################################
class CellsDataset(utils.Dataset):
    """Generates a cells dataset for training. Dataset consists of microscope images."""

    @staticmethod
    def generate_masks(mask_array):
        """
        Generate a dictionary of masks. The keys are instance numbers from the
        numpy stack and the values are the corresponding binary masks.

        Args:
            mask_array: numpy array of size [H,W]. 0 represents the background.
                Any non zero integer represents an individual instance.
        Returns:
            Mask dictionary {instance_id: [H,W] numpy binary mask array}
        """
        # BUGFIX: the original declared this without `self`, so calling it on
        # an instance bound `self` to mask_array. @staticmethod keeps
        # class-level calls working and fixes instance-level calls.
        masks = {}  # keys are instance ids, values are binary mask arrays
        for (x, y), value in np.ndenumerate(mask_array):  # go through entire array
            if value != 0:  # pixel belongs to a cell
                if value not in masks:  # new instance introduced
                    masks[value] = np.zeros(mask_array.shape)
                dummy_array = masks[value]
                dummy_array[(x, y)] = 1  # mark pixel as part of this instance
                masks[value] = dummy_array
        return masks

    def load_cells(self, h5_file, image_ids):
        """
        Loads cell images from the dataset h5 file.

        Parameters:
        -----------
        h5_file: str
            Path to the h5 file that contains the datasets
        image_ids: numpy_array
            The ids of the images that would be loaded
        """
        # Add class
        self.add_class("cells", 1, "cellobj")
        # Name of images / masks datasets in the h5 file.
        self.h5_file = h5_file
        self.images_dataset_name = 'DAPI_uint16touint8_normalizeandscale'
        self.masks_dataset_name = "bitmask_labeled_uint16"
        # The attribute key used to remember each image's index in the h5 file.
        self.h5_index = 'h5_index'
        count = 0
        for _id in image_ids:
            params = {}
            params[self.h5_index] = _id
            self.add_image('cells', count, path=None, **params)
            count += 1

    def load_image(self, image_id):
        """
        Load the specified image from the h5 file and return a [H,W,3] Numpy array.

        Parameters
        ----------
        image_id: int
            The id of the image in the dataset

        Returns
        -------
        numpy.ndarray[uint8][3]
        """
        info = self.image_info[image_id]
        h5_index = info[self.h5_index]
        # Copy out of the (possibly memory-mapped) h5 dataset before closing it.
        with h5py.File(self.h5_file, 'r') as file_p:
            image = np.copy(file_p[self.images_dataset_name][h5_index])
        # If grayscale. Convert to RGB for consistency.
        if image.ndim != 3:
            image = skimage.color.gray2rgb(image)
        # If has an alpha channel, remove it for consistency
        if image.shape[-1] == 4:
            image = image[..., :3]
        return image

    def map_uint16_to_uint8(self, img, lower_bound=None, upper_bound=None):
        '''
        Map a 16-bit image through a lookup table to convert it to 8-bit.

        Parameters
        ----------
        img: numpy.ndarray[np.uint16]
            image that should be mapped
        lower_bound: int, optional
            lower bound of the range that should be mapped to ``[0, 255]``,
            value must be in the range ``[0, 65535]`` and smaller than `upper_bound`
            (defaults to ``numpy.min(img)``)
        upper_bound: int, optional
            upper bound of the range that should be mapped to ``[0, 255]``,
            value must be in the range ``[0, 65535]`` and larger than `lower_bound`
            (defaults to ``numpy.max(img)``)

        Returns
        -------
        numpy.ndarray[uint8]
        '''
        if lower_bound is None:
            lower_bound = np.min(img)
        if not(0 <= lower_bound < 2**16):
            raise ValueError(
                '"lower_bound" must be in the range [0, 65535]')
        if upper_bound is None:
            upper_bound = np.max(img)
        if not(0 <= upper_bound < 2**16):
            raise ValueError(
                '"upper_bound" must be in the range [0, 65535]')
        if lower_bound >= upper_bound:
            raise ValueError(
                '"lower_bound" must be smaller than "upper_bound"')
        # Build a 65536-entry lookup table: 0 below the window, a linear ramp
        # inside it, and 255 above it; then map every pixel through it.
        lut = np.concatenate([
            np.zeros(lower_bound, dtype=np.uint16),
            np.linspace(0, 255, upper_bound - lower_bound).astype(np.uint16),
            np.ones(2**16 - upper_bound, dtype=np.uint16) * 255
        ])
        return lut[img].astype(np.uint8)

    def load_mask(self, image_id):
        """
        Generates instance masks for images of the given image ID.

        Parameters
        ----------
        image_id: int
            The id of the image in the class

        Return
        ------
        numpy.ndarray[n_objects, H, W] , numpy_ndarray[n_objects]
        """
        info = self.image_info[image_id]
        h5_index = info[self.h5_index]
        with h5py.File(self.h5_file, 'r') as file_p:
            mask = np.copy(file_p[self.masks_dataset_name][h5_index])
        # The mask already has a different id for every nucleus
        labels = np.unique(mask)
        # Remove the background
        labels = labels[labels != 0]
        all_masks = []
        if not labels.size == 0:
            for label in np.nditer(labels):
                nucleus_mask = np.zeros(mask.shape, dtype=np.int8)
                nucleus_mask[mask == label] = 1
                all_masks.append(nucleus_mask)
        else:
            # If there are no masks, emit a single empty mask so downstream
            # stacking still works.
            print("WARNING: h5_index:{0} has no masks".format(h5_index))
            nucleus_mask = np.zeros(mask.shape, dtype=np.int8)
            all_masks.append(nucleus_mask)
        mask_np = np.stack(all_masks, axis=-1).astype(np.int8)
        # Return mask, and array of class IDs of each instance. Since we have
        # one class ID, we return an array of ones
        return mask_np, np.ones([len(all_masks)], dtype=np.int8)
def get_n_images(h5_file):
    """Return the number of images (first-axis length of the first dataset) in *h5_file*."""
    with h5py.File(h5_file, 'r') as handle:
        # All datasets in the file share the same leading dimension, so any
        # one of them tells us the image count; use the first.
        first_key = list(handle.keys())[0]
        return handle[first_key].shape[0]
####################################################################
# TRAINING
####################################################################
def train(h5_file, model_dir, init_with='coco', latest="latest.h5"):
    """
    Train the Mask R-CNN on the ground-truth datasets in *h5_file*.

    Parameters:
    -----------
    h5_file: str
        Path to the h5 file that contains the ground truth datasets
    model_dir: str
        Directory to save logs and trained model
    init_with: str
        Weight initialization: "imagenet", "coco", "last", "random",
        or a path to an existing weights .h5 file
    latest: str
        The symlink path that will point at the best trained model
    """
    # Total number of images in the .h5 file
    n_images = get_n_images(h5_file)
    print("number of images:{0}".format(n_images))
    imgs_ind = np.arange(n_images)
    np.random.shuffle(imgs_ind)
    # Split 80-20
    train_last_id = int(n_images * 0.80)
    train_indexes = imgs_ind[0:train_last_id]
    # BUGFIX: the original sliced from train_last_id+1, silently dropping one
    # image from both splits; slice from train_last_id for a clean partition.
    test_indexes = imgs_ind[train_last_id:n_images]
    n_test = len(test_indexes)
    print("Total:{0}, Train:{1}, Test:{2}".format(n_images,
                                                  len(train_indexes),
                                                  len(test_indexes)))
    dataset_train = CellsDataset()
    dataset_train.load_cells(h5_file, train_indexes)
    dataset_train.prepare()
    dataset_test = CellsDataset()
    dataset_test.load_cells(h5_file, test_indexes)
    dataset_test.prepare()
    config = CellsConfig()
    # GZ: Size epochs so the schedule below amounts to ~total_passes passes
    # over the data in n_epochs epochs.
    total_passes = 30
    n_epochs = 200
    config.STEPS_PER_EPOCH = int(train_last_id * total_passes /
                                 n_epochs / config.BATCH_SIZE)
    config.VALIDATION_STEPS = int(n_test * total_passes /
                                  n_epochs / config.BATCH_SIZE)
    config.display()
    print("MRCNN Train module:", modellib.__file__)
    model = modellib.MaskRCNN(mode="training", config=config,
                              model_dir=model_dir)
    # Which weights to start with? imagenet, coco, last, random, or a file path.
    print('initializing with {}'.format(init_with))
    initial_layers = "heads"
    if init_with == "imagenet":
        model.load_weights(model.get_imagenet_weights(), by_name=True)
    elif init_with == "coco":
        # Load weights trained on MS COCO, but skip layers that
        # are different due to the different number of classes
        # See README for instructions to download the COCO weights
        COCO_MODEL_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
        # Download COCO trained weights from Releases if needed
        if not os.path.exists(COCO_MODEL_PATH):
            utils.download_trained_weights(COCO_MODEL_PATH)
        model.load_weights(COCO_MODEL_PATH, by_name=True,
                           exclude=["mrcnn_class_logits", "mrcnn_bbox_fc",
                                    "mrcnn_bbox", "mrcnn_mask"])
    elif init_with == "last":
        # Load the last model you trained and continue training
        model.load_weights(model.find_last(), by_name=True)
    elif init_with == "random":
        print("Warning: Model is initialized with random weights")
        initial_layers = "all"
    elif os.path.exists(init_with):
        model.load_weights(init_with, by_name=True, reset_init_epoch=True)
    else:
        print("ERROR: No model initialization provided")
        sys.exit(1)  # sys.exit instead of the interactive-only exit() builtin

    ### TRAIN THE MODEL
    # Epoch numbers accumulate across calls: the first call trains epochs
    # 1-75, the next continues 76-100, and so on.
    train_heads_start = time.time()
    model.train(dataset_train, dataset_test,
                learning_rate=config.LEARNING_RATE,
                epochs=75,
                layers=initial_layers)
    model.train(dataset_train, dataset_test,
                learning_rate=config.LEARNING_RATE / 10,
                epochs=100,
                layers=initial_layers)
    model.train(dataset_train, dataset_test,
                learning_rate=config.LEARNING_RATE / 100,
                epochs=125,
                layers=initial_layers)
    train_heads_end = time.time()
    train_heads_time = train_heads_end - train_heads_start
    print('\n Done training {0}. Took {1} seconds'.format(initial_layers, train_heads_time))

    # Fine tune all layers with a decaying learning-rate schedule.
    train_all_start = time.time()
    t1s = time.time()
    model.train(dataset_train, dataset_test,
                learning_rate=config.LEARNING_RATE / 10,
                epochs=150,
                layers="all")
    t1e = time.time()
    print(t1e - t1s)
    t2s = time.time()
    model.train(dataset_train, dataset_test,
                learning_rate=config.LEARNING_RATE / 100,
                epochs=175,
                layers="all")
    t2e = time.time()
    print(t2e - t2s)
    t3s = time.time()
    model.train(dataset_train, dataset_test,
                learning_rate=config.LEARNING_RATE / 1000,
                epochs=200,
                layers="all")
    t3e = time.time()
    print(t3e - t3s)
    train_all_end = time.time()
    train_all_time = train_all_end - train_all_start
    print("Here", model.find_last())
    # Symlink the last checkpoint so callers can always find the best model.
    # NOTE(review): os.symlink fails if `latest` already exists — confirm
    # callers remove a stale link first.
    best_model = os.path.abspath(model.find_last())
    os.symlink(best_model, latest)
    print('\n Best model {0} symlinked to {1}'.format(best_model, latest))
    print('\n Done training all layers. Took {} seconds'.format(train_all_time))
| 33.056645 | 173 | 0.598827 | import tensorflow as tf
tf_version = int((tf.__version__).split('.')[0])
if tf_version >= 2:
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import os
import sys
import random
import numpy as np
import cv2
import skimage.io
import warnings; warnings.simplefilter('ignore')
import time
import h5py
# Root directory of the project
ROOT_DIR = os.path.abspath("../../")
print(ROOT_DIR)
# Import Mask RCNN
#sys.path.append(ROOT_DIR) # To find local version of the library
from mrcnn.config import Config
from mrcnn import utils
import mrcnn.model as modellib
from mrcnn.model import log
from skimage import measure
####################################################################
# CONFIGURATION
####################################################################
class CellsConfig(Config):
    """Mask R-CNN configuration for the microscope nucleus/cell dataset."""
    NAME = "cells"
    GPU_COUNT = 1
    # To George and Reddy (TGAR), img/gpu could be increased to maximize training (i think I'm undersaturating the GPU so maybe we can increase this later)
    #GZ switching to 32 instead of 2 as the crops are 256 * 256 instead of 1024*1024
    IMAGES_PER_GPU = 8
    NUM_CLASSES = 1+1 # background + cell
    # TGAR, change the following values based on the input size for training
    # GZ: Images are scaled to max dimension during training
    IMAGE_MIN_DIM = 256
    IMAGE_MAX_DIM = 256
    # TGAR, RPN_ANCHOR_SCALES can be decreased for smaller images. For example, the caltech images have very small cells so the following value can be decreased
    # Use smaller anchors because our image and objects are small
    RPN_ANCHOR_SCALES = (8, 16, 32, 64, 128) # anchor side in pixels
    #TRAIN_ROIS_PER_IMAGE = 512
    TRAIN_ROIS_PER_IMAGE = 200
    # batch_size = num_training_data/STEPS_PER_EPOCH
    #GZ: Restore to 100, 50
    STEPS_PER_EPOCH = 10
    VALIDATION_STEPS = 2
    LEARNING_RATE = 1e-4
    # Effective batch size used by the training schedule in train().
    BATCH_SIZE = IMAGES_PER_GPU * GPU_COUNT
####################################################################
# DATASET
####################################################################
class CellsDataset(utils.Dataset):
"""Generates a cells dataset for training. Dataset consists of microscope images.
"""
def generate_masks(mask_array):
"""
Generate a dictionary of masks. The keys are instance numbers from the numpy stack and the values are the corresponding binary masks.
Args:
mask_array: numpy array of size [H,W]. 0 represents the background. Any non zero integer represents a individual instance
Returns:
Mask dictionary {instance_id: [H,W] numpy binary mask array}
"""
masks = {} # keys are instances, values are corresponding binary mask array
for (x,y), value in np.ndenumerate(mask_array): #go through entire array
if value != 0: # if cell
if value not in masks: # if new instance introduced
masks[value] = np.zeros(mask_array.shape) #make new array
dummy_array = masks[value]
dummy_array[(x,y)] = 1
masks[value] = dummy_array # change value of array to 1 to represent cell
return masks
def load_cells(self, h5_file, image_ids):
"""
Loads cell images from the dataset h5 file.
Parameters:
-----------
h5_file: str
Path to the h5 file that contains the datasets
image_ids: numpy_array
The ids of the images that would be loaded
"""
# Add class
self.add_class("cells", 1, "cellobj")
# Name of images / masks datasets in the h5 file.
self.h5_file = h5_file
self.images_dataset_name = 'DAPI_uint16touint8_normalizeandscale'
self.masks_dataset_name = "bitmask_labeled_uint16"
#The attribute for h5 index
self.h5_index = 'h5_index'
count = 0
for _id in image_ids:
params = {}
params[self.h5_index] = _id
self.add_image('cells', count, path=None, **params)
count += 1
def load_image(self, image_id):
"""
Load the specified image from h5 file and return a [H,W,3] Numpy array.
Parameters
----------
image_id: int
The id of the image in the dataset
Returns
-------
numpy.ndarray[uint8][3]
"""
#t1s = time.time()
#HDF5 file with ~320K patches of 256x256. HDF5 saves data as "datasets". Note that the following datasets in the below mentioned .h5 file
info = self.image_info[image_id]
h5_index = info[self.h5_index]
with h5py.File(self.h5_file, 'r') as file_p:
image = np.copy(file_p[self.images_dataset_name][h5_index])
# If grayscale. Convert to RGB for consistency.
if image.ndim != 3:
image = skimage.color.gray2rgb(image)
# If has an alpha channel, remove it for consistency
if image.shape[-1] == 4:
image = image[..., :3]
#t1e = time.time()
#print("Load image time:{0}".format(t1e-t1s))
#print("loaded_image:{0}".format(image_id))
return image
def map_uint16_to_uint8(self, img, lower_bound=None, upper_bound=None):
'''
Map a 16-bit image trough a lookup table to convert it to 8-bit.
Parameters
----------
img: numpy.ndarray[np.uint16]
image that should be mapped
lower_bound: int, optional
lower bound of the range that should be mapped to ``[0, 255]``,
value must be in the range ``[0, 65535]`` and smaller than `upper_bound`
(defaults to ``numpy.min(img)``)
upper_bound: int, optional
upper bound of the range that should be mapped to ``[0, 255]``,
value must be in the range ``[0, 65535]`` and larger than `lower_bound`
(defaults to ``numpy.max(img)``)
Returns
-------
numpy.ndarray[uint8]
'''
if lower_bound is None:
lower_bound = np.min(img)
if not(0 <= lower_bound < 2**16):
raise ValueError(
'"lower_bound" must be in the range [0, 65535]')
if upper_bound is None:
upper_bound = np.max(img)
if not(0 <= upper_bound < 2**16):
raise ValueError(
'"upper_bound" must be in the range [0, 65535]')
if lower_bound >= upper_bound:
raise ValueError(
'"lower_bound" must be smaller than "upper_bound"')
lut = np.concatenate([
np.zeros(lower_bound, dtype=np.uint16),
np.linspace(0, 255, upper_bound - lower_bound).astype(np.uint16),
np.ones(2**16 - upper_bound, dtype=np.uint16) * 255
])
return lut[img].astype(np.uint8)
def load_mask(self, image_id):
"""
Generates instance masks for images of the given image ID.
Parameters
----------
image_id: int
The id of the image in the class
Return
------
numpy.ndarray[n_objects, H, W] , numpy_ndarray[n_objects]
"""
#ts = time.time()
info = self.image_info[image_id]
h5_index = info[self.h5_index]
with h5py.File(self.h5_file, 'r') as file_p:
mask = np.copy(file_p[self.masks_dataset_name][h5_index])
#The mask already has a different id for every nucleus
labels = np.unique(mask)
#Remove the background
labels = labels[labels != 0]
all_masks = []
if not labels.size == 0:
for label in np.nditer(labels):
nucleus_mask = np.zeros(mask.shape, dtype=np.int8)
nucleus_mask[mask == label] = 1
all_masks.append(nucleus_mask)
else:
#If there are no masks
print("WARNING: h5_index:{0} has no masks".format(h5_index))
nucleus_mask = np.zeros(mask.shape, dtype=np.int8)
all_masks.append(nucleus_mask)
mask_np = np.stack(all_masks, axis = -1).astype(np.int8)
# Return mask, and array of class IDs of each instance. Since we have
# one class ID, we return an array of ones
#tf = time.time()
#print("load_mask time:{0}".format(tf-ts))
#print("loaded_mask:{0}".format(image_id))
return mask_np, np.ones([len(all_masks)], dtype=np.int8)
def get_n_images(h5_file):
"""
Returns the number of images in the h5 file
"""
with h5py.File(h5_file, 'r') as file_p:
a_dataset = list(file_p.keys())[0]
shape = file_p[a_dataset].shape
return shape[0]
####################################################################
# TRAINING
####################################################################
def train(h5_file, model_dir, init_with='coco',latest="latest.h5"):
"""
Train the MRCNN using the
Parameters:
-----------
h5_file: str
Path to the h5file that contains the ground truth datasets
init_with: str
Name of the h5 file to initilaze the M-RCNN network
model_dir: str
Directory to save logs and trained model
lastes: src
The file to use as symlink for the best model
"""
# Total number of images in the .h5 file
n_images = get_n_images(h5_file)
print("number of images:{0}".format(n_images))
#n_images = 200
imgs_ind = np.arange(n_images)
np.random.shuffle(imgs_ind)
# Split 80-20
train_last_id = int(n_images*0.80)
train_indexes = imgs_ind[0:train_last_id]
test_indexes = imgs_ind[train_last_id+1: n_images]
n_test = len(test_indexes)
print("Total:{0}, Train:{1}, Test:{2}".format(n_images,
len(train_indexes),
len(test_indexes)))
dataset_train = CellsDataset()
dataset_train.load_cells(h5_file, train_indexes)
dataset_train.prepare()
dataset_test = CellsDataset()
dataset_test.load_cells(h5_file, test_indexes)
dataset_test.prepare()
MODEL_DIR = model_dir
config = CellsConfig()
#GZ: Change to accomodate the real number of passes while
#executing the schedule below or 200 epochs
total_passes = 30
n_epochs = 200
config.STEPS_PER_EPOCH= int(train_last_id * total_passes / \
n_epochs / config.BATCH_SIZE)
config.VALIDATION_STEPS = int(n_test * total_passes / \
n_epochs / config.BATCH_SIZE)
#config.STEPS_PER_EPOCH = train_indexes.shape[0] / config.BATCH_SIZE
#config.VALIDATION_STEPS = test_indexes.shape[0] / config.BATCH_SIZE
config.display()
print("MRCNN Train module:", modellib.__file__)
model = modellib.MaskRCNN(mode="training", config=config,
model_dir=model_dir)
#print(image1.shape)
#print( mask1.shape, ids)
#np.save("image.npy", image1)
#np.save("mask.npy", mask1)
#exit()
# Which weights to start with?
# imagenet, coco, or last
print('initializing with {}'.format(init_with))
initial_layers = "heads"
if init_with == "imagenet":
model.load_weights(model.get_imagenet_weights(), by_name=True)
elif init_with == "coco":
# Load weights trained on MS COCO, but skip layers that
# are different due to the different number of classes
# See README for instructions to download the COCO weights
# Local path to trained weights file
COCO_MODEL_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
# Download COCO trained weights from Releases if needed
if not os.path.exists(COCO_MODEL_PATH):
utils.download_trained_weights(COCO_MODEL_PATH)
model.load_weights(COCO_MODEL_PATH, by_name=True,
exclude=["mrcnn_class_logits", "mrcnn_bbox_fc",
"mrcnn_bbox", "mrcnn_mask"])
elif init_with == "last":
# Load the last model you trained and continue training
model.load_weights(model.find_last(), by_name=True)
elif init_with == "random":
print("Warning: Model is initialized with random weights")
initial_layers = "all"
elif os.path.exists(init_with):
import inspect
print(inspect.getfullargspec(model.load_weights))
print(model.load_weights.__module__)
model.load_weights(init_with, by_name=True, reset_init_epoch=True)
else:
print("ERROR: No model initialization provided")
exit(1)
### TRAIN THE MODEL
# TGAR, modify how to train model. Epochs accumulate (ex. line first call to model.train means train epochs 1-75 and second call to train means train from epochs 75-100.
#DEVICE = '/device:GPU:0'
#with tf.device(DEVICE):
train_heads_start = time.time()
model.train(dataset_train, dataset_test,
learning_rate=config.LEARNING_RATE,
#augmentation=augmentation,
epochs=75,
layers= initial_layers)
model.train(dataset_train, dataset_test,
learning_rate=config.LEARNING_RATE / 10,
#augmentation=augmentation,
epochs=100,
layers=initial_layers)
model.train(dataset_train, dataset_test,
learning_rate=config.LEARNING_RATE / 100,
#augmentation=augmentation,
epochs=125,
layers=initial_layers)
train_heads_end = time.time()
train_heads_time = train_heads_end - train_heads_start
print('\n Done training {0}. Took {1} seconds'.format(initial_layers, train_heads_time))
# Fine tune all layers
# Passing layers="all" trains all layers. You can also
# pass a regular expression to select which layers to
# train by name pattern.
train_all_start = time.time()
t1s = time.time()
model.train(dataset_train, dataset_test,
learning_rate=config.LEARNING_RATE / 10,
#augmentation=augmentation,
epochs=150,
layers="all")
t1e = time.time()
print(t1e-t1s)
t2s = time.time()
model.train(dataset_train, dataset_test,
learning_rate=config.LEARNING_RATE / 100,
#augmentation=augmentation,
epochs=175,
layers="all")
t2e = time.time()
print(t2e-t2s)
t3s = time.time()
model.train(dataset_train, dataset_test,
learning_rate=config.LEARNING_RATE / 1000,
#augmentation=augmentation,
epochs=200,
layers="all")
t3e = time.time()
print(t3e-t3s)
train_all_end = time.time()
train_all_time = train_all_end - train_all_start
print("Here", model.find_last())
best_model = os.path.abspath(model.find_last())
os.symlink(best_model, latest)
print('\n Best model {0} symlinked to {1}'.format(best_model, latest))
print('\n Done training all layers. Took {} seconds'.format(train_all_time))
| 0 | 1,136 | 23 |
2025037e1bf63d94579a9569795dc83706d80bac | 2,749 | py | Python | examples/adafruit_io_simpletest_esp_at.py | willingc/Adafruit_CircuitPython_AdafruitIO | 9f47ce31564f952072b804a162d738d1c872aa28 | [
"MIT"
] | null | null | null | examples/adafruit_io_simpletest_esp_at.py | willingc/Adafruit_CircuitPython_AdafruitIO | 9f47ce31564f952072b804a162d738d1c872aa28 | [
"MIT"
] | null | null | null | examples/adafruit_io_simpletest_esp_at.py | willingc/Adafruit_CircuitPython_AdafruitIO | 9f47ce31564f952072b804a162d738d1c872aa28 | [
"MIT"
] | null | null | null | """
Usage example of the ESP32 over UART
using the CircuitPython ESP_ATControl library.
Dependencies:
* https://github.com/adafruit/Adafruit_CircuitPython_ESP_ATcontrol
"""
from random import randint
import board
import busio
from digitalio import DigitalInOut
# Import Adafruit IO REST Client
from adafruit_io.adafruit_io import RESTClient, AdafruitIO_RequestError
# ESP32 AT
from adafruit_espatcontrol import adafruit_espatcontrol, adafruit_espatcontrol_wifimanager
#Use below for Most Boards
import neopixel
status_light = neopixel.NeoPixel(board.NEOPIXEL, 1, brightness=0.2) # Uncomment for Most Boards
#Uncomment below for ItsyBitsy M4#
#import adafruit_dotstar as dotstar
#status_light = dotstar.DotStar(board.APA102_SCK, board.APA102_MOSI, 1, brightness=0.2)
#Uncomment below for Particle Argon#
#status_light = None
# Get wifi details and more from a secrets.py file
try:
from secrets import secrets
except ImportError:
print("WiFi secrets are kept in secrets.py, please add them there!")
raise
# With a Metro or Feather M4
uart = busio.UART(board.TX, board.RX, timeout=0.1)
resetpin = DigitalInOut(board.D5)
rtspin = DigitalInOut(board.D6)
# With a Particle Argon
"""
RX = board.ESP_TX
TX = board.ESP_RX
resetpin = DigitalInOut(board.ESP_WIFI_EN)
rtspin = DigitalInOut(board.ESP_CTS)
uart = busio.UART(TX, RX, timeout=0.1)
esp_boot = DigitalInOut(board.ESP_BOOT_MODE)
from digitalio import Direction
esp_boot.direction = Direction.OUTPUT
esp_boot.value = True
"""
esp = adafruit_espatcontrol.ESP_ATcontrol(uart, 115200,
reset_pin=resetpin, rts_pin=rtspin, debug=False)
wifi = adafruit_espatcontrol_wifimanager.ESPAT_WiFiManager(esp, secrets, status_light)
# Set your Adafruit IO Username and Key in secrets.py
# (visit io.adafruit.com if you need to create an account,
# or if you need your Adafruit IO key.)
ADAFRUIT_IO_USER = secrets['adafruit_io_user']
ADAFRUIT_IO_KEY = secrets['adafruit_io_key']
# Create an instance of the Adafruit IO REST client
io = RESTClient(ADAFRUIT_IO_USER, ADAFRUIT_IO_KEY, wifi)
try:
# Get the 'temperature' feed from Adafruit IO
temperature_feed = io.get_feed('temperature')
except AdafruitIO_RequestError:
# If no 'temperature' feed exists, create one
temperature_feed = io.create_new_feed('temperature')
# Send random integer values to the feed
random_value = randint(0, 50)
print('Sending {0} to temperature feed...'.format(random_value))
io.send_data(temperature_feed['key'], random_value)
print('Data sent!')
# Retrieve data value from the feed
print('Retrieving data from temperature feed...')
received_data = io.receive_data(temperature_feed['key'])
print('Data from temperature feed: ', received_data['value'])
| 32.341176 | 95 | 0.772645 | """
Usage example of the ESP32 over UART
using the CircuitPython ESP_ATControl library.
Dependencies:
* https://github.com/adafruit/Adafruit_CircuitPython_ESP_ATcontrol
"""
from random import randint
import board
import busio
from digitalio import DigitalInOut
# Import Adafruit IO REST Client
from adafruit_io.adafruit_io import RESTClient, AdafruitIO_RequestError
# ESP32 AT
from adafruit_espatcontrol import adafruit_espatcontrol, adafruit_espatcontrol_wifimanager
#Use below for Most Boards
import neopixel
status_light = neopixel.NeoPixel(board.NEOPIXEL, 1, brightness=0.2) # Uncomment for Most Boards
#Uncomment below for ItsyBitsy M4#
#import adafruit_dotstar as dotstar
#status_light = dotstar.DotStar(board.APA102_SCK, board.APA102_MOSI, 1, brightness=0.2)
#Uncomment below for Particle Argon#
#status_light = None
# Get wifi details and more from a secrets.py file
try:
from secrets import secrets
except ImportError:
print("WiFi secrets are kept in secrets.py, please add them there!")
raise
# With a Metro or Feather M4
uart = busio.UART(board.TX, board.RX, timeout=0.1)
resetpin = DigitalInOut(board.D5)
rtspin = DigitalInOut(board.D6)
# With a Particle Argon
"""
RX = board.ESP_TX
TX = board.ESP_RX
resetpin = DigitalInOut(board.ESP_WIFI_EN)
rtspin = DigitalInOut(board.ESP_CTS)
uart = busio.UART(TX, RX, timeout=0.1)
esp_boot = DigitalInOut(board.ESP_BOOT_MODE)
from digitalio import Direction
esp_boot.direction = Direction.OUTPUT
esp_boot.value = True
"""
esp = adafruit_espatcontrol.ESP_ATcontrol(uart, 115200,
reset_pin=resetpin, rts_pin=rtspin, debug=False)
wifi = adafruit_espatcontrol_wifimanager.ESPAT_WiFiManager(esp, secrets, status_light)
# Set your Adafruit IO Username and Key in secrets.py
# (visit io.adafruit.com if you need to create an account,
# or if you need your Adafruit IO key.)
ADAFRUIT_IO_USER = secrets['adafruit_io_user']
ADAFRUIT_IO_KEY = secrets['adafruit_io_key']
# Create an instance of the Adafruit IO REST client
io = RESTClient(ADAFRUIT_IO_USER, ADAFRUIT_IO_KEY, wifi)
try:
# Get the 'temperature' feed from Adafruit IO
temperature_feed = io.get_feed('temperature')
except AdafruitIO_RequestError:
# If no 'temperature' feed exists, create one
temperature_feed = io.create_new_feed('temperature')
# Send random integer values to the feed
random_value = randint(0, 50)
print('Sending {0} to temperature feed...'.format(random_value))
io.send_data(temperature_feed['key'], random_value)
print('Data sent!')
# Retrieve data value from the feed
print('Retrieving data from temperature feed...')
received_data = io.receive_data(temperature_feed['key'])
print('Data from temperature feed: ', received_data['value'])
| 0 | 0 | 0 |
d9548f1abcc0c877d3c68c744296db8df25eecf0 | 2,925 | py | Python | toil/src/toil_marginphase/scripts/split_bam_by_coordinate.py | sachet-mittal/marginPhase | afe6c69825c5c51f02131b9f675a7d2c2d2c164e | [
"MIT"
] | 34 | 2017-08-07T00:24:11.000Z | 2021-11-19T04:34:44.000Z | toil/src/toil_marginphase/scripts/split_bam_by_coordinate.py | sachet-mittal/marginPhase | afe6c69825c5c51f02131b9f675a7d2c2d2c164e | [
"MIT"
] | 12 | 2018-04-16T06:34:53.000Z | 2022-03-04T03:40:48.000Z | toil/src/toil_marginphase/scripts/split_bam_by_coordinate.py | sachet-mittal/marginPhase | afe6c69825c5c51f02131b9f675a7d2c2d2c164e | [
"MIT"
] | 10 | 2017-02-18T03:48:23.000Z | 2020-01-07T00:57:21.000Z | #!/usr/bin/env python
from __future__ import print_function
import argparse
import glob
import os
import subprocess
import sys
CHR = "c"
START = "s"
END = "e"
DESC = "d"
if __name__ == "__main__":
main() | 37.5 | 128 | 0.6 | #!/usr/bin/env python
from __future__ import print_function
import argparse
import glob
import os
import subprocess
import sys
CHR = "c"
START = "s"
END = "e"
DESC = "d"
def parse_args():
parser = argparse.ArgumentParser("Split BAM by region")
parser.add_argument('--input_bam_glob', '-i', dest='input_bam_glob', required=True, type=str,
help='Glob matching input BAMs (will perform for all bams)')
parser.add_argument('--coordinate_tsv', '-c', dest='coordinate_tsv', required=True, type=str,
help='Coordinates for splitting ($CHROM\\t$START\\t$END)')
parser.add_argument('--output_location', '-o', dest='output_location', default=".", type=str,
help='Location where output files are put')
parser.add_argument('--description_column', '-d', dest='description_column', default=None, type=int,
help='0-based index of description field in TSV (not required)')
return parser.parse_args()
def get_output_filename(input_file_location, output_directory, coordinates):
input_file_name = os.path.basename(input_file_location)
input_file_parts = input_file_name.split(".")
output_file_name = "{}.{}_{}-{}".format(".".join(input_file_parts[0:-1]), coordinates[CHR], coordinates[START],
coordinates[END])
if coordinates[DESC] is not None:
output_file_name += "." + coordinates[DESC]
output_file_name += "." + input_file_parts[-1]
return os.path.join(output_directory, output_file_name)
def main():
args = parse_args()
assert False not in [len(args.input_bam_glob) > 0, os.path.isfile(args.coordinate_tsv), os.path.isdir(args.output_location)]
coords = list()
with open(args.coordinate_tsv) as tsv_in:
header=True
for line in tsv_in:
if header:
header = False
continue
line = line.split("\t")
coords.append({
CHR:line[0],
START:int(line[1]),
END:int(line[2]),
DESC: None if args.description_column is None else "_".join(line[args.description_column].split())
})
for file in glob.glob(args.input_bam_glob):
for coord in coords:
outfile = get_output_filename(file, args.output_location, coord)
print("{}:\n\tloc: {}:{}-{}\n\tdesc: {}\n\tout: {}".format(file, coord[CHR], coord[START], coord[END],
coord[DESC], outfile), file=sys.stderr)
samtools_args = ['samtools', 'view', '-hb', file, "{}:{}-{}".format(coord[CHR], coord[START], coord[END])]
with open(outfile, 'w') as output:
subprocess.check_call(samtools_args, stdout=output)
print("Fin.", file=sys.stderr)
if __name__ == "__main__":
main() | 2,638 | 0 | 69 |
06d1b0decc735be6856a071f3dfe2d0445118634 | 2,520 | py | Python | select.py | rmccartney856/marvelMovieSelector | 8adaef2ce1ed2c83c840f36ff74312b4322d3cec | [
"MIT"
] | 2 | 2019-03-31T23:00:31.000Z | 2019-03-31T23:00:34.000Z | select.py | rmccartney856/marvelMovieSelector | 8adaef2ce1ed2c83c840f36ff74312b4322d3cec | [
"MIT"
] | null | null | null | select.py | rmccartney856/marvelMovieSelector | 8adaef2ce1ed2c83c840f36ff74312b4322d3cec | [
"MIT"
] | null | null | null | #NAME: select.py
#DATE: 31/03/2019
import json
import time
import random
import tkinter as tk
from PIL import Image, ImageTk
coverPath = "noimage.png"
root = tk.Tk()
root.title("Marvel Movie Generator")
root.configure(background='black')
#size of the window
root.geometry("450x880")
frame = tk.Frame(root)
frame.pack()
buttonGenerate = tk.Button(frame,text="Generate",fg="white",bg="green",font=("Arial", 16), height=2, width=10, command=generate)
buttonGenerate.pack(side=tk.LEFT)
buttonQuit = tk.Button(frame,text="Quit",fg="white",bg="red",font=("Arial", 16), height=2,width=10, command=quit)
buttonQuit.pack(side=tk.LEFT)
selectedTitle = tk.Label(root, bg="black",fg="white")
selectedTitle.config(font=("Arial", 16))
selectedTitle.pack()
releaseYear = tk.Label(root, bg="black",fg="white")
releaseYear.config(font=("Arial", 20))
releaseYear.pack()
#The Label widget is a standard Tkinter widget used to display a text or image on the screen.
cover = Image.open(coverPath)
cover = cover.resize((400, 650), Image.ANTIALIAS)
coverImage = ImageTk.PhotoImage(cover)
poster = tk.Label(root, bg="black", image=coverImage)
#The Pack geometry manager packs widgets in rows or columns.
poster.pack(fill = "both", expand = "yes")
phaseText = tk.Label(root, bg="black",fg="white")
phaseText.config(font=("Arial", 18),padx=10, pady=20)
phaseText.pack()
root.mainloop() | 30.361446 | 128 | 0.702381 | #NAME: select.py
#DATE: 31/03/2019
import json
import time
import random
import tkinter as tk
from PIL import Image, ImageTk
coverPath = "noimage.png"
def generate():
#Open Movie File
marvelMovies = open('movies.json').read()
marvel = json.loads(marvelMovies)
#Select Random Phase
print("Selecting a Marvel Phase.")
phases = len(marvel['marvel'])
phase = random.randint(0, phases-1)
print("Selecting a movie from Phase "+str(marvel['marvel'][phase]['phase'])+".")
#Select Movie from phase
phaseTitles = marvel['marvel'][phase]['movies']
titles = len(phaseTitles)
selctedMovie = random.randint(0, titles-1)
title = phaseTitles[selctedMovie]['title']
year = phaseTitles[selctedMovie]['year']
coverPath = phaseTitles[selctedMovie]['cover']
selectedTitle["text"] = str(title)
releaseYear["text"] = str(year)
phaseText["text"] = "Phase "+str(marvel['marvel'][phase]['phase'])
#Creates a Tkinter-compatible photo image, which can be used everywhere Tkinter expects an image object.
cover = Image.open(coverPath)
cover = cover.resize((400, 650), Image.ANTIALIAS)
coverImage = ImageTk.PhotoImage(cover)
poster.configure(image=coverImage)
vlabel.pack()
print("You should watch "+title+".")
root = tk.Tk()
root.title("Marvel Movie Generator")
root.configure(background='black')
#size of the window
root.geometry("450x880")
frame = tk.Frame(root)
frame.pack()
buttonGenerate = tk.Button(frame,text="Generate",fg="white",bg="green",font=("Arial", 16), height=2, width=10, command=generate)
buttonGenerate.pack(side=tk.LEFT)
buttonQuit = tk.Button(frame,text="Quit",fg="white",bg="red",font=("Arial", 16), height=2,width=10, command=quit)
buttonQuit.pack(side=tk.LEFT)
selectedTitle = tk.Label(root, bg="black",fg="white")
selectedTitle.config(font=("Arial", 16))
selectedTitle.pack()
releaseYear = tk.Label(root, bg="black",fg="white")
releaseYear.config(font=("Arial", 20))
releaseYear.pack()
#The Label widget is a standard Tkinter widget used to display a text or image on the screen.
cover = Image.open(coverPath)
cover = cover.resize((400, 650), Image.ANTIALIAS)
coverImage = ImageTk.PhotoImage(cover)
poster = tk.Label(root, bg="black", image=coverImage)
#The Pack geometry manager packs widgets in rows or columns.
poster.pack(fill = "both", expand = "yes")
phaseText = tk.Label(root, bg="black",fg="white")
phaseText.config(font=("Arial", 18),padx=10, pady=20)
phaseText.pack()
root.mainloop() | 1,116 | 0 | 23 |
91a51dcbfc685464fa605e651f220b09aaf8f706 | 15,440 | py | Python | tests/test_utils.py | radioactivedecay/radioactivedecay | 39b9cf45465f8ed54dc67bc35a3bb20bd8c257c7 | [
"MIT"
] | 12 | 2021-11-12T21:15:22.000Z | 2022-03-30T12:36:03.000Z | tests/test_utils.py | radioactivedecay/radioactivedecay | 39b9cf45465f8ed54dc67bc35a3bb20bd8c257c7 | [
"MIT"
] | 15 | 2021-11-08T03:30:41.000Z | 2022-03-21T07:24:48.000Z | tests/test_utils.py | radioactivedecay/radioactivedecay | 39b9cf45465f8ed54dc67bc35a3bb20bd8c257c7 | [
"MIT"
] | 3 | 2021-11-07T16:33:19.000Z | 2022-02-10T09:50:42.000Z | """
Unit tests for utils.py functions.
"""
import unittest
import numpy as np
from sympy import Integer, log
from radioactivedecay.utils import (
get_metastable_chars,
Z_to_elem,
elem_to_Z,
build_id,
build_nuclide_string,
NuclideStrError,
parse_nuclide_str,
parse_id,
parse_nuclide,
add_dictionaries,
sort_dictionary_alphabetically,
sort_list_according_to_dataset,
)
class TestFunctions(unittest.TestCase):
"""
Unit tests for the utils.py functions.
"""
def test_get_metastable_chars(self) -> None:
"""
Test fetching of list of metastable state characters.
"""
self.assertEqual(get_metastable_chars(), ["m", "n", "p", "q", "r", "x"])
def test_Z_to_elem(self) -> None:
"""
Test the conversion of atomic number to element symbol.
"""
self.assertEqual(Z_to_elem(1), "H")
self.assertEqual(Z_to_elem(20), "Ca")
self.assertEqual(Z_to_elem(26), "Fe")
def test_elem_to_Z(self) -> None:
"""
Test the conversion of element symbol to atomic number.
"""
self.assertEqual(elem_to_Z("H"), 1)
self.assertEqual(elem_to_Z("Ca"), 20)
self.assertEqual(elem_to_Z("Fe"), 26)
def test_build_id(self) -> None:
"""
Test the canonical id builder.
"""
self.assertEqual(build_id(26, 56), 260560000)
self.assertEqual(build_id(53, 118), 531180000)
self.assertEqual(build_id(53, 118, "m"), 531180001)
self.assertEqual(build_id(65, 156, "n"), 651560002)
self.assertEqual(build_id(49, 129, "p"), 491290003)
self.assertEqual(build_id(71, 177, "q"), 711770004)
self.assertEqual(build_id(71, 177, "r"), 711770005)
self.assertEqual(build_id(71, 174, "x"), 711740006)
with self.assertRaises(ValueError):
build_id(65, 156, "z")
def test_built_nuclide_string(self) -> None:
"""
Test the nuclide string builder.
"""
self.assertEqual(build_nuclide_string(26, 56), "Fe-56")
self.assertEqual(build_nuclide_string(53, 118), "I-118")
self.assertEqual(build_nuclide_string(53, 118, "m"), "I-118m")
self.assertEqual(build_nuclide_string(65, 156, "n"), "Tb-156n")
self.assertEqual(build_nuclide_string(49, 129, "p"), "In-129p")
self.assertEqual(build_nuclide_string(71, 177, "q"), "Lu-177q")
self.assertEqual(build_nuclide_string(71, 177, "r"), "Lu-177r")
self.assertEqual(build_nuclide_string(71, 174, "x"), "Lu-174x")
with self.assertRaises(ValueError):
build_nuclide_string(999, 1000, "z")
def test_parse_nuclide_str(self) -> None:
"""
Test the parsing of nuclide strings.
"""
self.assertEqual(parse_nuclide_str("Ca-40"), "Ca-40")
self.assertEqual(parse_nuclide_str("Ca40"), "Ca-40")
self.assertEqual(parse_nuclide_str("40Ca"), "Ca-40")
# Whitespace removal (Issue #65)
self.assertEqual(parse_nuclide_str(" Ca -40 "), "Ca-40")
self.assertEqual(parse_nuclide_str("C\ta\n-40"), "Ca-40")
# Robust to capitalization mistakes (Issue #65)
self.assertEqual(parse_nuclide_str("y-91"), "Y-91")
self.assertEqual(parse_nuclide_str("y91"), "Y-91")
self.assertEqual(parse_nuclide_str("91y"), "Y-91")
self.assertEqual(parse_nuclide_str("y-91M"), "Y-91m")
self.assertEqual(parse_nuclide_str("y91M"), "Y-91m")
# Following test will fail as no capitalization of Y
# self.assertEqual(parse_nuclide_str("91my"), "Y-91m")
self.assertEqual(parse_nuclide_str("ca-40"), "Ca-40")
self.assertEqual(parse_nuclide_str("CA-40"), "Ca-40")
self.assertEqual(parse_nuclide_str("Tc-99M"), "Tc-99m")
self.assertEqual(parse_nuclide_str("iR192N"), "Ir-192n")
self.assertEqual(parse_nuclide_str("192NiR"), "Ir-192n")
self.assertEqual(parse_nuclide_str("iN129P"), "In-129p")
self.assertEqual(parse_nuclide_str("177qLu"), "Lu-177q")
self.assertEqual(parse_nuclide_str("LU177R"), "Lu-177r")
self.assertEqual(parse_nuclide_str("lu-174x"), "Lu-174x")
self.assertEqual(parse_nuclide_str("ni56"), "Ni-56")
self.assertEqual(parse_nuclide_str("ni-56"), "Ni-56")
self.assertEqual(parse_nuclide_str("56Ni"), "Ni-56")
self.assertEqual(parse_nuclide_str("56ni"), "Ni-56")
# Following test will fail as logic assumes this is I-56n
# self.assertEqual(parse_nuclide_str("56nI"), "Ni-56")
self.assertEqual(parse_nuclide_str("ni69M"), "Ni-69m")
self.assertEqual(parse_nuclide_str("ni-69n"), "Ni-69n")
self.assertEqual(parse_nuclide_str("69nni"), "Ni-69n")
self.assertEqual(parse_nuclide_str("130nI"), "I-130n")
# Following tests will fail as logic assumes Ni-130
# self.assertEqual(parse_nuclide_str("130NI"), "I-130n")
# self.assertEqual(parse_nuclide_str("130Ni"), "I-130n")
# self.assertEqual(parse_nuclide_str("130ni"), "I-130n")
with self.assertRaises(NuclideStrError):
parse_nuclide_str("H3.") # not alpha-numeric
with self.assertRaises(NuclideStrError):
parse_nuclide_str("H-3-") # too many hyphens
with self.assertRaises(NuclideStrError):
parse_nuclide_str("H-301") # mass number too large
with self.assertRaises(NuclideStrError):
parse_nuclide_str("H") # no mass number
with self.assertRaises(NuclideStrError):
parse_nuclide_str("Tc-99m3") # more than one number
with self.assertRaises(NuclideStrError):
parse_nuclide_str("F26m0") # more than one number
with self.assertRaises(NuclideStrError):
parse_nuclide_str("A3") # invalid element
with self.assertRaises(NuclideStrError):
parse_nuclide_str("Tc-99mm") # metastable char too long
with self.assertRaises(NuclideStrError):
parse_nuclide_str("Tc-99o") # metastable char invalid
def test_parse_id(self) -> None:
"""
Test the canonical id to nuclide string converter.
"""
self.assertEqual(parse_id(260560000), "Fe-56")
self.assertEqual(parse_id(531180000), "I-118")
self.assertEqual(parse_id(531180001), "I-118m")
self.assertEqual(parse_id(651560002), "Tb-156n")
self.assertEqual(parse_id(491290003), "In-129p")
self.assertEqual(parse_id(711770004), "Lu-177q")
self.assertEqual(parse_id(711770005), "Lu-177r")
self.assertEqual(parse_id(711740006), "Lu-174x")
def test_parse_nuclide(self) -> None:
"""
Test the parsing of nuclide strings.
"""
nuclides = np.array(
[
"H-3",
"Be-7",
"C-10",
"Ne-19",
"I-118",
"Pd-100",
"Cl-34m",
"I-118m",
"Tb-156m",
"Tb-156n",
"In-129p",
"Lu-177q",
"Lu-177r",
"Lu-174x",
]
)
dataset_name = "test"
# Re-formatting of acceptable strings e.g. 100Pd -> Pd-100
self.assertEqual(parse_nuclide("H-3", nuclides, dataset_name), "H-3")
self.assertEqual(parse_nuclide("H3", nuclides, dataset_name), "H-3")
self.assertEqual(parse_nuclide("3H", nuclides, dataset_name), "H-3")
self.assertEqual(parse_nuclide(10030000, nuclides, dataset_name), "H-3")
self.assertEqual(parse_nuclide("Be-7", nuclides, dataset_name), "Be-7")
self.assertEqual(parse_nuclide("Be7", nuclides, dataset_name), "Be-7")
self.assertEqual(parse_nuclide("7Be", nuclides, dataset_name), "Be-7")
self.assertEqual(parse_nuclide(40070000, nuclides, dataset_name), "Be-7")
self.assertEqual(parse_nuclide("C-10", nuclides, dataset_name), "C-10")
self.assertEqual(parse_nuclide("C10", nuclides, dataset_name), "C-10")
self.assertEqual(parse_nuclide("10C", nuclides, dataset_name), "C-10")
self.assertEqual(parse_nuclide(60100000, nuclides, dataset_name), "C-10")
self.assertEqual(parse_nuclide("Ne-19", nuclides, dataset_name), "Ne-19")
self.assertEqual(parse_nuclide("Ne19", nuclides, dataset_name), "Ne-19")
self.assertEqual(parse_nuclide("19Ne", nuclides, dataset_name), "Ne-19")
self.assertEqual(parse_nuclide(100190000, nuclides, dataset_name), "Ne-19")
self.assertEqual(parse_nuclide("I-118", nuclides, dataset_name), "I-118")
self.assertEqual(parse_nuclide("I118", nuclides, dataset_name), "I-118")
self.assertEqual(parse_nuclide("118I", nuclides, dataset_name), "I-118")
self.assertEqual(parse_nuclide(531180000, nuclides, dataset_name), "I-118")
self.assertEqual(parse_nuclide("Pd-100", nuclides, dataset_name), "Pd-100")
self.assertEqual(parse_nuclide("Pd100", nuclides, dataset_name), "Pd-100")
self.assertEqual(parse_nuclide("100Pd", nuclides, dataset_name), "Pd-100")
self.assertEqual(parse_nuclide(461000000, nuclides, dataset_name), "Pd-100")
self.assertEqual(parse_nuclide("Cl-34m", nuclides, dataset_name), "Cl-34m")
self.assertEqual(parse_nuclide("Cl34m", nuclides, dataset_name), "Cl-34m")
self.assertEqual(parse_nuclide("34mCl", nuclides, dataset_name), "Cl-34m")
self.assertEqual(parse_nuclide(170340001, nuclides, dataset_name), "Cl-34m")
self.assertEqual(parse_nuclide("I-118m", nuclides, dataset_name), "I-118m")
self.assertEqual(parse_nuclide("I118m", nuclides, dataset_name), "I-118m")
self.assertEqual(parse_nuclide("118mI", nuclides, dataset_name), "I-118m")
self.assertEqual(parse_nuclide(531180001, nuclides, dataset_name), "I-118m")
self.assertEqual(parse_nuclide("Tb-156m", nuclides, dataset_name), "Tb-156m")
self.assertEqual(parse_nuclide("Tb156m", nuclides, dataset_name), "Tb-156m")
self.assertEqual(parse_nuclide("156mTb", nuclides, dataset_name), "Tb-156m")
self.assertEqual(parse_nuclide(651560001, nuclides, dataset_name), "Tb-156m")
self.assertEqual(parse_nuclide("Tb-156n", nuclides, dataset_name), "Tb-156n")
self.assertEqual(parse_nuclide("Tb156n", nuclides, dataset_name), "Tb-156n")
self.assertEqual(parse_nuclide("156nTb", nuclides, dataset_name), "Tb-156n")
self.assertEqual(parse_nuclide(651560002, nuclides, dataset_name), "Tb-156n")
self.assertEqual(parse_nuclide("In-129p", nuclides, dataset_name), "In-129p")
self.assertEqual(parse_nuclide("In129p", nuclides, dataset_name), "In-129p")
self.assertEqual(parse_nuclide("129pIn", nuclides, dataset_name), "In-129p")
self.assertEqual(parse_nuclide(491290003, nuclides, dataset_name), "In-129p")
self.assertEqual(parse_nuclide("Lu-177q", nuclides, dataset_name), "Lu-177q")
self.assertEqual(parse_nuclide("Lu177q", nuclides, dataset_name), "Lu-177q")
self.assertEqual(parse_nuclide("177qLu", nuclides, dataset_name), "Lu-177q")
self.assertEqual(parse_nuclide(711770004, nuclides, dataset_name), "Lu-177q")
self.assertEqual(parse_nuclide("Lu-177r", nuclides, dataset_name), "Lu-177r")
self.assertEqual(parse_nuclide("Lu-177r", nuclides, dataset_name), "Lu-177r")
self.assertEqual(parse_nuclide("177rLu", nuclides, dataset_name), "Lu-177r")
self.assertEqual(parse_nuclide(711770005, nuclides, dataset_name), "Lu-177r")
self.assertEqual(parse_nuclide("Lu-174x", nuclides, dataset_name), "Lu-174x")
self.assertEqual(parse_nuclide("Lu-174x", nuclides, dataset_name), "Lu-174x")
self.assertEqual(parse_nuclide("174xLu", nuclides, dataset_name), "Lu-174x")
self.assertEqual(parse_nuclide(711740006, nuclides, dataset_name), "Lu-174x")
# Catch erroneous strings
with self.assertRaises(TypeError):
parse_nuclide(1.2, nuclides, dataset_name)
with self.assertRaises(ValueError):
parse_nuclide("H", nuclides, dataset_name)
with self.assertRaises(ValueError):
parse_nuclide("A1", nuclides, dataset_name)
with self.assertRaises(ValueError):
parse_nuclide("1A", nuclides, dataset_name)
with self.assertRaises(ValueError):
parse_nuclide("H-4", nuclides, dataset_name)
with self.assertRaises(ValueError):
parse_nuclide("H4", nuclides, dataset_name)
with self.assertRaises(ValueError):
parse_nuclide("4H", nuclides, dataset_name)
with self.assertRaises(ValueError):
parse_nuclide("Pb-198m", nuclides, dataset_name)
with self.assertRaises(ValueError):
parse_nuclide("Pbo-198m", nuclides, dataset_name)
def test_add_dictionaries(self) -> None:
"""
Test function which adds two inventory dictionaries together.
"""
dict1 = {"Pm-141": 1.0, "Rb-78": 2.0}
dict2 = {"Pm-141": 3.0, "Rb-90": 4.0}
self.assertEqual(
add_dictionaries(dict1, dict2),
{"Pm-141": 4.0, "Rb-78": 2.0, "Rb-90": 4.0},
)
dict1 = {"Pm-141": Integer(2) * log(3), "Rb-78": Integer(4) / log(5)}
dict2 = {"Pm-141": log(3) / Integer(7), "Rb-90": Integer(9)}
self.assertEqual(
add_dictionaries(dict1, dict2),
{
"Pm-141": Integer(15) * log(3) / Integer(7),
"Rb-78": Integer(4) / log(5),
"Rb-90": Integer(9),
},
)
def test_sort_dictionary_alphabetically(self) -> None:
"""
Test the sorting of a dictionary by its keys alphabetically.
"""
inv_dict = {"U-235": 1.2, "Tc-99m": 2.3, "Tc-99": 5.8}
self.assertEqual(
sort_dictionary_alphabetically(inv_dict),
{"Tc-99": 5.8, "Tc-99m": 2.3, "U-235": 1.2},
)
inv_dict = {"U-235": Integer(1), "Tc-99m": Integer(2), "Tc-99": Integer(3)}
self.assertEqual(
sort_dictionary_alphabetically(inv_dict),
{"Tc-99": Integer(3), "Tc-99m": Integer(2), "U-235": Integer(1)},
)
def test_sort_list_according_to_dataset(self) -> None:
"""
Test the sorting of list of nuclides according to their position in the decay dataset.
"""
nuclide_list = ["Tc-99", "Tc-99m"]
nuclide_dict = {"Tc-99m": 0, "Tc-99": 1}
self.assertEqual(
sort_list_according_to_dataset(nuclide_list, nuclide_dict),
["Tc-99m", "Tc-99"],
)
class TestNuclideStrError(unittest.TestCase):
    """
    Unit tests for the NuclideStrError class.
    """

    def test_instantiation(self) -> None:
        """
        Test instantiation of NuclideStrError exceptions.
        """
        err = NuclideStrError("A4", "Dummy message.")
        self.assertEqual(err.nuclide, "A4")
        self.assertEqual(err.additional_message, "Dummy message.")

    def test___str__(self) -> None:
        """
        Test string representation of NuclideStrError exceptions.
        """
        # The message combines the offending nuclide and the extra detail.
        err = NuclideStrError("A4", "Dummy message.")
        self.assertEqual(str(err), "A4 is not a valid nuclide string. Dummy message.")
# Allow the test suite to be executed directly: ``python test_utils.py``.
if __name__ == "__main__":
    unittest.main()
| 44.240688 | 94 | 0.629922 | """
Unit tests for utils.py functions.
"""
import unittest
import numpy as np
from sympy import Integer, log
from radioactivedecay.utils import (
get_metastable_chars,
Z_to_elem,
elem_to_Z,
build_id,
build_nuclide_string,
NuclideStrError,
parse_nuclide_str,
parse_id,
parse_nuclide,
add_dictionaries,
sort_dictionary_alphabetically,
sort_list_according_to_dataset,
)
class TestFunctions(unittest.TestCase):
"""
Unit tests for the utils.py functions.
"""
def test_get_metastable_chars(self) -> None:
"""
Test fetching of list of metastable state characters.
"""
self.assertEqual(get_metastable_chars(), ["m", "n", "p", "q", "r", "x"])
def test_Z_to_elem(self) -> None:
"""
Test the conversion of atomic number to element symbol.
"""
self.assertEqual(Z_to_elem(1), "H")
self.assertEqual(Z_to_elem(20), "Ca")
self.assertEqual(Z_to_elem(26), "Fe")
def test_elem_to_Z(self) -> None:
"""
Test the conversion of element symbol to atomic number.
"""
self.assertEqual(elem_to_Z("H"), 1)
self.assertEqual(elem_to_Z("Ca"), 20)
self.assertEqual(elem_to_Z("Fe"), 26)
def test_build_id(self) -> None:
"""
Test the canonical id builder.
"""
self.assertEqual(build_id(26, 56), 260560000)
self.assertEqual(build_id(53, 118), 531180000)
self.assertEqual(build_id(53, 118, "m"), 531180001)
self.assertEqual(build_id(65, 156, "n"), 651560002)
self.assertEqual(build_id(49, 129, "p"), 491290003)
self.assertEqual(build_id(71, 177, "q"), 711770004)
self.assertEqual(build_id(71, 177, "r"), 711770005)
self.assertEqual(build_id(71, 174, "x"), 711740006)
with self.assertRaises(ValueError):
build_id(65, 156, "z")
def test_built_nuclide_string(self) -> None:
"""
Test the nuclide string builder.
"""
self.assertEqual(build_nuclide_string(26, 56), "Fe-56")
self.assertEqual(build_nuclide_string(53, 118), "I-118")
self.assertEqual(build_nuclide_string(53, 118, "m"), "I-118m")
self.assertEqual(build_nuclide_string(65, 156, "n"), "Tb-156n")
self.assertEqual(build_nuclide_string(49, 129, "p"), "In-129p")
self.assertEqual(build_nuclide_string(71, 177, "q"), "Lu-177q")
self.assertEqual(build_nuclide_string(71, 177, "r"), "Lu-177r")
self.assertEqual(build_nuclide_string(71, 174, "x"), "Lu-174x")
with self.assertRaises(ValueError):
build_nuclide_string(999, 1000, "z")
def test_parse_nuclide_str(self) -> None:
"""
Test the parsing of nuclide strings.
"""
self.assertEqual(parse_nuclide_str("Ca-40"), "Ca-40")
self.assertEqual(parse_nuclide_str("Ca40"), "Ca-40")
self.assertEqual(parse_nuclide_str("40Ca"), "Ca-40")
# Whitespace removal (Issue #65)
self.assertEqual(parse_nuclide_str(" Ca -40 "), "Ca-40")
self.assertEqual(parse_nuclide_str("C\ta\n-40"), "Ca-40")
# Robust to capitalization mistakes (Issue #65)
self.assertEqual(parse_nuclide_str("y-91"), "Y-91")
self.assertEqual(parse_nuclide_str("y91"), "Y-91")
self.assertEqual(parse_nuclide_str("91y"), "Y-91")
self.assertEqual(parse_nuclide_str("y-91M"), "Y-91m")
self.assertEqual(parse_nuclide_str("y91M"), "Y-91m")
# Following test will fail as no capitalization of Y
# self.assertEqual(parse_nuclide_str("91my"), "Y-91m")
self.assertEqual(parse_nuclide_str("ca-40"), "Ca-40")
self.assertEqual(parse_nuclide_str("CA-40"), "Ca-40")
self.assertEqual(parse_nuclide_str("Tc-99M"), "Tc-99m")
self.assertEqual(parse_nuclide_str("iR192N"), "Ir-192n")
self.assertEqual(parse_nuclide_str("192NiR"), "Ir-192n")
self.assertEqual(parse_nuclide_str("iN129P"), "In-129p")
self.assertEqual(parse_nuclide_str("177qLu"), "Lu-177q")
self.assertEqual(parse_nuclide_str("LU177R"), "Lu-177r")
self.assertEqual(parse_nuclide_str("lu-174x"), "Lu-174x")
self.assertEqual(parse_nuclide_str("ni56"), "Ni-56")
self.assertEqual(parse_nuclide_str("ni-56"), "Ni-56")
self.assertEqual(parse_nuclide_str("56Ni"), "Ni-56")
self.assertEqual(parse_nuclide_str("56ni"), "Ni-56")
# Following test will fail as logic assumes this is I-56n
# self.assertEqual(parse_nuclide_str("56nI"), "Ni-56")
self.assertEqual(parse_nuclide_str("ni69M"), "Ni-69m")
self.assertEqual(parse_nuclide_str("ni-69n"), "Ni-69n")
self.assertEqual(parse_nuclide_str("69nni"), "Ni-69n")
self.assertEqual(parse_nuclide_str("130nI"), "I-130n")
# Following tests will fail as logic assumes Ni-130
# self.assertEqual(parse_nuclide_str("130NI"), "I-130n")
# self.assertEqual(parse_nuclide_str("130Ni"), "I-130n")
# self.assertEqual(parse_nuclide_str("130ni"), "I-130n")
with self.assertRaises(NuclideStrError):
parse_nuclide_str("H3.") # not alpha-numeric
with self.assertRaises(NuclideStrError):
parse_nuclide_str("H-3-") # too many hyphens
with self.assertRaises(NuclideStrError):
parse_nuclide_str("H-301") # mass number too large
with self.assertRaises(NuclideStrError):
parse_nuclide_str("H") # no mass number
with self.assertRaises(NuclideStrError):
parse_nuclide_str("Tc-99m3") # more than one number
with self.assertRaises(NuclideStrError):
parse_nuclide_str("F26m0") # more than one number
with self.assertRaises(NuclideStrError):
parse_nuclide_str("A3") # invalid element
with self.assertRaises(NuclideStrError):
parse_nuclide_str("Tc-99mm") # metastable char too long
with self.assertRaises(NuclideStrError):
parse_nuclide_str("Tc-99o") # metastable char invalid
def test_parse_id(self) -> None:
"""
Test the canonical id to nuclide string converter.
"""
self.assertEqual(parse_id(260560000), "Fe-56")
self.assertEqual(parse_id(531180000), "I-118")
self.assertEqual(parse_id(531180001), "I-118m")
self.assertEqual(parse_id(651560002), "Tb-156n")
self.assertEqual(parse_id(491290003), "In-129p")
self.assertEqual(parse_id(711770004), "Lu-177q")
self.assertEqual(parse_id(711770005), "Lu-177r")
self.assertEqual(parse_id(711740006), "Lu-174x")
def test_parse_nuclide(self) -> None:
"""
Test the parsing of nuclide strings.
"""
nuclides = np.array(
[
"H-3",
"Be-7",
"C-10",
"Ne-19",
"I-118",
"Pd-100",
"Cl-34m",
"I-118m",
"Tb-156m",
"Tb-156n",
"In-129p",
"Lu-177q",
"Lu-177r",
"Lu-174x",
]
)
dataset_name = "test"
# Re-formatting of acceptable strings e.g. 100Pd -> Pd-100
self.assertEqual(parse_nuclide("H-3", nuclides, dataset_name), "H-3")
self.assertEqual(parse_nuclide("H3", nuclides, dataset_name), "H-3")
self.assertEqual(parse_nuclide("3H", nuclides, dataset_name), "H-3")
self.assertEqual(parse_nuclide(10030000, nuclides, dataset_name), "H-3")
self.assertEqual(parse_nuclide("Be-7", nuclides, dataset_name), "Be-7")
self.assertEqual(parse_nuclide("Be7", nuclides, dataset_name), "Be-7")
self.assertEqual(parse_nuclide("7Be", nuclides, dataset_name), "Be-7")
self.assertEqual(parse_nuclide(40070000, nuclides, dataset_name), "Be-7")
self.assertEqual(parse_nuclide("C-10", nuclides, dataset_name), "C-10")
self.assertEqual(parse_nuclide("C10", nuclides, dataset_name), "C-10")
self.assertEqual(parse_nuclide("10C", nuclides, dataset_name), "C-10")
self.assertEqual(parse_nuclide(60100000, nuclides, dataset_name), "C-10")
self.assertEqual(parse_nuclide("Ne-19", nuclides, dataset_name), "Ne-19")
self.assertEqual(parse_nuclide("Ne19", nuclides, dataset_name), "Ne-19")
self.assertEqual(parse_nuclide("19Ne", nuclides, dataset_name), "Ne-19")
self.assertEqual(parse_nuclide(100190000, nuclides, dataset_name), "Ne-19")
self.assertEqual(parse_nuclide("I-118", nuclides, dataset_name), "I-118")
self.assertEqual(parse_nuclide("I118", nuclides, dataset_name), "I-118")
self.assertEqual(parse_nuclide("118I", nuclides, dataset_name), "I-118")
self.assertEqual(parse_nuclide(531180000, nuclides, dataset_name), "I-118")
self.assertEqual(parse_nuclide("Pd-100", nuclides, dataset_name), "Pd-100")
self.assertEqual(parse_nuclide("Pd100", nuclides, dataset_name), "Pd-100")
self.assertEqual(parse_nuclide("100Pd", nuclides, dataset_name), "Pd-100")
self.assertEqual(parse_nuclide(461000000, nuclides, dataset_name), "Pd-100")
self.assertEqual(parse_nuclide("Cl-34m", nuclides, dataset_name), "Cl-34m")
self.assertEqual(parse_nuclide("Cl34m", nuclides, dataset_name), "Cl-34m")
self.assertEqual(parse_nuclide("34mCl", nuclides, dataset_name), "Cl-34m")
self.assertEqual(parse_nuclide(170340001, nuclides, dataset_name), "Cl-34m")
self.assertEqual(parse_nuclide("I-118m", nuclides, dataset_name), "I-118m")
self.assertEqual(parse_nuclide("I118m", nuclides, dataset_name), "I-118m")
self.assertEqual(parse_nuclide("118mI", nuclides, dataset_name), "I-118m")
self.assertEqual(parse_nuclide(531180001, nuclides, dataset_name), "I-118m")
self.assertEqual(parse_nuclide("Tb-156m", nuclides, dataset_name), "Tb-156m")
self.assertEqual(parse_nuclide("Tb156m", nuclides, dataset_name), "Tb-156m")
self.assertEqual(parse_nuclide("156mTb", nuclides, dataset_name), "Tb-156m")
self.assertEqual(parse_nuclide(651560001, nuclides, dataset_name), "Tb-156m")
self.assertEqual(parse_nuclide("Tb-156n", nuclides, dataset_name), "Tb-156n")
self.assertEqual(parse_nuclide("Tb156n", nuclides, dataset_name), "Tb-156n")
self.assertEqual(parse_nuclide("156nTb", nuclides, dataset_name), "Tb-156n")
self.assertEqual(parse_nuclide(651560002, nuclides, dataset_name), "Tb-156n")
self.assertEqual(parse_nuclide("In-129p", nuclides, dataset_name), "In-129p")
self.assertEqual(parse_nuclide("In129p", nuclides, dataset_name), "In-129p")
self.assertEqual(parse_nuclide("129pIn", nuclides, dataset_name), "In-129p")
self.assertEqual(parse_nuclide(491290003, nuclides, dataset_name), "In-129p")
self.assertEqual(parse_nuclide("Lu-177q", nuclides, dataset_name), "Lu-177q")
self.assertEqual(parse_nuclide("Lu177q", nuclides, dataset_name), "Lu-177q")
self.assertEqual(parse_nuclide("177qLu", nuclides, dataset_name), "Lu-177q")
self.assertEqual(parse_nuclide(711770004, nuclides, dataset_name), "Lu-177q")
self.assertEqual(parse_nuclide("Lu-177r", nuclides, dataset_name), "Lu-177r")
self.assertEqual(parse_nuclide("Lu-177r", nuclides, dataset_name), "Lu-177r")
self.assertEqual(parse_nuclide("177rLu", nuclides, dataset_name), "Lu-177r")
self.assertEqual(parse_nuclide(711770005, nuclides, dataset_name), "Lu-177r")
self.assertEqual(parse_nuclide("Lu-174x", nuclides, dataset_name), "Lu-174x")
self.assertEqual(parse_nuclide("Lu-174x", nuclides, dataset_name), "Lu-174x")
self.assertEqual(parse_nuclide("174xLu", nuclides, dataset_name), "Lu-174x")
self.assertEqual(parse_nuclide(711740006, nuclides, dataset_name), "Lu-174x")
# Catch erroneous strings
with self.assertRaises(TypeError):
parse_nuclide(1.2, nuclides, dataset_name)
with self.assertRaises(ValueError):
parse_nuclide("H", nuclides, dataset_name)
with self.assertRaises(ValueError):
parse_nuclide("A1", nuclides, dataset_name)
with self.assertRaises(ValueError):
parse_nuclide("1A", nuclides, dataset_name)
with self.assertRaises(ValueError):
parse_nuclide("H-4", nuclides, dataset_name)
with self.assertRaises(ValueError):
parse_nuclide("H4", nuclides, dataset_name)
with self.assertRaises(ValueError):
parse_nuclide("4H", nuclides, dataset_name)
with self.assertRaises(ValueError):
parse_nuclide("Pb-198m", nuclides, dataset_name)
with self.assertRaises(ValueError):
parse_nuclide("Pbo-198m", nuclides, dataset_name)
def test_add_dictionaries(self) -> None:
"""
Test function which adds two inventory dictionaries together.
"""
dict1 = {"Pm-141": 1.0, "Rb-78": 2.0}
dict2 = {"Pm-141": 3.0, "Rb-90": 4.0}
self.assertEqual(
add_dictionaries(dict1, dict2),
{"Pm-141": 4.0, "Rb-78": 2.0, "Rb-90": 4.0},
)
dict1 = {"Pm-141": Integer(2) * log(3), "Rb-78": Integer(4) / log(5)}
dict2 = {"Pm-141": log(3) / Integer(7), "Rb-90": Integer(9)}
self.assertEqual(
add_dictionaries(dict1, dict2),
{
"Pm-141": Integer(15) * log(3) / Integer(7),
"Rb-78": Integer(4) / log(5),
"Rb-90": Integer(9),
},
)
def test_sort_dictionary_alphabetically(self) -> None:
"""
Test the sorting of a dictionary by its keys alphabetically.
"""
inv_dict = {"U-235": 1.2, "Tc-99m": 2.3, "Tc-99": 5.8}
self.assertEqual(
sort_dictionary_alphabetically(inv_dict),
{"Tc-99": 5.8, "Tc-99m": 2.3, "U-235": 1.2},
)
inv_dict = {"U-235": Integer(1), "Tc-99m": Integer(2), "Tc-99": Integer(3)}
self.assertEqual(
sort_dictionary_alphabetically(inv_dict),
{"Tc-99": Integer(3), "Tc-99m": Integer(2), "U-235": Integer(1)},
)
def test_sort_list_according_to_dataset(self) -> None:
"""
Test the sorting of list of nuclides according to their position in the decay dataset.
"""
nuclide_list = ["Tc-99", "Tc-99m"]
nuclide_dict = {"Tc-99m": 0, "Tc-99": 1}
self.assertEqual(
sort_list_according_to_dataset(nuclide_list, nuclide_dict),
["Tc-99m", "Tc-99"],
)
class TestNuclideStrError(unittest.TestCase):
    """
    Unit tests for the NuclideStrError class.
    """

    def test_instantiation(self) -> None:
        """
        Test instantiation of NuclideStrError exceptions.
        """
        err = NuclideStrError("A4", "Dummy message.")
        self.assertEqual(err.nuclide, "A4")
        self.assertEqual(err.additional_message, "Dummy message.")

    def test___str__(self) -> None:
        """
        Test string representation of NuclideStrError exceptions.
        """
        # The message combines the offending nuclide and the extra detail.
        err = NuclideStrError("A4", "Dummy message.")
        self.assertEqual(str(err), "A4 is not a valid nuclide string. Dummy message.")
if __name__ == "__main__":
unittest.main()
| 0 | 0 | 0 |
f4ed724a958fb55e950f7a8e3a6a1c517973e5d2 | 759 | py | Python | flexslider/models.py | ForumDev/djangocms-flexslider | 181168aa9752d7023e03880b27004b886c96afdf | [
"MIT"
] | null | null | null | flexslider/models.py | ForumDev/djangocms-flexslider | 181168aa9752d7023e03880b27004b886c96afdf | [
"MIT"
] | null | null | null | flexslider/models.py | ForumDev/djangocms-flexslider | 181168aa9752d7023e03880b27004b886c96afdf | [
"MIT"
] | 1 | 2020-10-12T06:32:34.000Z | 2020-10-12T06:32:34.000Z | from django.db import models
from cms.models.pluginmodel import CMSPlugin
from django.utils.http import int_to_base36
# Create your models here.
| 44.647059 | 116 | 0.716733 | from django.db import models
from cms.models.pluginmodel import CMSPlugin
from django.utils.http import int_to_base36
# Create your models here.
class Slide(models.Model):
    """A single image slide rendered by the FlexSlider CMS plugin."""

    # Display title; also shown in the string representation below.
    title = models.CharField(max_length=25)
    # Ordering position of the slide within the slider.
    index = models.IntegerField(default=0)
    # Free-form description text for the slide.
    descript = models.TextField(default='')
    # Slug-like identifier; the constraint is only hinted via help_text,
    # not enforced by a validator.
    short_name = models.CharField(max_length=25,default='',help_text='short-name: no special characters, no spaces')
    # Uploaded slide image; required (blank/null both forbidden).
    image = models.ImageField("Slider image", upload_to="images/flexslider/", blank=False, null=False)
    # NOTE(review): Django only honours ``get_latest_by`` when it is declared
    # on an inner ``Meta`` class; as a plain class attribute it has no effect
    # on ``objects.latest()`` — confirm intent before relying on it.
    get_latest_by = 'index'
    def __str__(self): # __unicode__ on Python 2
        # "<base36 index> <title>", e.g. "3 My slide".
        return int_to_base36(self.index)+ ' ' + self.title
    def __unicode__(self):
        # Python 2 counterpart of __str__; same representation.
        return int_to_base36(self.index)+ ' ' + self.title
| 155 | 436 | 23 |
5b5eae6199479a508c9a2978661678252607a7c1 | 1,147 | py | Python | level_loader.py | OHopiak/over_the_wire_level_loader | 31a5ecc68553d1ed9c09402af025347c209d1f15 | [
"MIT"
] | 1 | 2022-03-06T19:10:21.000Z | 2022-03-06T19:10:21.000Z | level_loader.py | OHopiak/over_the_wire_level_loader | 31a5ecc68553d1ed9c09402af025347c209d1f15 | [
"MIT"
] | null | null | null | level_loader.py | OHopiak/over_the_wire_level_loader | 31a5ecc68553d1ed9c09402af025347c209d1f15 | [
"MIT"
] | null | null | null | import json
import os
from config import Config
from level import Level
| 24.404255 | 58 | 0.707934 | import json
import os
from config import Config
from level import Level
class LevelLoader:
    """Loads and persists OverTheWire level metadata as a JSON file.

    The JSON document maps level numbers (serialised as string keys) to
    objects with ``password``, ``description``, ``solution`` and ``tip``
    fields.
    """

    def __init__(self, config: Config, filename):
        self.config = config      # configuration handed to every Level created
        self.filename = filename  # path of the JSON store on disk
        self.levels = {}          # level_number (int) -> Level

    def load(self):
        """Populate ``self.levels`` from ``self.filename``.

        If the file does not exist yet, seed the cache with a fresh
        level 0 and write the file out immediately.
        """
        if not os.path.exists(self.filename):
            self.levels[0] = Level(self.config)
            self.save()
            return
        with open(self.filename, 'r') as f:
            levels_data = json.load(f)
        for level_number_str, level_data in levels_data.items():
            # JSON object keys are always strings; convert back to int.
            level_number = int(level_number_str)
            level = Level(self.config, level_number)
            level.password = level_data.get('password')
            level.description = level_data.get('description')
            level.solution = level_data.get('solution')
            level.tip = level_data.get('tip')
            self.levels[level_number] = level

    def save(self):
        """Write every cached level back to ``self.filename`` as JSON."""
        levels_data = {
            level.level_number: level.to_dict()
            for level in self.levels.values()
        }
        with open(self.filename, 'w') as f:
            json.dump(levels_data, f, indent='\t')

    def get_level(self, level_number: int) -> Level:
        """Return the cached level, or ``None`` if it is unknown."""
        return self.levels.get(level_number)

    def save_level(self, level: Level):
        """Insert or replace a level in the cache and persist immediately."""
        self.levels[level.level_number] = level
        self.save()
| 934 | -3 | 142 |
22892635c988f34b654c60f02e026254afa31ea3 | 6,210 | py | Python | hw/rmnist.py | vihari/CSD | 9902cdc8ea54f2650cd1396f904a06598a864a76 | [
"MIT"
] | 41 | 2020-05-01T10:08:26.000Z | 2021-12-21T12:47:53.000Z | hw/rmnist.py | kevinbro96/CSD | 9902cdc8ea54f2650cd1396f904a06598a864a76 | [
"MIT"
] | 6 | 2020-07-10T03:48:16.000Z | 2021-07-21T06:49:05.000Z | hw/rmnist.py | kevinbro96/CSD | 9902cdc8ea54f2650cd1396f904a06598a864a76 | [
"MIT"
] | 6 | 2020-06-07T13:57:36.000Z | 2021-12-09T11:52:39.000Z | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Builds the MNIST network.
Implements the inference/loss/training pattern for model building.
1. inference() - Builds the model as far as required for running the network
forward to make predictions.
2. loss() - Adds to the inference model the layers required to generate loss.
3. training() - Adds to the loss model the Ops required to generate and
apply gradients.
This file is used by the various "fully_connected_*.py" files and not meant to
be run.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
import tensorflow as tf
import tqdm
from scipy import misc
from scipy.ndimage import rotate as rot
# The MNIST dataset has 10 classes, representing the digits 0 through 9.
NUM_CLASSES = 10
# The MNIST images are always 28x28 pixels.
IMAGE_SIZE = 28
IMAGE_PIXELS = IMAGE_SIZE * IMAGE_SIZE
slim = tf.contrib.slim
FLAGS = tf.app.flags.FLAGS
| 36.315789 | 114 | 0.703865 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Builds the MNIST network.
Implements the inference/loss/training pattern for model building.
1. inference() - Builds the model as far as required for running the network
forward to make predictions.
2. loss() - Adds to the inference model the layers required to generate loss.
3. training() - Adds to the loss model the Ops required to generate and
apply gradients.
This file is used by the various "fully_connected_*.py" files and not meant to
be run.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
import tensorflow as tf
import tqdm
from scipy import misc
from scipy.ndimage import rotate as rot
# The MNIST dataset has 10 classes, representing the digits 0 through 9.
NUM_CLASSES = 10
# The MNIST images are always 28x28 pixels.
IMAGE_SIZE = 28
IMAGE_PIXELS = IMAGE_SIZE * IMAGE_SIZE
slim = tf.contrib.slim
FLAGS = tf.app.flags.FLAGS
def prepare_data(leftout_angles):
    """Build rotated-MNIST train/test splits for domain generalization.

    Subsamples 1000 train and 1000 test MNIST digits, normalizes them to
    roughly [-1, 1], then rotates each by the angles 0, 15, ..., 75 degrees.
    Angles listed in ``leftout_angles`` become test domains (built from the
    test digits); all other angles become train domains (built from the
    train digits).

    Returns three (images, labels, domain_uids) tuples: train, val, test —
    note val and test are the same object (no separate validation split).
    """
    # Get the sets of images and labels for training, validation, and
    # test on MNIST.
    (train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.mnist.load_data()
    TRAIN_SIZE = 1000
    TEST_SIZE = 1000
    np.random.seed(0)
    idxs = np.random.choice(np.arange(len(train_images)), TRAIN_SIZE, replace=False)
    idxs2 = np.random.choice(np.arange(len(test_images)), TEST_SIZE, replace=False)
    train_images = train_images[idxs].astype(np.float32)
    train_labels = train_labels[idxs].tolist()
    test_images = test_images[idxs2].astype(np.float32)
    test_labels = test_labels[idxs2].tolist()
    # Normalize pixel values from [0, 255] to roughly [-1, 1].
    train_images = (train_images - 128.)/128.
    test_images = (test_images - 128.)/128.
    # transform all train and test images
    _train_images, _train_labels, _train_uids = [], [], []
    _test_images, _test_labels, _test_uids = [], [], []
    for ai, angle in enumerate(range(0, 90, 15)):
        if angle in leftout_angles:
            _timgs = []
            for ti in tqdm.tqdm(range(len(test_images)), desc="Transforming test images"):
                _tr = test_images[ti]
                _timgs.append(rot(_tr, angle, reshape=False))
            _test_images += _timgs
            _test_labels += test_labels
            # NOTE(review): domain uid is ai-1, so the angle-0 domain gets
            # uid -1 — presumably an intentional offset consumed downstream;
            # confirm against the training code before changing.
            _test_uids += [ai-1]*len(test_images)
        else:
            _timgs = []
            for ti in tqdm.tqdm(range(len(train_images)), desc="Transforming train images"):
                _tr = train_images[ti]
                _timgs.append(rot(_tr, angle, reshape=False))
            _train_images += _timgs
            _train_labels += train_labels
            _train_uids += [ai-1]*len(train_images)
    train_images, train_labels, train_uids = np.array(_train_images), np.array(_train_labels), np.array(_train_uids)
    test_images, test_labels, test_uids = np.array(_test_images), np.array(_test_labels), np.array(_test_uids)
    train = (train_images, train_labels, train_uids)
    test = (test_images, test_labels, test_uids)
    print (np.max(train[0]), np.min(train[0]))
    print (np.max(test[0]), np.min(test[0]))
    print ("Num Train: %d num test: %d" % (len(train_images), len(_test_images)))
    # The test split is deliberately returned twice (as val and test).
    return train, test, test
def prepare_data2():
    """Return the full MNIST splits as (images, labels, uid) tuples.

    The uid column is all zeros (single domain). Random subsampling code is
    present but disabled: both size constants are -1, so the guarded
    branches never run.
    """
    # Fetch MNIST; subsampling below is currently switched off.
    (train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.mnist.load_data()
    TRAIN_SIZE = -1
    TEST_SIZE = -1
    np.random.seed(2)
    if TRAIN_SIZE > 0:
        chosen = np.random.choice(np.arange(len(train_images)), TRAIN_SIZE, replace=False)
        train_images = train_images[chosen]
        train_labels = train_labels[chosen].tolist()
    if TEST_SIZE > 0:
        chosen = np.random.choice(np.arange(len(test_images)), TEST_SIZE, replace=False)
        test_images = test_images[chosen]
        test_labels = test_labels[chosen]
    # Third element is the per-example domain uid: a zero vector here.
    train = (np.array(train_images), np.array(train_labels), np.zeros(len(train_labels)))
    test = (np.array(test_images), np.array(test_labels), np.zeros(len(test_labels)))
    print(np.shape(train[0]))
    print(np.shape(test[0]))
    return train, test
def prepare_data_for(angle, DEF=0):
    """Build rotated-MNIST splits with in-range train and out-of-range test angles.

    Train images are rotated by 15..75 degrees (step 15); test images by
    -20..10 and 80..120 degrees (step 5). Every example gets domain uid
    ``DEF``. Returns [images, labels, uids] lists for train and test.

    NOTE(review): the ``angle`` parameter is never used — both loops below
    rebind the name ``angle`` immediately, so the argument has no effect on
    the output. Likewise ``train_per_domain``/``test_per_domain`` are
    created but never populated. Confirm before removing either.
    """
    # Get the sets of images and labels for training, validation, and
    # test on MNIST.
    np.random.seed(0)
    (train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.mnist.load_data()
    TRAIN_SIZE = 1000
    TEST_SIZE = 1000
    idxs = np.random.choice(np.arange(len(train_images)), TRAIN_SIZE, replace=False)
    train_images = train_images[idxs].astype(np.float32)
    train_labels = train_labels[idxs].tolist()
    idxs = np.random.choice(np.arange(len(test_images)), TEST_SIZE, replace=False)
    test_images = test_images[idxs].astype(np.float32)
    test_labels = test_labels[idxs].tolist()
    # test_labels = test_labels.tolist()
    # transform all train and test images
    _train_images, _train_labels, _train_uids = [], [], []
    train_per_domain, test_per_domain = {}, {}
    _timgs, _labels = [], []
    # Train domains: rotations of the train digits by 15, 30, 45, 60, 75.
    for angle in range(15, 90, 15):
        for ti in tqdm.tqdm(range(len(train_images)), desc="Transforming train images"):
            _timgs.append(rot(train_images[ti], angle, reshape=False))
        _labels += train_labels
    train = [np.array(_timgs), np.array(_labels), np.array([DEF]*len(_timgs))]
    _timgs = []
    _labels = []
    # Test domains: angles strictly outside the training range.
    angles = [_ for _ in range(-20, 15, 5)]
    angles += [_ for _ in range(80, 125, 5)]
    for angle in angles:
        for ti in tqdm.tqdm(range(len(test_images)), desc="Transforming test images"):
            _timgs.append(rot(test_images[ti], angle, reshape=False))
        _labels += test_labels
    test = [np.array(_timgs), np.array(_labels), np.array([DEF]*len(_labels))]
    return train, test
| 4,503 | 0 | 69 |
c9fb9128994d65e802fe7e09c5ea25c6fdc37c5a | 2,290 | py | Python | tests/test_plotting.py | dpanici/DESC | e98a16394d02411952efc18cc6c009e5226b11e4 | [
"MIT"
] | 1 | 2020-11-20T17:17:50.000Z | 2020-11-20T17:17:50.000Z | tests/test_plotting.py | dpanici/DESC | e98a16394d02411952efc18cc6c009e5226b11e4 | [
"MIT"
] | 12 | 2020-11-19T05:22:13.000Z | 2020-12-15T03:50:33.000Z | tests/test_plotting.py | dpanici/DESC | e98a16394d02411952efc18cc6c009e5226b11e4 | [
"MIT"
] | null | null | null | import unittest
from desc.plotting import Plot
| 43.207547 | 88 | 0.461135 | import unittest
from desc.plotting import Plot
class TestPlot(unittest.TestCase):
    """Unit tests for desc.plotting.Plot name parsing and label generation."""

    def setUp(self):
        """Build a table of variable names and their expected parse components."""
        self.names = ['B', '|B|', 'B^zeta', 'B_zeta', 'B_r', 'B^zeta_r',
                      'B_zeta_r', 'B**2', 'B_r**2', 'B^zeta**2', 'B_zeta**2',
                      'B^zeta_r**2', 'B_zeta_r**2']
        self.bases = ['B', '|B|', 'B', 'B', 'B', 'B',
                      'B', 'B', 'B', 'B', 'B',
                      'B', 'B']
        self.sups = ['', '', 'zeta', '', '', 'zeta',
                     '', '', '', 'zeta', '',
                     'zeta', '']
        self.subs = ['', '', '', 'zeta', '', '',
                     'zeta', '', '', '', 'zeta',
                     '', 'zeta']
        self.ds = ['', '', '', '', 'r', 'r',
                   'r', '', 'r', '', '',
                   'r', 'r']
        self.pows = ['', '', '', '', '', '',
                     '', '2', '2', '2', '2',
                     '2', '2']
        self.plot = Plot()
        self.name_dicts = [self.plot.format_name(name) for name in self.names]

    def test_name_dict(self):
        """format_name should split each name into base/sups/subs/d/power."""
        rows = zip(self.names, self.name_dicts, self.bases, self.sups,
                   self.subs, self.ds, self.pows)
        # Per-case subTest replaces the original all(...) asserts, which only
        # reported "False is not true" without naming the offending case.
        for name, name_dict, base, sup, sub, d, power in rows:
            with self.subTest(name=name):
                self.assertEqual(name_dict['base'], base)
                self.assertEqual(name_dict['sups'], sup)
                self.assertEqual(name_dict['subs'], sub)
                self.assertEqual(name_dict['d'], d)
                self.assertEqual(name_dict['power'], power)

    def test_name_label(self):
        """name_label should emit LaTeX consistent with the parsed components."""
        # The debug print(labels) from the original test has been removed.
        for name, name_dict in zip(self.names, self.name_dicts):
            label = self.plot.name_label(name_dict)
            with self.subTest(name=name):
                # Labels must be wrapped in LaTeX math-mode delimiters.
                self.assertTrue(label[0] == '$' and label[-1] == '$')
                if name_dict['d'] != '':
                    self.assertIn('/dr', label)
                if name_dict['sups'] == '' and name_dict['power'] == '':
                    self.assertNotIn('^{', label)
                if name_dict['subs'] == '':
                    self.assertNotIn('_{', label)
| 2,125 | 13 | 104 |
4395828f7160f986e3513ec5a042011bcc7933d9 | 5,193 | py | Python | appimagebuilder/__main__.py | mssalvatore/appimage-builder | 2ecb7973cedfff9d03a21258419e515c48cafe84 | [
"MIT"
] | null | null | null | appimagebuilder/__main__.py | mssalvatore/appimage-builder | 2ecb7973cedfff9d03a21258419e515c48cafe84 | [
"MIT"
] | null | null | null | appimagebuilder/__main__.py | mssalvatore/appimage-builder | 2ecb7973cedfff9d03a21258419e515c48cafe84 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright 2020 Alexis Lopez Zubieta
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
import argparse
import logging
import os
from appimagebuilder.common import shell
from appimagebuilder import recipe
from appimagebuilder.builder.builder import Builder
from appimagebuilder.appimage import AppImageCreator
from appimagebuilder.generator.generator import RecipeGenerator
from appimagebuilder.tester import ExecutionTest
from appimagebuilder.tester.errors import TestFailed
if __name__ == "__main__":
# execute only if run as the entry point into the program
__main__()
| 32.867089 | 88 | 0.629886 | #!/usr/bin/env python3
# Copyright 2020 Alexis Lopez Zubieta
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
import argparse
import logging
import os
from appimagebuilder.common import shell
from appimagebuilder import recipe
from appimagebuilder.builder.builder import Builder
from appimagebuilder.appimage import AppImageCreator
from appimagebuilder.generator.generator import RecipeGenerator
from appimagebuilder.tester import ExecutionTest
from appimagebuilder.tester.errors import TestFailed
def __main__():
    """Entry point for the ``appimage-builder`` command line tool.

    Parses the command line, optionally generates a recipe from an existing
    AppDir, and otherwise runs the script/build/test/AppImage stages of a
    version-1 recipe (each stage can be skipped with a ``--skip-*`` flag).
    """
    parser = argparse.ArgumentParser(description="AppImage crafting tool")
    parser.add_argument(
        "--recipe",
        dest="recipe",
        default=os.path.join(os.getcwd(), "AppImageBuilder.yml"),
        help="recipe file path (default: $PWD/AppImageBuilder.yml)",
    )
    parser.add_argument(
        "--log", dest="loglevel", default="INFO", help="logging level (default: INFO)"
    )
    parser.add_argument(
        "--skip-script",
        dest="skip_script",
        action="store_true",
        help="Skip script execution",
    )
    parser.add_argument(
        "--skip-build",
        dest="skip_build",
        action="store_true",
        help="Skip AppDir building",
    )
    parser.add_argument(
        "--skip-tests",
        dest="skip_tests",
        action="store_true",
        help="Skip AppDir testing",
    )
    parser.add_argument(
        "--skip-appimage",
        dest="skip_appimage",
        action="store_true",
        help="Skip AppImage generation",
    )
    parser.add_argument(
        "--generate",
        dest="generate",
        action="store_true",
        help="Try to generate recipe from an AppDir",
    )
    args = parser.parse_args()

    logger = logging.getLogger("appimage-builder")

    # Bug fix: without a default, getattr raised AttributeError for an
    # unknown level name before the isinstance check below could ever run.
    numeric_level = getattr(logging, args.loglevel.upper(), None)
    if not isinstance(numeric_level, int):
        logging.error("Invalid log level: %s" % args.loglevel)
        # Bail out instead of calling basicConfig with a non-int level,
        # which previously crashed with an unrelated traceback.
        exit(1)
    logging.basicConfig(level=numeric_level)

    if args.generate:
        # Recipe generation is exclusive: do it and stop.
        generator = RecipeGenerator()
        generator.generate()
        exit(0)

    recipe_data = load_recipe(args.recipe)
    recipe_version = recipe_data.get_item("version")
    if recipe_version == 1:
        if not args.skip_script:
            script_instructions = recipe_data.get_item("script", [])
            logging.info("======")
            logging.info("Script")
            logging.info("======")
            appdir = recipe_data.get_item("AppDir/path")
            shell.execute(script_instructions, env={"APPDIR": os.path.abspath(appdir)})

        if not args.skip_build:
            creator = Builder(recipe_data)
            creator.build()

        if not args.skip_tests:
            if recipe_data.get_item("AppDir/test", []):
                logging.info("============")
                logging.info("AppDir tests")
                logging.info("============")
                test_cases = _load_tests(recipe_data)
                try:
                    for test in test_cases:
                        test.run()
                except TestFailed as err:
                    logger.error("Tests failed")
                    logger.error(err)
                    exit(1)

        if not args.skip_appimage:
            creator = AppImageCreator(recipe_data)
            creator.create()
    else:
        logger.error("Unknown recipe version: %s" % recipe_version)
        logger.info("Please make sure you're using the latest appimage-builder version")
        exit(1)
def _load_tests(recipe_data):
test_cases = []
appdir = recipe_data.get_item("AppDir/path", "AppDir")
appdir = os.path.abspath(appdir)
test_case_configs = recipe_data.get_item("AppDir/test", [])
for name in test_case_configs:
env = recipe_data.get_item("AppDir/test/%s/env" % name, [])
if isinstance(env, dict):
env = ["%s=%s" % (k, v) for k, v in env.items()]
test = ExecutionTest(
appdir=appdir,
name=name,
image=recipe_data.get_item("AppDir/test/%s/image" % name),
command=recipe_data.get_item("AppDir/test/%s/command" % name),
use_host_x=recipe_data.get_item("AppDir/test/%s/use_host_x" % name, False),
env=env,
)
test_cases.append(test)
return test_cases
def load_recipe(path):
    """Read the recipe file at *path*, validate it against schema v1 and
    return it wrapped in a Recipe accessor."""
    raw_recipe = recipe.read_recipe(path=path)
    recipe.Schema().v1.validate(raw_recipe)
    return recipe.Recipe(raw_recipe)
if __name__ == "__main__":
# execute only if run as the entry point into the program
__main__()
| 3,974 | 0 | 69 |
c3cc1bbc8361fb79bfc3929e7c307b0d7476fa52 | 8,616 | py | Python | python/coursera_python/IBM/FakeAlbumCoverGame.py | SayanGhoshBDA/code-backup | 8b6135facc0e598e9686b2e8eb2d69dd68198b80 | [
"MIT"
] | 16 | 2018-11-26T08:39:42.000Z | 2019-05-08T10:09:52.000Z | python/coursera_python/IBM/FakeAlbumCoverGame.py | SayanGhoshBDA/code-backup | 8b6135facc0e598e9686b2e8eb2d69dd68198b80 | [
"MIT"
] | 8 | 2020-05-04T06:29:26.000Z | 2022-02-12T05:33:16.000Z | python/coursera_python/IBM/FakeAlbumCoverGame.py | SayanGhoshBDA/code-backup | 8b6135facc0e598e9686b2e8eb2d69dd68198b80 | [
"MIT"
] | 5 | 2020-02-11T16:02:21.000Z | 2021-02-05T07:48:30.000Z |
# coding: utf-8
# <div class="alert alert-block alert-info" style="margin-top: 20px">
# <a href="http://cocl.us/NotebooksPython101"><img src = "https://ibm.box.com/shared/static/yfe6h4az47ktg2mm9h05wby2n7e8kei3.png" width = 750, align = "center"></a>
# <a href="https://www.bigdatauniversity.com"><img src = "https://ibm.box.com/shared/static/ugcqz6ohbvff804xp84y4kqnvvk3bq1g.png" width = 300, align = "center"></a>
#
# <h1 align=center><font size = 5> Make Fake Album Cover Game</font></h1>
# ## Table of Contents
# Our goal is to create randomly generated album covers with:
#
# <div class="alert alert-block alert-info" style="margin-top: 20px">
# <ol>
#
# <li><a href="#ref1">Learn how to use the function display_cover</a></li>
# <li><a href="#ref2">Loading a random page from Wikipedia</a></li>
# <li><a href="#ref3">Extracting the Title of the Article</a></li>
# <li><a href="#ref4"> Displaying the Album Cover</a></li>
#
#
# </ol>
# <br>
# <p></p>
# Estimated Time Needed: <strong>60 min</strong>
# </div>
#
# <hr>
#
# Inspiration: [Fake Album Covers](https://fakealbumcovers.com/)
# #### Import libraries
#
# In[8]:
from IPython.display import Image as IPythonImage
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
# #### Helper function to superimpose text on image
#
# In[4]:
def display_cover(top, bottom):
    """Build a fake album cover.

    Downloads a random 500x500 image from https://picsum.photos/, draws
    *top* (band name) and *bottom* (album title) over it in white text with
    a black outline, and returns the result.

    :param top: text drawn near the top of the image (band name)
    :param bottom: text drawn near the bottom of the image (album title)
    :return: a PIL Image with both texts superimposed
    """
    import requests

    name = 'album_art_raw.png'
    # https://picsum.photos/ is a free service that offers random images.
    album_art_raw = requests.get('https://picsum.photos/500/500/?random')
    # Fail loudly on an HTTP error instead of saving an error page as a PNG.
    album_art_raw.raise_for_status()
    with open(name, 'wb') as album_art_raw_file:
        album_art_raw_file.write(album_art_raw.content)

    img = Image.open(name)
    draw = ImageDraw.Draw(img)

    # Fall back to Pillow's built-in font when the DejaVu fonts are not
    # installed (the hard-coded path only exists on Debian-like systems).
    font_path = "/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf"
    try:
        band_name_font = ImageFont.truetype(font_path, 25)   # 25pt font
        album_name_font = ImageFont.truetype(font_path, 20)  # 20pt font
    except OSError:
        band_name_font = ImageFont.load_default()
        album_name_font = ImageFont.load_default()

    # Top-left coordinates (in pixels) where each text block starts.
    band_x, band_y = 50, 50
    album_x, album_y = 50, 400

    # White text with a black border stays visible on any background; the
    # border is drawn by repeating the text at the four diagonal offsets:
    # https://mail.python.org/pipermail/image-sig/2009-May/005681.html
    outline_color = "black"
    for dx, dy in ((-1, -1), (1, -1), (-1, 1), (1, 1)):
        draw.text((band_x + dx, band_y + dy), top, font=band_name_font, fill=outline_color)
        draw.text((album_x + dx, album_y + dy), bottom, font=album_name_font, fill=outline_color)

    draw.text((band_x, band_y), top, (255, 255, 255), font=band_name_font)
    draw.text((album_x, album_y), bottom, (255, 255, 255), font=album_name_font)
    return img
# ## 1) Learn how to use the function display_cover <a id='ref1'></a>
# The function **display_cover** selects a random image from https://picsum.photos/ and will help us superimpose two strings over the image. The parameter **top** is the string we would like to superimpose on the top of an image. The parameter bottom is the string we would like to display on the bottom of the image. The function does not return the image but returns an object of type Image from the Pillow library; the object represents a PIL image.
# In[ ]:
img=display_cover(top='top',bottom='bottom')
# To save the image, we use the method **save** . The argument is the file name of the image we would like to save in this case 'sample-out.png'
# In[ ]:
img.save('sample-out.png')
# Finally, we use **IPythonImage** to read the image file and display the results.
#
# In[11]:
IPythonImage(filename='sample-out.png')
# **Question 1)** Use the **display_cover** function to display the image with the name Python on the top and Data Science on the bottom. Save the image as **'sample-out.png'**.
# In[9]:
img=display_cover(top='Python',bottom='Data Science')
# In[10]:
img.save('sample-out.png')
# ## Part 2: Loading a random page from Wikipedia <a id='ref2'></a>
# In this project, we will use the request library, we used it in the function **display_cover**, but you should import the library in the next cell.
# In[12]:
import requests
# The following is the URL to the page
# In[13]:
wikipedia_link='https://en.wikipedia.org/wiki/Special:Random'
# **Question 2)** Get Wikipedia page is converted to a string
# Use the function **get** from the **requests** library to download the Wikipedia page using the **wikipedia_link** as an argument. Assign the object to the variable **raw_random_wikipedia_page**.
# In[14]:
#hint: requests.get()
raw_random_wikipedia_page=requests.get(wikipedia_link)
# Use the data attribute **text** to extract the XML as a text file a string and assign the result variable **page**:
# In[18]:
page=raw_random_wikipedia_page.text
print(page)
# # Part 3: Extracting the Title of the Article <a id='ref3'></a>
# **Question 3 (part 1)** Use the title of the Wikipedia article as the title of the band. The title of the article is surrounded by the XML node title as follows: **<title>title - Wikipedia</title>**
# . For example, if the title of the article was Python we would see the following: **<title>Python - Wikipedia</title>**. Consider the example where the title of the article is Teenage Mutant Ninja Turtles the result would be: **<title>Teenage Mutant Ninja Turtles - Wikipedia</title>**. The first step is to find the XML node **<title>** and **</title>** indicating the start and end of the title. The string function **find** may be helpful, you can also use libraries like **lxml**.
# In[27]:
page.title()
# **Question 3 (part 2)** Next get rid of the term ** - Wikipedia** from the title and assign the result to the **band_title** For example you can use the function or method **strip** or **replace**.
#
#
# **Question 4)** Repeat the second and third step, to extract the title of a second Wikipedia article but use the result to **album_title**
# In[ ]:
# If you did everything correct the following cell should display the album and band name:
#
# In[ ]:
print("Your band: ", band_title)
print("Your album: ", album_title)
# ## Part 4: Displaying the Album Cover <a id='ref4'></a>
# Use the function **display_cover** to superimpose the band and album title over a random image, assign the result to the variable **album_cover **.
# **Question 5)** use the function display_cover to display the album cover with two random article titles representing the name of the band and the title of the album.
# In[29]:
album_cover=display_cover(top='Python',bottom='Data Science')
# Use the method save to save the image as **sample-out.png**:
# In[30]:
img.save('sample-out.png')
# Use the function **IPythonImage** to display the image
#
# In[31]:
IPythonImage(filename='sample-out.png')
# ### About the Authors:
# [James Reeve]( https://www.linkedin.com/in/reevejamesd/) James Reeves is a Software Engineering intern at IBM.
#
#
# [Joseph Santarcangelo]( https://www.linkedin.com/in/joseph-s-50398b136/) has a PhD in Electrical Engineering, his research focused on using machine learning, signal processing, and computer vision to determine how videos impact human cognition. Joseph has been working for IBM since he completed his PhD.
#
# <hr>
# Copyright © 2018 [cognitiveclass.ai](cognitiveclass.ai?utm_source=bducopyrightlink&utm_medium=dswb&utm_campaign=bdu). This notebook and its source code are released under the terms of the [MIT License](https://bigdatauniversity.com/mit-license/).
| 36.201681 | 520 | 0.709262 |
# coding: utf-8
# <div class="alert alert-block alert-info" style="margin-top: 20px">
# <a href="http://cocl.us/NotebooksPython101"><img src = "https://ibm.box.com/shared/static/yfe6h4az47ktg2mm9h05wby2n7e8kei3.png" width = 750, align = "center"></a>
# <a href="https://www.bigdatauniversity.com"><img src = "https://ibm.box.com/shared/static/ugcqz6ohbvff804xp84y4kqnvvk3bq1g.png" width = 300, align = "center"></a>
#
# <h1 align=center><font size = 5> Make Fake Album Cover Game</font></h1>
# ## Table of Contents
# Our goal is to create randomly generated album covers with:
#
# <div class="alert alert-block alert-info" style="margin-top: 20px">
# <ol>
#
# <li><a href="#ref1">Learn how to use the function display_cover</a></li>
# <li><a href="#ref2">Loading a random page from Wikipedia</a></li>
# <li><a href="#ref3">Extracting the Title of the Article</a></li>
# <li><a href="#ref4"> Displaying the Album Cover</a></li>
#
#
# </ol>
# <br>
# <p></p>
# Estimated Time Needed: <strong>60 min</strong>
# </div>
#
# <hr>
#
# Inspiration: [Fake Album Covers](https://fakealbumcovers.com/)
# #### Import libraries
#
# In[8]:
from IPython.display import Image as IPythonImage
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
# #### Helper function to superimpose text on image
#
# In[4]:
def display_cover(top, bottom):
    """Build a fake album cover.

    Downloads a random 500x500 image from https://picsum.photos/, draws
    *top* (band name) and *bottom* (album title) over it in white text with
    a black outline, and returns the result.

    :param top: text drawn near the top of the image (band name)
    :param bottom: text drawn near the bottom of the image (album title)
    :return: a PIL Image with both texts superimposed
    """
    import requests

    name = 'album_art_raw.png'
    # https://picsum.photos/ is a free service that offers random images.
    album_art_raw = requests.get('https://picsum.photos/500/500/?random')
    # Fail loudly on an HTTP error instead of saving an error page as a PNG.
    album_art_raw.raise_for_status()
    with open(name, 'wb') as album_art_raw_file:
        album_art_raw_file.write(album_art_raw.content)

    img = Image.open(name)
    draw = ImageDraw.Draw(img)

    # Fall back to Pillow's built-in font when the DejaVu fonts are not
    # installed (the hard-coded path only exists on Debian-like systems).
    font_path = "/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf"
    try:
        band_name_font = ImageFont.truetype(font_path, 25)   # 25pt font
        album_name_font = ImageFont.truetype(font_path, 20)  # 20pt font
    except OSError:
        band_name_font = ImageFont.load_default()
        album_name_font = ImageFont.load_default()

    # Top-left coordinates (in pixels) where each text block starts.
    band_x, band_y = 50, 50
    album_x, album_y = 50, 400

    # White text with a black border stays visible on any background; the
    # border is drawn by repeating the text at the four diagonal offsets:
    # https://mail.python.org/pipermail/image-sig/2009-May/005681.html
    outline_color = "black"
    for dx, dy in ((-1, -1), (1, -1), (-1, 1), (1, 1)):
        draw.text((band_x + dx, band_y + dy), top, font=band_name_font, fill=outline_color)
        draw.text((album_x + dx, album_y + dy), bottom, font=album_name_font, fill=outline_color)

    draw.text((band_x, band_y), top, (255, 255, 255), font=band_name_font)
    draw.text((album_x, album_y), bottom, (255, 255, 255), font=album_name_font)
    return img
# ## 1) Learn how to use the function display_cover <a id='ref1'></a>
# The function **display_cover** selects a random image from https://picsum.photos/ and will help us superimpose two strings over the image. The parameter **top** is the string we would like to superimpose on the top of an image. The parameter bottom is the string we would like to display on the bottom of the image. The function does not return the image but returns an object of type Image from the Pillow library; the object represents a PIL image.
# In[ ]:
img=display_cover(top='top',bottom='bottom')
# To save the image, we use the method **save** . The argument is the file name of the image we would like to save in this case 'sample-out.png'
# In[ ]:
img.save('sample-out.png')
# Finally, we use **IPythonImage** to read the image file and display the results.
#
# In[11]:
IPythonImage(filename='sample-out.png')
# **Question 1)** Use the **display_cover** function to display the image with the name Python on the top and Data Science on the bottom. Save the image as **'sample-out.png'**.
# In[9]:
img=display_cover(top='Python',bottom='Data Science')
# In[10]:
img.save('sample-out.png')
# ## Part 2: Loading a random page from Wikipedia <a id='ref2'></a>
# In this project, we will use the request library, we used it in the function **display_cover**, but you should import the library in the next cell.
# In[12]:
import requests
# The following is the URL to the page
# In[13]:
wikipedia_link='https://en.wikipedia.org/wiki/Special:Random'
# **Question 2)** Get Wikipedia page is converted to a string
# Use the function **get** from the **requests** library to download the Wikipedia page using the **wikipedia_link** as an argument. Assign the object to the variable **raw_random_wikipedia_page**.
# In[14]:
#hint: requests.get()
raw_random_wikipedia_page=requests.get(wikipedia_link)
# Use the data attribute **text** to extract the XML as a text file a string and assign the result variable **page**:
# In[18]:
page=raw_random_wikipedia_page.text
print(page)
# # Part 3: Extracting the Title of the Article <a id='ref3'></a>
# **Question 3 (part 1)** Use the title of the Wikipedia article as the title of the band. The title of the article is surrounded by the XML node title as follows: **<title>title - Wikipedia</title>**
# . For example, if the title of the article was Python we would see the following: **<title>Python - Wikipedia</title>**. Consider the example where the title of the article is Teenage Mutant Ninja Turtles the result would be: **<title>Teenage Mutant Ninja Turtles - Wikipedia</title>**. The first step is to find the XML node **<title>** and **</title>** indicating the start and end of the title. The string function **find** may be helpful, you can also use libraries like **lxml**.
# In[27]:
page.title()
# **Question 3 (part 2)** Next get rid of the term ** - Wikipedia** from the title and assign the result to the **band_title** For example you can use the function or method **strip** or **replace**.
#
#
# **Question 4)** Repeat the second and third step, to extract the title of a second Wikipedia article but use the result to **album_title**
# In[ ]:
# If you did everything correct the following cell should display the album and band name:
#
# In[ ]:
print("Your band: ", band_title)
print("Your album: ", album_title)
# ## Part 4: Displaying the Album Cover <a id='ref4'></a>
# Use the function **display_cover** to superimpose the band and album title over a random image, assign the result to the variable **album_cover **.
# **Question 5)** use the function display_cover to display the album cover with two random article titles representing the name of the band and the title of the album.
# In[29]:
album_cover=display_cover(top='Python',bottom='Data Science')
# Use the method save to save the image as **sample-out.png**:
# In[30]:
img.save('sample-out.png')
# Use the function **IPythonImage** to display the image
#
# In[31]:
IPythonImage(filename='sample-out.png')
# ### About the Authors:
# [James Reeve]( https://www.linkedin.com/in/reevejamesd/) James Reeves is a Software Engineering intern at IBM.
#
#
# [Joseph Santarcangelo]( https://www.linkedin.com/in/joseph-s-50398b136/) has a PhD in Electrical Engineering, his research focused on using machine learning, signal processing, and computer vision to determine how videos impact human cognition. Joseph has been working for IBM since he completed his PhD.
#
# <hr>
# Copyright © 2018 [cognitiveclass.ai](cognitiveclass.ai?utm_source=bducopyrightlink&utm_medium=dswb&utm_campaign=bdu). This notebook and its source code are released under the terms of the [MIT License](https://bigdatauniversity.com/mit-license/).
| 0 | 0 | 0 |
5ac1a60094da7f7d24954f092d12c53c1fb20c08 | 1,915 | py | Python | src/create_feature_descriptor.py | gmaher/cmd_tools | 27f31eadca16e5fb7e4175ff3d6dd5881bfa3e27 | [
"MIT"
] | null | null | null | src/create_feature_descriptor.py | gmaher/cmd_tools | 27f31eadca16e5fb7e4175ff3d6dd5881bfa3e27 | [
"MIT"
] | null | null | null | src/create_feature_descriptor.py | gmaher/cmd_tools | 27f31eadca16e5fb7e4175ff3d6dd5881bfa3e27 | [
"MIT"
] | null | null | null | import os
import json
import argparse
import pandas as pd
from tqdm import tqdm
from dateutil.parser import parse
parser = argparse.ArgumentParser()
parser.add_argument('-input')
parser.add_argument('-output_dir')
parser.add_argument('-override_file',type=str,default="")
args = parser.parse_args()
input_ = os.path.abspath(args.input)
files = os.listdir(input_)
files = [input_ + '/' + f for f in files]
override_file = os.path.abspath(args.override_file)
with open(override_file,'r') as f:
override = json.load(f)
feature_names = override['FEATURE_NAMES']
features = {}
descriptors = {}
print("using features {}".format(feature_names))
for k in feature_names:
features[k] = []
print(k)
for f in tqdm(files):
with open(f,'r') as record:
r = json.load(record)
if k in r: features[k].append(r[k])
vals = list(set(features[k]))
is_float = any([type(v) == float for v in vals])
is_string = any([type(v) == str for v in vals if not v == ""])
is_int = any([type(v) == int for v in vals])
is_date = any([is_date_func(v) for v in vals])
if is_date:
descriptors[k] = {"type":"date"}
elif is_string:
descriptors[k] = {"type":"categorical", "values":vals}
elif is_float:
descriptors[k] = {"type":"number"}
elif is_int:
if len(vals) <= args.max_int_categories:
descriptors[k] = {"type":"categorical", "values":vals}
else:
descriptors[k] = {"type":"number"}
else:
print("could not recognize feature {}".format(k))
for k in override.keys():
if not k == "FEATURE_NAMES":
descriptors[k] = override[k]
with open(args.output_dir+'/feature_descriptor.json','w') as f:
json.dump(descriptors, f, indent=2, sort_keys=True)
| 25.197368 | 66 | 0.627676 | import os
import json
import argparse
import pandas as pd
from tqdm import tqdm
from dateutil.parser import parse
def is_date_func(string):
    """Return True if *string* can be parsed as a date by ``dateutil``.

    Bug fix: the original bare ``except:`` also swallowed SystemExit and
    KeyboardInterrupt; catch only the errors ``dateutil.parser.parse``
    raises for unparseable or non-string input (ParserError is a
    ValueError subclass; TypeError covers ints/floats fed in by callers).
    """
    try:
        parse(string)
        return True
    except (ValueError, OverflowError, TypeError):
        return False
# --- command-line interface ---------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument('-input')
parser.add_argument('-output_dir')
parser.add_argument('-override_file', type=str, default="")
# Bug fix: the type-inference loop below reads args.max_int_categories,
# but the argument was never declared, so reaching the integer branch
# raised AttributeError.  Declare it with a sensible default.
parser.add_argument('-max_int_categories', type=int, default=20)
args = parser.parse_args()

# --- gather the JSON record files from the input directory --------------
input_ = os.path.abspath(args.input)
files = os.listdir(input_)
files = [input_ + '/' + f for f in files]

# The override file lists the features to describe (FEATURE_NAMES) and may
# also pin explicit descriptors for individual features.
override_file = os.path.abspath(args.override_file)
with open(override_file, 'r') as f:
    override = json.load(f)

feature_names = override['FEATURE_NAMES']
features = {}     # feature name -> list of observed values
descriptors = {}  # feature name -> inferred descriptor dict
print("using features {}".format(feature_names))
for k in feature_names:
    features[k] = []
    print(k)
    # Collect every value of feature k across all records.
    for f in tqdm(files):
        with open(f, 'r') as record:
            r = json.load(record)
            if k in r:
                features[k].append(r[k])

    # Infer the feature type from the distinct observed values.
    # NOTE: `type(v) is T` (not isinstance) is deliberate so that bools
    # are not treated as ints.
    vals = list(set(features[k]))
    is_float = any(type(v) is float for v in vals)
    is_string = any(type(v) is str for v in vals if not v == "")
    is_int = any(type(v) is int for v in vals)
    is_date = any(is_date_func(v) for v in vals)
    if is_date:
        descriptors[k] = {"type": "date"}
    elif is_string:
        descriptors[k] = {"type": "categorical", "values": vals}
    elif is_float:
        descriptors[k] = {"type": "number"}
    elif is_int:
        # Small integer domains are treated as categories, large ones as numbers.
        if len(vals) <= args.max_int_categories:
            descriptors[k] = {"type": "categorical", "values": vals}
        else:
            descriptors[k] = {"type": "number"}
    else:
        print("could not recognize feature {}".format(k))

# Explicit descriptors from the override file win over inferred ones.
for k in override.keys():
    if not k == "FEATURE_NAMES":
        descriptors[k] = override[k]

with open(args.output_dir + '/feature_descriptor.json', 'w') as f:
    json.dump(descriptors, f, indent=2, sort_keys=True)
| 88 | 0 | 23 |
bfaf623c08a95eaf936f8bdf7d57dc7654bcf392 | 1,425 | py | Python | src/utils.py | uvipen/QuickDraw-AirGesture-tensorflow | 377f3344e37496306d12c753794b06ddca84c3f3 | [
"MIT"
] | 94 | 2021-07-12T13:02:40.000Z | 2022-02-15T10:48:57.000Z | src/utils.py | haoict/QuickDraw-AirGesture-tensorflow | 3e11cf12a08d3ebf012d20ff0ebb44afdfb17bad | [
"MIT"
] | null | null | null | src/utils.py | haoict/QuickDraw-AirGesture-tensorflow | 3e11cf12a08d3ebf012d20ff0ebb44afdfb17bad | [
"MIT"
] | 24 | 2021-07-12T13:02:03.000Z | 2021-12-06T09:42:45.000Z | """
@author: Viet Nguyen <nhviet1009@gmail.com>
"""
import cv2
import numpy as np
from collections import OrderedDict
# https://github.com/tensorflow/datasets/blob/master/tensorflow_datasets/image_classification/quickdraw_labels.txt
# Rule: key of category = index -1, with index from the link above
CLASS_IDS = OrderedDict()
CLASS_IDS[8] = "apple"
CLASS_IDS[35] = "book"
CLASS_IDS[38] = "bowtie"
CLASS_IDS[58] = "candle"
CLASS_IDS[74] = "cloud"
CLASS_IDS[87] = "cup"
CLASS_IDS[94] = "door"
CLASS_IDS[104] = "envelope"
CLASS_IDS[107] = "eyeglasses"
CLASS_IDS[136] = "hammer"
CLASS_IDS[139] = "hat"
CLASS_IDS[156] = "ice cream"
CLASS_IDS[167] = "leaf"
CLASS_IDS[252] = "scissors"
CLASS_IDS[283] = "star"
CLASS_IDS[301] = "t-shirt"
CLASS_IDS[209] = "pants"
CLASS_IDS[323] = "tree"
| 29.6875 | 114 | 0.698246 | """
@author: Viet Nguyen <nhviet1009@gmail.com>
"""
import cv2
import numpy as np
from collections import OrderedDict
# https://github.com/tensorflow/datasets/blob/master/tensorflow_datasets/image_classification/quickdraw_labels.txt
# Rule: key of category = index -1, with index from the link above
CLASS_IDS = OrderedDict()
CLASS_IDS[8] = "apple"
CLASS_IDS[35] = "book"
CLASS_IDS[38] = "bowtie"
CLASS_IDS[58] = "candle"
CLASS_IDS[74] = "cloud"
CLASS_IDS[87] = "cup"
CLASS_IDS[94] = "door"
CLASS_IDS[104] = "envelope"
CLASS_IDS[107] = "eyeglasses"
CLASS_IDS[136] = "hammer"
CLASS_IDS[139] = "hat"
CLASS_IDS[156] = "ice cream"
CLASS_IDS[167] = "leaf"
CLASS_IDS[252] = "scissors"
CLASS_IDS[283] = "star"
CLASS_IDS[301] = "t-shirt"
CLASS_IDS[209] = "pants"
CLASS_IDS[323] = "tree"
def get_images(path, classes):
    """Load one sprite per class name.

    Reads ``<path>/<class>.png`` for every entry of *classes* with the
    alpha channel preserved (IMREAD_UNCHANGED) and returns the list.
    """
    loaded = []
    for label in classes:
        loaded.append(cv2.imread("{}/{}.png".format(path, label), cv2.IMREAD_UNCHANGED))
    return loaded
def get_overlay(bg_image, fg_image, sizes=(40, 40)):
    """Alpha-blend the BGRA sprite *fg_image* (resized to *sizes*) onto
    *bg_image* and return the composite as a uint8 image."""
    fg_image = cv2.resize(fg_image, sizes)
    # Split the sprite into its colour planes and its alpha channel.
    alpha = fg_image[:, :, 3:]
    colour = fg_image[:, :, :3]
    inv_alpha = 255 - alpha
    # Normalise everything to [0, 1] before blending.
    bg_norm = bg_image / 255
    fg_norm = colour / 255
    alpha3 = cv2.cvtColor(alpha, cv2.COLOR_GRAY2BGR) / 255
    inv_alpha3 = cv2.cvtColor(inv_alpha, cv2.COLOR_GRAY2BGR) / 255
    # Background weighted by the inverse alpha, foreground by the alpha.
    blended = cv2.addWeighted(bg_norm * inv_alpha3, 255, fg_norm * alpha3, 255, 0.)
    return blended.astype(np.uint8)
| 596 | 0 | 46 |
a4c89ea15ba64b8b6f264c35fcaeb76de5ace39c | 6,496 | py | Python | reinvent-2019/connected-photo-booth/py_client/config.py | chriscoombs/aws-builders-fair-projects | eee405931030b833fa8c51e906c73d09ce051bcd | [
"Apache-2.0"
] | null | null | null | reinvent-2019/connected-photo-booth/py_client/config.py | chriscoombs/aws-builders-fair-projects | eee405931030b833fa8c51e906c73d09ce051bcd | [
"Apache-2.0"
] | null | null | null | reinvent-2019/connected-photo-booth/py_client/config.py | chriscoombs/aws-builders-fair-projects | eee405931030b833fa8c51e906c73d09ce051bcd | [
"Apache-2.0"
] | null | null | null | import boto3
import botocore
import os
import glob
import json
import requests
from datetime import datetime
from time import sleep
from time import gmtime, strftime
import sys, getopt
import argparse
import subprocess
from shutil import copyfile, rmtree
import logging
import configparser
__CONFIG_FILE_PATH__ = "cerebro.config"
__SSM_BASE_PATH__ = "/Cerebro"
| 26.406504 | 114 | 0.671952 | import boto3
import botocore
import os
import glob
import json
import requests
from datetime import datetime
from time import sleep
from time import gmtime, strftime
import sys, getopt
import argparse
import subprocess
from shutil import copyfile, rmtree
import logging
import configparser
# Path of the local INI file mapping logical names to SSM parameter paths.
__CONFIG_FILE_PATH__ = "cerebro.config"
# Root of the Cerebro namespace in the AWS SSM Parameter Store.
__SSM_BASE_PATH__ = "/Cerebro"


def _ssm_str(entry):
    """Build a read-only property resolving *entry* through SSM as a string."""
    return property(lambda self: self.getConfig(entry))


def _ssm_int(entry):
    """Build a read-only property resolving *entry* through SSM as an int."""
    return property(lambda self: int(self.getConfig(entry)))


class Configuration(object):
    """Cerebro configuration resolved through the AWS SSM Parameter Store.

    The INI file at *config_file* maps logical configuration names to SSM
    parameter paths of the form ``/Cerebro/<section>/<value>``; actual values
    are fetched from SSM on demand each time a property is read.

    Fix: the original class defined the ``__S3_BUCKET__`` property twice;
    the duplicate has been removed.  The ~40 hand-written properties are now
    generated by the ``_ssm_str`` / ``_ssm_int`` factories, preserving every
    public attribute name and its str/int conversion behavior.
    """

    def __init__(self, config_file=__CONFIG_FILE_PATH__):
        self.config_file = config_file
        self.config_parser = configparser.ConfigParser()
        self.config_parser.read(self.config_file)
        self.get_config_entries()
        self._ssm = boto3.client('ssm')

    def get_config_entries(self):
        """Build the logical-name -> SSM-path map from every INI section.

        Returns:
            bool: always True (kept for backward compatibility).
        """
        self.config_entries = {}
        for section in self.config_parser.sections():
            for key, value in self.config_parser.items(section):
                # configparser lower-cases option keys; public names are upper.
                param_name = key.upper()
                param_value = "%s/%s/%s" % (__SSM_BASE_PATH__, section, value)
                self.config_entries[param_name] = param_value
        return True

    def getConfig(self, configEntry):
        """Return the SSM value for *configEntry*.

        Returns None when the entry is unknown or the SSM response carries
        no Parameter Name/Value pair.
        """
        if configEntry not in self.config_entries:
            return None
        response = self._ssm.get_parameter(Name=self.config_entries[configEntry])
        parameter = response.get("Parameter", {})
        if "Name" in parameter and "Value" in parameter:
            return parameter["Value"]
        return None

    # --- SQS / API gateway --------------------------------------------------
    __QUEUE_URL__ = _ssm_str("__QUEUE_URL__")
    __SQS_QUEUE_NAME__ = _ssm_str("__SQS_QUEUE_NAME__")
    __SQS_BACKEND_QUEUE__ = _ssm_str("__SQS_BACKEND_QUEUE__")
    __APIGW_X_API_KEY__ = _ssm_str("__APIGW_X_API_KEY__")
    __APIGW_X_API_KEY_QR_CODE__ = _ssm_str("__APIGW_X_API_KEY_QR_CODE__")
    __APIGW_API__ = _ssm_str("__APIGW_API__")
    __APIGW_API_QR_CODE__ = _ssm_str("__APIGW_API_QR_CODE__")

    # --- storage / local directories ---------------------------------------
    __S3_BUCKET__ = _ssm_str("__S3_BUCKET__")
    __CEREBRO_TEMP_DIR__ = _ssm_str("__CEREBRO_TEMP_DIR__")
    __CEREBRO_MEDIA_DIR__ = _ssm_str("__CEREBRO_MEDIA_DIR__")
    __CEREBRO_LOGS_DIR__ = _ssm_str("__CEREBRO_LOGS_DIR__")
    __CEREBRO_PROFILES_DIR__ = _ssm_str("__CEREBRO_PROFILES_DIR__")
    __CEREBRO_SYSTEM_DIR__ = _ssm_str("__CEREBRO_SYSTEM_DIR__")
    __CEREBRO_AUDIO_DIR__ = _ssm_str("__CEREBRO_AUDIO_DIR__")

    # --- GPIO / timing (integer-valued) ------------------------------------
    __IMAGE_MAX_COUNT__ = _ssm_int("__IMAGE_MAX_COUNT__")
    __GREEN_LED__ = _ssm_int("__GREEN_LED__")
    __GREEN_BUTTON__ = _ssm_int("__GREEN_BUTTON__")
    __YELLOW_LED__ = _ssm_int("__YELLOW_LED__")
    __YELLOW_BUTTON__ = _ssm_int("__YELLOW_BUTTON__")
    __PUSHBUTTON_DELAY__ = _ssm_int("__PUSHBUTTON_DELAY__")
    __ACCEPT_INPUT__ = _ssm_int("__ACCEPT_INPUT__")
    __CHOOSE_AGAIN__ = _ssm_int("__CHOOSE_AGAIN__")
    __CADENCE__ = _ssm_int("__CADENCE__")

    # --- AWS IoT ------------------------------------------------------------
    __IOT_TOPIC__ = _ssm_str("__IOT_TOPIC__")
    __IOT_HOST__ = _ssm_str("__IOT_HOST__")
    __IOT_ROOT_CA_PATH__ = _ssm_str("__IOT_ROOT_CA_PATH__")
    __IOT_CERTIFICATE_PATH__ = _ssm_str("__IOT_CERTIFICATE_PATH__")
    __IOT_PRIVATE_KEY_PATH__ = _ssm_str("__IOT_PRIVATE_KEY_PATH__")
    __IOT_CLIENT_ID_REQUESTER__ = _ssm_str("__IOT_CLIENT_ID_REQUESTER__")
    __IOT_CLIENT_ID_PROCESSOR__ = _ssm_str("__IOT_CLIENT_ID_PROCESSOR__")

    # --- misc ----------------------------------------------------------------
    __DDB_TABLE__ = _ssm_str("__DDB_TABLE__")
    __PRINTER_TYPE__ = _ssm_str("__PRINTER_TYPE__")
    __FILTERED_IMAGE_NAME__ = _ssm_str("__FILTERED_IMAGE_NAME__")

    # --- face-filter assets --------------------------------------------------
    __PIG_NOSE_FILTER__ = _ssm_str("__PIG_NOSE_FILTER__")
    __FLOWER_CROWN_FILTER__ = _ssm_str("__FLOWER_CROWN_FILTER__")
    __EYE_MASK_FILTER__ = _ssm_str("__EYE_MASK_FILTER__")
    __DOG_NOSE_FILTER__ = _ssm_str("__DOG_NOSE_FILTER__")
    __DOG_LEFT_EAR_FILTER__ = _ssm_str("__DOG_LEFT_EAR_FILTER__")
    __DOG_RIGHT_EAR_FILTER__ = _ssm_str("__DOG_RIGHT_EAR_FILTER__")
    __DOG_TONGUE_FILTER__ = _ssm_str("__DOG_TONGUE_FILTER__")
56e7ca0fc7489f5c223fc37faf7817929b1b8643 | 2,240 | py | Python | convert_hdf52recordio.py | Helmholtz-AI-Energy/mlperf-deepcam | d4869bce18029cc9877d7ed04178d6e4ca73a411 | [
"MIT"
] | 3 | 2021-11-18T20:01:35.000Z | 2021-12-17T17:47:23.000Z | convert_hdf52recordio.py | Helmholtz-AI-Energy/mlperf-deepcam | d4869bce18029cc9877d7ed04178d6e4ca73a411 | [
"MIT"
] | 1 | 2022-03-16T07:29:30.000Z | 2022-03-31T10:19:07.000Z | convert_hdf52recordio.py | Helmholtz-AI-Energy/mlperf-deepcam | d4869bce18029cc9877d7ed04178d6e4ca73a411 | [
"MIT"
] | 1 | 2021-11-18T01:53:25.000Z | 2021-11-18T01:53:25.000Z | import os
import sys
import glob
import h5py as h5
import numpy as np
import math
import argparse as ap
import mxnet as mx
from mpi4py import MPI
if __name__ == "__main__":
AP = ap.ArgumentParser()
AP.add_argument("--input_directory", type=str, help="Directory with input files", required = True)
AP.add_argument("--output_directory", type=str, help="Directory for output files", required = True)
AP.add_argument("--num_files", type=int, default=None, help="Maximum number of files to convert")
pargs = AP.parse_args()
main(pargs)
| 29.473684 | 131 | 0.647768 | import os
import sys
import glob
import h5py as h5
import numpy as np
import math
import argparse as ap
import mxnet as mx
from mpi4py import MPI
def filter_func(item, lst):
    """Return True when *item*'s basename, with .h5 mapped to .npy, is absent from *lst*."""
    candidate = os.path.basename(item).replace(".h5", ".npy")
    return candidate not in lst
def read(ifname):
    """Load the data and label arrays from one HDF5 climate file.

    Returns:
        tuple: (data, label) numpy arrays read from "climate/data" and
        "climate/labels_0".
    """
    with h5.File(ifname, 'r') as handle:
        return handle["climate/data"][...], handle["climate/labels_0"][...]
def main(args):
    """Convert the HDF5 climate files into a pair of indexed RecordIO files.

    Parameters:
        args: argparse namespace with ``input_directory``,
            ``output_directory`` and ``num_files`` attributes.

    Bug fix: the body previously read the module-level ``pargs`` instead of
    the ``args`` parameter, so calling main() programmatically silently used
    the CLI arguments.
    """
    # Duplicate the world communicator; only used for the final barrier so
    # all ranks leave together.
    comm = MPI.COMM_WORLD.Dup()

    # gather input files
    inputfiles_all = glob.glob(os.path.join(args.input_directory, "*.h5"))

    # optionally restrict to the first num_files inputs (clamped to >= 0)
    if args.num_files is not None:
        num_files = max(min(len(inputfiles_all), args.num_files), 0)
        inputfiles_all = inputfiles_all[:num_files]

    # ensure the output directory exists
    output_dir = args.output_directory
    if not os.path.isdir(output_dir):
        os.makedirs(output_dir, exist_ok=True)

    # NOTE(review): every MPI rank opens and writes the same output files;
    # confirm this script is intended to run with a single rank.
    data_record = mx.recordio.MXIndexedRecordIO(os.path.join(output_dir, 'data.idx'), os.path.join(output_dir, 'data.rec'), 'w')
    label_record = mx.recordio.MXIndexedRecordIO(os.path.join(output_dir, 'label.idx'), os.path.join(output_dir, 'label.rec'), 'w')

    for idx, filename in enumerate(inputfiles_all):
        # read one HDF5 file and store data/label under the same record index
        data, label = read(filename)
        header = mx.recordio.IRHeader(0, 0., idx, 0)
        data_packed = mx.recordio.pack(header, data.tobytes())
        label_packed = mx.recordio.pack(header, label.tobytes())
        data_record.write_idx(idx, data_packed)
        label_record.write_idx(idx, label_packed)

    # wait for the other ranks
    comm.barrier()
if __name__ == "__main__":
    # CLI entry point: parse the conversion arguments and run main().
    AP = ap.ArgumentParser()
    AP.add_argument("--input_directory", type=str, help="Directory with input files", required = True)
    AP.add_argument("--output_directory", type=str, help="Directory for output files", required = True)
    AP.add_argument("--num_files", type=int, default=None, help="Maximum number of files to convert")
    pargs = AP.parse_args()
    main(pargs)
| 1,594 | 0 | 69 |
ea7ea6e4956da5520f3970ca5834c2b81388da01 | 754 | py | Python | setup.py | nkennek/pytorch-cnn-visualizations | 54699710b1beae1edae4bc12e9403080191c40ed | [
"MIT"
] | null | null | null | setup.py | nkennek/pytorch-cnn-visualizations | 54699710b1beae1edae4bc12e9403080191c40ed | [
"MIT"
] | null | null | null | setup.py | nkennek/pytorch-cnn-visualizations | 54699710b1beae1edae4bc12e9403080191c40ed | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
setup(
name='pytorch-cnn-visualization',
version='0.0',
description='pytorch implementation of CNN visualization techniques',
packages=find_packages(),
include_package_data=True,
install_requires=[
'numpy==1.14.5',
'opencv-python==3.4.1.15',
'torch==0.4.0',
'torchvision==0.2.1',
],
extras_require={
'dev': [
'matplotlib',
'ipdb',
'flake8',
'pylint',
'pep8',
'mypy',
'pytest',
'pytest-asyncio'
],
'test': [
'pytest',
'pytest-asyncio'
],
},
)
| 21.542857 | 73 | 0.485411 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
setup(
    # distribution metadata
    name='pytorch-cnn-visualization',
    version='0.0',
    description='pytorch implementation of CNN visualization techniques',
    packages=find_packages(),
    include_package_data=True,
    # runtime dependencies, pinned to exact versions
    install_requires=[
        'numpy==1.14.5',
        'opencv-python==3.4.1.15',
        'torch==0.4.0',
        'torchvision==0.2.1',
    ],
    # optional extras: install with `pip install .[dev]` or `.[test]`
    extras_require={
        'dev': [
            'matplotlib',
            'ipdb',
            'flake8',
            'pylint',
            'pep8',
            'mypy',
            'pytest',
            'pytest-asyncio'
        ],
        'test': [
            'pytest',
            'pytest-asyncio'
        ],
    },
)
| 0 | 0 | 0 |
eef9139890c2b1751504590b390a2fe9c136409e | 3,840 | py | Python | replication_handler/models/mysql_dumps.py | ywlianghang/mysql_streamer | 7fc85efaca3db6a387ea4b791632c2df2d04cb3e | [
"Apache-2.0"
] | 419 | 2016-11-17T18:41:47.000Z | 2022-03-14T02:50:02.000Z | replication_handler/models/mysql_dumps.py | ywlianghang/mysql_streamer | 7fc85efaca3db6a387ea4b791632c2df2d04cb3e | [
"Apache-2.0"
] | 19 | 2016-11-30T18:09:00.000Z | 2019-04-02T06:20:02.000Z | replication_handler/models/mysql_dumps.py | ywlianghang/mysql_streamer | 7fc85efaca3db6a387ea4b791632c2df2d04cb3e | [
"Apache-2.0"
] | 90 | 2016-11-23T06:26:20.000Z | 2022-01-22T09:24:42.000Z | # -*- coding: utf-8 -*-
# Copyright 2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from __future__ import unicode_literals
import copy
import logging
from sqlalchemy import Column
from sqlalchemy import exists
from sqlalchemy import String
from sqlalchemy import UnicodeText
from replication_handler.models.database import Base
logger = logging.getLogger('replication_handler.models.mysql_dumps')
| 33.982301 | 81 | 0.654167 | # -*- coding: utf-8 -*-
# Copyright 2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from __future__ import unicode_literals
import copy
import logging
from sqlalchemy import Column
from sqlalchemy import exists
from sqlalchemy import String
from sqlalchemy import UnicodeText
from replication_handler.models.database import Base
logger = logging.getLogger('replication_handler.models.mysql_dumps')
class DumpUnavailableError(Exception):
    """Raised when no MySQL dump row exists for the requested cluster."""

    def __init__(self, cluster_name):
        message = "MySQL Dump unavailable for cluster {c}".format(c=cluster_name)
        super(DumpUnavailableError, self).__init__(message)
class MySQLDumps(Base):
    """Declarative model storing one MySQL dump blob per cluster."""
    __tablename__ = 'mysql_dumps'
    # Full dump contents (text); one row per cluster.
    database_dump = Column(UnicodeText, nullable=False)
    # Cluster name doubles as the primary key, enforcing one dump per cluster.
    cluster_name = Column(String, primary_key=True)
    @classmethod
    def get_latest_mysql_dump(cls, session, cluster_name):
        """Return the stored dump text for *cluster_name*.

        Raises:
            DumpUnavailableError: if no row exists for the cluster.
        """
        logger.info("Retrieving the latest MySQL dump for cluster {c}".format(
            c=cluster_name
        ))
        with session.connect_begin(ro=True) as s:
            ret = s.query(
                MySQLDumps
            ).filter(
                MySQLDumps.cluster_name == cluster_name
            ).first()
            # Copy before the session scope closes so the value stays usable
            # after the row is detached.
            latest_dump = copy.copy(ret)
        logger.info("Fetched the latest MySQL dump")
        try:
            return latest_dump.database_dump
        except AttributeError:
            # latest_dump is None when no row matched.
            raise DumpUnavailableError(cluster_name=cluster_name)
    @classmethod
    def dump_exists(cls, session, cluster_name):
        """Return True if a dump row exists for *cluster_name*."""
        logger.info("Checking for MySQL dump for cluster {c}".format(
            c=cluster_name
        ))
        with session.connect_begin(ro=True) as s:
            mysql_dump_exists = s.query(
                exists().where(
                    MySQLDumps.cluster_name == cluster_name
                )
            ).scalar()
        logger.info("MySQL dump exists") if mysql_dump_exists else \
            logger.info("MySQL dump doesn't exist")
        return mysql_dump_exists
    @classmethod
    def update_mysql_dump(cls, session, database_dump, cluster_name):
        """Replace (delete + insert) the dump for *cluster_name*; return the new row."""
        logger.info("Replacing MySQL dump for cluster {c}".format(
            c=cluster_name
        ))
        with session.connect_begin(ro=False) as s:
            # Delete-then-insert inside one transaction acts as an upsert.
            s.query(MySQLDumps).filter(
                MySQLDumps.cluster_name == cluster_name
            ).delete()
            new_dump = MySQLDumps()
            new_dump.database_dump = database_dump
            new_dump.cluster_name = cluster_name
            s.add(new_dump)
        logger.info("Replaced the old MySQL dump with new one")
        return new_dump
    @classmethod
    def delete_mysql_dump(cls, session, cluster_name):
        """Delete the dump row for *cluster_name* in its own transaction."""
        logger.info("Deleting the existing database dump for cluster {c}".format(
            c=cluster_name
        ))
        with session.connect_begin(ro=False) as s:
            s.query(MySQLDumps).filter(
                MySQLDumps.cluster_name == cluster_name
            ).delete()
    @classmethod
    def delete_mysql_dump_with_active_session(cls, session, cluster_name):
        """Delete the dump row using the caller's already-open session/transaction."""
        logger.info("Deleting the existing database dump for cluster {c}".format(
            c=cluster_name
        ))
        session.query(MySQLDumps).filter(
            MySQLDumps.cluster_name == cluster_name
        ).delete()
38a4e46d7b35ffafb2edb463735f6e74a3e69b52 | 12,614 | py | Python | mordred/_base/calculator.py | zhengfj1994/mordred | 2848b088fd7b6735590242b5e22573babc724f10 | [
"BSD-3-Clause"
] | 1 | 2019-09-12T03:38:47.000Z | 2019-09-12T03:38:47.000Z | mordred/_base/calculator.py | zhengfj1994/mordred | 2848b088fd7b6735590242b5e22573babc724f10 | [
"BSD-3-Clause"
] | null | null | null | mordred/_base/calculator.py | zhengfj1994/mordred | 2848b088fd7b6735590242b5e22573babc724f10 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import print_function
import sys
import warnings
from types import ModuleType
from contextlib import contextmanager
from multiprocessing import cpu_count
from distutils.version import StrictVersion
from .result import Result
from .._util import Capture, DummyBar
from ..error import Error, Missing, MultipleFragments, DuplicatedDescriptorName
from .context import Context
from .._version import __version__
from .descriptor import Descriptor, MissingValueException, is_descriptor_class
try:
from tqdm import tqdm
from .._util import NotebookWrapper
except ImportError:
tqdm = NotebookWrapper = DummyBar
class Calculator(object):
r"""descriptor calculator.
Parameters:
descs: see Calculator.register() method
ignore_3D: see Calculator.register() method
"""
__slots__ = (
"_descriptors", "_name_dict", "_explicit_hydrogens", "_kekulizes", "_require_3D",
"_cache", "_debug", "_progress_bar",
)
@classmethod
def from_json(cls, obj):
"""Create Calculator from json descriptor objects.
Parameters:
obj(list or dict): descriptors to register
Returns:
Calculator: calculator
"""
calc = cls()
calc.register_json(obj)
return calc
def register_json(self, obj):
"""Register Descriptors from json descriptor objects.
Parameters:
obj(list or dict): descriptors to register
"""
if not isinstance(obj, list):
obj = [obj]
self.register(Descriptor.from_json(j) for j in obj)
def to_json(self):
"""Convert descriptors to json serializable data.
Returns:
list: descriptors
"""
return [d.to_json() for d in self.descriptors]
@property
def descriptors(self):
r"""All descriptors.
you can get/set/delete descriptor.
Returns:
tuple[Descriptor]: registered descriptors
"""
return tuple(self._descriptors)
@descriptors.setter
@descriptors.deleter
def register(self, desc, version=None, ignore_3D=False):
r"""Register descriptors.
Descriptor-like:
* Descriptor instance: self
* Descriptor class: use Descriptor.preset() method
* module: use Descriptor-likes in module
* Iterable: use Descriptor-likes in Iterable
Parameters:
desc(Descriptor-like): descriptors to register
version(str): version
ignore_3D(bool): ignore 3D descriptors
"""
if version is None:
version = __version__
version = StrictVersion(version)
return self._register(desc, version, ignore_3D)
def __call__(self, mol, id=-1):
r"""Calculate descriptors.
:type mol: rdkit.Chem.Mol
:param mol: molecular
:type id: int
:param id: conformer id
:rtype: Result[scalar or Error]
:returns: iterator of descriptor and value
"""
return self._wrap_result(
mol,
self._calculate(Context.from_calculator(self, mol, id)),
)
@contextmanager
def echo(self, s, file=sys.stdout, end="\n"):
"""Output message.
Parameters:
s(str): message to output
file(file-like): output to
end(str): end mark of message
Return:
None
"""
p = getattr(self, "_progress_bar", None)
if p is not None:
p.write(s, file=file, end="\n")
return
print(s, file=file, end="\n") # noqa: T003
def map(self, mols, nproc=None, nmols=None, quiet=False, ipynb=False, id=-1):
r"""Calculate descriptors over mols.
Parameters:
mols(Iterable[rdkit.Mol]): moleculars
nproc(int): number of process to use. default: multiprocessing.cpu_count()
nmols(int): number of all mols to use in progress-bar. default: mols.__len__()
quiet(bool): don't show progress bar. default: False
ipynb(bool): use ipython notebook progress bar. default: False
id(int): conformer id to use. default: -1.
Returns:
Iterator[Result[scalar]]
"""
if nproc is None:
nproc = cpu_count()
if hasattr(mols, "__len__"):
nmols = len(mols)
if nproc == 1:
return self._serial(mols, nmols=nmols, quiet=quiet, ipynb=ipynb, id=id)
else:
return self._parallel(mols, nproc, nmols=nmols, quiet=quiet, ipynb=ipynb, id=id)
def pandas(self, mols, nproc=None, nmols=None, quiet=False, ipynb=False, id=-1):
r"""Calculate descriptors over mols.
Returns:
pandas.DataFrame
"""
from .pandas_module import MordredDataFrame, Series
if isinstance(mols, Series):
index = mols.index
else:
index = None
return MordredDataFrame(
(list(r) for r in self.map(mols, nproc, nmols, quiet, ipynb, id)),
columns=[str(d) for d in self.descriptors],
index=index,
)
def get_descriptors_from_module(mdl, submodule=False):
r"""[DEPRECATED] Get descriptors from module.
Parameters:
mdl(module): module to search
Returns:
[Descriptor]
"""
warnings.warn("use get_descriptors_in_module", DeprecationWarning)
__all__ = getattr(mdl, "__all__", None)
if __all__ is None:
__all__ = dir(mdl)
all_functions = (getattr(mdl, name) for name in __all__ if name[:1] != "_")
if submodule:
descs = [
d
for fn in all_functions
if is_descriptor_class(fn) or isinstance(fn, ModuleType)
for d in (
[fn] if is_descriptor_class(fn)
else get_descriptors_from_module(fn, submodule=True)
)
]
else:
descs = [
fn
for fn in all_functions
if is_descriptor_class(fn)
]
return descs
def get_descriptors_in_module(mdl, submodule=True):
r"""Get descriptors in module.
Parameters:
mdl(module): module to search
submodule(bool): search recursively
Returns:
Iterator[Descriptor]
"""
__all__ = getattr(mdl, "__all__", None)
if __all__ is None:
__all__ = dir(mdl)
all_values = (getattr(mdl, name) for name in __all__ if name[:1] != "_")
if submodule:
for v in all_values:
if is_descriptor_class(v):
yield v
if isinstance(v, ModuleType):
for v in get_descriptors_in_module(v, submodule=True):
yield v
else:
for v in all_values:
if is_descriptor_class(v):
yield v
| 27.968958 | 99 | 0.570002 | from __future__ import print_function
import sys
import warnings
from types import ModuleType
from contextlib import contextmanager
from multiprocessing import cpu_count
from distutils.version import StrictVersion
from .result import Result
from .._util import Capture, DummyBar
from ..error import Error, Missing, MultipleFragments, DuplicatedDescriptorName
from .context import Context
from .._version import __version__
from .descriptor import Descriptor, MissingValueException, is_descriptor_class
try:
from tqdm import tqdm
from .._util import NotebookWrapper
except ImportError:
tqdm = NotebookWrapper = DummyBar
class Calculator(object):
    r"""descriptor calculator.
    Parameters:
        descs: see Calculator.register() method
        ignore_3D: see Calculator.register() method
    """
    # __slots__ suppresses the per-instance __dict__; _cache, _debug and
    # _progress_bar are transient attributes created during calculation.
    __slots__ = (
        "_descriptors", "_name_dict", "_explicit_hydrogens", "_kekulizes", "_require_3D",
        "_cache", "_debug", "_progress_bar",
    )
    def __setstate__(self, dict):
        # Unpickle support: restore the state emitted by __reduce_ex__ and
        # rebuild the derived name->descriptor map (it is not pickled).
        # NOTE: the parameter name shadows the builtin `dict`.
        ds = self._descriptors = dict.get("_descriptors", [])
        self._name_dict = {str(d): d for d in ds}
        self._explicit_hydrogens = dict.get("_explicit_hydrogens", {True, False})
        self._kekulizes = dict.get("_kekulizes", {True, False})
        self._require_3D = dict.get("_require_3D", False)
    @classmethod
    def from_json(cls, obj):
        """Create Calculator from json descriptor objects.
        Parameters:
            obj(list or dict): descriptors to register
        Returns:
            Calculator: calculator
        """
        calc = cls()
        calc.register_json(obj)
        return calc
    def register_json(self, obj):
        """Register Descriptors from json descriptor objects.
        Parameters:
            obj(list or dict): descriptors to register
        """
        if not isinstance(obj, list):
            obj = [obj]
        self.register(Descriptor.from_json(j) for j in obj)
    def to_json(self):
        """Convert descriptors to json serializable data.
        Returns:
            list: descriptors
        """
        return [d.to_json() for d in self.descriptors]
    def __reduce_ex__(self, version):
        # Pickle as (class, no args, state dict); __setstate__ rebuilds the
        # rest.  _cache/_debug/_progress_bar are deliberately not pickled.
        return self.__class__, (), {
            "_descriptors": self._descriptors,
            "_explicit_hydrogens": self._explicit_hydrogens,
            "_kekulizes": self._kekulizes,
            "_require_3D": self._require_3D,
        }
    def __getitem__(self, key):
        # Look up a registered descriptor by its string name.
        return self._name_dict[key]
    def __init__(self, descs=None, version=None, ignore_3D=False):
        # Start empty, then register the requested descriptors (if any).
        if descs is None:
            descs = []
        self._descriptors = []
        self._name_dict = {}
        self._explicit_hydrogens = set()
        self._kekulizes = set()
        self._require_3D = False
        self._debug = False
        self.register(descs, version=version, ignore_3D=ignore_3D)
    @property
    def descriptors(self):
        r"""All descriptors.
        you can get/set/delete descriptor.
        Returns:
            tuple[Descriptor]: registered descriptors
        """
        return tuple(self._descriptors)
    @descriptors.setter
    def descriptors(self, descs):
        # Replace the registered set wholesale: clear, then re-register.
        del self.descriptors
        self.register(descs)
    @descriptors.deleter
    def descriptors(self):
        # Reset all registration state.
        self._descriptors = []
        self._name_dict = {}
        self._explicit_hydrogens.clear()
        self._kekulizes.clear()
        self._require_3D = False
    def __len__(self):
        return len(self._descriptors)
    def _register_one(self, desc, check_only=False, ignore_3D=False):
        # Register a single Descriptor *instance*, recording which molecule
        # preprocessing variants (H handling, kekulization, 3D) it needs.
        # With check_only=True only the preprocessing flags are recorded
        # (used for dependencies, which are computed but not exposed).
        if not isinstance(desc, Descriptor):
            raise ValueError("{!r} is not descriptor".format(desc))
        if ignore_3D and desc.require_3D:
            return
        self._explicit_hydrogens.add(bool(desc.explicit_hydrogens))
        self._kekulizes.add(bool(desc.kekulize))
        self._require_3D |= desc.require_3D
        for dep in (desc.dependencies() or {}).values():
            if isinstance(dep, Descriptor):
                self._register_one(dep, check_only=True)
        if not check_only:
            sdesc = str(desc)
            old = self._name_dict.get(sdesc)
            # Descriptor names must be unique within a calculator.
            if old is not None:
                raise DuplicatedDescriptorName(desc, old)
            self._name_dict[sdesc] = desc
            self._descriptors.append(desc)
    def register(self, desc, version=None, ignore_3D=False):
        r"""Register descriptors.
        Descriptor-like:
            * Descriptor instance: self
            * Descriptor class: use Descriptor.preset() method
            * module: use Descriptor-likes in module
            * Iterable: use Descriptor-likes in Iterable
        Parameters:
            desc(Descriptor-like): descriptors to register
            version(str): version
            ignore_3D(bool): ignore 3D descriptors
        """
        if version is None:
            version = __version__
        version = StrictVersion(version)
        return self._register(desc, version, ignore_3D)
    def _register(self, desc, version, ignore_3D):
        # Recursive dispatch on the Descriptor-like forms documented in
        # register(): class -> preset(), module -> scan, instance -> direct,
        # iterable -> recurse per element.
        if not hasattr(desc, "__iter__"):
            if is_descriptor_class(desc):
                # Skip descriptor classes newer than the requested version.
                if desc.since > version:
                    return
                for d in desc.preset(version=version):
                    self._register_one(d, ignore_3D=ignore_3D)
            elif isinstance(desc, ModuleType):
                self._register(
                    get_descriptors_in_module(desc),
                    version=version,
                    ignore_3D=ignore_3D,
                )
            else:
                self._register_one(desc, ignore_3D=ignore_3D)
        else:
            for d in desc:
                self._register(d, version=version, ignore_3D=ignore_3D)
    def _calculate_one(self, cxt, desc, reset):
        # Calculate one descriptor (memoized in self._cache for the current
        # molecule).  Returns (ok, value-or-error).  *reset* is True only for
        # top-level descriptors so the context is reset once per descriptor,
        # not per dependency.
        if desc in self._cache:
            return self._cache[desc]
        if reset:
            cxt.reset()
        desc._context = cxt
        cxt.add_stack(desc)
        if desc.require_connected and desc._context.n_frags != 1:
            return False, Missing(MultipleFragments(), desc._context.get_stack())
        # Resolve dependencies first; any failure short-circuits with the
        # dependency's error.
        args = {}
        for name, dep in (desc.dependencies() or {}).items():
            if dep is None:
                args[name] = None
            else:
                ok, r = self._calculate_one(cxt, dep, False)
                if ok:
                    args[name] = r
                else:
                    return False, r
        ok = False
        try:
            r = desc.calculate(**args)
            if self._debug:
                self._check_rtype(desc, r)
            ok = True
        except MissingValueException as e:
            r = Missing(e.error, desc._context.get_stack())
        except Exception as e:
            # Any other exception becomes an Error value instead of
            # aborting the whole calculation.
            r = Error(e, desc._context.get_stack())
        self._cache[desc] = ok, r
        return ok, r
    def _check_rtype(self, desc, result):
        # Debug-only check that a result matches the descriptor's declared
        # return type (Error results are exempt).
        if desc.rtype is None:
            return
        if isinstance(result, Error):
            return
        if not isinstance(result, desc.rtype):
            raise TypeError("{} not match {}".format(result, desc.rtype))
    def _calculate(self, cxt):
        # Yield one result per registered descriptor; the per-molecule cache
        # lets descriptors share dependency results.
        self._cache = {}
        for desc in self.descriptors:
            _, r = self._calculate_one(cxt, desc, True)
            yield r
    def __call__(self, mol, id=-1):
        r"""Calculate descriptors.
        :type mol: rdkit.Chem.Mol
        :param mol: molecular
        :type id: int
        :param id: conformer id
        :rtype: Result[scalar or Error]
        :returns: iterator of descriptor and value
        """
        return self._wrap_result(
            mol,
            self._calculate(Context.from_calculator(self, mol, id)),
        )
    def _wrap_result(self, mol, r):
        # Package raw results with the molecule and descriptor list.
        return Result(mol, r, self._descriptors)
    def _serial(self, mols, nmols, quiet, ipynb, id):
        # Single-process driver: calculate per molecule, routing any captured
        # stdout/stderr through the progress bar so output stays readable.
        with self._progress(quiet, nmols, ipynb) as bar:
            for m in mols:
                with Capture() as capture:
                    r = self._wrap_result(m, self._calculate(Context.from_calculator(self, m, id)))
                for e in capture.result:
                    e = e.rstrip()
                    if not e:
                        continue
                    bar.write(e, file=capture.orig)
                yield r
                bar.update()
    @contextmanager
    def _progress(self, quiet, total, ipynb):
        # Select the progress-bar implementation (silent, notebook or tqdm)
        # and expose it as self._progress_bar for the duration of the run.
        args = {
            "dynamic_ncols": True,
            "leave": True,
            "total": total,
        }
        if quiet:
            Bar = DummyBar
        elif ipynb:
            Bar = NotebookWrapper
        else:
            Bar = tqdm
        try:
            with Bar(**args) as self._progress_bar:
                yield self._progress_bar
        finally:
            # __slots__ attribute may already be gone if an error occurred.
            if hasattr(self, "_progress_bar"):
                del self._progress_bar
    def echo(self, s, file=sys.stdout, end="\n"):
        """Output message.
        Parameters:
            s(str): message to output
            file(file-like): output to
            end(str): end mark of message
        Return:
            None
        """
        # Prefer the active progress bar's write so messages don't corrupt
        # the bar; fall back to plain print.
        p = getattr(self, "_progress_bar", None)
        if p is not None:
            p.write(s, file=file, end="\n")
            return
        print(s, file=file, end="\n")  # noqa: T003
    def map(self, mols, nproc=None, nmols=None, quiet=False, ipynb=False, id=-1):
        r"""Calculate descriptors over mols.
        Parameters:
            mols(Iterable[rdkit.Mol]): moleculars
            nproc(int): number of process to use. default: multiprocessing.cpu_count()
            nmols(int): number of all mols to use in progress-bar. default: mols.__len__()
            quiet(bool): don't show progress bar. default: False
            ipynb(bool): use ipython notebook progress bar. default: False
            id(int): conformer id to use. default: -1.
        Returns:
            Iterator[Result[scalar]]
        """
        if nproc is None:
            nproc = cpu_count()
        if hasattr(mols, "__len__"):
            nmols = len(mols)
        if nproc == 1:
            return self._serial(mols, nmols=nmols, quiet=quiet, ipynb=ipynb, id=id)
        else:
            # NOTE(review): _parallel is not defined in this chunk; presumably
            # attached from a sibling module -- confirm.
            return self._parallel(mols, nproc, nmols=nmols, quiet=quiet, ipynb=ipynb, id=id)
    def pandas(self, mols, nproc=None, nmols=None, quiet=False, ipynb=False, id=-1):
        r"""Calculate descriptors over mols.
        Returns:
            pandas.DataFrame
        """
        # Imported lazily so pandas stays an optional dependency.
        from .pandas_module import MordredDataFrame, Series
        if isinstance(mols, Series):
            # Preserve the caller's index when given a pandas Series.
            index = mols.index
        else:
            index = None
        return MordredDataFrame(
            (list(r) for r in self.map(mols, nproc, nmols, quiet, ipynb, id)),
            columns=[str(d) for d in self.descriptors],
            index=index,
        )
def get_descriptors_from_module(mdl, submodule=False):
    r"""[DEPRECATED] Get descriptors from module.
    Parameters:
        mdl(module): module to search
    Returns:
        [Descriptor]
    """
    warnings.warn("use get_descriptors_in_module", DeprecationWarning)
    names = getattr(mdl, "__all__", None)
    if names is None:
        names = dir(mdl)
    public_values = (getattr(mdl, name) for name in names if name[:1] != "_")
    descs = []
    if submodule:
        # Collect descriptor classes, recursing into nested modules.
        for value in public_values:
            if is_descriptor_class(value):
                descs.append(value)
            elif isinstance(value, ModuleType):
                descs.extend(get_descriptors_from_module(value, submodule=True))
    else:
        for value in public_values:
            if is_descriptor_class(value):
                descs.append(value)
    return descs
def get_descriptors_in_module(mdl, submodule=True):
    r"""Get descriptors in module.
    Parameters:
        mdl(module): module to search
        submodule(bool): search recursively
    Returns:
        Iterator[Descriptor]
    """
    names = getattr(mdl, "__all__", None)
    if names is None:
        names = dir(mdl)
    for name in names:
        # Skip private names before touching the attribute.
        if name[:1] == "_":
            continue
        value = getattr(mdl, name)
        if is_descriptor_class(value):
            yield value
        elif submodule and isinstance(value, ModuleType):
            # Recurse into nested modules.
            for nested in get_descriptors_in_module(value, submodule=True):
                yield nested
| 5,302 | 0 | 402 |
cea0480cc90d81ea261b1a8a170bb14ba568725e | 584 | py | Python | tests/type/any_type/test_equality.py | llambeau/finitio.py | 27c2799709993c6edb9d9038290792ed90a97346 | [
"0BSD"
] | 1 | 2016-02-06T17:16:22.000Z | 2016-02-06T17:16:22.000Z | tests/type/any_type/test_equality.py | llambeau/finitio.py | 27c2799709993c6edb9d9038290792ed90a97346 | [
"0BSD"
] | null | null | null | tests/type/any_type/test_equality.py | llambeau/finitio.py | 27c2799709993c6edb9d9038290792ed90a97346 | [
"0BSD"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_equality
----------------------------------
Tests for the `AnyType` __eq__ method
"""
import unittest
from finitio.types import AnyType
if __name__ == '__main__':
import sys
sys.exit(unittest.main())
| 19.466667 | 73 | 0.672945 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_equality
----------------------------------
Tests for the `AnyType` __eq__ method
"""
import unittest
from finitio.types import AnyType
class TestAnyTypeEq(unittest.TestCase):
_type = AnyType()
_type2 = AnyType()
def test_it_should_apply_structural_equality(self):
self.assertEquals(self._type, self._type2)
def test_it_should_be_a_total_function_with_null_for_non_types(self):
self.assertNotEquals(self._type, 12)
if __name__ == '__main__':
import sys
sys.exit(unittest.main())
| 174 | 118 | 23 |
7418a9ede9105dcc2f6808eb0451d86e1f5d6771 | 2,294 | py | Python | PARAMETERS.py | sk-stm/banana_project | 56423e0b516297652eb402a4a70559b2afd8c8a1 | [
"MIT"
] | null | null | null | PARAMETERS.py | sk-stm/banana_project | 56423e0b516297652eb402a4a70559b2afd8c8a1 | [
"MIT"
] | null | null | null | PARAMETERS.py | sk-stm/banana_project | 56423e0b516297652eb402a4a70559b2afd8c8a1 | [
"MIT"
] | null | null | null | # # DQN agent
# # agent hyper parameters
# N_EPISODES = 2000 # how many episodes to train
# MAX_T = 10000 # maximum steps per episode
# EPS_START = 1.0 # start values of epsilon (for epsilon greedy exploration)
# EPS_END = 0.01 # minimum value of epsilon
# EPS_DECAY = 0.995 # decay rate of epsilon new_eps = old_eps * eps_decay for each step
# GAMMA = 0.99 # discount factor
#
# # neural network hyper parameters
# TAU = 1e-3 # for soft update of target parameters
# LR = 5e-4 # learning rate
# UPDATE_EVERY = 4 # how often to update the network
# BATCH_SIZE = 64 # minibatch size
#
# # replay memory hyper parameters
# BUFFER_SIZE = int(1e4) # replay buffer size
#
# # environment hyper parameters
# STATE_SIZE = 37
# ACTION_SIZE = 4
# # DDQN agent (works after 609 episodes)
# # agent hyper parameters
# N_EPISODES = 2000 # how many episodes to train
# MAX_T = 10000 # maximum steps per episode
# EPS_START = 1.0 # start values of epsilon (for epsilon greedy exploration)
# EPS_END = 0.01 # minimum value of epsilon
# EPS_DECAY = 0.995 # decay rate of epsilon new_eps = old_eps * eps_decay for each step
# GAMMA = 0.99 # discount factor
#
# # neural network hyper parameters
# TAU = 1e-1 # for soft update of target parameters
# LR = 5e-4 # learning rate
# UPDATE_EVERY = 8 # how often to update the network
# BATCH_SIZE = 64 # minibatch size
#
# # replay memory hyper parameters
# BUFFER_SIZE = int(1e4) # replay buffer size
#
# # environment hyper parameters
# STATE_SIZE = 37
# ACTION_SIZE = 4
# DDQN agent with prioritized experience replay
# agent hyper parameters
N_EPISODES = 2000 # how many episodes to train
MAX_T = 10000 # maximum steps per episode
EPS_START = 1.0 # start values of epsilon (for epsilon greedy exploration)
EPS_END = 0.01 # minimum value of epsilon
EPS_DECAY = 0.995 # decay rate of epsilon new_eps = old_eps * eps_decay for each step
GAMMA = 0.99 # discount factor
# neural network hyper parameters
TAU = 1e-1 # for soft update of target parameters
LR = 5e-4 # learning rate
UPDATE_EVERY = 8 # how often to update the network
BATCH_SIZE = 64 # minibatch size
# replay memory hyper parameters
BUFFER_SIZE = int(1e4) # replay buffer size
PROBABILITY_EXPONENT = 0.8  # prioritized-replay exponent (alpha) applied to priorities — presumably; TODO confirm against the replay buffer implementation
# environment hyper parameters
STATE_SIZE = 37  # size of the observation vector fed to the network
ACTION_SIZE = 4  # number of discrete actions the agent can take
# # agent hyper parameters
# N_EPISODES = 2000 # how many episodes to train
# MAX_T = 10000 # maximum steps per episode
# EPS_START = 1.0 # start values of epsilon (for epsilon greedy exploration)
# EPS_END = 0.01 # minimum value of epsilon
# EPS_DECAY = 0.995 # decay rate of epsilon new_eps = old_eps * eps_decay for each step
# GAMMA = 0.99 # discount factor
#
# # neural network hyper parameters
# TAU = 1e-3 # for soft update of target parameters
# LR = 5e-4 # learning rate
# UPDATE_EVERY = 4 # how often to update the network
# BATCH_SIZE = 64 # minibatch size
#
# # replay memory hyper parameters
# BUFFER_SIZE = int(1e4) # replay buffer size
#
# # environment hyper parameters
# STATE_SIZE = 37
# ACTION_SIZE = 4
# # DDQN agent (works after 609 episodes)
# # agent hyper parameters
# N_EPISODES = 2000 # how many episodes to train
# MAX_T = 10000 # maximum steps per episode
# EPS_START = 1.0 # start values of epsilon (for epsilon greedy exploration)
# EPS_END = 0.01 # minimum value of epsilon
# EPS_DECAY = 0.995 # decay rate of epsilon new_eps = old_eps * eps_decay for each step
# GAMMA = 0.99 # discount factor
#
# # neural network hyper parameters
# TAU = 1e-1 # for soft update of target parameters
# LR = 5e-4 # learning rate
# UPDATE_EVERY = 8 # how often to update the network
# BATCH_SIZE = 64 # minibatch size
#
# # replay memory hyper parameters
# BUFFER_SIZE = int(1e4) # replay buffer size
#
# # environment hyper parameters
# STATE_SIZE = 37
# ACTION_SIZE = 4
# DDQN agent with prioritized experience replay
# agent hyper parameters
N_EPISODES = 2000 # how many episodes to train
MAX_T = 10000 # maximum steps per episode
EPS_START = 1.0 # start values of epsilon (for epsilon greedy exploration)
EPS_END = 0.01 # minimum value of epsilon
EPS_DECAY = 0.995 # decay rate of epsilon new_eps = old_eps * eps_decay for each step
GAMMA = 0.99 # discount factor
# neural network hyper parameters
TAU = 1e-1 # for soft update of target parameters
LR = 5e-4 # learning rate
UPDATE_EVERY = 8 # how often to update the network
BATCH_SIZE = 64 # minibatch size
# replay memory hyper parameters
BUFFER_SIZE = int(1e4) # replay buffer size
PROBABILITY_EXPONENT = 0.8
# environment hyper parameters
STATE_SIZE = 37
ACTION_SIZE = 4 | 0 | 0 | 0 |
c08004c116c9dc2c7e5db2068d81bd9605d565f8 | 2,495 | py | Python | dataset_scripts/xml_detects_creator.py | shpotes/self-driving-car | 7329e6213c483a7695ab4e97cf16c93ce6d0b25f | [
"MIT"
] | 1 | 2019-06-02T22:27:31.000Z | 2019-06-02T22:27:31.000Z | dataset_scripts/xml_detects_creator.py | shpotes/self-driving-car | 7329e6213c483a7695ab4e97cf16c93ce6d0b25f | [
"MIT"
] | null | null | null | dataset_scripts/xml_detects_creator.py | shpotes/self-driving-car | 7329e6213c483a7695ab4e97cf16c93ce6d0b25f | [
"MIT"
] | null | null | null | import imutils
# import dlib
import cv2
import datetime
import glob
import sys
if __name__ == '__main__':
main() | 28.352273 | 105 | 0.644088 | import imutils
# import dlib
import cv2
import datetime
import glob
import sys
def main():
    """Interactively label images and write a dlib-style training XML.

    Usage: python xml_detects_creator.py <images_folder>

    For every loadable image in the folder the user draws zero or more
    bounding boxes (cv2.selectROIs).  Images that received at least one
    box are re-saved under a timestamped name, and recorded together with
    their boxes in <images_folder>/training.xml.
    """
    images_folder = sys.argv[1]
    files = glob.glob(images_folder + "*")
    print('%d images for detection' % (len(files)))
    n = len(files)
    # Context manager guarantees training.xml is flushed and closed even if
    # the labelling session is aborted (the original left the handle open).
    with open(images_folder + "training.xml", "w") as file:
        file.write("<?xml version='1.0' encoding='ISO-8859-1'?>\n")
        file.write("<?xml-stylesheet type='text/xsl' href='image_metadata_stylesheet.xsl'?>\n")
        file.write("<dataset>\n")
        file.write("<name>Training examples</name>\n")
        file.write("<images>\n")
        for i, f in enumerate(files):
            # Skip globbed entries that are not loadable images.
            image = cv2.imread(f)
            if image is None:
                continue
            image = imutils.resize(image, width=700)
            # Let the user drag any number of rectangles from a corner.
            fromCenter = False
            rects = cv2.selectROIs(str(i + 1) + ' of ' + str(n), image, fromCenter)
            cv2.destroyAllWindows()
            if len(rects) > 0:
                # Re-save the resized image under a unique timestamped name so
                # the XML refers to exactly the pixels that were annotated.
                filename = str(datetime.datetime.now().strftime("%Y-%m-%d_%H:%M:%S") + str(i)) + ".jpg"
                cv2.imwrite(images_folder + str(filename), image)
                file.write("  <image file='" + str(filename) + "'>\n")
                # One <box> element per rectangle the user drew.
                for rect in rects:
                    x, y, w, h = rect
                    file.write("    <box top='" + str(y) + "' left='" + str(x) + "' width='" + str(w) + "' height='" + str(h) + "'>\n")
                    file.write("    </box>\n")
                file.write("  </image>\n")
        file.write("</images>\n")
        file.write("</dataset>\n")
if __name__ == '__main__':
main() | 2,356 | 0 | 23 |
8a95cbc9a629fdf7e574d8397ff368a4a0ca806b | 2,362 | py | Python | graph.py | lokkelvin2/dc_tts | cd6bb96904f25a3db11fc4ba30d42a49a5b2b98c | [
"Apache-2.0"
] | 25 | 2020-07-04T11:30:09.000Z | 2022-01-28T18:11:16.000Z | graph.py | lokkelvin2/dc_tts | cd6bb96904f25a3db11fc4ba30d42a49a5b2b98c | [
"Apache-2.0"
] | 5 | 2020-06-19T02:29:23.000Z | 2021-06-20T09:25:11.000Z | graph.py | lokkelvin2/dc_tts | cd6bb96904f25a3db11fc4ba30d42a49a5b2b98c | [
"Apache-2.0"
] | 4 | 2021-05-15T19:25:32.000Z | 2022-02-17T00:29:32.000Z |
from data_load import load_vocab
from hyperparams import Hyperparams as hp
from networks import TextEnc, AudioEnc, AudioDec, Attention, SSRN
import tensorflow as tf
| 39.366667 | 121 | 0.54276 |
from data_load import load_vocab
from hyperparams import Hyperparams as hp
from networks import TextEnc, AudioEnc, AudioDec, Attention, SSRN
import tensorflow as tf
class Graph:
    """TensorFlow-1 graph for DC-TTS: Text2Mel (TextEnc/AudioEnc/Attention/
    AudioDec) followed by SSRN, plus a global-step variable."""
    def __init__(self, num=1, mode="train"):
        '''
        Args:
          num: Either 1 or 2. 1 for Text2Mel 2 for SSRN.
               NOTE(review): `num` is never read in this constructor — both
               sub-graphs are always built; confirm whether it is still needed.
          mode: Either "train" or "synthesize".
        '''
        # Load vocabulary
        self.char2idx, self.idx2char = load_vocab()
        # Set flag
        training = True if mode=="train" else False
        # Graph
        # Data Feeding
        ## L: Text. (B, N), int32
        ## mels: Reduced melspectrogram. (B, T/r, n_mels) float32
        ## mags: Magnitude. (B, T, n_fft//2+1) float32
        self.L = tf.placeholder(tf.int32, shape=(None, None))
        self.mels = tf.placeholder(tf.float32, shape=(None, None, hp.n_mels))
        self.prev_max_attentions = tf.placeholder(tf.int32, shape=(None,))
        with tf.variable_scope("Text2Mel"):
            # Get S or decoder inputs. (B, T//r, n_mels)
            # Decoder input is the mel sequence shifted right by one frame.
            self.S = tf.concat((tf.zeros_like(self.mels[:, :1, :]), self.mels[:, :-1, :]), 1)
            # Networks
            with tf.variable_scope("TextEnc"):
                self.K, self.V = TextEnc(self.L, training=training)  # (N, Tx, e)
            with tf.variable_scope("AudioEnc"):
                self.Q = AudioEnc(self.S, training=training)
            with tf.variable_scope("Attention"):
                # R: (B, T/r, 2d)
                # alignments: (B, N, T/r)
                # max_attentions: (B,)
                # NOTE(review): "mononotic_attention" (sic) matches the callee's
                # parameter spelling in networks.Attention — do not "fix" here only.
                self.R, self.alignments, self.max_attentions = Attention(self.Q, self.K, self.V,
                                                                         mononotic_attention=(not training),
                                                                         prev_max_attentions=self.prev_max_attentions)
            with tf.variable_scope("AudioDec"):
                self.Y_logits, self.Y = AudioDec(self.R, training=training) # (B, T/r, n_mels)
        # During inference, the predicted melspectrogram values are fed.
        with tf.variable_scope("SSRN"):
            self.Z_logits, self.Z = SSRN(self.Y, training=training)
        with tf.variable_scope("gs"):
            self.global_step = tf.Variable(0, name='global_step', trainable=False)
| 0 | 2,163 | 23 |
4014f038ccad19cb9b43c7a5f154e829057d1c39 | 443 | py | Python | app.py | 0xAurelius/playgrounds | 510bea031df6079e060b1bf3ba7399d45d00e050 | [
"MIT"
] | null | null | null | app.py | 0xAurelius/playgrounds | 510bea031df6079e060b1bf3ba7399d45d00e050 | [
"MIT"
] | 4 | 2021-11-17T20:18:55.000Z | 2022-01-12T18:06:58.000Z | app.py | 0xAurelius/playgrounds | 510bea031df6079e060b1bf3ba7399d45d00e050 | [
"MIT"
] | null | null | null | import dash
import dash_bootstrap_components as dbc
from utils import load_config
config = load_config()
protocol = config['protocol']
app = dash.Dash(
__name__,
external_stylesheets=[dbc.themes.DARKLY],
suppress_callback_exceptions=True,
title=f"{protocol} Playgrounds",
meta_tags=[{
'name': 'viewport',
'content': 'width=device-width, initial-scale=1.0, maximum-scale=1.2, minimum-scale=0.5,'
}]
)
# Builds the shared Dash `app` object; page modules import it to register
# callbacks and layouts.
import dash
import dash_bootstrap_components as dbc
from utils import load_config
# 'protocol' comes from the project config and is used for page branding —
# presumably the tracked protocol's display name; confirm in utils.load_config.
config = load_config()
protocol = config['protocol']
app = dash.Dash(
    __name__,
    external_stylesheets=[dbc.themes.DARKLY],
    # Callbacks may reference components created dynamically by other pages,
    # so skip the "unknown component id" validation.
    suppress_callback_exceptions=True,
    title=f"{protocol} Playgrounds",
    meta_tags=[{
        'name': 'viewport',
        'content': 'width=device-width, initial-scale=1.0, maximum-scale=1.2, minimum-scale=0.5,'
    }]
)
| 0 | 0 | 0 |
ccef29cabcdead762aa3b1c5fa4b620d44ce3602 | 337 | py | Python | Codewars/8kyu/geometry-basics-circle-area-in-2d/Python/test.py | RevansChen/online-judge | ad1b07fee7bd3c49418becccda904e17505f3018 | [
"MIT"
] | 7 | 2017-09-20T16:40:39.000Z | 2021-08-31T18:15:08.000Z | Codewars/8kyu/geometry-basics-circle-area-in-2d/Python/test.py | RevansChen/online-judge | ad1b07fee7bd3c49418becccda904e17505f3018 | [
"MIT"
] | null | null | null | Codewars/8kyu/geometry-basics-circle-area-in-2d/Python/test.py | RevansChen/online-judge | ad1b07fee7bd3c49418becccda904e17505f3018 | [
"MIT"
] | null | null | null | # Python - 3.6.0
test.assert_equals(round(circle_area(Circle(Point(10, 10), 30)), 6), 2827.433388)
test.assert_equals(round(circle_area(Circle(Point(25, -70), 30)), 6), 2827.433388)
test.assert_equals(round(circle_area(Circle(Point(-15, 5), 0)), 6), 0)
test.assert_equals(round(circle_area(Circle(Point(-15, 5), 12.5)), 6), 490.873852)
| 48.142857 | 82 | 0.715134 | # Python - 3.6.0
test.assert_equals(round(circle_area(Circle(Point(10, 10), 30)), 6), 2827.433388)
test.assert_equals(round(circle_area(Circle(Point(25, -70), 30)), 6), 2827.433388)
test.assert_equals(round(circle_area(Circle(Point(-15, 5), 0)), 6), 0)
test.assert_equals(round(circle_area(Circle(Point(-15, 5), 12.5)), 6), 490.873852)
| 0 | 0 | 0 |
2ecce8e6510586c097a7c41d462e2fca1a437b5d | 1,663 | py | Python | paddlenlp/taskflow/utils.py | a5116638/PaddleNLP | 37a95ae3c0d317aff09f76f79484208354db1e36 | [
"Apache-2.0"
] | 1 | 2021-09-29T06:05:13.000Z | 2021-09-29T06:05:13.000Z | paddlenlp/taskflow/utils.py | svs1984/PaddleNLP | 9eb9e23b01d044706c789158ac6cf0d365aea848 | [
"Apache-2.0"
] | null | null | null | paddlenlp/taskflow/utils.py | svs1984/PaddleNLP | 9eb9e23b01d044706c789158ac6cf0d365aea848 | [
"Apache-2.0"
] | null | null | null | # coding:utf-8
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from paddle.dataset.common import md5file
from ..utils.downloader import get_path_from_url
from ..utils.env import MODEL_HOME
def download_file(save_dir, filename, url, md5=None):
    """
    Download the file from the url to the specified directory.
    If the file already exists, check its md5 value: when it matches, reuse
    the existing file; otherwise download the file from the url again.
    Args:
        save_dir(string): The directory (under MODEL_HOME) saving the file.
        filename(string): The filename to save the file as.
        url(string): The url to download the file from.
        md5(string, optional): The expected md5 checksum used to validate
            an existing local copy.
    Returns:
        string: Full path of the local file.
    """
    default_root = os.path.join(MODEL_HOME, save_dir)
    fullname = os.path.join(default_root, filename)
    if os.path.exists(fullname):
        # Re-download only when a checksum is given and the local copy fails it.
        if md5 and (not md5file(fullname) == md5):
            get_path_from_url(url, default_root, md5)
    else:
        get_path_from_url(url, default_root, md5)
    return fullname
| 39.595238 | 104 | 0.725195 | # coding:utf-8
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from paddle.dataset.common import md5file
from ..utils.downloader import get_path_from_url
from ..utils.env import MODEL_HOME
def download_file(save_dir, filename, url, md5=None):
    """
    Download the file from the url to the specified directory.
    If the file already exists and its md5 checksum matches, reuse the
    existing copy; otherwise (or when no local copy exists) download the
    file from the url.
    Args:
        save_dir(string): The directory (under MODEL_HOME) saving the file.
        filename(string): The filename to save the file as.
        url(string): The url to download the file from.
        md5(string, optional): The expected md5 checksum used to validate
            an existing local copy.
    Returns:
        string: Full path of the (possibly freshly downloaded) file.
    """
    default_root = os.path.join(MODEL_HOME, save_dir)
    fullname = os.path.join(default_root, filename)
    if os.path.exists(fullname):
        # Re-download only when a checksum is given and the local copy fails it.
        if md5 and md5file(fullname) != md5:
            get_path_from_url(url, default_root, md5)
    else:
        get_path_from_url(url, default_root, md5)
    return fullname
| 0 | 0 | 0 |
04ed3515e3b9ad34f17dc8d17749cddc196feff9 | 9,707 | py | Python | users/tests/test_forms.py | mmesiti/cogs3 | c48cd48629570f418b93aec73de49bc2fb59edc2 | [
"MIT"
] | 1 | 2020-03-28T23:55:02.000Z | 2020-03-28T23:55:02.000Z | users/tests/test_forms.py | mmesiti/cogs3 | c48cd48629570f418b93aec73de49bc2fb59edc2 | [
"MIT"
] | 60 | 2018-04-16T13:40:23.000Z | 2020-06-05T18:02:01.000Z | users/tests/test_forms.py | mmesiti/cogs3 | c48cd48629570f418b93aec73de49bc2fb59edc2 | [
"MIT"
] | 10 | 2018-03-14T22:25:50.000Z | 2020-01-09T21:32:22.000Z | import datetime
from django import forms
from django.test import TestCase
from django.utils.translation import activate
from institution.models import Institution
from users.forms import CustomUserChangeForm
from users.forms import CustomUserCreationForm
from users.forms import ProfileUpdateForm
from users.forms import RegisterForm
from users.models import CustomUser
from users.models import Profile
| 36.768939 | 99 | 0.608221 | import datetime
from django import forms
from django.test import TestCase
from django.utils.translation import activate
from institution.models import Institution
from users.forms import CustomUserChangeForm
from users.forms import CustomUserCreationForm
from users.forms import ProfileUpdateForm
from users.forms import RegisterForm
from users.models import CustomUser
from users.models import Profile
class ProfileUpdateFormTests(TestCase):
    """Tests for ProfileUpdateForm: saving profile fields and exposing the
    correct account-status choices before/after approval."""
    fixtures = [
        'institution/fixtures/tests/institutions.json',
        'users/fixtures/tests/users.json',
    ]
    def setUp(self):
        # One institutional (shibboleth) user and one external guest user.
        self.institution = Institution.objects.get(name='Example University')
        self.shibboleth_user = CustomUser.objects.get(email='shibboleth.user@example.ac.uk')
        self.guest_user = CustomUser.objects.get(email='guest.user@external.ac.uk')
    def test_profile_update(self):
        """
        Ensure the profile update form works for institutional and external users.
        """
        test_cases = [
            self.shibboleth_user,
            self.guest_user,
        ]
        for test_case in test_cases:
            scw_username = 'x.test.username'
            uid_number = 5000001
            description = 'test user'
            account_status = 1
            form = ProfileUpdateForm(
                data={
                    'user': test_case.pk,
                    'scw_username': scw_username,
                    'uid_number': uid_number,
                    'description': description,
                    'account_status': account_status,
                },
                instance=test_case.profile,
            )
            self.assertTrue(form.is_valid())
            form.save()
            self.assertEqual(test_case.profile.scw_username, scw_username)
            self.assertEqual(test_case.profile.uid_number, uid_number)
            self.assertEqual(test_case.profile.description, description)
            self.assertEqual(test_case.profile.account_status, account_status)
    def test_pre_approved_options(self):
        """
        Ensure the correct account status options are available for accounts that are awaiting
        approval.
        """
        self.shibboleth_user.profile.account_status = Profile.AWAITING_APPROVAL
        self.shibboleth_user.profile.save()
        self.assertEqual(self.shibboleth_user.profile.account_status, Profile.AWAITING_APPROVAL)
        form = ProfileUpdateForm(
            data={
                'user': self.shibboleth_user.pk,
                'account_status': self.shibboleth_user.profile.account_status,
            },
            instance=self.shibboleth_user.profile,
        )
        self.assertTrue(form.is_valid())
        expected_choices = Profile.PRE_APPROVED_OPTIONS
        actual_choices = form.fields['account_status'].widget.choices
        self.assertEqual(actual_choices, expected_choices)
    def test_post_approved_options(self):
        """
        Ensure the correct account status options are available for accounts that have been
        approved.
        """
        self.shibboleth_user.profile.account_status = Profile.APPROVED
        self.shibboleth_user.profile.save()
        self.assertEqual(self.shibboleth_user.profile.account_status, Profile.APPROVED)
        form = ProfileUpdateForm(
            data={
                'user': self.shibboleth_user.pk,
                'account_status': Profile.APPROVED,
            },
            instance=self.shibboleth_user.profile,
        )
        self.assertTrue(form.is_valid())
        expected_choices = Profile.POST_APPROVED_OPTIONS
        actual_choices = form.fields['account_status'].widget.choices
        self.assertEqual(actual_choices, expected_choices)
class CustomUserCreationFormTests(TestCase):
    """Tests for CustomUserCreationForm: valid/invalid emails, required
    fields, and automatic password generation."""
    fixtures = [
        'institution/fixtures/tests/institutions.json',
    ]
    def setUp(self):
        self.institution = Institution.objects.get(name='Example University')
    def test_create_user(self):
        """
        Ensure the user creation form works for institutional and external users.
        """
        # Maps email -> whether a shibboleth login is required for it.
        test_cases = {
            '@'.join(['shibboleth.user', self.institution.base_domain]): True,
            'guest.user@external.ac.uk': False,
        }
        for email, shibboleth_required in test_cases.items():
            form = CustomUserCreationForm(
                data={
                    'email': email,
                    'first_name': 'Joe',
                    'last_name': 'Bloggs',
                    'is_shibboleth_login_required': shibboleth_required,
                })
            self.assertTrue(form.is_valid())
    def test_invalid_institutional_email(self):
        """
        Ensure an email address from an unsupported institution domain is caught via the
        CustomUserCreationForm, if the user is required to login via a shibboleth IDP.
        """
        form = CustomUserCreationForm(
            data={
                'email': 'joe.bloggs@invalid_base_domain.ac.uk',
                'first_name': 'Joe',
                'last_name': 'Bloggs',
                'is_shibboleth_login_required': True,
            })
        self.assertFalse(form.is_valid())
    def test_without_required_fields(self):
        """
        Ensure a CustomUser instance can not be created without the required form fields.
        """
        # Force English so the expected error strings match.
        activate('en')
        form = CustomUserCreationForm(data={})
        self.assertFalse(form.is_valid())
        self.assertEqual(form.errors['email'], ['This field is required.'])
        self.assertEqual(form.errors['first_name'], ['This field is required.'])
        self.assertEqual(form.errors['last_name'], ['This field is required.'])
    def test_password_generation(self):
        """
        Ensure a random password is generated for new user accounts.
        """
        test_cases = {
            '@'.join(['shibboleth.user', self.institution.base_domain]): True,
            'guest.user@external.ac.uk': False,
        }
        for email, shibboleth_required in test_cases.items():
            form = CustomUserCreationForm(
                data={
                    'email': email,
                    'first_name': 'Joe',
                    'last_name': 'Bloggs',
                    'is_shibboleth_login_required': shibboleth_required,
                })
            self.assertTrue(form.is_valid())
            form.save()
            self.assertEqual(CustomUser.objects.filter(email=email).count(), 1)
            self.assertIsNotNone(CustomUser.objects.get(email=email).password)
class RegisterFormTests(TestCase):
    """Tests for RegisterForm: a complete registration and the required-field
    validation errors."""
    fixtures = [
        'institution/fixtures/tests/institutions.json',
    ]
    def test_user_registration(self):
        """
        Ensure the registration form works for shibboleth users.
        """
        form = RegisterForm(
            data={
                'first_name': 'Joe',
                'last_name': 'Bloggs',
                'reason_for_account': 'HPC',
                'accepted_terms_and_conditions': True,
            })
        self.assertTrue(form.is_valid())
    def test_without_required_fields(self):
        """
        Ensure the registration form fails if the required fields are missing.
        """
        form = RegisterForm(data={})
        self.assertFalse(form.is_valid())
        self.assertEqual(form.errors['first_name'], ['This field is required.'])
        self.assertEqual(form.errors['last_name'], ['This field is required.'])
        self.assertEqual(form.errors['reason_for_account'], ['This field is required.'])
        self.assertEqual(form.errors['accepted_terms_and_conditions'], ['This field is required.'])
class CustomUserChangeFormTests(TestCase):
    """Tests for CustomUserChangeForm: updating a user and rejecting emails
    from unsupported institution domains."""
    fixtures = [
        'institution/fixtures/tests/institutions.json',
        'users/fixtures/tests/users.json',
    ]
    def setUp(self):
        self.institution = Institution.objects.get(name='Example University')
        self.shibboleth_user = CustomUser.objects.get(email='shibboleth.user@example.ac.uk')
    def test_user_update(self):
        """
        Ensure the user update form works.
        """
        first_name = 'John'
        last_name = 'Smith'
        email = 'john.smith@example.ac.uk'
        form = CustomUserChangeForm(
            data={
                'username': self.shibboleth_user.username,
                'first_name': first_name,
                'last_name': last_name,
                'email': email,
                'is_shibboleth_login_required': True,
                'date_joined': datetime.date.today(),
            },
            instance=self.shibboleth_user,
        )
        self.assertTrue(form.is_valid())
        form.save()
        self.assertEqual(self.shibboleth_user.first_name, first_name)
        self.assertEqual(self.shibboleth_user.last_name, last_name)
        self.assertEqual(self.shibboleth_user.email, email)
    def test_invalid_institutional_email(self):
        """
        Ensure an email address from an unsupported institution domain is caught.
        """
        # Looking up the institution for an unknown domain raises DoesNotExist.
        with self.assertRaises(Institution.DoesNotExist):
            form = CustomUserChangeForm(
                data={
                    'username': self.shibboleth_user.username,
                    'first_name': self.shibboleth_user.first_name,
                    'last_name': self.shibboleth_user.last_name,
                    'email': 'john.smith@invalid-domain.ac.uk',
                    'is_shibboleth_login_required': True,
                    'date_joined': datetime.date.today(),
                },
                instance=self.shibboleth_user,
            )
            self.assertTrue(form.is_valid())
            form.save()
37932ce90f945a2abf6aae9ca6759bd8c18500f4 | 13,160 | py | Python | build.py | secure-foundations/veri-titan | f7e4b434fd2ab85642aeb1fc4d7c34c28c678d3c | [
"MIT"
] | 10 | 2020-06-26T17:14:49.000Z | 2022-03-31T16:29:01.000Z | build.py | secure-foundations/veri-titan | f7e4b434fd2ab85642aeb1fc4d7c34c28c678d3c | [
"MIT"
] | 2 | 2021-04-06T14:06:34.000Z | 2022-03-09T00:01:14.000Z | build.py | secure-foundations/veri-titan | f7e4b434fd2ab85642aeb1fc4d7c34c28c678d3c | [
"MIT"
] | 4 | 2020-06-11T02:39:15.000Z | 2022-01-27T09:46:08.000Z | #!/usr/bin/env python
import sys, os, subprocess, re, platform
from subprocess import PIPE, Popen
from os.path import exists
TOOLS_DIR = "./tools"
DAFNY_PATH = "./tools/dafny/dafny"
VALE_PATH = "./tools/vale/bin/vale"
DAFNY_LIB_DIR = "./std_lib"
DAFNY_LIB_HASH = "84d160538b6442017a5401feb91265147bf34bfc"
DAFNY_ZIP_LINUX = "dafny-3.0.0-x64-ubuntu-16.04.zip"
DAFNY_ZIP_MACOS = "dafny-3.0.0-x64-osx-10.14.2.zip"
OT_PRINTER_DFY_PATH = "arch/otbn/printer.s.dfy"
OT_SIMULATOR_DFY_PATH = "arch/otbn/simulator.i.dfy"
DLL_SOURCES = {OT_PRINTER_DFY_PATH, OT_SIMULATOR_DFY_PATH}
OUTPUT_ASM_PATH = "gen/arch/otbn/printer.s.dll.out"
TEST_ASM_PATH = "impl/otbn/run_modexp.s"
OUTPUT_ELF_PATH = "gen/impl/otbn/run_modexp.elf"
NINJA_PATH = "build.ninja"
CODE_DIRS = ["arch", "impl", "lib"]
GEN_DIR = "gen"
NL_FILES = {
# "arch/riscv/vale.i.dfy",
"impl/riscv/sub_mod_nl_lemmas.i.dfy",
# "impl/riscv/sub_mod_lemmas.i.dfy",
"lib/bv_ops_nl.dfy"}
## misc utils
# run command
# convert path
## separate command: setup
# list dependecy
VAD_INCLUDE_PATTERN = re.compile('include\s+"(.+vad)"')
# list files
## main command (build)
# ## separate command: dd-gen
## separate command: proc
## separate command: ver
## separate command: dll-gen
## command line interface
if __name__ == "__main__":
main()
| 31.111111 | 150 | 0.629027 | #!/usr/bin/env python
import sys, os, subprocess, re, platform
from subprocess import PIPE, Popen
from os.path import exists
TOOLS_DIR = "./tools"
DAFNY_PATH = "./tools/dafny/dafny"
VALE_PATH = "./tools/vale/bin/vale"
DAFNY_LIB_DIR = "./std_lib"
DAFNY_LIB_HASH = "84d160538b6442017a5401feb91265147bf34bfc"
DAFNY_ZIP_LINUX = "dafny-3.0.0-x64-ubuntu-16.04.zip"
DAFNY_ZIP_MACOS = "dafny-3.0.0-x64-osx-10.14.2.zip"
def rules():
    """Return the ninja rule declarations written into build.ninja.

    Vale is a .NET binary: executed directly on Linux, launched through
    `mono` elsewhere (macOS).  The returned text is spliced verbatim into
    the generated build file, so its indentation is significant to ninja.
    """
    vale = "" if platform.system() == "Linux" else "mono"
    vale += " " + VALE_PATH
    return f"""
rule dafny
    command = {DAFNY_PATH} /compile:0 /noNLarith /timeLimit:20 /vcsCores:2 $in && touch $out
rule dafny-nl
    command = {DAFNY_PATH} /compile:0 /timeLimit:20 /vcsCores:2 $in && touch $out
rule vale
    command = {vale} -dafnyText -in $in -out $out
rule dd-gen
    command = python3 build.py dd-gen $in $out
rule dll-gen
    command = python3 build.py dll-gen $in $out
rule dll-run
    command = dotnet $in > $out
rule otbn-as
    command = otbn-as $in -o $out
rule otbn-ld
    command = otbn-ld $in -o $out
"""
OT_PRINTER_DFY_PATH = "arch/otbn/printer.s.dfy"
OT_SIMULATOR_DFY_PATH = "arch/otbn/simulator.i.dfy"
DLL_SOURCES = {OT_PRINTER_DFY_PATH, OT_SIMULATOR_DFY_PATH}
OUTPUT_ASM_PATH = "gen/arch/otbn/printer.s.dll.out"
TEST_ASM_PATH = "impl/otbn/run_modexp.s"
OUTPUT_ELF_PATH = "gen/impl/otbn/run_modexp.elf"
NINJA_PATH = "build.ninja"
CODE_DIRS = ["arch", "impl", "lib"]
GEN_DIR = "gen"
NL_FILES = {
# "arch/riscv/vale.i.dfy",
"impl/riscv/sub_mod_nl_lemmas.i.dfy",
# "impl/riscv/sub_mod_lemmas.i.dfy",
"lib/bv_ops_nl.dfy"}
## misc utils
# run command
def os_system(command):
    """Echo *command*, run it through the shell, then terminate the whole
    process with the raw os.system() status.

    NOTE: this never returns — it is only suitable as the final step of a
    CLI sub-command.
    """
    print(command)
    code = os.system(command)
    sys.exit(code)
def subprocess_run(command, cwd=None):
    """Run *command* through the shell and return its captured stdout,
    UTF-8 decoded with surrounding whitespace stripped."""
    completed = subprocess.run(command, shell=True, stdout=PIPE, cwd=cwd)
    raw_output = completed.stdout
    return raw_output.decode("utf-8").strip()
# convert path
def get_ver_path(dfy_path):
    """Map a .dfy source path to its .ver verification-marker path, rooted
    under GEN_DIR unless it already is."""
    rel = os.path.relpath(dfy_path).replace(".dfy", ".ver")
    return rel if rel.startswith(GEN_DIR) else os.path.join(GEN_DIR, rel)
def get_dd_path(dfy_path):
    """Map a .dfy source path to its ninja dynamic-dependency (.dd) path,
    rooted under GEN_DIR unless it already is."""
    dfy_path = os.path.relpath(dfy_path)
    dd_path = dfy_path.replace(".dfy", ".dd")
    if dd_path.startswith(GEN_DIR):
        return dd_path
    else:
        return os.path.join(GEN_DIR, dd_path)
def get_gen_dfy_path(vad_path):
    """Return the path of the .dfy file vale generates for *vad_path*,
    mirrored under GEN_DIR."""
    assert vad_path.endswith(".vad")
    dfy_path = os.path.join(GEN_DIR, vad_path)
    # NOTE(review): replaces the FIRST ".vad" occurrence, not necessarily the
    # asserted suffix — fine unless ".vad" appears earlier in the path.
    return dfy_path.replace(".vad", ".dfy")
def get_dll_path(dfy_path):
    """Map a .dfy source path to its compiled .dll path under GEN_DIR."""
    dfy_path = os.path.relpath(dfy_path)
    dll_path = dfy_path.replace(".dfy", ".dll")
    assert(not dll_path.startswith(GEN_DIR))  # dll sources never live under gen/
    return os.path.join(GEN_DIR, dll_path)
def get_o_path(asm_path):
    """Map an assembly (.s) path to its object (.o) path under GEN_DIR.

    NOTE(review): `.replace(".s", ".o")` substitutes the FIRST ".s"
    occurrence, not just a suffix (the suffix assert below is commented
    out) — e.g. "printer.s.dll.out" becomes "printer.o.dll.out".
    Presumably intentional; confirm before tightening.
    """
    asm_path = os.path.relpath(asm_path)
    # assert asm_path.endswith(".s")
    if not asm_path.startswith(GEN_DIR):
        asm_path = os.path.join(GEN_DIR, asm_path)
    return asm_path.replace(".s", ".o")
## separate command: setup
def setup_tools():
    """Check host dependencies (ninja, dotnet, nuget, otbn-as/ld), then —
    after interactive confirmation — download dafny, build vale, and clone
    the dafny standard library into their expected locations.

    Skips any component whose install path already exists.
    """
    os_type = platform.system()
    # ninja
    version = subprocess_run("ninja --version")
    if not version.startswith("1.10."):
        print("[WARN] ninja not found or unexpected version. Expected 1.10.*, found: " + version)
    # dotnet
    version = subprocess_run("dotnet --list-sdks")
    if "5.0" not in version:
        print("[WARN] dotnet not found or unexpected version. Expected 5.0, found: " + version)
    else:
        print("[INFO] Found dotnet version: " + version)
    # nuget
    version = subprocess_run("nuget help | grep Version")
    if "5.5" not in version:
        print("[WARN] nuget not found or unexpected version. Expected 5.5, found: " + version)
    else:
        print("[INFO] Found nuget version: " + version)
    path = subprocess_run("which otbn-as")
    if "otbn-as" not in path:
        print("[WARN] otbn-as not found")
    else:
        print("[INFO] otbn-as found")
    path = subprocess_run("which otbn-ld")
    if "otbn-ld" not in path:
        print("[WARN] otbn-ld not found")
    else:
        print("[INFO] otbn-ld found")
    # Interactive gate: only proceed to downloads on an explicit "y".
    while 1:
        print("confirm dependecies are installed [y/n] ", end='')
        choice = input().lower()
        if choice == "n":
            return
        elif choice == "y":
            break
    if not os.path.exists(TOOLS_DIR):
        os.mkdir(TOOLS_DIR)
    # Pick the release zip matching the host OS (non-Linux is assumed macOS).
    dafny_zip = DAFNY_ZIP_LINUX if os_type == "Linux" else DAFNY_ZIP_MACOS
    if os.path.exists(DAFNY_PATH):
        print("[INFO] dafny binary already exists")
    else:
        os.system(f"wget https://github.com/dafny-lang/dafny/releases/download/v3.0.0/{dafny_zip}")
        os.system(f"unzip {dafny_zip} -d {TOOLS_DIR}")
        os.system(f"rm {dafny_zip}")
    if os.path.exists(VALE_PATH):
        print("[INFO] vale binary already exists")
    else:
        os.system("cd tools && git clone git@github.com:project-everest/vale.git")
        os.system("cd tools/vale && git checkout otbn-custom && bash ./run_scons.sh")
        os.system("mv tools/vale/bin/vale.exe tools/vale/bin/vale")
    if os.path.exists(DAFNY_LIB_DIR):
        print("[INFO] dafny library already exists")
    else:
        os.system(f"git clone git@github.com:secure-foundations/libraries.git {DAFNY_LIB_DIR} && cd {DAFNY_LIB_DIR} && git checkout {DAFNY_LIB_HASH}")
# list dependecy
def list_dfy_deps(dfy_file):
    """Return a space-separated string of .ver targets that dfy_file depends on.

    Runs dafny with /printIncludes:Immediate and maps each immediate include
    (excluding std_lib files and the file itself) to its verification-stamp
    path via get_ver_path. Returns "" when dafny emits no output.
    """
    # single f-string instead of the original mixed f-string + %-formatting
    command = f"{DAFNY_PATH} /printIncludes:Immediate {dfy_file}"
    outputs = subprocess.run(command, shell=True, stdout=PIPE).stdout
    outputs = outputs.decode("utf-8")
    if outputs == "":
        return ""
    # first output line looks like "<file>;<include1>;<include2>;..."
    outputs = outputs.splitlines()[0].split(";")
    includes = []
    for (i, include) in enumerate(outputs):
        include = os.path.relpath(include)
        if "std_lib" in include:
            # standard-library files are not tracked as build dependencies
            continue
        if i == 0:
            # entry 0 is dfy_file itself, not a dependency
            continue
        includes.append(get_ver_path(include))
    return " ".join(includes)
VAD_INCLUDE_PATTERN = re.compile('include\s+"(.+vad)"')
def list_vad_deps(vad_path):
    """Return a space-separated list of build dependencies of a .vad file.

    Each include found before the first "#verbatim" marker contributes two
    dependencies: the included .vad itself and the .dfy generated from it.
    Exits the process with an error if an included file does not exist.
    """
    # print("[WARNING] .vad transitive dependencies not included")
    vad_path = os.path.relpath(vad_path)
    vad_dir = os.path.dirname(vad_path)
    vad_dependencies = []
    # context manager: the original leaked the file handle (open without close)
    with open(vad_path) as f:
        for line in f:
            line = line.strip()
            if line == "#verbatim":
                # includes must precede the verbatim block; stop scanning
                break
            match = VAD_INCLUDE_PATTERN.search(line)
            if match:
                included = os.path.join(vad_dir, match.group(1))
                included = os.path.relpath(included)
                if not exists(included):
                    print(f"[ERROR] {vad_path} is importing {included} that doesn't exist")
                    sys.exit(-1)
                vad_dependencies.append(included)
                vad_dependencies.append(get_gen_dfy_path(included))
    return " ".join(vad_dependencies)
# list files
def get_dfy_files(include_gen):
    """Collect the relative paths of all .dfy files under the tracked dirs.

    Files under GEN_DIR are included only when include_gen is True; the
    special DLL source files are always excluded.
    """
    wanted_dirs = set(CODE_DIRS)
    if include_gen:
        wanted_dirs.add(GEN_DIR)
    collected = []
    for root, _, files in os.walk("."):
        top_level = "." if root == "." else root.split("/")[1]
        if top_level not in wanted_dirs:
            continue
        for name in files:
            if not name.endswith(".dfy"):
                continue
            path = os.path.relpath(os.path.join(root, name))
            # skip the special printer sources
            if path not in DLL_SOURCES:
                collected.append(path)
    return collected
def get_vad_files():
    """Collect the relative paths of all .vad files under the tracked dirs."""
    wanted_dirs = set(CODE_DIRS)
    found = []
    for root, _, files in os.walk("."):
        top_level = "." if root == "." else root.split("/")[1]
        if top_level in wanted_dirs:
            found.extend(
                os.path.relpath(os.path.join(root, name))
                for name in files
                if name.endswith(".vad")
            )
    return found
## main command (build)
class Generator():
    """Accumulates ninja build statements and writes build.ninja on construction."""
    def generate_vad_rules(self, vad_path):
        """Emit the rule that generates a .dfy from a .vad via vale."""
        # print(vad_path)
        dfy_path = get_gen_dfy_path(vad_path)
        vad_deps = list_vad_deps(vad_path)
        # print(vad_path, dfy_path)
        self.content.append(f"build {dfy_path}: vale {vad_path} | {vad_deps}\n")
        # need to add this generated file as well
        self.dfy_files.append(dfy_path)
    def generate_dfy_rules(self, dfy_file):
        """Emit the dyndep rule and the verification rule for one .dfy file."""
        ver_path = get_ver_path(dfy_file)
        dd_path = get_dd_path(dfy_file)
        self.content.append(f"build {dd_path}: dd-gen {dfy_file}\n")
        # files in NL_FILES are verified with the non-linear-arithmetic rule
        if dfy_file in NL_FILES:
            self.content.append(f"build {ver_path}: dafny-nl {dfy_file} || {dd_path}")
        else:
            self.content.append(f"build {ver_path}: dafny {dfy_file} || {dd_path}")
        self.content.append(f" dyndep = {dd_path}\n")
    def generate_dll_rules(self, dafny_path):
        """Emit rules compiling a printer .dfy to a .dll and running it."""
        dfy_deps = list_dfy_deps(dafny_path)
        dll_path = get_dll_path(dafny_path)
        self.content.append(f"build {dll_path}: dll-gen {dafny_path} | {dfy_deps}\n")
        dll_out_path = dll_path + ".out"
        self.content.append(f"build {dll_out_path}: dll-run {dll_path} \n")
    def generate_elf_rules(self):
        """Emit assemble (otbn-as) and link (otbn-ld) rules for the final elf."""
        output_o_path = get_o_path(OUTPUT_ASM_PATH)
        self.content.append(f"build {output_o_path}: otbn-as {OUTPUT_ASM_PATH}\n")
        test_o_path = get_o_path(TEST_ASM_PATH)
        self.content.append(f"build {test_o_path}: otbn-as {TEST_ASM_PATH}\n")
        self.content.append(f"build {OUTPUT_ELF_PATH}: otbn-ld {test_o_path} {output_o_path}\n")
    def generate_rules(self):
        """Generate all build statements; vad rules run first so the .dfy
        files they produce are appended to self.dfy_files before the dfy pass."""
        # rules to build .dfy from .vad
        vad_files = get_vad_files()
        for vad_file in vad_files:
            # print(vad_file)
            self.generate_vad_rules(vad_file)
        # rules to build .ver from .dfy
        for dfy_file in self.dfy_files:
            self.generate_dfy_rules(dfy_file)
        # rules for the printer
        for dll_source in DLL_SOURCES:
            self.generate_dll_rules(dll_source)
        # rules for the elf
        self.generate_elf_rules()
    def write_ninja(self):
        """Write the accumulated statements to NINJA_PATH."""
        with open(NINJA_PATH, "w") as f:
            for line in self.content:
                f.write(line + "\n")
    def __init__(self):
        # start with the static rule definitions, then append build statements
        self.content = [rules()]
        # collect none generated .dfy first
        self.dfy_files = get_dfy_files(False)
        self.generate_rules()
        self.write_ninja()
# ## separate command: dd-gen
def generate_dd(dfy_file, dd_file):
    """Write a ninja dyndep file (dd_file) describing dfy_file's dependencies."""
    dfy_file = os.path.relpath(dfy_file)
    result = "ninja_dyndep_version = 1\n"
    result += "build " + get_ver_path(dfy_file) + " : dyndep"
    outputs = list_dfy_deps(dfy_file)
    # context manager: the original open(...).write(...) never closed the handle
    with open(dd_file, "w") as f:
        f.write(result + " | " + outputs + "\n")
## separate command: proc
def verify_dafny_proc(proc):
    """Verify a single named procedure in every .dfy file that defines it.

    Greps all .dfy files (including generated ones) for a method/function/
    lemma/predicate named `proc`, then runs dafny on each match restricted
    to that procedure, printing the verifier output.
    """
    dfy_files = get_dfy_files(True)
    # raw string: the backslashes belong to grep's pattern, not Python escapes
    command = rf'grep -e "\(method\|function\|lemma\|predicate\).{proc}" -l ' + " ".join(dfy_files)
    outputs = subprocess.run(command, shell=True, stdout=PIPE).stdout
    outputs = outputs.decode("utf-8")
    # dafny mangles "_" in user names into "__"
    proc = proc.replace("_", "__")
    for dfy_file in outputs.splitlines():
        print("verify %s in %s" % (proc, dfy_file))
        command = f"time -p {DAFNY_PATH} /trace /timeLimit:20 /compile:0 /proc:*{proc} {dfy_file}"
        # r = subprocess.check_output(command, shell=True).decode("utf-8")
        process = Popen(command, shell=True, stdout=PIPE)
        output = process.communicate()[0].decode("utf-8")
        print(output)
## separate command: ver
def verify_single_file(target):
    """Verify one .dfy or .vad file through ninja.

    Regenerates build.ninja, maps the source path to its verification
    target, and invokes ninja on it. Does nothing when the path does not
    exist or has another suffix.
    """
    if not os.path.exists(target):
        return
    generate_dot_ninja()
    target = os.path.relpath(target)
    if target.endswith(".dfy"):
        ninja_target = get_ver_path(target)
    elif target.endswith(".vad"):
        # a .vad is verified via the .dfy that vale generates from it
        ninja_target = get_ver_path(get_gen_dfy_path(target))
    else:
        return
    os.system("ninja -v " + ninja_target)
## separate command: dll-gen
def generate_dll(dfy_path, dll_path):
    """Compile a printer .dfy into a .dll inside GEN_DIR.

    NOTE(review): invokes bare "dafny" rather than DAFNY_PATH, presumably
    because the command runs with cwd=dll_dir where a relative DAFNY_PATH
    would not resolve -- confirm "dafny" is on PATH.
    """
    # realpath: the command runs from dll_dir, so the source path must be absolute
    dfy_path = os.path.realpath(dfy_path)
    assert(dll_path.startswith(GEN_DIR) and dll_path.endswith(".dll"))
    dll_dir = os.path.dirname(dll_path)
    command = f"dafny /compile:1 /noNLarith /vcsCores:2 {dfy_path} /out:{dll_path}"
    output = subprocess_run(command, cwd=dll_dir)
    print(output)
## command line interface
def main():
    """Command-line entry point.

    With no arguments, (re)generates build.ninja. Otherwise dispatches on
    sys.argv[1]: ver, proc, dd-gen, dll-gen, clean, setup.
    """
    # build everything
    if len(sys.argv) == 1:
        Generator()
        print("Wrote out build.ninja. Now run: ninja -v -j4")
        # os.system("ninja -v -j 4")
        return
    option = sys.argv[1]
    if option == "ver":
        verify_single_file(sys.argv[2])
    elif option == "proc":
        verify_dafny_proc(sys.argv[2])
    elif option == "dd-gen":
        generate_dd(sys.argv[2], sys.argv[3])
    elif option == "dll-gen":
        generate_dll(sys.argv[2], sys.argv[3])
    elif option == "clean":
        os.system(f"rm -r {GEN_DIR}")
        os.system("rm " + NINJA_PATH)
    elif option == "setup":
        setup_tools()
    else:
        # previously unknown options were silently ignored
        print(f"[ERROR] unknown option: {option}")
# run the CLI only when executed directly, not on import
if __name__ == "__main__":
    main()
| 11,217 | -3 | 625 |
c014c57708ea670205d0e1b85a67761597da2bbc | 2,701 | py | Python | test.py | emboiko/Socket_Singleton | e44e8230daa5167c92f9519c73a2374a3d279cbc | [
"MIT"
] | 1 | 2021-08-01T06:12:49.000Z | 2021-08-01T06:12:49.000Z | test.py | emboiko/Socket_Singleton | e44e8230daa5167c92f9519c73a2374a3d279cbc | [
"MIT"
] | null | null | null | test.py | emboiko/Socket_Singleton | e44e8230daa5167c92f9519c73a2374a3d279cbc | [
"MIT"
] | null | null | null | import unittest
from time import sleep
from subprocess import run
from src.Socket_Singleton import Socket_Singleton, MultipleSingletonsError
if __name__ == "__main__":
unittest.main()
| 31.045977 | 86 | 0.670863 | import unittest
from time import sleep
from subprocess import run
from src.Socket_Singleton import Socket_Singleton, MultipleSingletonsError
class TestMain(unittest.TestCase):
    """End-to-end tests for Socket_Singleton.

    Each test spawns test_app.py as a child process; the child tries to
    become a second singleton instance and (by default) forwards its CLI
    arguments to this process over the local socket.
    NOTE(review): the "\r\n" expectations and shell invocation style look
    Windows-specific -- confirm before running elsewhere.
    """
    def setUp(self):
        # the "first" singleton instance; child processes connect to it
        self.app = Socket_Singleton()
        self.traced_args = []
    def test_default(self):
        # a second instance must exit quietly: no stdout from the child
        result = run("test_app.py default", shell=True, capture_output=True)
        self.assertFalse(result.stdout)
    def test_different_port(self):
        # on a different port the child is not blocked, so it produces output
        result = run("test_app.py different_port", shell=True, capture_output=True)
        self.assertTrue(result.stdout)
    def test_no_client(self):
        # child started in no-client mode must not forward its arguments
        run("test_app.py no_client foo bar baz", shell=True, capture_output=True)
        self.assertNotIn("noclient", self.app.arguments)
        self.assertNotIn("foo", self.app.arguments)
        self.assertNotIn("bar", self.app.arguments)
        self.assertNotIn("baz", self.app.arguments)
    def test_client(self):
        # default mode forwards every CLI argument to the first instance
        run("test_app.py default foo bar baz", shell=True, capture_output=True)
        self.assertIn("default", self.app.arguments)
        self.assertIn("foo", self.app.arguments)
        self.assertIn("bar", self.app.arguments)
        self.assertIn("baz", self.app.arguments)
    def test_context(self):
        result = run("test_app.py context", shell=True, capture_output=True)
        self.assertFalse(result.stdout)
    def test_context_no_strict(self):
        # non-strict mode surfaces MultipleSingletonsError instead of exiting
        result = run("test_app.py context_no_strict", shell=True, capture_output=True)
        self.assertEqual(result.stdout.decode("UTF-8"), "MultipleSingletonsError\r\n")
    def test_no_strict(self):
        result = run("test_app.py no_strict", shell=True, capture_output=True)
        self.assertEqual(result.stdout.decode("UTF-8"), "MultipleSingletonsError\r\n")
    def test_trace(self):
        # traced callback fires once per forwarded argument (4 args)
        self.app.trace(self.traced)
        run("test_app.py default foo bar baz", shell=True, capture_output=True)
        self.assertEqual(len(self.traced_args), 4)
    def test_untrace(self):
        # after untrace, the second child's arguments must not be delivered
        self.app.trace(self.traced)
        run("test_app.py default foo bar baz", shell=True, capture_output=True)
        self.app.untrace(self.traced)
        run("test_app.py default foo bar baz", shell=True, capture_output=True)
        self.assertEqual(len(self.traced_args), 4)
    def traced(self, argument):
        # helper callback registered via self.app.trace
        self.traced_args.append(argument)
    def test_slam_args(self):
        # 10 children x 5 args each = 50 accumulated arguments
        self.app.arguments.clear()
        for _ in range(10):
            run("test_app.py default foo bar bin baz", shell=True)
        self.assertEqual(len(self.app.arguments), 50)
    def tearDown(self):
        self.app.close()
        # give the OS time to release the socket before the next test
        sleep(1)
unittest.main()
| 2,100 | 13 | 377 |
14c7ea14a7df31fd177851177e74b5d33195f582 | 533 | py | Python | Wikipedia search.py | Behordeun/simple-python-projects | c2d088a2c1ebd842ca4d9817d569da4fd6b7f637 | [
"Apache-2.0"
] | 1 | 2021-09-09T10:55:23.000Z | 2021-09-09T10:55:23.000Z | Wikipedia search.py | Behordeun/simple-python-projects | c2d088a2c1ebd842ca4d9817d569da4fd6b7f637 | [
"Apache-2.0"
] | null | null | null | Wikipedia search.py | Behordeun/simple-python-projects | c2d088a2c1ebd842ca4d9817d569da4fd6b7f637 | [
"Apache-2.0"
] | null | null | null | from tensorboard import summary
from tkinter import *
import wikipedia
root = Tk()
root.title("Wikipedia Search")
root.geometry("400x400")
frame = Frame(root)
input = Entry(frame, width = 30)
input.pack()
result = ""
text = Text(root, font = ("arial", 20))
button = Button(frame, text="Search", command=search)
button.pack(side = RIGHT)
frame.pack(side = TOP)
text.pack()
root.mainloop() | 21.32 | 53 | 0.69606 | from tensorboard import summary
from tkinter import *
import wikipedia
root = Tk()
root.title("Wikipedia Search")
root.geometry("400x400")
frame = Frame(root)
input = Entry(frame, width = 30)
input.pack()
result = ""
text = Text(root, font = ("arial", 20))
def search():
    """Look up the Entry text on Wikipedia and insert a 3-sentence summary."""
    global result
    # NOTE(review): `input` is the tkinter Entry widget here (shadows the builtin)
    result = input.get()
    summary = wikipedia.summary(result, sentences=3)
    # prepend the summary at the top of the Text widget
    text.insert("1.0", summary)
button = Button(frame, text="Search", command=search)
button.pack(side = RIGHT)
frame.pack(side = TOP)
text.pack()
root.mainloop() | 120 | 0 | 23 |
7afd3df22ba47b8d1930eeb3a5534f705fbda846 | 456 | py | Python | students/k3342/laboratory_works/Kocheshkova_Kseniia/laboratory_work_1/flights/migrations/0004_auto_20201101_2206.py | Derimeer/ITMO_ICT_WebProgramming_2020 | afb4999d20d59c5d47e4f380e8ba06204a42c729 | [
"MIT"
] | null | null | null | students/k3342/laboratory_works/Kocheshkova_Kseniia/laboratory_work_1/flights/migrations/0004_auto_20201101_2206.py | Derimeer/ITMO_ICT_WebProgramming_2020 | afb4999d20d59c5d47e4f380e8ba06204a42c729 | [
"MIT"
] | null | null | null | students/k3342/laboratory_works/Kocheshkova_Kseniia/laboratory_work_1/flights/migrations/0004_auto_20201101_2206.py | Derimeer/ITMO_ICT_WebProgramming_2020 | afb4999d20d59c5d47e4f380e8ba06204a42c729 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.2 on 2020-11-01 19:06
from django.db import migrations, models
| 24 | 120 | 0.596491 | # Generated by Django 3.1.2 on 2020-11-01 19:06
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: constrain Flight.type to a fixed pair of choices."""
    dependencies = [
        ('flights', '0003_flight_number_flight'),
    ]
    operations = [
        migrations.AlterField(
            model_name='flight',
            name='type',
            field=models.CharField(choices=[('to hotel', 'to hotel'), ('to home city', 'to home city')], max_length=20),
        ),
    ]
739ce0612c9ab100da3c259aaf5a9f24e447aef4 | 357 | py | Python | three/mathutils/__init__.py | jpiland16/three.py-packaged | 53026f1637eff31bbdbeb32dac6bb4ec608ff4a6 | [
"MIT"
] | null | null | null | three/mathutils/__init__.py | jpiland16/three.py-packaged | 53026f1637eff31bbdbeb32dac6bb4ec608ff4a6 | [
"MIT"
] | null | null | null | three/mathutils/__init__.py | jpiland16/three.py-packaged | 53026f1637eff31bbdbeb32dac6bb4ec608ff4a6 | [
"MIT"
] | null | null | null | from three.mathutils.MatrixFactory import *
from three.mathutils.Matrix import *
from three.mathutils.Curve import *
from three.mathutils.CurveFactory import *
from three.mathutils.Multicurve import *
from three.mathutils.Surface import *
from three.mathutils.Hilbert3D import *
from three.mathutils.RandomUtils import *
from three.mathutils.Tween import *
| 35.7 | 43 | 0.823529 | from three.mathutils.MatrixFactory import *
from three.mathutils.Matrix import *
from three.mathutils.Curve import *
from three.mathutils.CurveFactory import *
from three.mathutils.Multicurve import *
from three.mathutils.Surface import *
from three.mathutils.Hilbert3D import *
from three.mathutils.RandomUtils import *
from three.mathutils.Tween import *
| 0 | 0 | 0 |
ed46fa2cc80b4b07d8ccd05414bb5d2f39219bbd | 14,476 | py | Python | ddnet/ddnet.py | fzqneo/DD-Net | bde4c01d7378582dfa84f98a3affa84931f64ca1 | [
"MIT"
] | null | null | null | ddnet/ddnet.py | fzqneo/DD-Net | bde4c01d7378582dfa84f98a3affa84931f64ca1 | [
"MIT"
] | null | null | null | ddnet/ddnet.py | fzqneo/DD-Net | bde4c01d7378582dfa84f98a3affa84931f64ca1 | [
"MIT"
] | null | null | null | import numpy as np
import scipy.ndimage.interpolation as inter
import tensorflow as tf
from keras import backend as K
from keras import regularizers
from keras.layers import *
from keras.layers.convolutional import *
from keras.layers.core import *
from keras.models import Model, load_model
from keras.optimizers import *
from scipy.signal import medfilt
from scipy.spatial.distance import cdist
#######################################################
## Public functions
#######################################################
#######################################################
## OpenPose data cleaning
#######################################################
OP_HAND_PICKED_GOOD_JOINTS = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 13, 15, 16]
# COMMON_JOINTS_FROM_JHMDB = np.array([1, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) - 1
COMMON_JOINTS_FROM_OP = [1, 2, 5, 9, 12, 3, 6, 10, 13, 4, 7, 11, 14] # 0-based
COMMON_GOOD_JOINTS_FROM_OP = list(set(COMMON_JOINTS_FROM_OP).intersection(OP_HAND_PICKED_GOOD_JOINTS))
OP_UPPER_BODY_JOINTS = [0,1,2,3,4,5,6,7,8,15,16]
def nan_helper(y):
"""Helper function to handle real indices and logical indices of NaNs.
Input:
- y, 1d numpy array with possible NaNs
Output:
- nans, logical indices of NaNs
- index, a function, with signature indices= index(logical_indices),
to convert logical indices of NaNs to 'equivalent' indices
Example:
>>> # linear interpolation of NaNs
>>> nans, x= nan_helper(y)
>>> y[nans]= np.interp(x(nans), x(~nans), y[~nans])
"""
return np.isnan(y), lambda z: z.nonzero()[0]
#######################################################
## DDNet preprocessing and helper function
#######################################################
def infer_DDNet(net, C, batch, *args, **kwargs):
"""Infer on a batch of clips
Arguments:
net {Model} -- a DDNet instance created by create_DDNet
C {DDNetConfig} -- a config object
batch {list or array} -- Each element represents the joint coordinates of a clip
args, kwargs -- will be passed to Modle.predict()
"""
X0, X1 = preprocess_batch(batch, C)
return net.predict([X0, X1], *args, **kwargs)
def preprocess_point(p, C):
"""Preprocess a single point (a clip).
WARN: NAN-preserving
Arguments:
p {ndarray} -- shape = (variable, C.joint_n, C.joint_d)
C {DDNetConfig} -- A Config object
Returns:
ndarray, ndarray -- X0, X1 to input to the net
"""
assert p.shape[1:] == (C.joint_n, C.joint_d)
p = zoom(p,target_l=C.frame_l,joints_num=C.joint_n,joints_dim=C.joint_d)
# interploate to the right number of frames
assert p.shape == (C.frame_l, C.joint_n, C.joint_d)
M = get_CG(p, C)
return M, p
def preprocess_batch(batch, C, preprocess_point_fn=preprocess_point):
"""Preprocesss a batch of points (clips)
Arguments:
batch {ndarray or list or tuple} -- List of arrays as input to preprocess_point
C {DDNetConfig} -- A DDNetConfig object
Returns:
ndarray, ndarray -- X0, X1 to input to the net
"""
assert type(batch) in (np.ndarray, list, tuple)
X0 = []
X1 = []
for p in batch:
px0, px1 = preprocess_point_fn(p, C)
X0.append(px0)
X1.append(px1)
X0 = np.stack(X0)
X1 = np.stack(X1)
return X0, X1
#######################################################
## Private functions
#######################################################
#######################################################
### Preprocessing functions
#######################################################
# Interpolate the joint coordinates of a group of frames to be target_l frames
def zoom(p,target_l=64,joints_num=25,joints_dim=3):
"""Rescale and interploate the joint coordinates of a variable number of frames to be target_l frames.
Used prepare a fixed-size input to the net.
Arguments:
p {ndarray} -- shape = (num_frames, num_joints, joints_dim)
Keyword Arguments:
target_l {int} -- [description] (default: {64})
joints_num {int} -- [description] (default: {25})
joints_dim {int} -- [description] (default: {3})
Returns:
ndarray -- Rescaled array of size (target_l, num_joints, joints_dim)
"""
l = p.shape[0]
# if l == target_l: # need do nothing
# return p
p_new = np.empty([target_l,joints_num,joints_dim])
for m in range(joints_num):
for n in range(joints_dim):
p_new[:,m,n] = inter.zoom(p[:,m,n],target_l/l)
p_new[:,m,n] = medfilt(p_new[:,m,n],3)
return p_new
def get_CG(p,C):
"""Compute the Joint Collection Distances (JCD, refer to the paper) of a group of frames
and normalize them to 0 mean.
Arguments:
p {ndarray} -- size = (C.frame_l, C.num_joints, C.joints_dim)
C {Config} -- [description]
Returns:
ndarray -- shape = (C.frame_l, C.fead_d)
"""
# return JCD of a point, normalized to 0 mean
M = []
iu = np.triu_indices(C.joint_n,1,C.joint_n)
for f in range(C.frame_l):
d_m = cdist(p[f],p[f],'euclidean')
d_m = d_m[iu]
M.append(d_m)
M = np.stack(M)
M = norm_scale(M)
return M
#######################################################
### Model architecture
#######################################################
# used for Keras save/load model
_custom_objs = {
'poses_diff': poses_diff,
'pose_motion': pose_motion,
'c1D': c1D,
'block': block,
'd1D': d1D,
'build_FM': build_FM,
'build_DD_Net': build_DD_Net
}
| 32.677201 | 133 | 0.579649 | import numpy as np
import scipy.ndimage.interpolation as inter
import tensorflow as tf
from keras import backend as K
from keras import regularizers
from keras.layers import *
from keras.layers.convolutional import *
from keras.layers.core import *
from keras.models import Model, load_model
from keras.optimizers import *
from scipy.signal import medfilt
from scipy.spatial.distance import cdist
#######################################################
## Public functions
#######################################################
#######################################################
## OpenPose data cleaning
#######################################################
OP_HAND_PICKED_GOOD_JOINTS = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 13, 15, 16]
# COMMON_JOINTS_FROM_JHMDB = np.array([1, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) - 1
COMMON_JOINTS_FROM_OP = [1, 2, 5, 9, 12, 3, 6, 10, 13, 4, 7, 11, 14] # 0-based
COMMON_GOOD_JOINTS_FROM_OP = list(set(COMMON_JOINTS_FROM_OP).intersection(OP_HAND_PICKED_GOOD_JOINTS))
OP_UPPER_BODY_JOINTS = [0,1,2,3,4,5,6,7,8,15,16]
def nan_helper(y):
    """Locate NaNs in a 1-D array and help convert masks to indices.

    Returns a pair (mask, to_indices): `mask` is a boolean array marking
    NaN positions, and `to_indices(logical)` converts such a boolean mask
    into integer indices.

    Example (linear interpolation of NaNs):
        >>> nans, x = nan_helper(y)
        >>> y[nans] = np.interp(x(nans), x(~nans), y[~nans])
    """
    mask = np.isnan(y)

    def to_indices(logical):
        return logical.nonzero()[0]

    return mask, to_indices
class OpenPoseDataCleaner(object):
    """Cleans raw OpenPose keypoint arrays of shape (frames, joints, coords).

    Pipeline (see transform_point): mark missing joints as NaN, optionally
    drop unreliable joints, interpolate gaps over time, and normalize
    coordinates per video.
    """
    def __init__(self, copy=True, filter_joint_idx=OP_HAND_PICKED_GOOD_JOINTS):
        # copy: operate on copies instead of mutating inputs in place
        # filter_joint_idx: joints to keep, or None to keep all
        super().__init__()
        self.copy = copy
        self.filter_joint_idx = filter_joint_idx
    def transform_point(self, p):
        """Clean a point output by OpenPose
        Arguments:
            p {ndarray} -- OpenPose output containing 0s representing unknown joints
        """
        p = self.make_nan(p, self.copy)
        if self.filter_joint_idx is not None :
            p = self.filter_joints(p, self.filter_joint_idx)
        p = self.temporal_interp(p, self.copy)
        p = self.per_video_normalize(p, self.copy)
        return p
    def augment_XY(self, X, Y, factor=5):
        """Take a training set X, Y and augment it by factor of `factor`.
        Augmentation comes from the use of randomized functions like `fill_nan_uniform`
        Arguments:
            X {list of ndarray} -- [description]
            Y {ndarray} -- shape (num_points, num_classes)
        Keyword Arguments:
            factor {int} -- [description] (default: {5})
        """
        Xa = []
        Ya = []
        for p1, y1 in zip(X, Y):
            # each clip is cleaned `factor` times; its label is repeated to match
            Xa.extend([self.transform_point(p1) for _ in range(factor)])
            Ya.extend([y1] * factor)
        Ya = np.stack(Ya)
        assert len(Xa) == Ya.shape[0]
        return Xa, Ya
    @staticmethod
    def make_nan(p, copy=True):
        """
        Convert 0 values (OpenPose's "not detected" marker) to np.nan
        """
        assert isinstance(p, np.ndarray)
        q = p.copy() if copy else p
        q[q == 0] = np.nan
        return q
    @staticmethod
    def has_nan(p):
        """Return True if any coordinate in p is NaN."""
        assert isinstance(p, np.ndarray)
        return np.isnan(p).any()
    @staticmethod
    def count_nan(p):
        """Return the number of NaN coordinates in p."""
        assert isinstance(p, np.ndarray)
        return np.isnan(p).sum()
    @staticmethod
    def filter_joints(p, good_joint_idx):
        """
        Filter a point by only keeping joints in good_joint_idx
        """
        return p[:, good_joint_idx, :].copy()
    @staticmethod
    def temporal_interp(p, copy=True, known_ratio_thresh=0.1):
        """
        If a joint is detected in at least `known_ratio_thresh` of the frames
        in a video, linearly interpolate its NaN coordinates from the frames
        where it is known. Done independently for each joint/coordinate.
        Note: it can still leave some nan-filled columns if a joint is not detected in most frames.
        """
        q = p.copy() if copy else p
        for j in range(q.shape[1]): # joint
            for coord in range(q.shape[2]): # x, y (,z)
                view = q[:, j, coord]
                # skip if too few known values to interpolate from, or nothing to fill
                if np.count_nonzero(~np.isnan(view)) / view.size < known_ratio_thresh or not np.isnan(view).any():
                    continue
                nans, idx = nan_helper(view)
                view[nans]= np.interp(idx(nans), idx(~nans), view[~nans])
        return q
    @staticmethod
    def per_video_normalize(p, copy=True):
        """
        For x,y[, z] independently:
        Normalize into approximately between -0.5~0.5
        """
        q = p.copy() if copy else p
        # use the same demoniator so aspect ratio is preserved
        W = np.nanmax(q[:, :, 0]) - np.nanmin(q[:, :, 0])
        for coord in range(p.shape[2]):
            view = q[:, :, coord]
            a, b = np.nanmin(view), np.nanmax(view)
            view[:] = ((view - a) / W) - 0.5
        return q
    @staticmethod
    def fill_nan_random(p, copy=True, sigma=.5):
        """
        Fill nan values with draws from a normal distribution (std = sigma)
        """
        q = p.copy() if copy else p
        q[np.isnan(q)] = np.random.randn(np.count_nonzero(np.isnan(q))) * sigma
        return q
    @staticmethod
    def fill_nan_uniform(p, copy=True, a=-0.5, b=0.5):
        """
        Fill nan values with draws from a uniform distribution on [a, b)
        """
        q = p.copy() if copy else p
        q[np.isnan(q)] = np.random.random((np.count_nonzero(np.isnan(q)),)) * (b-a) + a
        return q
    @staticmethod
    def fill_nan_constant(p, copy=True, fill_value=0):
        """
        Fill nan values with a constant (fill_value)
        """
        q = p.copy() if copy else p
        q[np.isnan(q)] = fill_value
        return q
#######################################################
## DDNet preprocessing and helper function
#######################################################
class DDNetConfig():
    """Configuration container for DD-Net hyper-parameters."""
    def __init__(self, frame_length=32, num_joints=15, joint_dim=2, num_classes=21, num_filters=16):
        """Store DD-Net configuration.

        Keyword Arguments:
            frame_length {int} -- frames per data point (clip) (default: {32})
            num_joints {int} -- joints detected per frame (default: {15})
            joint_dim {int} -- joint coordinate dimensions, 2 or 3 (default: {2})
            num_classes {int} -- activity classes to recognize (default: {21})
            num_filters {int} -- model width; higher = more accurate but
                more compute (default: {16})
        """
        self.frame_l = frame_length
        self.joint_n = num_joints
        self.joint_d = joint_dim
        self.clc_num = num_classes
        # flattened JCD size: one distance per unordered joint pair
        self.feat_d = num_joints * (num_joints - 1) // 2
        self.filters = num_filters
def infer_DDNet(net, C, batch, *args, **kwargs):
    """Run inference on a batch of clips.

    Arguments:
        net {Model} -- a DDNet instance created by create_DDNet
        C {DDNetConfig} -- a config object
        batch {list or array} -- joint coordinates, one element per clip
        args, kwargs -- forwarded to Model.predict()
    """
    jcd_feats, pose_feats = preprocess_batch(batch, C)
    return net.predict([jcd_feats, pose_feats], *args, **kwargs)
def fit_DDNet(net, C, X, Y, *args, **kwargs):
    """Train the net on X, Y; preprocesses X unless it is already an
    (X0, X1) list/tuple pair. Extra args go to Model.fit()."""
    # exact type check (not isinstance) preserved: only a plain list/tuple
    # is treated as pre-processed input
    if type(X) not in (list, tuple):
        print(f"Preprocessing input {type(X)}")
        X = preprocess_batch(X, C)
    jcd_feats, pose_feats = X
    net.fit([jcd_feats, pose_feats], Y, *args, **kwargs)
def create_DDNet(C):
    """Build a fresh (uncompiled) DD-Net keras model from a DDNetConfig."""
    assert isinstance(C, DDNetConfig)
    return build_DD_Net(C)
def save_DDNet(net, path):
    """Persist the model (architecture + weights) to `path`."""
    net.save(path)
def load_DDNet(path):
    """Load a model saved by save_DDNet; the custom layer functions must be
    supplied via custom_objects for keras to deserialize them."""
    return load_model(path, custom_objects=_custom_objs) # custom_objects is necessary
def preprocess_point(p, C):
    """Preprocess a single point (a clip).
    WARN: NAN-preserving
    Arguments:
        p {ndarray} -- shape = (variable, C.joint_n, C.joint_d)
        C {DDNetConfig} -- A Config object
    Returns:
        ndarray, ndarray -- X0, X1 to input to the net
    """
    assert p.shape[1:] == (C.joint_n, C.joint_d)
    p = zoom(p,target_l=C.frame_l,joints_num=C.joint_n,joints_dim=C.joint_d)
    # interpolate to the fixed number of frames the net expects
    assert p.shape == (C.frame_l, C.joint_n, C.joint_d)
    # X0: Joint Collection Distances; X1: resampled cartesian coordinates
    M = get_CG(p, C)
    return M, p
def preprocess_batch(batch, C, preprocess_point_fn=preprocess_point):
    """Preprocess a batch of points (clips) into net inputs.

    Arguments:
        batch {ndarray or list or tuple} -- elements accepted by preprocess_point
        C {DDNetConfig} -- a DDNetConfig object
    Returns:
        ndarray, ndarray -- stacked X0 (JCD) and X1 (pose) arrays
    """
    assert type(batch) in (np.ndarray, list, tuple)
    pairs = [preprocess_point_fn(p, C) for p in batch]
    X0 = np.stack([jcd for jcd, _ in pairs])
    X1 = np.stack([pose for _, pose in pairs])
    return X0, X1
#######################################################
## Private functions
#######################################################
#######################################################
### Preprocessing functions
#######################################################
# Interpolate the joint coordinates of a group of frames to be target_l frames
def zoom(p, target_l=64, joints_num=25, joints_dim=3):
    """Resample a clip of joint coordinates to exactly target_l frames.

    Each (joint, axis) time series is spline-interpolated to the target
    length and then median-filtered (window 3) to suppress jitter.

    Arguments:
        p {ndarray} -- shape (num_frames, joints_num, joints_dim)
    Returns:
        ndarray -- shape (target_l, joints_num, joints_dim)
    """
    src_len = p.shape[0]
    scale = target_l / src_len
    resampled = np.empty([target_l, joints_num, joints_dim])
    for joint in range(joints_num):
        for axis in range(joints_dim):
            series = inter.zoom(p[:, joint, axis], scale)
            resampled[:, joint, axis] = medfilt(series, 3)
    return resampled
def norm_scale(x):
    """Center x at zero relative to its own NaN-ignoring mean: (x - m) / m."""
    center = np.nanmean(x)
    return (x - center) / center
def get_CG(p, C):
    """Compute per-frame Joint Collection Distances (JCD, see the paper),
    normalized to zero mean.

    Arguments:
        p {ndarray} -- shape (C.frame_l, C.joint_n, C.joint_d)
        C {DDNetConfig} -- configuration
    Returns:
        ndarray -- shape (C.frame_l, C.feat_d)
    """
    # upper-triangular indices select each unordered joint pair once
    upper = np.triu_indices(C.joint_n, 1, C.joint_n)
    per_frame = [
        cdist(p[f], p[f], 'euclidean')[upper]
        for f in range(C.frame_l)
    ]
    return norm_scale(np.stack(per_frame))
#######################################################
### Model architecture
#######################################################
def poses_diff(x):
    # Temporal difference between consecutive frames, then resized back to
    # the original (H, W) with nearest-neighbor interpolation (TF1-era API).
    H, W = x.get_shape()[1],x.get_shape()[2]
    x = tf.subtract(x[:,1:,...],x[:,:-1,...])
    x = tf.image.resize_nearest_neighbor(x,size=[H.value,W.value],align_corners=False) # should not alignment here
    return x
def pose_motion(P,frame_l):
    # Two motion streams from pose input P:
    # slow = frame-to-frame differences at full rate (frame_l steps),
    # fast = differences of every 2nd frame (frame_l/2 steps).
    P_diff_slow = Lambda(lambda x: poses_diff(x))(P)
    P_diff_slow = Reshape((frame_l,-1))(P_diff_slow)
    P_fast = Lambda(lambda x: x[:,::2,...])(P)
    P_diff_fast = Lambda(lambda x: poses_diff(x))(P_fast)
    P_diff_fast = Reshape((int(frame_l/2),-1))(P_diff_fast)
    return P_diff_slow,P_diff_fast
def c1D(x,filters,kernel):
    # Conv1D -> BatchNorm -> LeakyReLU building block (no conv bias,
    # since BatchNorm supplies the shift)
    x = Conv1D(filters, kernel_size=kernel,padding='same',use_bias=False)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.2)(x)
    return x
def block(x,filters):
    # two stacked 3-wide c1D units
    x = c1D(x,filters,3)
    x = c1D(x,filters,3)
    return x
def d1D(x,filters):
    # Dense -> BatchNorm -> LeakyReLU block (dense counterpart of c1D)
    x = Dense(filters,use_bias=False)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.2)(x)
    return x
def build_FM(frame_l=32,joint_n=22,joint_d=2,feat_d=231,filters=16):
    # Feature-extraction sub-model: processes the JCD stream (M) and the
    # slow/fast pose-motion streams (from P), then fuses them with three
    # progressively wider conv blocks.
    M = Input(shape=(frame_l,feat_d))
    P = Input(shape=(frame_l,joint_n,joint_d))
    diff_slow,diff_fast = pose_motion(P,frame_l)
    # JCD branch
    x = c1D(M,filters*2,1)
    x = SpatialDropout1D(0.1)(x)
    x = c1D(x,filters,3)
    x = SpatialDropout1D(0.1)(x)
    x = c1D(x,filters,1)
    x = MaxPooling1D(2)(x)
    x = SpatialDropout1D(0.1)(x)
    # slow-motion branch
    x_d_slow = c1D(diff_slow,filters*2,1)
    x_d_slow = SpatialDropout1D(0.1)(x_d_slow)
    x_d_slow = c1D(x_d_slow,filters,3)
    x_d_slow = SpatialDropout1D(0.1)(x_d_slow)
    x_d_slow = c1D(x_d_slow,filters,1)
    x_d_slow = MaxPool1D(2)(x_d_slow)
    x_d_slow = SpatialDropout1D(0.1)(x_d_slow)
    # fast-motion branch (already half-length, so no pooling here)
    x_d_fast = c1D(diff_fast,filters*2,1)
    x_d_fast = SpatialDropout1D(0.1)(x_d_fast)
    x_d_fast = c1D(x_d_fast,filters,3)
    x_d_fast = SpatialDropout1D(0.1)(x_d_fast)
    x_d_fast = c1D(x_d_fast,filters,1)
    x_d_fast = SpatialDropout1D(0.1)(x_d_fast)
    # fuse the three streams along the channel axis
    x = concatenate([x,x_d_slow,x_d_fast])
    x = block(x,filters*2)
    x = MaxPool1D(2)(x)
    x = SpatialDropout1D(0.1)(x)
    x = block(x,filters*4)
    x = MaxPool1D(2)(x)
    x = SpatialDropout1D(0.1)(x)
    x = block(x,filters*8)
    x = SpatialDropout1D(0.1)(x)
    return Model(inputs=[M,P],outputs=x)
def build_DD_Net(C):
    # Full DD-Net: feature sub-model (build_FM) followed by global pooling
    # and a 2-layer dense classification head.
    M = Input(name='M', shape=(C.frame_l,C.feat_d)) # JCD
    P = Input(name='P', shape=(C.frame_l,C.joint_n,C.joint_d)) # Cartesian
    # M_ = SpatialDropout1D(0.1)(M)
    # P_ = Permute((1,3,2))(SpatialDropout2D(0.1, data_format='channels_last')(Permute((1,3,2))(P)))
    FM = build_FM(C.frame_l,C.joint_n,C.joint_d,C.feat_d,C.filters)
    x = FM([M,P])
    x = GlobalMaxPool1D()(x)
    x = d1D(x,128)
    x = Dropout(0.5)(x)
    x = d1D(x,128)
    x = Dropout(0.5)(x)
    x = Dense(C.clc_num, activation='softmax')(x)
    ######################Self-supervised part
    model = Model(inputs=[M,P],outputs=x)
    return model
# Custom functions that must be passed as `custom_objects` when
# deserializing a saved DD-Net with keras.models.load_model (see load_DDNet).
_custom_objs = {
    'poses_diff': poses_diff,
    'pose_motion': pose_motion,
    'c1D': c1D,
    'block': block,
    'd1D': d1D,
    'build_FM': build_FM,
    'build_DD_Net': build_DD_Net
}
| 3,574 | 4,869 | 330 |
758c8e0bb0787b888692896184c923a6e803e43e | 1,188 | py | Python | tests/flask/test_template.py | odahu/odahuPackager | 35839d257c7a471541026bb9418072110190d29f | [
"ECL-2.0",
"Apache-2.0"
] | 7 | 2020-01-27T12:44:54.000Z | 2021-07-21T02:22:26.000Z | tests/flask/test_template.py | odahu/odahuPackager | 35839d257c7a471541026bb9418072110190d29f | [
"ECL-2.0",
"Apache-2.0"
] | 19 | 2019-11-28T18:45:27.000Z | 2022-01-14T08:41:09.000Z | tests/flask/test_template.py | odahu/odahuPackager | 35839d257c7a471541026bb9418072110190d29f | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # Copyright 2020 EPAM Systems
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from odahuflow.packager.flask.template import render_packager_template
from odahuflow.packager.helpers.constants import ENTRYPOINT_TEMPLATE
| 36 | 80 | 0.740741 | # Copyright 2020 EPAM Systems
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from odahuflow.packager.flask.template import render_packager_template
from odahuflow.packager.helpers.constants import ENTRYPOINT_TEMPLATE
def test_render_packager_template():
    """Every supplied value must appear verbatim in the rendered entrypoint template."""
    values = dict(
        model_location="model_location_value",
        timeout='timeout_value',
        host='host_value',
        port='port_num',
        workers='workers_num',
        threads='threads_value',
        wsgi_handler='wsgi_handler_value'
    )
    rendered_description = render_packager_template(ENTRYPOINT_TEMPLATE, values)
    # Iterate the values directly instead of discarding keys from .items().
    for value in values.values():
        assert value in rendered_description
| 441 | 0 | 23 |
b4d3f9914e42a85151f9ba1a4493866d2ca72d28 | 536 | py | Python | test/ResultsAndPrizes/zodiac/test_zodiac_results_of_the_last_draw.py | FearFactor1/SPA | a05aaa924c5bebb52cd508ebdf7fd3b81c49fac7 | [
"Apache-2.0"
] | 1 | 2019-12-05T06:50:54.000Z | 2019-12-05T06:50:54.000Z | test/ResultsAndPrizes/zodiac/test_zodiac_results_of_the_last_draw.py | FearFactor1/SPA | a05aaa924c5bebb52cd508ebdf7fd3b81c49fac7 | [
"Apache-2.0"
] | null | null | null | test/ResultsAndPrizes/zodiac/test_zodiac_results_of_the_last_draw.py | FearFactor1/SPA | a05aaa924c5bebb52cd508ebdf7fd3b81c49fac7 | [
"Apache-2.0"
] | null | null | null | # зодиак + Результаты последнего тиража
| 41.230769 | 82 | 0.826493 | # зодиак + Результаты последнего тиража
def test_zodiac_results_last_draw(app):
    """Zodiac lottery: open the latest-draw results page and verify the winners report."""
    app.ResultAndPrizes.open_page_results_and_prizes()
    app.ResultAndPrizes.click_game_zodiac()
    app.ResultAndPrizes.click_results_of_the_last_draw()
    app.ResultAndPrizes.button_get_report_winners()
    # Parse the report once and reuse the result; the original parsed it twice
    # (first result was discarded), doubling the scraping work/side effects.
    report_text = app.ResultAndPrizes.parser_report_text_winners()
    assert "РЕЗУЛЬТАТЫ ТИРАЖА" in report_text
    app.ResultAndPrizes.message_id_33_zodiac_results_last_draw()
    app.ResultAndPrizes.comeback_main_page()
066863fb285e7985e22baf254e538f1c8ce1832e | 2,082 | py | Python | src/lobster.py | gdifiore/lobster | ca556ee70ad579b95ac78d525233e0b851cbeb53 | [
"MIT"
] | null | null | null | src/lobster.py | gdifiore/lobster | ca556ee70ad579b95ac78d525233e0b851cbeb53 | [
"MIT"
] | null | null | null | src/lobster.py | gdifiore/lobster | ca556ee70ad579b95ac78d525233e0b851cbeb53 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#
# lobster.py - lobster
#
# (c) gdifiore 2018 <difioregabe@gmail.com>
#
import os
import sys
import json
from lobster_json import *
from bs4 import BeautifulSoup
type = sys.argv[1]
file = sys.argv[2]
theme = sys.argv[3]
if type == "simple":
lobster_data = readJSON(file)
title = getTitle(lobster_data)
header = getHeader(lobster_data)
content= getContent(lobster_data)
writeToHTML(title, header, content)
if type == "blog":
lobster_data = readJSON(file)
title = getTitle(lobster_data)
header = getHeader(lobster_data)
content= getContent(lobster_data)
author = getAuthor(lobster_data)
date = getDate(lobster_data)
writeToHTMLBlog(title, header, content, author, date)
else:
print(sys.argv[1])
print("failure") | 28.520548 | 62 | 0.612872 | #!/usr/bin/env python
#
# lobster.py - lobster
#
# (c) gdifiore 2018 <difioregabe@gmail.com>
#
import os
import sys
import json
from lobster_json import *
from bs4 import BeautifulSoup
type = sys.argv[1]
file = sys.argv[2]
theme = sys.argv[3]
if type == "simple":
def writeToHTML(title, header, content):
html_file = theme + ".html"
path = "themes\\" + html_file
soup = BeautifulSoup(open(path), "html.parser")
for i in soup.find_all('title'):
i.string = title
for i in soup.find_all(class_='header'):
i.string = header
for i in soup.find_all(class_='content'):
i.string = content
#print(soup)
finished = theme + "_finished.html"
with open(finished, "w") as text_file:
text_file.write(str(soup))
lobster_data = readJSON(file)
title = getTitle(lobster_data)
header = getHeader(lobster_data)
content= getContent(lobster_data)
writeToHTML(title, header, content)
if type == "blog":
def writeToHTMLBlog(title, header, content, author, date):
html_file = theme + ".html"
path = "themes\\" + html_file
soup = BeautifulSoup(open(path), "html.parser")
for i in soup.find_all('title'):
i.string = title
for i in soup.find_all(class_='header'):
i.string = header
for i in soup.find_all(class_='content'):
i.string = content
for i in soup.find_all(class_='author'):
i.string = author
for i in soup.find_all(class_='date'):
i.string = date
#print(soup)
finished = theme + "_finished.html"
with open(finished, "w") as text_file:
text_file.write(str(soup))
lobster_data = readJSON(file)
title = getTitle(lobster_data)
header = getHeader(lobster_data)
content= getContent(lobster_data)
author = getAuthor(lobster_data)
date = getDate(lobster_data)
writeToHTMLBlog(title, header, content, author, date)
else:
print(sys.argv[1])
print("failure") | 1,234 | 0 | 52 |
0161925bdf1e38b609660308a47257b50d5a5327 | 46 | py | Python | reelib/__init__.py | reeve0930/reelib | 0010c0179448dd4d3f3a82280beade4936bab8ff | [
"MIT"
] | null | null | null | reelib/__init__.py | reeve0930/reelib | 0010c0179448dd4d3f3a82280beade4936bab8ff | [
"MIT"
] | null | null | null | reelib/__init__.py | reeve0930/reelib | 0010c0179448dd4d3f3a82280beade4936bab8ff | [
"MIT"
] | null | null | null | from . import timestamp
from . import contjson | 23 | 23 | 0.804348 | from . import timestamp
from . import contjson | 0 | 0 | 0 |
12aa58faa493370b575a7ad6f15f43ea90c3f41c | 11,111 | py | Python | Pyrado/tests/test_sampling.py | jacarvalho/SimuRLacra | a6c982862e2ab39a9f65d1c09aa59d9a8b7ac6c5 | [
"BSD-3-Clause"
] | null | null | null | Pyrado/tests/test_sampling.py | jacarvalho/SimuRLacra | a6c982862e2ab39a9f65d1c09aa59d9a8b7ac6c5 | [
"BSD-3-Clause"
] | null | null | null | Pyrado/tests/test_sampling.py | jacarvalho/SimuRLacra | a6c982862e2ab39a9f65d1c09aa59d9a8b7ac6c5 | [
"BSD-3-Clause"
] | null | null | null | import pytest
import random
import time
from torch.distributions.multivariate_normal import MultivariateNormal
from matplotlib import pyplot as plt
from pyrado.environment_wrappers.domain_randomization import DomainRandWrapperLive
from pyrado.environments.pysim.ball_on_beam import BallOnBeamSim
from pyrado.environments.pysim.quanser_ball_balancer import QBallBalancerSim
from pyrado.policies.fnn import FNNPolicy
from pyrado.sampling.data_format import to_format
from pyrado.sampling.hyper_sphere import sample_from_hyper_sphere_surface
from pyrado.sampling.parallel_sampler import ParallelSampler
from pyrado.sampling.parameter_exploration_sampler import ParameterExplorationSampler
from pyrado.sampling.rollout import rollout
from pyrado.sampling.step_sequence import StepSequence
from pyrado.sampling.sampler_pool import *
from pyrado.sampling.sequences import *
from pyrado.sampling.bootstrapping import bootstrap_ci
from pyrado.policies.linear import LinearPolicy
from pyrado.policies.features import *
from pyrado.sampling.cvar_sampler import select_cvar
from pyrado.utils.data_types import RenderMode
from tests.conftest import m_needs_cuda
@pytest.mark.parametrize(
'arg', [
[1],
[2, 3],
[4, 6, 2, 88, 3, 45, 7, 21, 22, 23, 24, 44, 45, 56, 67, 78, 89],
]
)
@pytest.mark.sampling
@pytest.mark.parametrize(
'n_threads', [1, 2, 4]
)
@pytest.mark.parametrize(
'min_samples', [10, 20, 40]
)
@pytest.mark.sampling
@pytest.mark.parametrize(
'n_threads', [1, 2, 4]
)
@pytest.mark.parametrize(
'min_samples', [10, 20, 40]
)
@pytest.mark.parametrize(
'min_runs', [10, 20, 40]
)
@pytest.mark.sampling
@pytest.mark.parametrize(
'data_type', [
(None, None), (to.int32, np.int32),
]
)
@pytest.mark.sampling
@pytest.mark.parametrize(
'epsilon', [
1, 0.5, 0.1,
]
)
@pytest.mark.parametrize(
'num_ro', [
10, 20,
]
)
@pytest.mark.sampling
@pytest.mark.parametrize(
'num_dim, method', [
(1, 'uniform'), (1, 'uniform'),
(3, 'uniform'), (3, 'normal'), (3, 'Marsaglia'),
(4, 'uniform'), (4, 'normal'), (4, 'Marsaglia'),
(15, 'uniform'), (15, 'normal')
]
)
@pytest.mark.sampling
@pytest.mark.parametrize(
'env, policy', [
(BallOnBeamSim(dt=0.02, max_steps=100),
LinearPolicy(BallOnBeamSim(dt=0.02, max_steps=100).spec,
FeatureStack([const_feat, identity_feat, squared_feat]))),
(QBallBalancerSim(dt=0.02, max_steps=100),
LinearPolicy(QBallBalancerSim(dt=0.02, max_steps=100).spec,
FeatureStack([const_feat, identity_feat, squared_feat])))
], ids=['bob_linpol', 'qbb_linpol']
)
@pytest.mark.parametrize(
'mean, cov', [
(to.tensor([5., 7.]), to.tensor([[2., 0.], [0., 2.]])),
], ids=['2dim']
)
@pytest.mark.sampling
@pytest.mark.visualization
@pytest.mark.parametrize(
'sequence, x_init', [
# (sequence_const, np.array([2])),
# (sequence_plus_one, np.array([2])),
# (sequence_add_init, np.array([2])),
# (sequence_rec_double, np.array([2])),
# (sequence_rec_sqrt, np.array([2])),
# (sequence_nlog2, np.array([2])),
(sequence_const, np.array([1, 2, 3])),
(sequence_plus_one, np.array([1, 2, 3])),
(sequence_add_init, np.array([1, 2, 3])),
(sequence_rec_double, np.array([1, 2, 3])),
(sequence_rec_sqrt, np.array([1, 2, 3])),
(sequence_nlog2, np.array([1, 2, 3])),
]
)
@m_needs_cuda
| 32.488304 | 117 | 0.676447 | import pytest
import random
import time
from torch.distributions.multivariate_normal import MultivariateNormal
from matplotlib import pyplot as plt
from pyrado.environment_wrappers.domain_randomization import DomainRandWrapperLive
from pyrado.environments.pysim.ball_on_beam import BallOnBeamSim
from pyrado.environments.pysim.quanser_ball_balancer import QBallBalancerSim
from pyrado.policies.fnn import FNNPolicy
from pyrado.sampling.data_format import to_format
from pyrado.sampling.hyper_sphere import sample_from_hyper_sphere_surface
from pyrado.sampling.parallel_sampler import ParallelSampler
from pyrado.sampling.parameter_exploration_sampler import ParameterExplorationSampler
from pyrado.sampling.rollout import rollout
from pyrado.sampling.step_sequence import StepSequence
from pyrado.sampling.sampler_pool import *
from pyrado.sampling.sequences import *
from pyrado.sampling.bootstrapping import bootstrap_ci
from pyrado.policies.linear import LinearPolicy
from pyrado.policies.features import *
from pyrado.sampling.cvar_sampler import select_cvar
from pyrado.utils.data_types import RenderMode
from tests.conftest import m_needs_cuda
@pytest.mark.parametrize(
'arg', [
[1],
[2, 3],
[4, 6, 2, 88, 3, 45, 7, 21, 22, 23, 24, 44, 45, 56, 67, 78, 89],
]
)
def test_sampler_pool(arg):
    """Each item mapped through the pool must come back doubled, order preserved."""
    pool = SamplerPool(len(arg))
    result = pool.invoke_all_map(_cb_test_eachhandler, arg)
    pool.stop()
    # A list comprehension is clearer than map() with a lambda.
    assert result == [x * 2 for x in arg]
def _cb_test_eachhandler(G, arg):
    # Worker callback for SamplerPool.invoke_all_map. `G` is the per-worker
    # global state supplied by the pool (unused here). The random 1-5 s sleep
    # makes workers finish out of order, exercising the pool's result ordering.
    time.sleep(random.randint(1, 5))
    return arg*2
def _cb_test_collecthandler(G):
nsample = random.randint(5, 15)
return nsample, nsample
@pytest.mark.sampling
@pytest.mark.parametrize(
'n_threads', [1, 2, 4]
)
@pytest.mark.parametrize(
'min_samples', [10, 20, 40]
)
def test_sampler_collect(n_threads, min_samples):
    """run_collect must gather at least `min_samples` samples using `n_threads` workers."""
    pool = SamplerPool(n_threads)
    # Run the collector; cr holds the per-run results, cn the reported total count.
    cr, cn = pool.run_collect(min_samples, _cb_test_collecthandler)
    pool.stop()
    assert min_samples <= cn
    assert min_samples <= sum(cr)
@pytest.mark.sampling
@pytest.mark.parametrize(
'n_threads', [1, 2, 4]
)
@pytest.mark.parametrize(
'min_samples', [10, 20, 40]
)
@pytest.mark.parametrize(
'min_runs', [10, 20, 40]
)
def test_sampler_collect_minrun(n_threads, min_samples, min_runs):
    """Like test_sampler_collect, but additionally enforce a minimum number of runs."""
    pool = SamplerPool(n_threads)
    # Run the collector; cr holds the per-run results, cn the reported total count.
    cr, cn = pool.run_collect(min_samples, _cb_test_collecthandler, min_runs=min_runs)
    pool.stop()
    assert min_samples <= cn
    assert min_samples <= sum(cr)
    # The collector must have performed at least the requested number of runs.
    assert min_runs <= len(cr)
@pytest.mark.sampling
@pytest.mark.parametrize(
'data_type', [
(None, None), (to.int32, np.int32),
]
)
def test_to_format(data_type):
    """Conversion between numpy and torch must honor the requested target dtype.

    ``data_type`` is a (torch_dtype, numpy_dtype) pair; ``None`` means
    "keep the source dtype" (float64 on both sides here).
    """
    torch_target, numpy_target = data_type
    src_ndarray = np.random.rand(3, 2).astype(dtype=np.float64)
    src_tensor = to.rand(3, 2).type(dtype=to.float64)
    # numpy -> torch: None must not change the dtype.
    as_tensor = to_format(src_ndarray, 'torch', torch_target)
    assert isinstance(as_tensor, to.Tensor)
    assert as_tensor.dtype == (to.float64 if torch_target is None else torch_target)
    # torch -> numpy: None must not change the dtype.
    as_ndarray = to_format(src_tensor, 'numpy', numpy_target)
    assert isinstance(as_ndarray, np.ndarray)
    assert as_ndarray.dtype == (np.float64 if numpy_target is None else numpy_target)
@pytest.mark.sampling
@pytest.mark.parametrize(
'epsilon', [
1, 0.5, 0.1,
]
)
@pytest.mark.parametrize(
'num_ro', [
10, 20,
]
)
def test_select_cvar(epsilon, num_ro):
    """select_cvar must pick exactly the epsilon-fraction of worst-return rollouts."""
    # Rollouts whose discounted return (gamma = 1) equals their index.
    rollouts = [
        StepSequence(rewards=[idx], observations=[idx], actions=[idx])
        for idx in range(num_ro)
    ]
    # Feed the rollouts in random order; the selection must not depend on ordering.
    shuffled = rollouts.copy()
    random.shuffle(shuffled)
    selected = select_cvar(shuffled, epsilon, 1)
    # Mean return over the selected subset ...
    mean_selected = sum(ro.discounted_return(1) for ro in selected) / len(selected)
    # ... must equal the mean of the epsilon-quantile smallest indices.
    n_quantile = int(num_ro * epsilon)
    expected = sum(range(n_quantile)) / n_quantile
    assert mean_selected == expected
@pytest.mark.sampling
@pytest.mark.parametrize(
'num_dim, method', [
(1, 'uniform'), (1, 'uniform'),
(3, 'uniform'), (3, 'normal'), (3, 'Marsaglia'),
(4, 'uniform'), (4, 'normal'), (4, 'Marsaglia'),
(15, 'uniform'), (15, 'normal')
]
)
def test_sample_from_unit_sphere_surface(num_dim, method):
    """A sample drawn from the hyper-sphere surface must have (approximately) unit norm."""
    sample = sample_from_hyper_sphere_surface(num_dim, method)
    norm = to.norm(sample, p=2)
    assert 0.95 <= norm <= 1.05
@pytest.mark.sampling
@pytest.mark.parametrize(
'env, policy', [
(BallOnBeamSim(dt=0.02, max_steps=100),
LinearPolicy(BallOnBeamSim(dt=0.02, max_steps=100).spec,
FeatureStack([const_feat, identity_feat, squared_feat]))),
(QBallBalancerSim(dt=0.02, max_steps=100),
LinearPolicy(QBallBalancerSim(dt=0.02, max_steps=100).spec,
FeatureStack([const_feat, identity_feat, squared_feat])))
], ids=['bob_linpol', 'qbb_linpol']
)
def test_rollout_wo_exploration(env, policy):
    """A plain (exploration-free) rollout must yield a StepSequence of bounded length."""
    ro = rollout(env, policy, render_mode=RenderMode())  # default RenderMode: no rendering
    assert isinstance(ro, StepSequence)
    # The rollout may terminate early, but never exceed the env's step limit.
    assert len(ro) <= env.max_steps
@pytest.mark.parametrize(
'mean, cov', [
(to.tensor([5., 7.]), to.tensor([[2., 0.], [0., 2.]])),
], ids=['2dim']
)
def test_reparametrization_trick(mean, cov):
    """Manual reparametrization must reproduce PyTorch's MultivariateNormal sampling."""
    for seed in range(10):
        distr = MultivariateNormal(mean, cov)
        # Reference sample drawn via the distribution class.
        to.manual_seed(seed)
        sample_ref = distr.sample()
        # Sampling again from the same seed (PyTorch reparametrizes internally).
        to.manual_seed(seed)
        sample_torch = distr.sample()
        # Reparametrization by hand: mean + L @ eps, L = lower Cholesky factor of cov.
        to.manual_seed(seed)
        sample_manual = mean + to.cholesky(cov, upper=False).mv(to.randn_like(mean))
        to.testing.assert_allclose(sample_ref, sample_torch)
        to.testing.assert_allclose(sample_ref, sample_manual)
        to.testing.assert_allclose(sample_torch, sample_manual)
@pytest.mark.sampling
@pytest.mark.visualization
@pytest.mark.parametrize(
'sequence, x_init', [
# (sequence_const, np.array([2])),
# (sequence_plus_one, np.array([2])),
# (sequence_add_init, np.array([2])),
# (sequence_rec_double, np.array([2])),
# (sequence_rec_sqrt, np.array([2])),
# (sequence_nlog2, np.array([2])),
(sequence_const, np.array([1, 2, 3])),
(sequence_plus_one, np.array([1, 2, 3])),
(sequence_add_init, np.array([1, 2, 3])),
(sequence_rec_double, np.array([1, 2, 3])),
(sequence_rec_sqrt, np.array([1, 2, 3])),
(sequence_nlog2, np.array([1, 2, 3])),
]
)
def test_sequences(sequence, x_init):
    """Smoke test: unroll a sequence generator and plot one stem series per init value."""
    # Unroll 5 iterations of the sequence for every initial value.
    _, unrolled = sequence(x_init, 5, float)
    for col in range(unrolled.shape[1]):
        plt.stem(unrolled[:, col], label=str(x_init[col]))
    plt.legend()
    # plt.show()
def test_bootsrapping():
    """Exercise bootstrap_ci on several samples and print results for manual inspection.

    NOTE(review): this is exploratory/print-driven — it only checks that the calls
    run, not their numerical output (no asserts besides not raising).
    """
    # Why you should operate on the deltas and not directly on the statistic from the resampled data
    sample = np.array([30, 37, 36, 43, 42, 43, 43, 46, 41, 42])
    mean = np.mean(sample)
    print(mean)
    m, ci = bootstrap_ci(sample, np.mean, num_reps=20, alpha=0.1, ci_sides=2, seed=123)
    print(m, ci)
    # Manual resampling with the same seed, computing the percentile CI directly
    # on the resampled means for comparison.
    np.random.seed(123)
    resampled = np.random.choice(sample, (sample.shape[0], 20), replace=True)
    means = np.apply_along_axis(np.mean, 0, resampled)
    print(np.sort(means))
    ci_lo, ci_up = np.percentile(means, [100*0.05, 100*0.95])
    print(ci_lo, ci_up)
    x = np.random.normal(10, 1, 40)
    # x = np.random.uniform(5, 15, 20)
    # x = np.random.poisson(5, 30)
    np.random.seed(1)
    # print(bs.bootstrap(x, stat_func=bs_stats.mean))
    np.random.seed(1)
    m, ci = bootstrap_ci(x, np.mean, num_reps=1000, alpha=0.05, ci_sides=2, studentized=False, bias_correction=False)
    print('[use_t_for_ci=False] mean: ', m)
    print('[use_t_for_ci=False] CI: ', ci)
    np.random.seed(1)
    m, ci = bootstrap_ci(x, np.mean, num_reps=1000, alpha=0.05, ci_sides=2, studentized=False, bias_correction=True)
    print('[bias_correction=True] mean: ', m)
    # One-sided intervals, with and without studentization.
    m, ci = bootstrap_ci(x, np.mean, num_reps=2*384, alpha=0.05, ci_sides=1, studentized=False)
    print('[use_t_for_ci=False] mean: ', m)
    print('[use_t_for_ci=False] CI: ', ci)
    m, ci = bootstrap_ci(x, np.mean, num_reps=2*384, alpha=0.05, ci_sides=1, studentized=True)
    print('[use_t_for_ci=True] mean: ', m)
    print('[use_t_for_ci=True] CI: ', ci)
    print('Matlab example:')
    # https://de.mathworks.com/help/stats/bootci.htmls
    x_matlab = np.random.normal(1, 1, 40)
    m, ci = bootstrap_ci(x_matlab, np.mean, num_reps=2000, alpha=0.05, ci_sides=2, studentized=False)
    print('[use_t_for_ci=False] mean: ', m)
    print('[use_t_for_ci=False] CI: ', ci)
    m, ci = bootstrap_ci(x_matlab, np.mean, num_reps=2000, alpha=0.05, ci_sides=2, studentized=True)
    print('[use_t_for_ci=True] mean: ', m)
    print('[use_t_for_ci=True] CI: ', ci)
def test_param_expl_sampler(default_bob, bob_pert):
    """For each policy-parameter set the sampler must produce the requested number of
    rollouts, and rollout i must share domain params and init state across all sets."""
    # Add randomizer
    env = DomainRandWrapperLive(default_bob, bob_pert)
    # Use a simple policy
    policy = FNNPolicy(env.spec, hidden_sizes=[8], hidden_nonlin=to.tanh)
    # Create the sampler
    num_rollouts_per_param = 12
    sampler = ParameterExplorationSampler(
        env,
        policy,
        num_envs=1,
        num_rollouts_per_param=num_rollouts_per_param,
    )
    # Use some random parameters
    num_ps = 12
    params = to.rand(num_ps, policy.num_param)
    # Do the sampling
    samples = sampler.sample(params)
    assert num_ps == len(samples)
    for ps in samples:
        assert len(ps.rollouts) == num_rollouts_per_param
    # Compare rollouts that should be matching
    for ri in range(num_rollouts_per_param):
        # Use the first paramset as pivot
        piter = iter(samples)
        pivot = next(piter).rollouts[ri]
        # Iterate through others
        for ops in piter:
            ro = ops.rollouts[ri]
            # Compare domain params
            assert pivot.rollout_info['domain_param'] == ro.rollout_info['domain_param']
            # Compare first observation a.k.a. init state
            assert pivot[0].observation == pytest.approx(ro[0].observation)
@m_needs_cuda
def test_cuda_sampling_w_dr(default_bob, bob_pert):
    """Smoke test: parallel sampling with a CUDA policy and live domain randomization."""
    # Add randomizer
    env = DomainRandWrapperLive(default_bob, bob_pert)
    # Use a simple policy, evaluated on the GPU
    policy = FNNPolicy(env.spec, hidden_sizes=[8], hidden_nonlin=to.tanh, use_cuda=True)
    # Create the sampler
    sampler = ParallelSampler(env, policy, num_envs=2, min_rollouts=10)
    samples = sampler.sample()
    assert samples is not None
| 7,262 | 0 | 312 |
b41b891967812c7db46e79974d3319938dfddaf5 | 573 | py | Python | lib/nms_cython/setup.py | PJunhyuk/exercise-pose-guide | a2793ede6b150e5ae20185e14f8b4ad3a08f4196 | [
"Apache-2.0"
] | 161 | 2018-02-22T15:15:47.000Z | 2022-02-10T16:40:06.000Z | Chapter04/lib/nms_cython/setup.py | mayurmorin/Computer-Vision-Projects-with-OpenCV-and-Python-3 | bf9041d2804fd76d6a59a8b6f2feb8d50f80c9d3 | [
"MIT"
] | 15 | 2018-03-01T23:18:00.000Z | 2021-05-15T06:23:15.000Z | Chapter04/lib/nms_cython/setup.py | mayurmorin/Computer-Vision-Projects-with-OpenCV-and-Python-3 | bf9041d2804fd76d6a59a8b6f2feb8d50f80c9d3 | [
"MIT"
] | 41 | 2018-03-01T13:03:54.000Z | 2022-02-17T14:32:22.000Z | from setuptools import setup
from Cython.Build import cythonize
from distutils.extension import Extension
from sys import platform as _platform
import os
import numpy as np
#openmp_arg = '-fopenmp'
#if _platform == "win32":
# openmp_arg = '-openmp'
extensions = [
Extension(
'nms_grid', ['nms_grid.pyx'],
language="c++",
include_dirs=[np.get_include(), '.','include'],
extra_compile_args=['-DILOUSESTL','-DIL_STD','-std=c++11','-O3'],
extra_link_args=['-std=c++11']
)
]
setup(
name = 'nms_grid',
ext_modules = cythonize(extensions)
)
| 22.038462 | 69 | 0.673647 | from setuptools import setup
from Cython.Build import cythonize
from distutils.extension import Extension
from sys import platform as _platform
import os
import numpy as np
#openmp_arg = '-fopenmp'
#if _platform == "win32":
# openmp_arg = '-openmp'
# Cython extension for the grid-based NMS implementation; compiled as C++11
# with numpy headers and local include/ on the include path.
extensions = [
    Extension(
        'nms_grid', ['nms_grid.pyx'],
        language="c++",
        include_dirs=[np.get_include(), '.','include'],
        extra_compile_args=['-DILOUSESTL','-DIL_STD','-std=c++11','-O3'],
        extra_link_args=['-std=c++11']
    )
]
setup(
    name = 'nms_grid',
    ext_modules = cythonize(extensions)
)
85fd73311ab29b216fc7b92dd55084fe9aacf91e | 9,815 | py | Python | test/test_union.py | Mortal/scalgoproto | b9acfbdcf7cc75d6b673fde64ecd94dbe56738a8 | [
"MIT"
] | null | null | null | test/test_union.py | Mortal/scalgoproto | b9acfbdcf7cc75d6b673fde64ecd94dbe56738a8 | [
"MIT"
] | 26 | 2018-11-18T19:38:09.000Z | 2020-04-14T03:31:06.000Z | test/test_union.py | Mortal/scalgoproto | b9acfbdcf7cc75d6b673fde64ecd94dbe56738a8 | [
"MIT"
] | 2 | 2019-01-03T16:08:15.000Z | 2019-09-23T05:16:55.000Z | # -*- mode: python: return False tab-width: 4: return False indent-tabs-mode: nil: return False python-indent-offset: 4: return False coding: utf-8 -*-
import sys
import scalgoproto
import union
from test_base import require2, require, read_in, validate_out, get_v, require_some
if __name__ == "__main__":
main()
| 30.576324 | 151 | 0.576872 | # -*- mode: python: return False tab-width: 4: return False indent-tabs-mode: nil: return False python-indent-offset: 4: return False coding: utf-8 -*-
import sys
import scalgoproto
import union
from test_base import require2, require, read_in, validate_out, get_v, require_some
def for_copy() -> union.Table3In:
    """Build a Table3 message exercising every union member (v1..v10), serialize it,
    and return the read-back view; used as the copy source in test_out_union."""
    w = scalgoproto.Writer()
    root = w.construct_table(union.Table3Out)
    # Each vN fills both the `a` and `b` slot of union member N with "c"-prefixed data.
    v1 = root.add_v1()
    v1.a.v1 = "ctext1"
    v1.b.v1 = "ctext2"
    v2 = root.add_v2()
    v2.a.v2 = b"cbytes1"
    v2.b.v2 = b"cbytes2"
    v3 = root.add_v3()
    v3.a.add_v3().a = 101
    v3.b.add_v3().a = 102
    v4 = root.add_v4()
    v4.a.add_v4().a = 103
    v4.b.add_v4().a = 104
    v5 = root.add_v5()
    v5.a.add_v5(1)[0] = "ctext3"
    v5.b.add_v5(1)[0] = "ctext4"
    v6 = root.add_v6()
    v6.a.add_v6(1)[0] = b"cbytes3"
    v6.b.add_v6(1)[0] = b"cbytes4"
    v7 = root.add_v7()
    v7.a.add_v7(1).add(0).a = 105
    v7.b.add_v7(1).add(0).a = 106
    v8 = root.add_v8()
    v8.a.add_v8(1).add(0).a = 107
    v8.b.add_v8(1).add(0).a = 108
    v9 = root.add_v9()
    v9.a.add_v9(1)[0] = 109
    v9.b.add_v9(1)[0] = 110
    v10 = root.add_v10()
    v10.a.add_v10(1)[0] = True
    v10.b.add_v10(1)[0] = True
    # Serialize and immediately re-read so callers get an In-view to copy from.
    d = w.finalize(root)
    r = scalgoproto.Reader(d)
    return r.root(union.Table3In)
def test_out_union(path: str) -> bool:
    """Write a Table3 message covering every union member and every assignment path
    (inline value, pre-constructed object, and copy from an In-view), then compare
    the serialized bytes against the reference file at `path`."""
    i = for_copy()  # In-view used as copy source for the d/e slots below
    w = scalgoproto.Writer()
    root = w.construct_table(union.Table3Out)
    # For each vN: a/b set inline, c set from a pre-constructed object, d/e copied.
    v1 = root.add_v1()
    v1.a.v1 = "text1"
    v1.b.v1 = "text2"
    v1.c.v1 = w.construct_text("text3")
    v1.d.v1 = i.v1.a.v1
    v1.e.v1 = i.v1.b.v1
    v2 = root.add_v2()
    v2.a.v2 = b"bytes1"
    v2.b.v2 = b"bytes2"
    v2.c.v2 = w.construct_bytes(b"bytes3")
    v2.d.v2 = i.v2.a.v2
    v2.e.v2 = i.v2.b.v2
    v3 = root.add_v3()
    v3.a.add_v3().a = 1
    v3.b.add_v3().a = 2
    t1 = w.construct_table(union.Table1Out)
    t1.a = 3
    v3.c.v3 = t1
    v3.d.v3 = i.v3.a.v3
    v3.e.v3 = i.v3.b.v3
    v4 = root.add_v4()
    v4.a.add_v4().a = 4
    v4.b.add_v4().a = 5
    t4 = w.construct_table(union.Union1V4Out)
    t4.a = 6
    v4.c.v4 = t4
    v4.d.v4 = i.v4.a.v4
    v4.e.v4 = i.v4.b.v4
    v5 = root.add_v5()
    v5.a.add_v5(1)[0] = "text4"
    v5.b.add_v5(1)[0] = "text5"
    t5 = w.construct_text_list(1)
    t5[0] = "text6"
    v5.c.v5 = t5
    v5.d.v5 = i.v5.a.v5
    v5.e.v5 = i.v5.b.v5
    v6 = root.add_v6()
    v6.a.add_v6(1)[0] = b"bytes4"
    tt6 = v6.b.add_v6(1)
    tt6[0] = w.construct_bytes(b"bytes5")
    t6 = w.construct_bytes_list(1)
    t6[0] = w.construct_bytes(b"bytes6")
    v6.c.v6 = t6
    v6.d.v6 = i.v6.a.v6
    v6.e.v6 = i.v6.b.v6
    v7 = root.add_v7()
    v7.a.add_v7(1).add(0).a = 7
    v7.b.add_v7(1).add(0).a = 8
    t7 = w.construct_table_list(union.Table1Out, 1)
    t7.add(0).a = 9
    v7.c.v7 = t7
    v7.d.v7 = i.v7.a.v7
    v7.e.v7 = i.v7.b.v7
    v8 = root.add_v8()
    v8.a.add_v8(1).add(0).a = 10
    v8.b.add_v8(1).add(0).a = 11
    t8 = w.construct_table_list(union.Union1V8Out, 1)
    t8.add(0).a = 12
    v8.c.v8 = t8
    v8.d.v8 = i.v8.a.v8
    v8.e.v8 = i.v8.b.v8
    v9 = root.add_v9()
    v9.a.add_v9(1)[0] = 13
    v9.b.add_v9(1)[0] = 14
    t9 = w.construct_uint32_list(1)
    t9[0] = 15
    v9.c.v9 = t9
    v9.d.v9 = i.v9.a.v9
    v9.e.v9 = i.v9.b.v9
    v10 = root.add_v10()
    v10.a.add_v10(1)[0] = True
    v10.b.add_v10(1)[0] = False
    t10 = w.construct_bool_list(1)
    t10[0] = True
    v10.c.v10 = t10
    v10.d.v10 = i.v10.a.v10
    v10.e.v10 = i.v10.b.v10
    data = w.finalize(root)
    return validate_out(data, path)
def test_in_union(path: str) -> bool:
    """Read the reference message from `path` and verify every union member holds the
    expected type and value. Returns True on success, False on the first mismatch
    (require_some/require2 return a truthy value when a check fails)."""
    o = read_in(path)
    r = scalgoproto.Reader(o)
    i = r.root(union.Table3In)
    print(i)
    # v1: text members
    if require_some(i.v1):
        return False
    v1 = i.v1
    if require2(v1.a is not None and v1.a.is_v1, v1.a.v1, "text1"):
        return False
    if require2(v1.b is not None and v1.b.is_v1, v1.b.v1, "text2"):
        return False
    if require2(v1.c is not None and v1.c.is_v1, v1.c.v1, "text3"):
        return False
    if require2(v1.d is not None and v1.d.is_v1, v1.d.v1, "ctext1"):
        return False
    if require2(v1.e is not None and v1.e.is_v1, v1.e.v1, "ctext2"):
        return False
    # v2: bytes members
    if require_some(i.v2):
        return False
    v2 = i.v2
    if require2(v2.a is not None and v2.a.is_v2, v2.a.v2, b"bytes1"):
        return False
    if require2(v2.b is not None and v2.b.is_v2, v2.b.v2, b"bytes2"):
        return False
    if require2(v2.c is not None and v2.c.is_v2, v2.c.v2, b"bytes3"):
        return False
    if require2(v2.d is not None and v2.d.is_v2, v2.d.v2, b"cbytes1"):
        return False
    if require2(v2.e is not None and v2.e.is_v2, v2.e.v2, b"cbytes2"):
        return False
    # v3/v4: nested table members
    if require_some(i.v3):
        return False
    v3 = i.v3
    if require2(v3.a is not None and v3.a.is_v3, v3.a.v3.a, 1):
        return False
    if require2(v3.b is not None and v3.b.is_v3, v3.b.v3.a, 2):
        return False
    if require2(v3.c is not None and v3.c.is_v3, v3.c.v3.a, 3):
        return False
    if require2(v3.d is not None and v3.d.is_v3, v3.d.v3.a, 101):
        return False
    if require2(v3.e is not None and v3.e.is_v3, v3.e.v3.a, 102):
        return False
    if require_some(i.v4):
        return False
    v4 = i.v4
    if require2(v4.a is not None and v4.a.is_v4, v4.a.v4.a, 4):
        return False
    if require2(v4.b is not None and v4.b.is_v4, v4.b.v4.a, 5):
        return False
    if require2(v4.c is not None and v4.c.is_v4, v4.c.v4.a, 6):
        return False
    if require2(v4.d is not None and v4.d.is_v4, v4.d.v4.a, 103):
        return False
    if require2(v4.e is not None and v4.e.is_v4, v4.e.v4.a, 104):
        return False
    # v5/v6: text-list and bytes-list members (each expected to have length 1)
    if require_some(i.v5):
        return False
    v5 = i.v5
    if require2(v5.a is not None and v5.a.is_v5 and len(v5.a.v5) == 1, v5.a.v5[0], "text4"):
        return False
    if require2(v5.b is not None and v5.b.is_v5 and len(v5.b.v5) == 1, v5.b.v5[0], "text5"):
        return False
    if require2(v5.c is not None and v5.c.is_v5 and len(v5.c.v5) == 1, v5.c.v5[0], "text6"):
        return False
    if require2(v5.d is not None and v5.d.is_v5 and len(v5.d.v5) == 1, v5.d.v5[0], "ctext3"):
        return False
    if require2(v5.e is not None and v5.e.is_v5 and len(v5.e.v5) == 1, v5.e.v5[0], "ctext4"):
        return False
    if require_some(i.v6):
        return False
    v6 = i.v6
    if require2(v6.a is not None and v6.a.is_v6 and len(v6.a.v6) == 1, v6.a.v6[0], b"bytes4"):
        return False
    if require2(v6.b is not None and v6.b.is_v6 and len(v6.b.v6) == 1, v6.b.v6[0], b"bytes5"):
        return False
    if require2(v6.c is not None and v6.c.is_v6 and len(v6.c.v6) == 1, v6.c.v6[0], b"bytes6"):
        return False
    if require2(v6.d is not None and v6.d.is_v6 and len(v6.d.v6) == 1, v6.d.v6[0], b"cbytes3"):
        return False
    if require2(v6.e is not None and v6.e.is_v6 and len(v6.e.v6) == 1, v6.e.v6[0], b"cbytes4"):
        return False
    # v7/v8: table-list members
    if require_some(i.v7):
        return False
    v7 = i.v7
    if require2(v7.a is not None and v7.a.is_v7 and len(v7.a.v7) == 1, v7.a.v7[0].a, 7):
        return False
    if require2(v7.b is not None and v7.b.is_v7 and len(v7.b.v7) == 1, v7.b.v7[0].a, 8):
        return False
    if require2(v7.c is not None and v7.c.is_v7 and len(v7.c.v7) == 1, v7.c.v7[0].a, 9):
        return False
    if require2(v7.d is not None and v7.d.is_v7 and len(v7.d.v7) == 1, v7.d.v7[0].a, 105):
        return False
    if require2(v7.e is not None and v7.e.is_v7 and len(v7.e.v7) == 1, v7.e.v7[0].a, 106):
        return False
    if require_some(i.v8):
        return False
    v8 = i.v8
    if require2(v8.a is not None and v8.a.is_v8 and len(v8.a.v8) == 1, v8.a.v8[0].a, 10):
        return False
    if require2(v8.b is not None and v8.b.is_v8 and len(v8.b.v8) == 1, v8.b.v8[0].a, 11):
        return False
    if require2(v8.c is not None and v8.c.is_v8 and len(v8.c.v8) == 1, v8.c.v8[0].a, 12):
        return False
    if require2(v8.d is not None and v8.d.is_v8 and len(v8.d.v8) == 1, v8.d.v8[0].a, 107):
        return False
    if require2(v8.e is not None and v8.e.is_v8 and len(v8.e.v8) == 1, v8.e.v8[0].a, 108):
        return False
    # v9/v10: uint32-list and bool-list members
    if require_some(i.v9):
        return False
    v9 = i.v9
    if require2(v9.a is not None and v9.a.is_v9 and len(v9.a.v9) == 1, v9.a.v9[0], 13):
        return False
    if require2(v9.b is not None and v9.b.is_v9 and len(v9.b.v9) == 1, v9.b.v9[0], 14):
        return False
    if require2(v9.c is not None and v9.c.is_v9 and len(v9.c.v9) == 1, v9.c.v9[0], 15):
        return False
    if require2(v9.d is not None and v9.d.is_v9 and len(v9.d.v9) == 1, v9.d.v9[0], 109):
        return False
    if require2(v9.e is not None and v9.e.is_v9 and len(v9.e.v9) == 1, v9.e.v9[0], 110):
        return False
    if require_some(i.v10):
        return False
    v10 = i.v10
    if require2(v10.a is not None and v10.a.is_v10 and len(v10.a.v10) == 1, v10.a.v10[0], True):
        return False
    if require2(
        v10.b is not None and v10.b.is_v10 and len(v10.b.v10) == 1, v10.b.v10[0], False
    ):
        return False
    if require2(v10.c is not None and v10.c.is_v10 and len(v10.c.v10) == 1, v10.c.v10[0], True):
        return False
    if require2(v10.d is not None and v10.d.is_v10 and len(v10.d.v10) == 1, v10.d.v10[0], True):
        return False
    if require2(v10.e is not None and v10.e.is_v10 and len(v10.e.v10) == 1, v10.e.v10[0], True):
        return False
    return True
def main() -> None:
    """Dispatch to the requested (de)serialization test; exit with status 1 on failure
    or on an unknown test name."""
    test_name, path = sys.argv[1], sys.argv[2]
    runners = {"out_union": test_out_union, "in_union": test_in_union}
    runner = runners.get(test_name)
    passed = runner(path) if runner is not None else False
    if not passed:
        sys.exit(1)
if __name__ == "__main__":
main()
| 9,400 | 0 | 92 |
40e41cf6a305104b3eb63bfef1e1c24cdb5f902e | 2,278 | py | Python | python_modules/dagster-pandas/dagster_pandas_tests/test_dagstermill_pandas_solids.py | vishvananda/dagster | f6aa44714246bc770fe05a9c986fe8b7d848956b | [
"Apache-2.0"
] | null | null | null | python_modules/dagster-pandas/dagster_pandas_tests/test_dagstermill_pandas_solids.py | vishvananda/dagster | f6aa44714246bc770fe05a9c986fe8b7d848956b | [
"Apache-2.0"
] | null | null | null | python_modules/dagster-pandas/dagster_pandas_tests/test_dagstermill_pandas_solids.py | vishvananda/dagster | f6aa44714246bc770fe05a9c986fe8b7d848956b | [
"Apache-2.0"
] | null | null | null | import sys
import pandas as pd
import pytest
from dagster import execute_pipeline
from dagster.utils import script_relative_path
from dagster_pandas.examples import (
define_pandas_papermill_pandas_hello_world_pipeline,
define_papermill_pandas_hello_world_pipeline,
)
@pytest.mark.skip('Must ship over run id to notebook process')
@notebook_test
@notebook_test
| 34 | 97 | 0.668569 | import sys
import pandas as pd
import pytest
from dagster import execute_pipeline
from dagster.utils import script_relative_path
from dagster_pandas.examples import (
define_pandas_papermill_pandas_hello_world_pipeline,
define_papermill_pandas_hello_world_pipeline,
)
def notebook_test(f):
    """Mark *f* as a notebook-executing test.

    Adds the ``notebook_test`` pytest mark (so these slower tests can be
    deselected as a group) and a ``skipif`` for Python < 3.5, because the
    development notebooks hardcode the python3 kernel.
    """
    # mark this with the "notebook_test" tag so that they can all be skipped
    # (for performance reasons) and mark them as python3 only
    return pytest.mark.notebook_test(
        pytest.mark.skipif(
            sys.version_info < (3, 5),
            reason='''Notebooks execute in their own process and hardcode what "kernel" they use.
            All of the development notebooks currently use the python3 "kernel" so they will
            not be executable in a container that only have python2.7 (e.g. in CircleCI)
            ''',
        )(f)
    )
@pytest.mark.skip('Must ship over run id to notebook process')
@notebook_test
def test_pandas_papermill_pandas_hello_world_pipeline():
    """Run the papermill pipeline on num.csv and check the notebook solid
    returns sum((df + 1)['num']) for the input frame."""
    pipeline = define_pandas_papermill_pandas_hello_world_pipeline()
    pipeline_result = execute_pipeline(
        pipeline,
        {
            'solids': {
                'pandas_input_transform_test': {
                    'inputs': {'df': {'csv': {'path': script_relative_path('num.csv')}}}
                }
            }
        },
    )
    # [3, 5, 7] presumably mirrors the num.csv fixture -- verify if it changes.
    in_df = pd.DataFrame({'num': [3, 5, 7]})
    solid_result = pipeline_result.result_for_solid('pandas_input_transform_test')
    expected_sum_result = ((in_df + 1)['num']).sum()
    sum_result = solid_result.transformed_value()
    assert sum_result == expected_sum_result
@notebook_test
def test_papermill_pandas_hello_world_pipeline():
    """Run the notebook-backed pipeline on num_prod.csv and check that the
    output frame equals the input frame with every value incremented by 1."""
    pipeline = define_papermill_pandas_hello_world_pipeline()
    pipeline_result = execute_pipeline(
        pipeline,
        {
            'solids': {
                'papermill_pandas_hello_world': {
                    'inputs': {'df': {'csv': {'path': script_relative_path('num_prod.csv')}}}
                }
            }
        },
    )
    assert pipeline_result.success
    solid_result = pipeline_result.result_for_solid('papermill_pandas_hello_world')
    # Expected output is simply the fixture with +1 applied element-wise.
    expected = pd.read_csv(script_relative_path('num_prod.csv')) + 1
    assert solid_result.transformed_value().equals(expected)
| 1,834 | 0 | 67 |
4f006e344b7da6ee2a5647dd3a58fe2711c71e5d | 2,011 | py | Python | createdb.py | rakeshr4/Travelogue | 4f8c1506fe6a5a6ac5229db5c137235efd7b01c6 | [
"MIT"
] | null | null | null | createdb.py | rakeshr4/Travelogue | 4f8c1506fe6a5a6ac5229db5c137235efd7b01c6 | [
"MIT"
] | null | null | null | createdb.py | rakeshr4/Travelogue | 4f8c1506fe6a5a6ac5229db5c137235efd7b01c6 | [
"MIT"
] | null | null | null | import csv | 37.943396 | 221 | 0.655893 | import csv
def addData(db):
    """Seed the database from the CSV fixtures under models/: users,
    guides, interests, the two join tables, and events (in that order,
    so foreign-key targets are loaded before the rows that reference them)."""
    addUsers(db)
    addGuides(db)
    addInterests(db)
    userInterests(db)
    guideInterests(db)
    addEvents(db)
def addUsers(db):
    """Load rows from models/user.csv into the ``users`` table.

    Each CSV row is (id, firstname, lastname, email, password).
    Commits once after all rows have been inserted.
    """
    # Text mode with newline='' is the csv-module convention on Python 3;
    # the previous 'rb' mode was a Python 2 idiom and raises a TypeError
    # under Python 3.
    with open('models/user.csv', 'r', newline='') as csvfile:
        reader = csv.reader(csvfile, delimiter=',')
        for row in reader:
            db.execute(
                'insert into users (id, firstname, lastname, email, password) values (?, ?, ?, ?, ?)',
                [row[0], row[1], row[2], row[3], row[4]])
    db.commit()
def addGuides(db):
    """Load models/guide.csv rows into the ``guides`` table, then commit."""
    # NOTE(review): 'rb' with csv.reader is a Python 2 idiom; Python 3
    # requires text mode (newline='') -- confirm the target interpreter.
    with open('models/guide.csv', 'rb') as csvfile:
        spamreader = csv.reader(csvfile, delimiter=',')
        for row in spamreader:
            db.execute('insert into guides (id, firstname, lastname, email, contact, password, address, charge, rating) values (?, ?, ?, ?, ?, ?, ?, ?, ?)', [row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8]])
    db.commit()
def addInterests(db):
    """Load (id, name) rows from models/interests.csv into ``interests``."""
    # NOTE(review): 'rb' + csv.reader only works on Python 2 -- see addGuides.
    with open('models/interests.csv', 'rb') as csvfile:
        spamreader = csv.reader(csvfile, delimiter=',')
        for row in spamreader:
            db.execute('insert into interests (id, name) values (?, ?)', [row[0], row[1]])
    db.commit()
def userInterests(db):
    """Load the user<->interest join rows from models/user_interests.csv."""
    # NOTE(review): 'rb' + csv.reader only works on Python 2 -- see addGuides.
    with open('models/user_interests.csv', 'rb') as csvfile:
        spamreader = csv.reader(csvfile, delimiter=',')
        for row in spamreader:
            db.execute('insert into user_interests (user_id, interest_id) values (?, ?)', [row[0], row[1]])
    db.commit()
def guideInterests(db):
    """Load the guide<->interest join rows from models/guide_interests.csv."""
    # NOTE(review): 'rb' + csv.reader only works on Python 2 -- see addGuides.
    with open('models/guide_interests.csv', 'rb') as csvfile:
        spamreader = csv.reader(csvfile, delimiter=',')
        for row in spamreader:
            db.execute('insert into guide_interests (guide_id, interest_id) values (?, ?)', [row[0], row[1]])
    db.commit()
def addEvents(db):
    """Load rows from models/event.csv into the ``events`` table.

    The CSV address column (row[3]) is formatted as '<label>:<location>';
    only the part after the first ':' is stored as the event location.
    Raises IndexError if the address has no ':' (same as the original
    split-based behavior). Commits once after all rows are inserted.
    """
    # Text mode with newline='' (Python 3 csv convention) replaces the
    # Python-2-only 'rb' mode.
    with open('models/event.csv', 'r', newline='') as csvfile:
        reader = csv.reader(csvfile, delimiter=',')
        for row in reader:
            # Keep only the location portion of the 'label:location' field.
            location = row[3].split(':')[1]
            db.execute(
                'insert into events (id, description, interest_id, location, guide_id, start_time, end_time) values (?, ?, ?, ?, ?, ?, ?)',
                [row[0], row[1], row[2], location, row[4], row[5], row[6]])
    db.commit()
db.commit() | 1,840 | 0 | 161 |
77ed1d01a8b54b550a0d5ac17cedf2c08e1e0481 | 6,880 | py | Python | rrc scripts/scripts/networkRGAN.py | wq13552463699/Real_Robot_Challenge_Phase2_AE_attemp | 280736589077a2179254099ddaf2327752d9321c | [
"MIT"
] | 1 | 2021-11-02T10:48:55.000Z | 2021-11-02T10:48:55.000Z | rrc scripts/scripts/networkRGAN.py | wq13552463699/Real_Robot_Challenge_Phase2_AE_attemp | 280736589077a2179254099ddaf2327752d9321c | [
"MIT"
] | null | null | null | rrc scripts/scripts/networkRGAN.py | wq13552463699/Real_Robot_Challenge_Phase2_AE_attemp | 280736589077a2179254099ddaf2327752d9321c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Thu Aug 12 05:31:02 2021
@author: 14488
"""
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim as optim
from torch.autograd import Variable
import torch
import rrc_example_package.scripts.convolutional_rnn
from torch.nn.utils.rnn import pack_padded_sequence
asize = 1
''' Generator network for 128x128 RGB images '''
''' Discriminator network for 128x128 RGB images '''
# class CRNN(nn.Module):
# def __init__(self):
# super(CRNN, self).__init__()
# self.main = convolutional_rnn.Conv2dLSTM(in_channels=in_channels, # Corresponds to input size
# out_channels=5, # Corresponds to hidden size
# kernel_size=3, # Int or List[int]
# num_layers=2,
# bidirectional=True,
# dilation=2, stride=2, dropout=0.5,
# batch_first=True)
| 32 | 105 | 0.461483 | # -*- coding: utf-8 -*-
"""
Created on Thu Aug 12 05:31:02 2021
@author: 14488
"""
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim as optim
from torch.autograd import Variable
import torch
import rrc_example_package.scripts.convolutional_rnn
from torch.nn.utils.rnn import pack_padded_sequence
asize = 1
def weights_init(m):
    """DCGAN-style layer initializer, applied via ``net.apply(weights_init)``.

    Conv* layers get N(0, 0.02) weights; BatchNorm* layers get N(1, 0.02)
    weights and zero bias. All other modules are left untouched.
    """
    layer_kind = type(m).__name__
    if 'Conv' in layer_kind:
        m.weight.data.normal_(0.0, 0.02)
    elif 'BatchNorm' in layer_kind:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)
''' Generator network for 128x128 RGB images '''
class Encoder(nn.Module):
    """Convolutional encoder: (3, 270, 270) image -> (1024, 1, 1) latent.

    Seven stride-2 convolutions shrink the spatial size
    (270 -> 134 -> 66 -> 32 -> 16 -> 8 -> 4 -> 2) and a final 2x2 max-pool
    collapses the map to 1x1, giving a 1024-dimensional representation.
    """
    def __init__(self):
        super(Encoder, self).__init__()
        self.main = nn.Sequential(
            # Input HxW = 270x270 (matches the Decoder's 270x270 output and
            # the per-layer size comments below; the "128x128" note elsewhere
            # in this file appears stale).
            nn.Conv2d(3, 16, 4, 2, 0), # Output HxW = 134
            nn.BatchNorm2d(16),
            nn.ReLU(True),
            nn.Conv2d(16, 32, 4, 2, 0), # Output HxW = 66
            nn.BatchNorm2d(32),
            nn.ReLU(True),
            nn.Conv2d(32, 64, 4, 2, 0), # Output HxW = 32
            nn.BatchNorm2d(64),
            nn.ReLU(True),
            nn.Conv2d(64, 128, 4, 2, 1), # Output HxW = 16
            nn.BatchNorm2d(128),
            nn.ReLU(True),
            nn.Conv2d(128, 256, 4, 2, 1), # Output HxW = 8
            nn.BatchNorm2d(256),
            nn.ReLU(True),
            nn.Conv2d(256, 512, 4, 2, 1), # Output HxW = 4
            nn.BatchNorm2d(512),
            nn.ReLU(True),
            nn.Conv2d(512, 1024, 4, 2, 1), # Output HxW = 2
            nn.MaxPool2d((2,2)),
            # At this point, we arrive at our low D representation vector,
            # which is 1024-dimensional (1024 channels at 1x1).
        )
    def forward(self, input):
        output = self.main(input)
        return output
class Decoder(nn.Module):
    """Transposed-convolution decoder: (1024, 1, 1) latent -> (3, 270, 270) image.

    Mirrors Encoder layer-for-layer; the final Tanh maps pixel values
    into [-1, 1].
    """
    def __init__(self):
        super(Decoder, self).__init__()
        self.main = nn.Sequential(
            nn.ConvTranspose2d(1024,512, 4, 1, 0, bias = False), # Output HxW = 4x4
            nn.BatchNorm2d(512),
            nn.ReLU(True),
            nn.ConvTranspose2d(512, 256, 4, 2, 1, bias = False), # Output HxW = 8x8
            nn.BatchNorm2d(256),
            nn.ReLU(True),
            nn.ConvTranspose2d(256, 128, 4, 2, 1, bias = False), # Output HxW = 16x16
            nn.BatchNorm2d(128),
            nn.ReLU(True),
            nn.ConvTranspose2d(128, 64, 4, 2, 1, bias = False), # Output HxW = 32x32
            nn.BatchNorm2d(64),
            nn.ReLU(True),
            nn.ConvTranspose2d(64, 32, 4, 2, 0, bias = False), # Output HxW = 66
            nn.BatchNorm2d(32),
            nn.ReLU(True),
            nn.ConvTranspose2d(32, 16, 4, 2, 0, bias = False), # Output HxW = 134
            nn.BatchNorm2d(16),
            nn.ReLU(True),
            nn.ConvTranspose2d(16, 3, 4, 2, 0, bias = False), # Output HxW = 270
            nn.Tanh()
        )
    def forward(self, input):
        output = self.main(input)
        return output
class Rnn(nn.Module):
    """Single-layer GRU carrying the 1024-d latent across time steps.

    ``forward`` flattens a (1, 1024, 1, 1) feature map into a length-1
    sequence, runs one GRU step with hidden state ``hx``, and reshapes
    the output back to (1, 1024, 1, 1). Returns (output, new_hidden).
    """

    def __init__(self):
        super(Rnn, self).__init__()
        self.main = nn.GRU(1024, 1024, 1)

    def forward(self, input, hx):
        seq = input.view(1, 1, 1024)  # (seq_len=1, batch=1, features)
        out, hn = self.main(seq, hx)
        return out.view(1, 1024, 1, 1), hn
''' Discriminator network for 128x128 RGB images '''
# class CRNN(nn.Module):
# def __init__(self):
# super(CRNN, self).__init__()
# self.main = convolutional_rnn.Conv2dLSTM(in_channels=in_channels, # Corresponds to input size
# out_channels=5, # Corresponds to hidden size
# kernel_size=3, # Int or List[int]
# num_layers=2,
# bidirectional=True,
# dilation=2, stride=2, dropout=0.5,
# batch_first=True)
class Dis(nn.Module):
    """DCGAN-style discriminator.

    A stack of stride-2 convolutions (LeakyReLU activations, BatchNorm on
    all but the first layer) reduces the input image to a single channel,
    and a Sigmoid maps it to a real/fake probability. ``forward`` returns
    the result flattened to a 1-D tensor.
    """
    def __init__(self):
        super(Dis, self).__init__()
        self.main = nn.Sequential(
            nn.Conv2d(3, 16, 4, 2, 0),
            nn.LeakyReLU(0.2, inplace = True),
            nn.Conv2d(16, 32, 4, 2, 0),
            nn.BatchNorm2d(32),
            nn.LeakyReLU(0.2, inplace = True),
            nn.Conv2d(32, 64, 4, 2, 0),
            nn.BatchNorm2d(64),
            nn.LeakyReLU(0.2, inplace = True),
            nn.Conv2d(64, 128, 4, 2, 1),
            nn.BatchNorm2d(128),
            nn.LeakyReLU(0.2, inplace = True),
            nn.Conv2d(128, 256, 4, 2, 1),
            nn.BatchNorm2d(256),
            nn.LeakyReLU(0.2, inplace = True),
            nn.Conv2d(256, 512, 4, 2, 1),
            nn.BatchNorm2d(512),
            nn.LeakyReLU(0.2, inplace = True),
            nn.Conv2d(512, 1024, 4, 2, 1),
            nn.BatchNorm2d(1024),
            nn.LeakyReLU(0.2, inplace = True),
            nn.Conv2d(1024, 1, 4, 2, 1, bias = False),
            nn.Sigmoid()
        )
    def forward(self, input):
        output = self.main(input)
        return output.view(-1)
class Dec(nn.Module):
    """Linear projection 1024 -> 512 over flattened feature maps."""

    def __init__(self):
        super(Dec, self).__init__()
        self.main = nn.Linear(1024, 512)

    def forward(self, input):
        flat = input.view(-1, 1024)  # collapse (N, 1024, 1, 1) -> (N, 1024)
        return self.main(flat)
class Inc(nn.Module):
    """Linear expansion 512 -> 1024, reshaped to a (1, 1024, 1, 1) map."""

    def __init__(self):
        super(Inc, self).__init__()
        self.main = nn.Linear(512, 1024)

    def forward(self, input):
        # Produce the 4-D shape the conv decoder expects.
        return self.main(input).view(1, 1024, 1, 1)
class Dec2(nn.Module):
    """Linear projection 256 -> 128."""

    def __init__(self):
        super(Dec2, self).__init__()
        self.main = nn.Linear(256, 128)

    def forward(self, input):
        return self.main(input)
class Inc2(nn.Module):
    """Linear expansion 128 -> 256."""

    def __init__(self):
        super(Inc2, self).__init__()
        self.main = nn.Linear(128, 256)

    def forward(self, input):
        return self.main(input)
| 5,007 | 10 | 763 |
f7a85f4ba484b62374b57b5478bfaa096d4ed62c | 1,021 | py | Python | Python/text_to_speech.py | BlackTimber-Labs/DemoPr | 6495b5307323ce17be5071006f1de90a1120edf4 | [
"MIT"
] | 10 | 2021-10-01T15:11:27.000Z | 2021-10-03T10:41:36.000Z | Python/text_to_speech.py | BlackTimber-Labs/DemoPr | 6495b5307323ce17be5071006f1de90a1120edf4 | [
"MIT"
] | 102 | 2021-10-01T14:49:50.000Z | 2021-10-31T17:30:15.000Z | Python/text_to_speech.py | BlackTimber-Labs/DemoPr | 6495b5307323ce17be5071006f1de90a1120edf4 | [
"MIT"
] | 88 | 2021-10-01T14:28:10.000Z | 2021-10-31T12:02:42.000Z | # Import the gTTS module for text
# to speech conversion
from gtts import gTTS
# This module is imported so that we can
# play the converted audio
from playsound import playsound
# It is a text value that we want to convert to audio
text_val = 'Welcome to hacktoberfest 21.Hacktoberfest, in its 8th year, is a month-long celebration of open source software run by DigitalOcean. During the month of October, we invite you to join open-source software enthusiasts, beginners, and the developer community by contributing to open-source projects. '
# Here are converting in English Language
language = 'en'
# Passing the text and language to the engine,
# here we have assign slow=False. Which denotes
# the module that the transformed audio should
# have a high speed
obj = gTTS(text=text_val, lang=language, slow=False)
#Here we are saving the transformed audio in a mp3 file name
obj.save("hactoberfest21.mp3")
# Play the .mp3 file
playsound("hactoberfest21.mp3")
| 37.814815 | 313 | 0.740451 | # Import the gTTS module for text
# to speech conversion
from gtts import gTTS
# This module is imported so that we can
# play the converted audio
from playsound import playsound
# It is a text value that we want to convert to audio
text_val = 'Welcome to hacktoberfest 21.Hacktoberfest, in its 8th year, is a month-long celebration of open source software run by DigitalOcean. During the month of October, we invite you to join open-source software enthusiasts, beginners, and the developer community by contributing to open-source projects. '
# Here are converting in English Language
language = 'en'
# Passing the text and language to the engine,
# here we have assign slow=False. Which denotes
# the module that the transformed audio should
# have a high speed
obj = gTTS(text=text_val, lang=language, slow=False)
#Here we are saving the transformed audio in a mp3 file name
obj.save("hactoberfest21.mp3")
# Play the .mp3 file
playsound("hactoberfest21.mp3")
| 0 | 0 | 0 |
f224bda332d1ba774feb2b9787bb81d6d7f8b0a1 | 384 | py | Python | pytmc/__init__.py | jsheppard95/pytmc | d9383d104393d67df54f5c43cb6a2d552405d5f8 | [
"BSD-3-Clause-LBNL"
] | null | null | null | pytmc/__init__.py | jsheppard95/pytmc | d9383d104393d67df54f5c43cb6a2d552405d5f8 | [
"BSD-3-Clause-LBNL"
] | null | null | null | pytmc/__init__.py | jsheppard95/pytmc | d9383d104393d67df54f5c43cb6a2d552405d5f8 | [
"BSD-3-Clause-LBNL"
] | null | null | null | import logging
from ._version import get_versions # noqa
from .xml_obj import Symbol, DataType, SubItem # noqa
from .xml_collector import TmcFile # noqa
from . import epics # noqa
logger = logging.getLogger(__name__)
__version__ = get_versions()['version']
del get_versions
__all__ = [
'DataType',
'SubItem',
'Symbol',
'TmcFile',
'epics',
'logger',
]
| 17.454545 | 54 | 0.684896 | import logging
from ._version import get_versions # noqa
from .xml_obj import Symbol, DataType, SubItem # noqa
from .xml_collector import TmcFile # noqa
from . import epics # noqa
logger = logging.getLogger(__name__)
__version__ = get_versions()['version']
del get_versions
__all__ = [
'DataType',
'SubItem',
'Symbol',
'TmcFile',
'epics',
'logger',
]
| 0 | 0 | 0 |
9d7b0b014026c589a321d25b4e6588f897fcd81c | 405 | py | Python | app/schemas/pokemon.py | dmontag23/pokedex-api | b16b25493a08698f617b8afa2bd4f14b2bfc21e6 | [
"MIT"
] | null | null | null | app/schemas/pokemon.py | dmontag23/pokedex-api | b16b25493a08698f617b8afa2bd4f14b2bfc21e6 | [
"MIT"
] | null | null | null | app/schemas/pokemon.py | dmontag23/pokedex-api | b16b25493a08698f617b8afa2bd4f14b2bfc21e6 | [
"MIT"
] | null | null | null | from pydantic import BaseModel
| 21.315789 | 59 | 0.493827 | from pydantic import BaseModel
class Pokemon(BaseModel):
    """API schema for a single Pokemon record."""
    name: str
    description: str
    habitat: str
    # NOTE(review): the legendary flag is carried as the string
    # "true"/"false", not a bool -- confirm callers expect the string form.
    isLegendary: str
    class Config:
        # Example payload surfaced in the generated OpenAPI docs.
        schema_extra = {
            "example": {
                "name": "mewtwo",
                "description": "I am mewtwo hear me roar!",
                "habitat": "Someone's mancave",
                "isLegendary": "true"
            }
        }
| 0 | 350 | 23 |
c065834fb36e57bb45b56bc2d41ed8afa0ca7ba6 | 15,194 | py | Python | tests/test_plot.py | chrisburr/hist | d10132ab8d03f41152f0b934a18291ce699453b2 | [
"BSD-3-Clause"
] | null | null | null | tests/test_plot.py | chrisburr/hist | d10132ab8d03f41152f0b934a18291ce699453b2 | [
"BSD-3-Clause"
] | null | null | null | tests/test_plot.py | chrisburr/hist | d10132ab8d03f41152f0b934a18291ce699453b2 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from hist import Hist, NamedHist, axis
import pytest
import numpy as np
unp = pytest.importorskip("uncertainties.unumpy")
plt = pytest.importorskip("matplotlib.pyplot")
def test_general_plot1d():
"""
Test general plot1d -- whether 1d-Hist can be plotted properly.
"""
h = Hist(
axis.Regular(
50, -5, 5, name="A", label="a [units]", underflow=False, overflow=False
),
).fill(np.random.normal(size=10))
assert h.plot1d(color="green", ls="--", lw=3)
plt.close("all")
# dimension error
h = Hist(
axis.Regular(
50, -5, 5, name="A", label="a [units]", underflow=False, overflow=False
),
axis.Regular(
50, -4, 4, name="B", label="b [units]", underflow=False, overflow=False
),
).fill(np.random.normal(size=10), np.random.normal(size=10))
with pytest.raises(Exception):
h.plot1d()
# wrong kwargs names
with pytest.raises(Exception):
h.project("A").plot1d(abc="red")
# wrong kwargs type
with pytest.raises(Exception):
h.project("B").plot1d(ls="red")
plt.close("all")
def test_general_plot2d():
"""
Test general plot2d -- whether 2d-Hist can be plotted properly.
"""
h = Hist(
axis.Regular(
50, -5, 5, name="A", label="a [units]", underflow=False, overflow=False
),
axis.Regular(
50, -4, 4, name="B", label="b [units]", underflow=False, overflow=False
),
).fill(np.random.normal(size=10), np.random.normal(size=10))
assert h.plot2d(cmap="cividis")
# dimension error
h = Hist(
axis.Regular(
50, -5, 5, name="A", label="a [units]", underflow=False, overflow=False
),
axis.Regular(
50, -4, 4, name="B", label="b [units]", underflow=False, overflow=False
),
).fill(np.random.normal(size=10), np.random.normal(size=10))
with pytest.raises(Exception):
h.project("A").plot2d()
# wrong kwargs names
with pytest.raises(Exception):
h.plot2d(abc="red")
# wrong kwargs type
with pytest.raises(Exception):
h.plot2d(cmap=0.1)
plt.close("all")
def test_general_plot2d_full():
"""
Test general plot2d_full -- whether 2d-Hist can be fully plotted properly.
"""
h = Hist(
axis.Regular(
50, -5, 5, name="A", label="a [units]", underflow=False, overflow=False
),
axis.Regular(
50, -4, 4, name="B", label="b [units]", underflow=False, overflow=False
),
).fill(np.random.normal(size=10), np.random.normal(size=10))
assert h.plot2d_full(
main_cmap="cividis",
top_ls="--",
top_color="orange",
top_lw=2,
side_ls="-.",
side_lw=1,
side_color="steelblue",
)
# dimension error
h = Hist(
axis.Regular(
50, -5, 5, name="A", label="a [units]", underflow=False, overflow=False
),
axis.Regular(
50, -4, 4, name="B", label="b [units]", underflow=False, overflow=False
),
).fill(np.random.normal(size=10), np.random.normal(size=10))
with pytest.raises(Exception):
h.project("A").plot2d_full()
# wrong kwargs names
with pytest.raises(Exception):
h.plot2d_full(abc="red")
with pytest.raises(Exception):
h.plot2d_full(color="red")
# wrong kwargs type
with pytest.raises(Exception):
h.plot2d_full(main_cmap=0.1, side_lw="autumn")
plt.close("all")
def test_general_plot():
"""
Test general plot -- whether Hist can be plotted properly.
"""
h = Hist(
axis.Regular(
50, -5, 5, name="A", label="a [units]", underflow=False, overflow=False
),
).fill(np.random.normal(size=10))
assert h.plot(color="green", ls="--", lw=3)
h = Hist(
axis.Regular(
50, -5, 5, name="A", label="a [units]", underflow=False, overflow=False
),
axis.Regular(
50, -4, 4, name="B", label="b [units]", underflow=False, overflow=False
),
).fill(np.random.normal(size=10), np.random.normal(size=10))
assert h.plot(cmap="cividis")
# dimension error
h = Hist(
axis.Regular(
50, -5, 5, name="A", label="a [units]", underflow=False, overflow=False
),
axis.Regular(
50, -4, 4, name="B", label="b [units]", underflow=False, overflow=False
),
axis.Regular(
50, -4, 4, name="C", label="c [units]", underflow=False, overflow=False
),
).fill(
np.random.normal(size=10), np.random.normal(size=10), np.random.normal(size=10)
)
with pytest.raises(Exception):
h.plot()
# wrong kwargs names
with pytest.raises(Exception):
h.project("A").plot(abc="red")
with pytest.raises(Exception):
h.project("A", "C").plot(abc="red")
# wrong kwargs type
with pytest.raises(Exception):
h.project("B").plot(ls="red")
with pytest.raises(Exception):
h.project("A", "C").plot(cmap=0.1)
plt.close("all")
def test_general_plot_pull():
    """
    Test general plot_pull -- whether 1d-Hist can be plotted pull properly.
    """
    h = Hist(
        axis.Regular(
            50, -4, 4, name="S", label="s [units]", underflow=False, overflow=False
        )
    ).fill(np.random.normal(size=10))

    # Gaussian reference curve for the pull computation. ``unp.exp`` handles
    # the uncertainties-array (object-dtype) case that arises during fitting.
    # (Restored: this helper was missing here, leaving ``pdf`` undefined.)
    def pdf(x, a=1 / np.sqrt(2 * np.pi), x0=0, sigma=1, offset=0):
        exp = unp.exp if a.dtype == np.dtype("O") else np.exp
        return a * exp(-((x - x0) ** 2) / (2 * sigma ** 2)) + offset

    assert h.plot_pull(
        pdf,
        eb_ecolor="crimson",
        eb_mfc="crimson",
        eb_mec="crimson",
        eb_fmt="o",
        eb_ms=6,
        eb_capsize=1,
        eb_capthick=2,
        eb_alpha=0.8,
        fp_c="chocolate",
        fp_ls="-",
        fp_lw=3,
        fp_alpha=1.0,
        bar_fc="orange",
        pp_num=6,
        pp_fc="orange",
        pp_alpha=0.618,
        pp_ec=None,
    )
    # dimension error
    hh = Hist(
        axis.Regular(
            50, -4, 4, name="X", label="s [units]", underflow=False, overflow=False
        ),
        axis.Regular(
            50, -4, 4, name="Y", label="s [units]", underflow=False, overflow=False
        ),
    ).fill(np.random.normal(size=10), np.random.normal(size=10))
    with pytest.raises(Exception):
        hh.plot_pull(pdf)
    # not callable
    with pytest.raises(Exception):
        h.plot_pull("1")
    with pytest.raises(Exception):
        h.plot_pull(1)
    with pytest.raises(Exception):
        h.plot_pull(0.1)
    with pytest.raises(Exception):
        h.plot_pull((1, 2))
    with pytest.raises(Exception):
        h.plot_pull([1, 2])
    with pytest.raises(Exception):
        h.plot_pull({"a": 1})
    # wrong kwargs names
    with pytest.raises(Exception):
        h.plot_pull(pdf, abc="crimson", xyz="crimson")
    with pytest.raises(Exception):
        h.plot_pull(pdf, ecolor="crimson", mfc="crimson")
    # not disabled params
    h.plot_pull(pdf, eb_label="value")
    h.plot_pull(pdf, fp_label="value")
    h.plot_pull(pdf, ub_label="value")
    h.plot_pull(pdf, bar_label="value")
    h.plot_pull(pdf, pp_label="value")
    # disabled params
    with pytest.raises(Exception):
        h.plot_pull(pdf, bar_width="value")
    # wrong kwargs types
    with pytest.raises(Exception):
        h.plot_pull(pdf, eb_ecolor=1.0, eb_mfc=1.0)  # kwargs should be str
    plt.close("all")
def test_named_plot1d():
"""
Test named plot1d -- whether 1d-NamedHist can be plotted properly.
"""
h = NamedHist(
axis.Regular(
50, -5, 5, name="A", label="a [units]", underflow=False, overflow=False
),
).fill(A=np.random.normal(size=10))
assert h.plot1d(color="green", ls="--", lw=3)
plt.close("all")
# dimension error
h = NamedHist(
axis.Regular(
50, -5, 5, name="A", label="a [units]", underflow=False, overflow=False
),
axis.Regular(
50, -4, 4, name="B", label="b [units]", underflow=False, overflow=False
),
).fill(B=np.random.normal(size=10), A=np.random.normal(size=10))
with pytest.raises(Exception):
h.plot1d()
# wrong kwargs names
with pytest.raises(Exception):
h.project("A").plot1d(abc="red")
# wrong kwargs type
with pytest.raises(Exception):
h.project("B").plot1d(ls="red")
plt.close("all")
def test_named_plot2d():
"""
Test named plot2d -- whether 2d-NamedHist can be plotted properly.
"""
h = NamedHist(
axis.Regular(
50, -5, 5, name="A", label="a [units]", underflow=False, overflow=False
),
axis.Regular(
50, -4, 4, name="B", label="b [units]", underflow=False, overflow=False
),
).fill(B=np.random.normal(size=10), A=np.random.normal(size=10))
assert h.plot2d(cmap="cividis")
plt.close("all")
# dimension error
h = NamedHist(
axis.Regular(
50, -5, 5, name="A", label="a [units]", underflow=False, overflow=False
),
axis.Regular(
50, -4, 4, name="B", label="b [units]", underflow=False, overflow=False
),
).fill(B=np.random.normal(size=10), A=np.random.normal(size=10))
with pytest.raises(Exception):
h.project("A").plot2d()
# wrong kwargs names
with pytest.raises(Exception):
h.plot2d(abc="red")
# wrong kwargs type
with pytest.raises(Exception):
h.plot2d(cmap=0.1)
plt.close("all")
def test_named_plot2d_full():
"""
Test named plot2d_full -- whether 2d-NamedHist can be fully plotted properly.
"""
h = NamedHist(
axis.Regular(
50, -5, 5, name="A", label="a [units]", underflow=False, overflow=False
),
axis.Regular(
50, -4, 4, name="B", label="b [units]", underflow=False, overflow=False
),
).fill(B=np.random.normal(size=10), A=np.random.normal(size=10))
assert h.plot2d_full(
main_cmap="cividis",
top_ls="--",
top_color="orange",
top_lw=2,
side_ls="-.",
side_lw=1,
side_color="steelblue",
)
plt.close("all")
# dimension error
h = NamedHist(
axis.Regular(
50, -5, 5, name="A", label="a [units]", underflow=False, overflow=False
),
axis.Regular(
50, -4, 4, name="B", label="b [units]", underflow=False, overflow=False
),
).fill(B=np.random.normal(size=10), A=np.random.normal(size=10))
with pytest.raises(Exception):
h.project("A").plot2d_full()
# wrong kwargs names
with pytest.raises(Exception):
h.plot2d_full(abc="red")
with pytest.raises(Exception):
h.plot2d_full(color="red")
# wrong kwargs type
with pytest.raises(Exception):
h.plot2d_full(main_cmap=0.1, side_lw="autumn")
plt.close("all")
def test_named_plot():
"""
Test named plot -- whether NamedHist can be plotted properly.
"""
h = NamedHist(
axis.Regular(
50, -5, 5, name="A", label="a [units]", underflow=False, overflow=False
),
).fill(A=np.random.normal(size=10))
assert h.plot(color="green", ls="--", lw=3)
h = NamedHist(
axis.Regular(
50, -5, 5, name="A", label="a [units]", underflow=False, overflow=False
),
axis.Regular(
50, -4, 4, name="B", label="b [units]", underflow=False, overflow=False
),
).fill(B=np.random.normal(size=10), A=np.random.normal(size=10))
assert h.plot(cmap="cividis")
plt.close("all")
# dimension error
h = NamedHist(
axis.Regular(
50, -5, 5, name="A", label="a [units]", underflow=False, overflow=False
),
axis.Regular(
50, -4, 4, name="B", label="b [units]", underflow=False, overflow=False
),
axis.Regular(
50, -4, 4, name="C", label="c [units]", underflow=False, overflow=False
),
).fill(
A=np.random.normal(size=10),
B=np.random.normal(size=10),
C=np.random.normal(size=10),
)
with pytest.raises(Exception):
h.plot()
# wrong kwargs names
with pytest.raises(Exception):
h.project("A").plot(abc="red")
with pytest.raises(Exception):
h.project("A", "C").plot(abc="red")
# wrong kwargs type
with pytest.raises(Exception):
h.project("B").plot(ls="red")
with pytest.raises(Exception):
h.project("A", "C").plot(cmap=0.1)
plt.close("all")
def test_named_plot_pull():
    """
    Test named plot_pull -- whether 1d-NamedHist can be plotted pull properly.
    """
    h = NamedHist(
        axis.Regular(
            50, -4, 4, name="S", label="s [units]", underflow=False, overflow=False
        )
    ).fill(S=np.random.normal(size=10))

    # Gaussian reference curve for the pull computation, restored to match
    # the general variant above (``pdf`` was otherwise undefined here).
    def pdf(x, a=1 / np.sqrt(2 * np.pi), x0=0, sigma=1, offset=0):
        exp = unp.exp if a.dtype == np.dtype("O") else np.exp
        return a * exp(-((x - x0) ** 2) / (2 * sigma ** 2)) + offset

    assert h.plot_pull(
        pdf,
        eb_ecolor="crimson",
        eb_mfc="crimson",
        eb_mec="crimson",
        eb_fmt="o",
        eb_ms=6,
        eb_capsize=1,
        eb_capthick=2,
        eb_alpha=0.8,
        fp_c="chocolate",
        fp_ls="-",
        fp_lw=3,
        fp_alpha=1.0,
        bar_fc="orange",
        pp_num=6,
        pp_fc="orange",
        pp_alpha=0.618,
        pp_ec=None,
    )
    # dimension error
    hh = NamedHist(
        axis.Regular(
            50, -4, 4, name="X", label="s [units]", underflow=False, overflow=False
        ),
        axis.Regular(
            50, -4, 4, name="Y", label="s [units]", underflow=False, overflow=False
        ),
    ).fill(X=np.random.normal(size=10), Y=np.random.normal(size=10))
    with pytest.raises(Exception):
        hh.plot_pull(pdf)
    # not callable
    with pytest.raises(Exception):
        h.plot_pull("1")
    with pytest.raises(Exception):
        h.plot_pull(1)
    with pytest.raises(Exception):
        h.plot_pull(0.1)
    with pytest.raises(Exception):
        h.plot_pull((1, 2))
    with pytest.raises(Exception):
        h.plot_pull([1, 2])
    with pytest.raises(Exception):
        h.plot_pull({"a": 1})
    plt.close("all")
    # wrong kwargs names
    with pytest.raises(Exception):
        h.plot_pull(pdf, abc="crimson", xyz="crimson")
    with pytest.raises(Exception):
        h.plot_pull(pdf, ecolor="crimson", mfc="crimson")
    # not disabled params
    h.plot_pull(pdf, eb_label="value")
    h.plot_pull(pdf, fp_label="value")
    h.plot_pull(pdf, ub_label="value")
    h.plot_pull(pdf, bar_label="value")
    h.plot_pull(pdf, pp_label="value")
    # disabled params
    with pytest.raises(Exception):
        h.plot_pull(pdf, bar_width="value")
    # wrong kwargs types
    with pytest.raises(Exception):
        h.plot_pull(pdf, eb_ecolor=1.0, eb_mfc=1.0)  # kwargs should be str
    plt.close("all")
| 25.97265 | 87 | 0.561735 | # -*- coding: utf-8 -*-
from hist import Hist, NamedHist, axis
import pytest
import numpy as np
unp = pytest.importorskip("uncertainties.unumpy")
plt = pytest.importorskip("matplotlib.pyplot")
def test_general_plot1d():
"""
Test general plot1d -- whether 1d-Hist can be plotted properly.
"""
h = Hist(
axis.Regular(
50, -5, 5, name="A", label="a [units]", underflow=False, overflow=False
),
).fill(np.random.normal(size=10))
assert h.plot1d(color="green", ls="--", lw=3)
plt.close("all")
# dimension error
h = Hist(
axis.Regular(
50, -5, 5, name="A", label="a [units]", underflow=False, overflow=False
),
axis.Regular(
50, -4, 4, name="B", label="b [units]", underflow=False, overflow=False
),
).fill(np.random.normal(size=10), np.random.normal(size=10))
with pytest.raises(Exception):
h.plot1d()
# wrong kwargs names
with pytest.raises(Exception):
h.project("A").plot1d(abc="red")
# wrong kwargs type
with pytest.raises(Exception):
h.project("B").plot1d(ls="red")
plt.close("all")
def test_general_plot2d():
"""
Test general plot2d -- whether 2d-Hist can be plotted properly.
"""
h = Hist(
axis.Regular(
50, -5, 5, name="A", label="a [units]", underflow=False, overflow=False
),
axis.Regular(
50, -4, 4, name="B", label="b [units]", underflow=False, overflow=False
),
).fill(np.random.normal(size=10), np.random.normal(size=10))
assert h.plot2d(cmap="cividis")
# dimension error
h = Hist(
axis.Regular(
50, -5, 5, name="A", label="a [units]", underflow=False, overflow=False
),
axis.Regular(
50, -4, 4, name="B", label="b [units]", underflow=False, overflow=False
),
).fill(np.random.normal(size=10), np.random.normal(size=10))
with pytest.raises(Exception):
h.project("A").plot2d()
# wrong kwargs names
with pytest.raises(Exception):
h.plot2d(abc="red")
# wrong kwargs type
with pytest.raises(Exception):
h.plot2d(cmap=0.1)
plt.close("all")
def test_general_plot2d_full():
"""
Test general plot2d_full -- whether 2d-Hist can be fully plotted properly.
"""
h = Hist(
axis.Regular(
50, -5, 5, name="A", label="a [units]", underflow=False, overflow=False
),
axis.Regular(
50, -4, 4, name="B", label="b [units]", underflow=False, overflow=False
),
).fill(np.random.normal(size=10), np.random.normal(size=10))
assert h.plot2d_full(
main_cmap="cividis",
top_ls="--",
top_color="orange",
top_lw=2,
side_ls="-.",
side_lw=1,
side_color="steelblue",
)
# dimension error
h = Hist(
axis.Regular(
50, -5, 5, name="A", label="a [units]", underflow=False, overflow=False
),
axis.Regular(
50, -4, 4, name="B", label="b [units]", underflow=False, overflow=False
),
).fill(np.random.normal(size=10), np.random.normal(size=10))
with pytest.raises(Exception):
h.project("A").plot2d_full()
# wrong kwargs names
with pytest.raises(Exception):
h.plot2d_full(abc="red")
with pytest.raises(Exception):
h.plot2d_full(color="red")
# wrong kwargs type
with pytest.raises(Exception):
h.plot2d_full(main_cmap=0.1, side_lw="autumn")
plt.close("all")
def test_general_plot():
"""
Test general plot -- whether Hist can be plotted properly.
"""
h = Hist(
axis.Regular(
50, -5, 5, name="A", label="a [units]", underflow=False, overflow=False
),
).fill(np.random.normal(size=10))
assert h.plot(color="green", ls="--", lw=3)
h = Hist(
axis.Regular(
50, -5, 5, name="A", label="a [units]", underflow=False, overflow=False
),
axis.Regular(
50, -4, 4, name="B", label="b [units]", underflow=False, overflow=False
),
).fill(np.random.normal(size=10), np.random.normal(size=10))
assert h.plot(cmap="cividis")
# dimension error
h = Hist(
axis.Regular(
50, -5, 5, name="A", label="a [units]", underflow=False, overflow=False
),
axis.Regular(
50, -4, 4, name="B", label="b [units]", underflow=False, overflow=False
),
axis.Regular(
50, -4, 4, name="C", label="c [units]", underflow=False, overflow=False
),
).fill(
np.random.normal(size=10), np.random.normal(size=10), np.random.normal(size=10)
)
with pytest.raises(Exception):
h.plot()
# wrong kwargs names
with pytest.raises(Exception):
h.project("A").plot(abc="red")
with pytest.raises(Exception):
h.project("A", "C").plot(abc="red")
# wrong kwargs type
with pytest.raises(Exception):
h.project("B").plot(ls="red")
with pytest.raises(Exception):
h.project("A", "C").plot(cmap=0.1)
plt.close("all")
def test_general_plot_pull():
    """
    Test general plot_pull -- whether 1d-Hist can be plotted pull properly.
    """
    hist_1d = Hist(
        axis.Regular(
            50, -4, 4, name="S", label="s [units]", underflow=False, overflow=False
        )
    ).fill(np.random.normal(size=10))

    def pdf(x, a=1 / np.sqrt(2 * np.pi), x0=0, sigma=1, offset=0):
        # unp.exp handles uncertainty (object-dtype) arrays; np.exp the rest
        exp = unp.exp if a.dtype == np.dtype("O") else np.exp
        gauss = a * exp(-((x - x0) ** 2) / (2 * sigma ** 2))
        return gauss + offset

    assert hist_1d.plot_pull(
        pdf,
        eb_ecolor="crimson",
        eb_mfc="crimson",
        eb_mec="crimson",
        eb_fmt="o",
        eb_ms=6,
        eb_capsize=1,
        eb_capthick=2,
        eb_alpha=0.8,
        fp_c="chocolate",
        fp_ls="-",
        fp_lw=3,
        fp_alpha=1.0,
        bar_fc="orange",
        pp_num=6,
        pp_fc="orange",
        pp_alpha=0.618,
        pp_ec=None,
    )

    # dimension error: pull plots are only defined for 1d histograms
    hist_2d = Hist(
        axis.Regular(
            50, -4, 4, name="X", label="s [units]", underflow=False, overflow=False
        ),
        axis.Regular(
            50, -4, 4, name="Y", label="s [units]", underflow=False, overflow=False
        ),
    ).fill(np.random.normal(size=10), np.random.normal(size=10))
    with pytest.raises(Exception):
        hist_2d.plot_pull(pdf)

    # the fit target must be callable
    for not_callable in ("1", 1, 0.1, (1, 2), [1, 2], {"a": 1}):
        with pytest.raises(Exception):
            hist_1d.plot_pull(not_callable)

    # wrong kwargs names
    with pytest.raises(Exception):
        hist_1d.plot_pull(pdf, abc="crimson", xyz="crimson")
    with pytest.raises(Exception):
        hist_1d.plot_pull(pdf, ecolor="crimson", mfc="crimson")

    # not disabled params: each sub-artist label stays settable
    for label_kwarg in ("eb_label", "fp_label", "ub_label", "bar_label", "pp_label"):
        hist_1d.plot_pull(pdf, **{label_kwarg: "value"})

    # disabled params
    with pytest.raises(Exception):
        hist_1d.plot_pull(pdf, bar_width="value")

    # wrong kwargs types
    with pytest.raises(Exception):
        hist_1d.plot_pull(pdf, eb_ecolor=1.0, eb_mfc=1.0)  # kwargs should be str

    plt.close("all")
def test_named_plot1d():
    """
    Test named plot1d -- whether 1d-NamedHist can be plotted properly.
    """
    hist_1d = NamedHist(
        axis.Regular(
            50, -5, 5, name="A", label="a [units]", underflow=False, overflow=False
        ),
    ).fill(A=np.random.normal(size=10))
    assert hist_1d.plot1d(color="green", ls="--", lw=3)
    plt.close("all")

    # dimension error: plot1d refuses a 2d histogram
    hist_2d = NamedHist(
        axis.Regular(
            50, -5, 5, name="A", label="a [units]", underflow=False, overflow=False
        ),
        axis.Regular(
            50, -4, 4, name="B", label="b [units]", underflow=False, overflow=False
        ),
    ).fill(B=np.random.normal(size=10), A=np.random.normal(size=10))
    with pytest.raises(Exception):
        hist_2d.plot1d()

    # wrong kwargs names
    with pytest.raises(Exception):
        hist_2d.project("A").plot1d(abc="red")

    # wrong kwargs type
    with pytest.raises(Exception):
        hist_2d.project("B").plot1d(ls="red")

    plt.close("all")
def test_named_plot2d():
    """
    Test named plot2d -- whether 2d-NamedHist can be plotted properly.
    """
    hist_2d = NamedHist(
        axis.Regular(
            50, -5, 5, name="A", label="a [units]", underflow=False, overflow=False
        ),
        axis.Regular(
            50, -4, 4, name="B", label="b [units]", underflow=False, overflow=False
        ),
    ).fill(B=np.random.normal(size=10), A=np.random.normal(size=10))
    assert hist_2d.plot2d(cmap="cividis")
    plt.close("all")

    # dimension error: a 1d projection cannot be drawn with plot2d
    other_2d = NamedHist(
        axis.Regular(
            50, -5, 5, name="A", label="a [units]", underflow=False, overflow=False
        ),
        axis.Regular(
            50, -4, 4, name="B", label="b [units]", underflow=False, overflow=False
        ),
    ).fill(B=np.random.normal(size=10), A=np.random.normal(size=10))
    with pytest.raises(Exception):
        other_2d.project("A").plot2d()

    # wrong kwargs names
    with pytest.raises(Exception):
        other_2d.plot2d(abc="red")

    # wrong kwargs type
    with pytest.raises(Exception):
        other_2d.plot2d(cmap=0.1)

    plt.close("all")
def test_named_plot2d_full():
    """
    Test named plot2d_full -- whether 2d-NamedHist can be fully plotted properly.
    """
    hist_2d = NamedHist(
        axis.Regular(
            50, -5, 5, name="A", label="a [units]", underflow=False, overflow=False
        ),
        axis.Regular(
            50, -4, 4, name="B", label="b [units]", underflow=False, overflow=False
        ),
    ).fill(B=np.random.normal(size=10), A=np.random.normal(size=10))
    assert hist_2d.plot2d_full(
        main_cmap="cividis",
        top_ls="--",
        top_color="orange",
        top_lw=2,
        side_ls="-.",
        side_lw=1,
        side_color="steelblue",
    )
    plt.close("all")

    # dimension error: a 1d projection has no full-2d representation
    other_2d = NamedHist(
        axis.Regular(
            50, -5, 5, name="A", label="a [units]", underflow=False, overflow=False
        ),
        axis.Regular(
            50, -4, 4, name="B", label="b [units]", underflow=False, overflow=False
        ),
    ).fill(B=np.random.normal(size=10), A=np.random.normal(size=10))
    with pytest.raises(Exception):
        other_2d.project("A").plot2d_full()

    # wrong kwargs names
    with pytest.raises(Exception):
        other_2d.plot2d_full(abc="red")
    with pytest.raises(Exception):
        other_2d.plot2d_full(color="red")

    # wrong kwargs type
    with pytest.raises(Exception):
        other_2d.plot2d_full(main_cmap=0.1, side_lw="autumn")

    plt.close("all")
def test_named_plot():
    """
    Test named plot -- whether NamedHist can be plotted properly.
    """
    # 1d histogram: the generic .plot() should accept line-style kwargs.
    hist_1d = NamedHist(
        axis.Regular(
            50, -5, 5, name="A", label="a [units]", underflow=False, overflow=False
        ),
    ).fill(A=np.random.normal(size=10))
    assert hist_1d.plot(color="green", ls="--", lw=3)

    # 2d histogram: the generic .plot() should accept image kwargs.
    hist_2d = NamedHist(
        axis.Regular(
            50, -5, 5, name="A", label="a [units]", underflow=False, overflow=False
        ),
        axis.Regular(
            50, -4, 4, name="B", label="b [units]", underflow=False, overflow=False
        ),
    ).fill(B=np.random.normal(size=10), A=np.random.normal(size=10))
    assert hist_2d.plot(cmap="cividis")
    plt.close("all")

    # dimension error: a histogram with three axes cannot be drawn directly
    hist_3d = NamedHist(
        axis.Regular(
            50, -5, 5, name="A", label="a [units]", underflow=False, overflow=False
        ),
        axis.Regular(
            50, -4, 4, name="B", label="b [units]", underflow=False, overflow=False
        ),
        axis.Regular(
            50, -4, 4, name="C", label="c [units]", underflow=False, overflow=False
        ),
    ).fill(
        A=np.random.normal(size=10),
        B=np.random.normal(size=10),
        C=np.random.normal(size=10),
    )
    with pytest.raises(Exception):
        hist_3d.plot()

    # wrong kwargs names
    with pytest.raises(Exception):
        hist_3d.project("A").plot(abc="red")
    with pytest.raises(Exception):
        hist_3d.project("A", "C").plot(abc="red")

    # wrong kwargs type
    with pytest.raises(Exception):
        hist_3d.project("B").plot(ls="red")
    with pytest.raises(Exception):
        hist_3d.project("A", "C").plot(cmap=0.1)

    plt.close("all")
def test_named_plot_pull():
    """
    Test named plot_pull -- whether 1d-NamedHist can be plotted pull properly.
    """
    hist_1d = NamedHist(
        axis.Regular(
            50, -4, 4, name="S", label="s [units]", underflow=False, overflow=False
        )
    ).fill(S=np.random.normal(size=10))

    def pdf(x, a=1 / np.sqrt(2 * np.pi), x0=0, sigma=1, offset=0):
        # unp.exp handles uncertainty (object-dtype) arrays; np.exp the rest
        exp = unp.exp if a.dtype == np.dtype("O") else np.exp
        gauss = a * exp(-((x - x0) ** 2) / (2 * sigma ** 2))
        return gauss + offset

    assert hist_1d.plot_pull(
        pdf,
        eb_ecolor="crimson",
        eb_mfc="crimson",
        eb_mec="crimson",
        eb_fmt="o",
        eb_ms=6,
        eb_capsize=1,
        eb_capthick=2,
        eb_alpha=0.8,
        fp_c="chocolate",
        fp_ls="-",
        fp_lw=3,
        fp_alpha=1.0,
        bar_fc="orange",
        pp_num=6,
        pp_fc="orange",
        pp_alpha=0.618,
        pp_ec=None,
    )

    # dimension error: pull plots are only defined for 1d histograms
    hist_2d = NamedHist(
        axis.Regular(
            50, -4, 4, name="X", label="s [units]", underflow=False, overflow=False
        ),
        axis.Regular(
            50, -4, 4, name="Y", label="s [units]", underflow=False, overflow=False
        ),
    ).fill(X=np.random.normal(size=10), Y=np.random.normal(size=10))
    with pytest.raises(Exception):
        hist_2d.plot_pull(pdf)

    # the fit target must be callable
    for not_callable in ("1", 1, 0.1, (1, 2), [1, 2], {"a": 1}):
        with pytest.raises(Exception):
            hist_1d.plot_pull(not_callable)
    plt.close("all")

    # wrong kwargs names
    with pytest.raises(Exception):
        hist_1d.plot_pull(pdf, abc="crimson", xyz="crimson")
    with pytest.raises(Exception):
        hist_1d.plot_pull(pdf, ecolor="crimson", mfc="crimson")

    # not disabled params: each sub-artist label stays settable
    for label_kwarg in ("eb_label", "fp_label", "ub_label", "bar_label", "pp_label"):
        hist_1d.plot_pull(pdf, **{label_kwarg: "value"})

    # disabled params
    with pytest.raises(Exception):
        hist_1d.plot_pull(pdf, bar_width="value")

    # wrong kwargs types
    with pytest.raises(Exception):
        hist_1d.plot_pull(pdf, eb_ecolor=1.0, eb_mfc=1.0)  # kwargs should be str

    plt.close("all")
| 344 | 0 | 54 |
6a2d2f548e7bfb36954f0ec0ef6f275ccfc9aa16 | 1,966 | py | Python | resources/PTZgrid/calcInitialCond.py | sebalander/sebaPhD | 0260094bd5143843ef372ce52aceb568834f90f4 | [
"BSD-3-Clause"
] | 6 | 2017-10-03T15:10:14.000Z | 2020-08-06T06:39:14.000Z | resources/PTZgrid/calcInitialCond.py | sebalander/sebaPhD | 0260094bd5143843ef372ce52aceb568834f90f4 | [
"BSD-3-Clause"
] | 1 | 2017-02-09T21:13:13.000Z | 2017-02-09T21:13:13.000Z | resources/PTZgrid/calcInitialCond.py | sebalander/sebaPhD | 0260094bd5143843ef372ce52aceb568834f90f4 | [
"BSD-3-Clause"
] | 4 | 2017-02-09T19:46:00.000Z | 2019-11-21T12:47:55.000Z | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 20 20:21:33 2016
generate the camera's pose conditions by hand
@author: sebalander
"""
# %%
import cv2
import numpy as np
import numpy.linalg as lin
from scipy.linalg import sqrtm, inv
import matplotlib.pyplot as plt
# %%
tVecFile = "PTZsheetTvecInitial.npy"
rVecFile = "PTZsheetRvecInitial.npy"
# %% Initial TRASLATION VECTOR
tVec = np.array([[0], [0], [2.5]])
# %% ROTATION MATRIX
# center of image points to grid point:
center = np.array([3*0.21, 5*0.297, 0])
z = center - tVec[:,0]
z /= lin.norm(z)
# la tercera coordenada no la se, la dejo en cero
x = np.array([6*21, -1*29.7, 0])
y = np.array([-1*21, -7*29.7, 0])
# hacer que x,y sean perp a z, agregar la tercera componente
x = x - z * np.dot(x,z) # hago perpendicular a z
x /= lin.norm(x)
y = y - z * np.dot(y,z) # hago perpendicular a z
y /= lin.norm(y)
# %% test ortogonal
np.dot(x,z)
np.dot(y,z)
np.dot(x,y) # ok if not perfectly 0
# %% make into versor matrix
rMatrix = np.array([x,y,z])
# find nearest ortogonal matrix
# http://stackoverflow.com/questions/13940056/orthogonalize-matrix-numpy
rMatrix = rMatrix.dot(inv(sqrtm(rMatrix.T.dot(rMatrix))))
# %% SAVE PARAMETERS
# convert to rodrigues vector
rVec, _ = cv2.Rodrigues(rMatrix)
np.save(tVecFile, tVec)
np.save(rVecFile, rVec)
# %% PLOT VECTORS
[x,y,z] = rMatrix # get from ortogonal matrix
tvec = tVec[:,0]
fig = plt.figure()
from mpl_toolkits.mplot3d import Axes3D
ax = fig.gca(projection='3d')
ax.plot([0, tvec[0]],
[0, tvec[1]],
[0, tvec[2]])
ax.plot([tvec[0], tvec[0] + x[0]],
[tvec[1], tvec[1] + x[1]],
[tvec[2], tvec[2] + x[2]])
ax.plot([tvec[0], tvec[0] + y[0]],
[tvec[1], tvec[1] + y[1]],
[tvec[2], tvec[2] + y[2]])
ax.plot([tvec[0], tvec[0] + z[0]],
[tvec[1], tvec[1] + z[1]],
[tvec[2], tvec[2] + z[2]])
#ax.legend()
#ax.set_xlim3d(0, 1)
#ax.set_ylim3d(0, 1)
#ax.set_zlim3d(0, 1)
plt.show() | 21.844444 | 72 | 0.61648 | # -*- coding: utf-8 -*-
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 20 20:21:33 2016
generate the camera's pose conditions by hand
@author: sebalander
"""
# %%
import cv2
import numpy as np
import numpy.linalg as lin
from scipy.linalg import sqrtm, inv
import matplotlib.pyplot as plt

# %% destination files for the hand-made initial extrinsics
tVecFile = "PTZsheetTvecInitial.npy"
rVecFile = "PTZsheetRvecInitial.npy"

# %% initial translation vector (column form)
tVec = np.array([[0], [0], [2.5]])

# %% rotation matrix
# the optical axis points from the camera towards this grid point
center = np.array([3 * 0.21, 5 * 0.297, 0])
z = center - tVec[:, 0]
z /= lin.norm(z)

# in-plane guesses for the image x and y directions
# (third coordinate unknown, left at zero)
x = np.array([6 * 21, -1 * 29.7, 0])
y = np.array([-1 * 21, -7 * 29.7, 0])

# project out the z component of each direction, then normalize
x = x - z * np.dot(x, z)
x /= lin.norm(x)
y = y - z * np.dot(y, z)
y /= lin.norm(y)

# %% orthogonality checks (x.y is ok if not perfectly 0)
np.dot(x, z)
np.dot(y, z)
np.dot(x, y)

# %% stack the versors and snap to the nearest orthogonal matrix
# http://stackoverflow.com/questions/13940056/orthogonalize-matrix-numpy
rMatrix = np.array([x, y, z])
rMatrix = rMatrix.dot(inv(sqrtm(rMatrix.T.dot(rMatrix))))

# %% convert to a Rodrigues vector and save both parameters
rVec, _ = cv2.Rodrigues(rMatrix)
np.save(tVecFile, tVec)
np.save(rVecFile, rVec)

# %% plot the camera position and its three axes
[x, y, z] = rMatrix
tvec = tVec[:, 0]

fig = plt.figure()
from mpl_toolkits.mplot3d import Axes3D
ax = fig.gca(projection='3d')

# segments to draw: origin->camera, then camera->camera+axis for x, y, z
segments = [
    (np.zeros(3), tvec),
    (tvec, tvec + x),
    (tvec, tvec + y),
    (tvec, tvec + z),
]
for p0, p1 in segments:
    ax.plot([p0[0], p1[0]],
            [p0[1], p1[1]],
            [p0[2], p1[2]])

plt.show()
b6dd96934dde46ba7c8e268255f6ccb3b47bf7e8 | 985 | py | Python | setup.py | MrIncredibuell/clerius | e3f482754892ae32b3862d2a283b54d4ed955b9a | [
"MIT"
] | null | null | null | setup.py | MrIncredibuell/clerius | e3f482754892ae32b3862d2a283b54d4ed955b9a | [
"MIT"
] | null | null | null | setup.py | MrIncredibuell/clerius | e3f482754892ae32b3862d2a283b54d4ed955b9a | [
"MIT"
import setuptools

# The long description comes straight from the README; read it as UTF-8 so
# the build does not depend on the platform's locale encoding (e.g. cp1252
# on Windows would crash on non-ASCII characters).
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

setuptools.setup(
    name="clericus",
    version="0.0.3a27",
    author="Joseph L Buell V",
    author_email="jlrbuellv@gmail.com",
    description=
    "An async webserver focused on being predictable and self documenting.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/mrincredibuell/clericus",
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Development Status :: 3 - Alpha",
    ],
    install_requires=[
        "aiohttp>=3.5.4",
        "pyjwt>=1.7.1",
        "motor>=2.0.0",
        "python-dateutil>=2.8.0",
        "bcrypt>=3.1.6",
        "dnspython>=1.16.0",
        "faker>=1.0.7",
        "markdown>=3.1.1",
        "ansicolors>=1.1.8",
    ],
)
import setuptools

with open("README.md", "r") as fh:
    long_description = fh.read()

# Trove classifiers describing maturity and compatibility.
_CLASSIFIERS = [
    "Programming Language :: Python :: 3",
    "License :: OSI Approved :: MIT License",
    "Operating System :: OS Independent",
    "Development Status :: 3 - Alpha",
]

# Runtime dependencies with minimum tested versions.
_REQUIREMENTS = [
    "aiohttp>=3.5.4",
    "pyjwt>=1.7.1",
    "motor>=2.0.0",
    "python-dateutil>=2.8.0",
    "bcrypt>=3.1.6",
    "dnspython>=1.16.0",
    "faker>=1.0.7",
    "markdown>=3.1.1",
    "ansicolors>=1.1.8",
]

setuptools.setup(
    name="clericus",
    version="0.0.3a27",
    author="Joseph L Buell V",
    author_email="jlrbuellv@gmail.com",
    description=
    "An async webserver focused on being predictable and self documenting.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/mrincredibuell/clericus",
    packages=setuptools.find_packages(),
    classifiers=_CLASSIFIERS,
    install_requires=_REQUIREMENTS,
)
b98e6632fd9bb96d3dde1a83e1c4c80e452a716a | 1,433 | py | Python | test/make_global_settings/env-wrapper/gyptest-wrapper.py | Herjar/gyp | 4d467626b0b9f59a85fb81ca4d7ea9eca99b9d8f | [
"BSD-3-Clause"
] | 75 | 2015-02-03T14:54:27.000Z | 2022-03-24T06:44:38.000Z | test/make_global_settings/env-wrapper/gyptest-wrapper.py | Herjar/gyp | 4d467626b0b9f59a85fb81ca4d7ea9eca99b9d8f | [
"BSD-3-Clause"
] | 3 | 2016-08-22T10:35:24.000Z | 2019-07-16T19:47:20.000Z | test/make_global_settings/env-wrapper/gyptest-wrapper.py | Herjar/gyp | 4d467626b0b9f59a85fb81ca4d7ea9eca99b9d8f | [
"BSD-3-Clause"
] | 43 | 2015-02-02T04:26:11.000Z | 2021-09-07T06:06:58.000Z | #!/usr/bin/env python
#!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""
Verifies *_wrapper in environment.
"""

import os
import sys

import TestGyp

# Test is disabled upstream; bail out before doing any real work.
print "This test is currently disabled: https://crbug.com/483696."
sys.exit(0)

# Everything below only runs once the test is re-enabled.
test_format = ['ninja']

# Wrapper executables that gyp should pick up from the environment and
# prepend to the corresponding tool invocations.
os.environ['CC_wrapper'] = 'distcc'
os.environ['LINK_wrapper'] = 'distlink'
os.environ['CC.host_wrapper'] = 'ccache'

test = TestGyp.TestGyp(formats=test_format)

# Save the environment, run gyp with cross-compilation enabled, then
# restore the environment so later code is unaffected.
old_env = dict(os.environ)
os.environ['GYP_CROSSCOMPILE'] = '1'
test.run_gyp('wrapper.gyp')
os.environ.clear()
os.environ.update(old_env)

if test.format == 'ninja':
  # Expected tool lines in the generated ninja file: each wrapper must
  # precede the compiler it wraps.
  cc_expected = ('cc = ' + os.path.join('..', '..', 'distcc') + ' ' +
                 os.path.join('..', '..', 'clang'))
  cc_host_expected = ('cc_host = ' + os.path.join('..', '..', 'ccache') + ' ' +
                      os.path.join('..', '..', 'clang'))
  ld_expected = 'ld = ../../distlink $cc'
  if sys.platform != 'win32':
    ldxx_expected = 'ldxx = ../../distlink $cxx'
  if sys.platform == 'win32':
    # On Windows linking goes through link.exe; LINK_wrapper does not apply.
    ld_expected = 'link.exe'
  test.must_contain('out/Default/build.ninja', cc_expected)
  test.must_contain('out/Default/build.ninja', cc_host_expected)
  test.must_contain('out/Default/build.ninja', ld_expected)
  if sys.platform != 'win32':
    test.must_contain('out/Default/build.ninja', ldxx_expected)

test.pass_test()
| 28.66 | 79 | 0.651779 | #!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies *_wrapper in environment.
"""
import os
import sys
import TestGyp
print "This test is currently disabled: https://crbug.com/483696."
sys.exit(0)
test_format = ['ninja']
os.environ['CC_wrapper'] = 'distcc'
os.environ['LINK_wrapper'] = 'distlink'
os.environ['CC.host_wrapper'] = 'ccache'
test = TestGyp.TestGyp(formats=test_format)
old_env = dict(os.environ)
os.environ['GYP_CROSSCOMPILE'] = '1'
test.run_gyp('wrapper.gyp')
os.environ.clear()
os.environ.update(old_env)
if test.format == 'ninja':
cc_expected = ('cc = ' + os.path.join('..', '..', 'distcc') + ' ' +
os.path.join('..', '..', 'clang'))
cc_host_expected = ('cc_host = ' + os.path.join('..', '..', 'ccache') + ' ' +
os.path.join('..', '..', 'clang'))
ld_expected = 'ld = ../../distlink $cc'
if sys.platform != 'win32':
ldxx_expected = 'ldxx = ../../distlink $cxx'
if sys.platform == 'win32':
ld_expected = 'link.exe'
test.must_contain('out/Default/build.ninja', cc_expected)
test.must_contain('out/Default/build.ninja', cc_host_expected)
test.must_contain('out/Default/build.ninja', ld_expected)
if sys.platform != 'win32':
test.must_contain('out/Default/build.ninja', ldxx_expected)
test.pass_test()
| 0 | 0 | 0 |