hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1a3232723908fc855045a3607bc9137c6b489fe5 | 875 | py | Python | ndtt/mp/mpmanager.py | HMEIatJHU/neural-datalog-through-time | 725e02f4edf970e63d1c4b82d71b3720fd7938ca | [
"MIT"
] | 18 | 2020-07-01T06:25:03.000Z | 2021-10-05T23:25:21.000Z | ndtt/mp/mpmanager.py | matthewhammer/neural-datalog-through-time | 2e8abd33dfabe65869df5fddc44094e64aca6ca7 | [
"MIT"
] | 1 | 2021-05-21T08:53:25.000Z | 2021-05-21T08:53:25.000Z | ndtt/mp/mpmanager.py | matthewhammer/neural-datalog-through-time | 2e8abd33dfabe65869df5fddc44094e64aca6ca7 | [
"MIT"
] | 4 | 2020-07-15T14:06:43.000Z | 2021-01-03T13:54:44.000Z | import torch
from torch import nn
import torch.optim as optim
import torch.multiprocessing as mp
import numpy as np
import time
class MPManager(object):
    """Thin wrapper around a multiprocessing pool for SIMD-style fan-out:
    one function applied to a list of argument bundles, one per worker task."""

    def __init__(self, num_workers):
        """
        Manage a single-instruction-multiple-data (SIMD) scheme.

        :param int num_workers: The number of processors (pool workers) to run.
        """
        # NOTE(review): set_start_method raises RuntimeError if the start method
        # was already set (e.g. a second MPManager in the same process) — confirm
        # only one instance is ever constructed, or consider mp.get_context('spawn').
        mp.set_start_method('spawn')
        # Number of worker processes in the pool.
        self.num_workers = num_workers
        # A pool of processes; tasks submitted via run() are distributed over it.
        self.pool = mp.Pool(processes=num_workers)

    def run(self, function, arguments):
        """
        Apply `function` to every element of `arguments` in parallel (pool.map).

        :param function : the instruction — a picklable callable executed by each worker
        :param arguments : list of things processors loop over
                          can be anything the function works on, e.g. model + data
        :return: list of per-argument results, in the same order as `arguments`
        """
        output_and_grads = self.pool.map(function, arguments)
        return output_and_grads
| 28.225806 | 64 | 0.657143 | 743 | 0.849143 | 0 | 0 | 0 | 0 | 0 | 0 | 390 | 0.445714 |
1a337ebf3d2a86cd14f219f3c9b89af5616c8f7e | 1,766 | py | Python | link.py | EthanC2/broken-link-finder | 1a699c6c1f56e0575781abfdfff767bf94345dca | [
"BSD-3-Clause"
] | null | null | null | link.py | EthanC2/broken-link-finder | 1a699c6c1f56e0575781abfdfff767bf94345dca | [
"BSD-3-Clause"
] | null | null | null | link.py | EthanC2/broken-link-finder | 1a699c6c1f56e0575781abfdfff767bf94345dca | [
"BSD-3-Clause"
] | null | null | null | # Link class
class Link:
    """A hyperlink record: its anchor text, URL and HTTP status code.

    Links compare (and therefore sort) by status code only, so a list of
    Links can be ordered to group broken links together.
    """

    ## Constructor ##
    def __init__(self, text = "None", url = "None", status_code = 0):  # Not the keyword 'None' so it will still print something
        # Link attributes: anchor text, target URL, and HTTP status code.
        self.text = text
        self.url = url
        self.status_code = status_code

    # Trims the inside of a string (removing extra whitespace between words).
    def trim_inner(self, text):
        # str.split() with no args already discards all surrounding whitespace,
        # so no per-word strip is needed.
        return " ".join(text.split())

    ## OVERLOADS ##
    # String representation of the 'Link' class
    def __str__(self):
        # Format: status code, hyperlinked text, then url (CSV)
        text = self.trim_inner(self.text)  # Trim internal whitespace;
        if not text:                       # If the string is blank,
            text = "N/A"                   # Give it some text
        return f"{self.status_code}, {text}, {self.url}"

    def __repr__(self):
        return f"Link(text={self.text!r}, url={self.url!r}, status_code={self.status_code!r})"

    # Relational operators, compared by status code for sorting.
    # < (less than)
    def __lt__(self, other):
        return self.status_code < other.status_code

    # <= (less than or equal to)
    def __le__(self, other):
        return self.status_code <= other.status_code

    # == (is equal to)
    def __eq__(self, other):
        # BUG FIX: was 'self.staus_code' (typo), which raised AttributeError
        # whenever two Links were compared for equality.
        return self.status_code == other.status_code

    # != (is not equal to)
    def __ne__(self, other):
        return self.status_code != other.status_code

    # > (greater than)
    def __gt__(self, other):
        return self.status_code > other.status_code

    # >= (greater than or equal to)
    def __ge__(self, other):
        return self.status_code >= other.status_code
# End of Link class
| 32.703704 | 132 | 0.596263 | 1,731 | 0.980181 | 0 | 0 | 0 | 0 | 0 | 0 | 703 | 0.398075 |
1a33943d2cf0f6c01fc1fd72edefaa54e0e682d5 | 3,911 | py | Python | distnet/keras_models/self_attention.py | jeanollion/dlutils | ea419e79486e1212219dc06d39c3a4f4c305ff49 | [
"Apache-2.0"
] | 4 | 2020-05-27T01:39:44.000Z | 2021-09-03T18:20:33.000Z | distnet/keras_models/self_attention.py | jeanollion/dlutils | ea419e79486e1212219dc06d39c3a4f4c305ff49 | [
"Apache-2.0"
] | null | null | null | distnet/keras_models/self_attention.py | jeanollion/dlutils | ea419e79486e1212219dc06d39c3a4f4c305ff49 | [
"Apache-2.0"
] | null | null | null | import tensorflow as tf
from tensorflow.keras.layers import Layer, Dense, Reshape, Embedding, Concatenate, Conv2D
from tensorflow.keras.models import Model
import numpy as np
class SelfAttention(Model):
    """Single-head self-attention over a 2-D feature map, with an optional
    learned positional embedding added to the input before the Q/K/V projections."""

    def __init__(self, d_model, spatial_dims, positional_encoding=True, name="self_attention"):
        '''
        d_model : number of output channels (dimension of the Q/K/V projections)
        spatial_dims : spatial dimensions of input tensor (y, x) — must match the
            spatial shape of the tensors later passed to call()
        if positional_encoding: depth (input channel count) must correspond to d_model,
            because the embedding is added directly to the input
        adapted from: https://www.tensorflow.org/tutorials/text/transformer
        '''
        super().__init__(name=name)
        self.d_model = d_model
        self.spatial_dims=spatial_dims
        # Total number of spatial positions; used as the positional vocabulary size
        # and for flattening the map into a sequence.
        self.spatial_dim = np.prod(spatial_dims)
        # Dense projections producing queries, keys and values.
        self.wq = Dense(self.d_model, name=name+"_q")
        self.wk = Dense(self.d_model, name=name+"_k")
        # NOTE(review): value projection is named "_w" rather than "_v" — likely a
        # typo, but renaming would break checkpoints keyed on layer names.
        self.wv = Dense(self.d_model, name=name+"_w")
        self.positional_encoding=positional_encoding
        if positional_encoding:
            # One learned d_model-dim vector per flattened spatial position.
            self.pos_embedding = Embedding(self.spatial_dim, d_model, name=name+"pos_enc") # TODO test other positional encoding. in particular that encodes X and Y

    def call(self, x):
        '''
        x : tensor with shape (batch_size, y, x, channels)
        returns (output, attention_weights) with output of shape
        (batch_size, y, x, d_model) and weights (batch_size, spa_dim, spa_dim)
        '''
        shape = tf.shape(x)
        batch_size = shape[0]
        #spatial_dims = shape[1:-1]
        #spatial_dim = tf.reduce_prod(spatial_dims)
        # NOTE(review): depth_dim is the *input* channel count, but the reshapes
        # below use it for q/k/v whose last axis is d_model — this only works when
        # channels == d_model (required anyway when positional_encoding is on).
        depth_dim = shape[3]
        if self.positional_encoding:
            x_index = tf.range(self.spatial_dim, dtype=tf.int32)
            pos_emb = self.pos_embedding(x_index) # (spa_dim, d_model)
            pos_emb = tf.reshape(pos_emb, (self.spatial_dims[0], self.spatial_dims[1], self.d_model)) #for broadcasting purpose
            x = x + pos_emb # broadcast
        q = self.wq(x)  # (batch_size, *spa_dims, d_model)
        k = self.wk(x)  # (batch_size, *spa_dims, d_model)
        v = self.wv(x)  # (batch_size, *spa_dims, d_model)

        # Flatten the spatial grid into a sequence for dot-product attention.
        q = tf.reshape(q, (batch_size, -1, depth_dim)) # (batch_size, spa_dim, d_model)
        k = tf.reshape(k, (batch_size, -1, depth_dim))
        v = tf.reshape(v, (batch_size, -1, depth_dim))

        # scaled_attention.shape == (batch_size, spa_dims, depth)
        # attention_weights.shape == (batch_size, spa_dims, spa_dims)
        scaled_attention, attention_weights = scaled_dot_product_attention(q, k, v)
        # Restore the 2-D spatial layout.
        output = tf.reshape(scaled_attention, (batch_size, self.spatial_dims[0], self.spatial_dims[1], self.d_model))
        # Named identity so the weights tensor can be retrieved from the graph.
        tf.identity(attention_weights, name=self.name+"_attention_weights")
        return output, attention_weights

    def compute_output_shape(self, input_shape):
        # (output shape with d_model channels, attention-weights shape)
        return input_shape[:-1]+(self.d_model,), (input_shape[0],self.spatial_dim,self.spatial_dim)
def scaled_dot_product_attention(q, k, v):
    """Compute softmax(q @ k^T / sqrt(d_k)) @ v.

    q, k, v must have matching leading (batch) dimensions, and k, v must share
    their penultimate dimension (seq_len_k == seq_len_v).

    Args:
      q: query, shape == (..., seq_len_q, depth)
      k: key, shape == (..., seq_len_k, depth)
      v: value, shape == (..., seq_len_v, depth_v)

    Returns:
      (output, attention_weights), where output has shape
      (..., seq_len_q, depth_v) and attention_weights has shape
      (..., seq_len_q, seq_len_k).

    from : https://www.tensorflow.org/tutorials/text/transformer
    """
    # Similarity of every query with every key, scaled by sqrt(d_k) to keep
    # the logits in a range where softmax has usable gradients.
    key_depth = tf.cast(tf.shape(k)[-1], tf.float32)
    scaled_logits = tf.matmul(q, k, transpose_b=True) / tf.math.sqrt(key_depth)

    # Normalize over the key axis so each query's weights sum to 1.
    attention_weights = tf.nn.softmax(scaled_logits, axis=-1)  # (..., seq_len_q, seq_len_k)

    # Weighted sum of the values.
    output = tf.matmul(attention_weights, v)  # (..., seq_len_q, depth_v)
    return output, attention_weights
| 43.455556 | 164 | 0.662746 | 2,604 | 0.665814 | 0 | 0 | 0 | 0 | 0 | 0 | 1,626 | 0.41575 |
1a3455e2c8f1c28a1b5a8b75f7074c1334a2652b | 62,335 | py | Python | src/UQpy/SampleMethods.py | bsaakash/new_repo | c188689d12a5e3f7ff3c81ff76524bdbc2569de9 | [
"Apache-2.0"
] | null | null | null | src/UQpy/SampleMethods.py | bsaakash/new_repo | c188689d12a5e3f7ff3c81ff76524bdbc2569de9 | [
"Apache-2.0"
] | null | null | null | src/UQpy/SampleMethods.py | bsaakash/new_repo | c188689d12a5e3f7ff3c81ff76524bdbc2569de9 | [
"Apache-2.0"
] | null | null | null | """This module contains functionality for all the sampling methods supported in UQpy."""
import sys
import copy
import numpy as np
from scipy.spatial.distance import pdist
import scipy.stats as sp
import random
from UQpy.Distributions import *
import warnings
def init_sm(data):
    """
    Validate and normalize the settings dictionary for a sampling run.

    Checks that ``data['method']`` names a supported sampler, verifies that every
    mandatory key for that sampler is present (raising ``NotImplementedError``
    otherwise), and fills each missing optional key with ``None`` so that the
    downstream constructors in ``run_sm`` can rely on every key existing.

    :param data: settings dictionary; mutated in place.
    """
    # Supported sampling methods — register any new method here.
    valid_methods = ['mcs', 'lhs', 'mcmc', 'pss', 'sts', 'SuS']

    if 'method' not in data:
        raise NotImplementedError("No sampling method was provided")
    if data['method'] not in valid_methods:
        raise NotImplementedError("method - %s not available" % data['method'])

    method = data['method']

    # Monte Carlo simulation.
    # Mandatory: distribution type, distribution parameters.
    # Optional (defaulted to None): number of samples, number of parameters.
    if method == 'mcs':
        data.setdefault('number of samples', None)
        if 'distribution type' not in data:
            raise NotImplementedError("Distributions not defined. Exit code")
        if 'distribution parameters' not in data:
            raise NotImplementedError("Distribution parameters not provided. Exit code")
        data.setdefault('number of parameters', None)

    # Latin hypercube sampling.
    # Mandatory: distribution type, distribution parameters.
    # Optional: number of parameters/samples, criterion, distance metric, iterations.
    if method == 'lhs':
        data.setdefault('number of parameters', None)
        data.setdefault('number of samples', None)
        if 'distribution type' not in data:
            raise NotImplementedError("Exit code: Distributions not defined.")
        if 'distribution parameters' not in data:
            raise NotImplementedError("Exit code: Distribution parameters not defined.")
        for optional_key in ('criterion', 'distance', 'iterations'):
            data.setdefault(optional_key, None)

    # Markov chain Monte Carlo.
    # Mandatory: number of parameters, target distribution type/parameters,
    # number of samples. Optional: seed, skip, proposal settings, algorithm.
    if method == 'mcmc':
        if 'number of parameters' not in data:
            raise NotImplementedError('Exit code: Number of parameters not defined.')
        if 'target distribution type' not in data:
            raise NotImplementedError("Exit code: Target distribution type not defined.")
        if 'target distribution parameters' not in data:
            raise NotImplementedError("Exit code: Target distribution parameters not defined.")
        if 'number of samples' not in data:
            raise NotImplementedError('Exit code: Number of samples not defined.')
        for optional_key in ('seed', 'skip', 'proposal distribution type',
                             'proposal distribution width', 'algorithm'):
            data.setdefault(optional_key, None)

    # Partially stratified sampling.
    # Mandatory: distributions, their parameters, pss design and strata.
    # Optional: number of parameters.
    if method == 'pss':
        if 'distribution type' not in data:
            raise NotImplementedError("Exit code: Distributions not defined.")
        elif 'distribution parameters' not in data:
            raise NotImplementedError("Exit code: distribution parameters not defined.")
        if 'design' not in data:
            raise NotImplementedError("Exit code: pss design not defined.")
        if 'strata' not in data:
            raise NotImplementedError("Exit code: pss strata not defined.")
        data.setdefault('number of parameters', None)

    # Stratified sampling.
    # Mandatory: distributions, their parameters, sts design.
    # Optional: number of parameters.
    if method == 'sts':
        if 'distribution type' not in data:
            raise NotImplementedError("Exit code: Distributions not defined.")
        elif 'distribution parameters' not in data:
            raise NotImplementedError("Exit code: distribution parameters not defined.")
        if 'design' not in data:
            raise NotImplementedError("Exit code: sts design not defined.")
        data.setdefault('number of parameters', None)

    # NOTE(review): 'SuS' is accepted above but has no validation block here and
    # no dispatch branch in run_sm — presumably handled elsewhere; confirm.
########################################################################################################################
########################################################################################################################
########################################################################################################################
def run_sm(data):
    """
    Build and run the sampler selected by ``data['method']``.

    ``data`` is assumed to have been validated/normalized by ``init_sm`` so that
    every key read below exists.

    :param data: settings dictionary (see ``init_sm``).
    :return: the constructed sampler instance (MCS, LHS, PSS, STS or MCMC),
             carrying the generated samples in ``samplesU01``/``samples``.
    :raises NotImplementedError: for a method with no dispatch branch below
            (e.g. 'SuS', which init_sm accepts but is not implemented here).
    """
    # BUG FIX: the original called print("\nRunning %k \n", data['method']),
    # which printed the literal "%k" string and the method name as two separate
    # arguments ('%k' is not even a valid format specifier); use %-formatting.
    print("\nRunning %s \n" % data['method'])

    if data['method'] == 'mcs':
        # Brute-force Monte Carlo.
        rvs = MCS(dimension=data['number of parameters'], pdf_type=data['distribution type'],
                  pdf_params=data['distribution parameters'],
                  nsamples=data['number of samples'])
    elif data['method'] == 'lhs':
        # Latin hypercube sampling.
        rvs = LHS(dimension=data['number of parameters'], pdf_type=data['distribution type'],
                  pdf_params=data['distribution parameters'],
                  nsamples=data['number of samples'], lhs_metric=data['distance'],
                  lhs_iter=data['iterations'], lhs_criterion=data['criterion'])
    elif data['method'] == 'pss':
        # Partially stratified sampling.
        rvs = PSS(dimension=data['number of parameters'], pdf_type=data['distribution type'],
                  pdf_params=data['distribution parameters'],
                  pss_design=data['design'], pss_strata=data['strata'])
    elif data['method'] == 'sts':
        # Stratified sampling.
        rvs = STS(dimension=data['number of parameters'], pdf_type=data['distribution type'],
                  pdf_params=data['distribution parameters'], sts_design=data['design'])
    elif data['method'] == 'mcmc':
        # Markov chain Monte Carlo.
        rvs = MCMC(dimension=data['number of parameters'], pdf_target_type=data['target distribution type'],
                   algorithm=data['algorithm'], pdf_proposal_type=data['proposal distribution type'],
                   pdf_proposal_width=data['proposal distribution width'],
                   pdf_target_params=data['target distribution parameters'], seed=data['seed'],
                   skip=data['skip'], nsamples=data['number of samples'])
    else:
        # BUG FIX: previously fell through and raised UnboundLocalError on
        # 'rvs'; fail with a clear message instead.
        raise NotImplementedError("method - %s not available" % data['method'])

    return rvs
########################################################################################################################
########################################################################################################################
# Monte Carlo simulation
########################################################################################################################
class MCS:
    """
    Brute-force Monte Carlo sampling (MCS).

    ``samplesU01`` are uniform samples on the unit hypercube [0, 1]^dimension;
    ``samples`` are the same points mapped into the parameter space through the
    inverse CDFs of the requested marginal distributions.

    :param dimension: Number of parameters
    :type dimension: int

    :param nsamples: Number of samples to be generated
    :type nsamples: int

    :param pdf_type: Type of distributions
    :type pdf_type: list

    :param pdf_params: Distribution parameters
    :type pdf_params: list
    """

    def __init__(self, dimension=None, pdf_type=None, pdf_params=None, nsamples=None):
        self.dimension = dimension
        self.nsamples = nsamples
        self.pdf_type = pdf_type
        self.pdf_params = pdf_params
        # Validate/broadcast the inputs, then draw the samples immediately.
        self.init_mcs()
        self.samplesU01, self.samples = self.run_mcs()

    def run_mcs(self):
        """Draw uniform samples on [0, 1]^d and map them to the parameter space."""
        unit_samples = np.random.rand(self.nsamples, self.dimension)
        return unit_samples, inv_cdf(unit_samples, self.pdf_type, self.pdf_params)

    ################################################################################################################
    # Initialize Monte Carlo simulation.
    # Necessary parameters: 1. Probability distribution, 2. Probability distribution parameters 3. Number of samples
    # Optional: dimension, names of random variables
    def init_mcs(self):
        """Check mandatory inputs and broadcast pdf_type/pdf_params to one entry
        per dimension (a single entry is replicated across all dimensions)."""
        if self.nsamples is None:
            raise NotImplementedError("Exit code: Number of samples not defined.")
        if self.pdf_type is None:
            raise NotImplementedError("Exit code: Distributions not defined.")
        for dist_name in self.pdf_type:
            if dist_name not in ['Uniform', 'Normal', 'Lognormal', 'Weibull', 'Beta', 'Exponential', 'Gamma']:
                raise NotImplementedError("Exit code: Unrecognized type of distribution."
                                          "Supported distributions: 'Uniform', 'Normal', 'Lognormal', "
                                          "'Weibull', 'Beta', 'Exponential', 'Gamma'. ")
        if self.pdf_params is None:
            raise NotImplementedError("Exit code: Distribution parameters not defined.")

        if self.dimension is None:
            # Infer the dimension from the distribution lists, which must agree.
            if len(self.pdf_type) != len(self.pdf_params):
                raise NotImplementedError("Exit code: Incompatible dimensions.")
            self.dimension = len(self.pdf_type)
        else:
            # Broadcast a single distribution type and/or parameter set across
            # all dimensions; list multiplication replicates the entries just
            # like the original itertools.repeat + chain.from_iterable dance.
            n_type = len(self.pdf_type)
            n_params = len(self.pdf_params)
            if n_type == 1 and n_params == self.dimension:
                self.pdf_type = self.pdf_type * self.dimension
            elif n_params == 1 and n_type == self.dimension:
                self.pdf_params = self.pdf_params * self.dimension
            elif n_params == 1 and n_type == 1:
                self.pdf_type = self.pdf_type * self.dimension
                self.pdf_params = self.pdf_params * self.dimension
            elif n_type != n_params:
                raise NotImplementedError("Exit code: Incompatible dimensions")
########################################################################################################################
########################################################################################################################
# Latin hypercube sampling (LHS)
########################################################################################################################
class LHS:
    """
    A class that creates a Latin Hypercube Design for experiments.
    SamplesU01 belong in hypercube [0, 1]^n while samples belong to the parameter space.

    :param pdf_type: Distribution of the parameters
    :type pdf_type: list

    :param pdf_params: Distribution parameters
    :type pdf_params: list

    :param lhs_criterion: The criterion for generating sample points
                            Options:
                                1. random - completely random \n
                                2. centered - points only at the centre \n
                                3. maximin - maximising the minimum distance between points \n
                                4. correlate - minimizing the correlation between the points \n
    :type lhs_criterion: str

    :param lhs_iter: The number of iterations to run. Only used for the maximin and
                     correlate criteria (defaults to 1000).
    :type lhs_iter: int

    :param lhs_metric: The distance metric to use (maximin criterion). Supported metrics are
                        'braycurtis', 'canberra', 'chebyshev', 'cityblock', 'correlation', 'cosine', 'dice', \n
                        'euclidean', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis', 'matching', 'minkowski', \n
                        'rogerstanimoto', 'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean', \n
                        'yule'.
    :type lhs_metric: str
    """

    def __init__(self, dimension=None, pdf_type=None, pdf_params=None, lhs_criterion=None, lhs_metric=None,
                 lhs_iter=None, nsamples=None):
        self.dimension = dimension
        self.nsamples = nsamples
        self.pdf_type = pdf_type
        self.pdf_params = pdf_params
        self.lhs_criterion = lhs_criterion
        self.lhs_metric = lhs_metric
        self.lhs_iter = lhs_iter
        # Validate/broadcast inputs, then generate the design immediately.
        self.init_lhs()
        self.samplesU01, self.samples = self.run_lhs()

    def run_lhs(self):
        """Build the one-dimensional strata and dispatch to the selected criterion.

        Returns (samplesU01, samples): the design on [0, 1]^d and its image in
        the parameter space via the inverse CDFs.
        """
        print('Running LHS for ' + str(self.lhs_iter) + ' iterations')
        # Partition [0, 1] into nsamples equal strata; a[i], b[i] bound stratum i.
        cut = np.linspace(0, 1, self.nsamples + 1)
        a = cut[:self.nsamples]
        b = cut[1:self.nsamples + 1]

        # init_lhs guarantees lhs_criterion is one of the four values below.
        if self.lhs_criterion == 'random':
            samples = self._random(a, b)
            samples_u_to_x = inv_cdf(samples, self.pdf_type, self.pdf_params)
            return samples, samples_u_to_x
        elif self.lhs_criterion == 'centered':
            samples = self._centered(a, b)
            samples_u_to_x = inv_cdf(samples, self.pdf_type, self.pdf_params)
            return samples, samples_u_to_x
        elif self.lhs_criterion == 'maximin':
            samples = self._max_min(a, b)
            samples_u_to_x = inv_cdf(samples, self.pdf_type, self.pdf_params)
            return samples, samples_u_to_x
        elif self.lhs_criterion == 'correlate':
            samples = self._correlate(a, b)
            samples_u_to_x = inv_cdf(samples, self.pdf_type, self.pdf_params)
            return samples, samples_u_to_x

    def _random(self, a, b):
        """
        :return: The sample points for the random LHS design (one point drawn
                 uniformly inside each stratum, then shuffled per dimension).
        """
        u = np.random.rand(self.nsamples, self.dimension)
        samples = np.zeros_like(u)

        # Place one point uniformly inside each stratum [a_i, b_i).
        for i in range(self.dimension):
            samples[:, i] = u[:, i] * (b - a) + a

        # Independently permute each column so strata pairings are random.
        for j in range(self.dimension):
            order = np.random.permutation(self.nsamples)
            samples[:, j] = samples[order, j]
        return samples

    def _centered(self, a, b):
        """Centered LHS: one point at the midpoint of each stratum, columns permuted."""
        samples = np.zeros([self.nsamples, self.dimension])
        centers = (a + b) / 2

        for i in range(self.dimension):
            samples[:, i] = np.random.permutation(centers)

        return samples

    def _max_min(self, a, b):
        """Maximin LHS: among lhs_iter random designs, keep the one with the
        largest minimum pairwise distance (per self.lhs_metric)."""
        max_min_dist = 0
        samples = self._random(a, b)
        for _ in range(self.lhs_iter):
            samples_try = self._random(a, b)
            d = pdist(samples_try, metric=self.lhs_metric)
            if max_min_dist < np.min(d):
                max_min_dist = np.min(d)
                samples = copy.deepcopy(samples_try)
        print('Achieved max_min distance of ', max_min_dist)
        return samples

    def _correlate(self, a, b):
        """Correlate LHS: among lhs_iter random designs, keep the one whose largest
        absolute off-diagonal correlation between columns is smallest."""
        min_corr = np.inf
        samples = self._random(a, b)
        for _ in range(self.lhs_iter):
            samples_try = self._random(a, b)
            R = np.corrcoef(np.transpose(samples_try))
            np.fill_diagonal(R, 1)
            # Off-diagonal entries (any correlation exactly equal to 1 is also
            # excluded here, which in practice removes only the diagonal).
            R1 = R[R != 1]
            if np.max(np.abs(R1)) < min_corr:
                min_corr = np.max(np.abs(R1))
                samples = copy.deepcopy(samples_try)
        print('Achieved minimum correlation of ', min_corr)
        return samples

    ################################################################################################################
    # Latin hypercube checks.
    # Necessary parameters: 1. Probability distribution, 2. Probability distribution parameters
    # Optional: number of samples (default 100), criterion, metric, iterations
    def init_lhs(self):
        """Validate mandatory inputs, broadcast pdf_type/pdf_params to one entry per
        dimension, and fill defaults: criterion='random', metric='euclidean', iter=1000."""
        if self.nsamples is None:
            raise NotImplementedError("Exit code: Number of samples not defined.")
        if self.pdf_type is None:
            raise NotImplementedError("Exit code: Distributions not defined.")
        else:
            for i in self.pdf_type:
                if i not in ['Uniform', 'Normal', 'Lognormal', 'Weibull', 'Beta', 'Exponential', 'Gamma']:
                    raise NotImplementedError("Exit code: Unrecognized type of distribution."
                                              "Supported distributions: 'Uniform', 'Normal', 'Lognormal', 'Weibull', "
                                              "'Beta', 'Exponential', 'Gamma'.")
        if self.pdf_params is None:
            raise NotImplementedError("Exit code: Distribution parameters not defined.")
        if self.dimension is None:
            # Infer the dimension from the distribution lists, which must agree.
            if len(self.pdf_type) != len(self.pdf_params):
                raise NotImplementedError("Exit code: Incompatible dimensions.")
            else:
                self.dimension = len(self.pdf_type)
        else:
            import itertools
            from itertools import chain

            # Broadcast a single type and/or parameter set across all dimensions.
            if len(self.pdf_type) == 1 and len(self.pdf_params) == self.dimension:
                self.pdf_type = list(itertools.repeat(self.pdf_type, self.dimension))
                self.pdf_type = list(chain.from_iterable(self.pdf_type))
            elif len(self.pdf_params) == 1 and len(self.pdf_type) == self.dimension:
                self.pdf_params = list(itertools.repeat(self.pdf_params, self.dimension))
                self.pdf_params = list(chain.from_iterable(self.pdf_params))
            elif len(self.pdf_params) == 1 and len(self.pdf_type) == 1:
                self.pdf_params = list(itertools.repeat(self.pdf_params, self.dimension))
                self.pdf_type = list(itertools.repeat(self.pdf_type, self.dimension))
                self.pdf_type = list(chain.from_iterable(self.pdf_type))
                self.pdf_params = list(chain.from_iterable(self.pdf_params))
            elif len(self.pdf_type) != len(self.pdf_params):
                raise NotImplementedError("Exit code: Incompatible dimensions.")

        # Default criterion: plain random LHS.
        if self.lhs_criterion is None:
            self.lhs_criterion = 'random'
        else:
            if self.lhs_criterion not in ['random', 'centered', 'maximin', 'correlate']:
                raise NotImplementedError("Exit code: Supported lhs criteria: 'random', 'centered', 'maximin', "
                                          "'correlate'")

        # Default distance metric for the maximin criterion.
        if self.lhs_metric is None:
            self.lhs_metric = 'euclidean'
        else:
            if self.lhs_metric not in ['braycurtis', 'canberra', 'chebyshev', 'cityblock', 'correlation', 'cosine',
                                       'dice', 'euclidean', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',
                                       'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
                                       'sokalmichener', 'sokalsneath', 'sqeuclidean']:
                raise NotImplementedError("Exit code: Supported lhs distances: 'braycurtis', 'canberra', 'chebyshev', "
                                          "'cityblock',"
                                          " 'correlation', 'cosine','dice', 'euclidean', 'hamming', 'jaccard', "
                                          "'kulsinski', 'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',"
                                          "'russellrao', 'seuclidean','sokalmichener', 'sokalsneath', 'sqeuclidean'")

        # Default iteration budget for the iterative criteria.
        if self.lhs_iter is None or self.lhs_iter == 0:
            self.lhs_iter = 1000
        elif self.lhs_iter is not None:
            self.lhs_iter = int(self.lhs_iter)
########################################################################################################################
########################################################################################################################
# Partially Stratified Sampling (PSS)
########################################################################################################################
class PSS:
    """
    Generate a partially stratified sample set on U(0,1) as described in:

    Shields, M.D. and Zhang, J. "The generalization of Latin hypercube sampling"
    Reliability Engineering and System Safety. 148: 96-108

    :param pss_design: Vector defining the subdomains to be used.
                       Example: 5D problem with 2x2D + 1x1D subdomains using pss_design = [2,2,1]. \n
                       Note: The sum of the values in the pss_design vector equals the dimension of the problem.
    :param pss_strata: Vector defining how each dimension should be stratified.
                       Example: 5D problem with 2x2D + 1x1D subdomains with 625 samples using
                       pss_pss_stratum = [25,25,625].\n
                       Note: pss_pss_stratum(i)^pss_design(i) = number of samples (for all i)
    :return: pss_samples: Generated samples Array (nSamples x nRVs)
    :type pss_design: list
    :type pss_strata: list

    Created by: Jiaxin Zhang
    Last modified: 24/01/2018 by D.G. Giovanis
    """

    # TODO: the pss_design = [[1,4], [2,5], [3]] - then reorder the sequence of RVs
    # TODO: Create a list that contains all element info - parent structure

    def __init__(self, dimension=None, pdf_type=None, pdf_params=None, pss_design=None, pss_strata=None):
        self.pdf_type = pdf_type
        self.pdf_params = pdf_params
        self.pss_design = pss_design
        self.pss_strata = pss_strata
        self.dimension = dimension
        self.init_pss()
        # init_pss() guarantees pss_strata[i] ** pss_design[i] is constant over i,
        # so the first pair determines the total number of samples.
        self.nsamples = self.pss_strata[0] ** self.pss_design[0]
        self.samplesU01, self.samples = self.run_pss()

    def run_pss(self):
        """
        Build the PSS design: stratify each subdomain independently, then pair the
        subdomain designs by randomly permuting their rows.

        :return: (samplesU01, samples) -- samples on U(0,1) and the same points
                 transformed through the inverse CDF, row-aligned.
        """
        samples = np.zeros((self.nsamples, self.dimension))
        samples_u_to_x = np.zeros((self.nsamples, self.dimension))
        col = 0
        for i in range(len(self.pss_design)):
            # Equal stratification of the current subdomain in each of its dimensions.
            # Use the builtin int: the np.int alias was deprecated in NumPy 1.20
            # and removed in NumPy 1.24.
            n_stratum = self.pss_strata[i] * np.ones(self.pss_design[i], dtype=int)

            sts = STS(pdf_type=self.pdf_type, pdf_params=self.pdf_params, sts_design=n_stratum, pss_=True)
            index = list(range(col, col + self.pss_design[i]))

            samples[:, index] = sts.samplesU01
            samples_u_to_x[:, index] = sts.samples

            # Shuffle the rows of this subdomain. The SAME permutation must be
            # applied to both arrays; previously two independent permutations were
            # drawn, which destroyed the correspondence
            # samples[k] <-> samples_u_to_x[k] within each subdomain.
            perm = np.random.permutation(self.nsamples)
            samples[:, index] = samples[np.ix_(perm, index)]
            samples_u_to_x[:, index] = samples_u_to_x[np.ix_(perm, index)]

            col = col + self.pss_design[i]

        return samples, samples_u_to_x

    ################################################################################################################
    # Partially Stratified sampling (PSS) checks.
    # Necessary parameters: 1. pdf, 2. pdf parameters 3. pss design 4. pss strata
    # Optional:

    def init_pss(self):
        """
        Validate user input and broadcast a scalar pdf_type/pdf_params to all dimensions.

        :raises NotImplementedError: for missing/unsupported inputs or incompatible dimensions
        :raises ValueError: for inconsistent pss design/strata definitions
        """
        if self.pdf_type is None:
            raise NotImplementedError("Exit code: Distribution not defined.")
        else:
            for i in self.pdf_type:
                if i not in ['Uniform', 'Normal', 'Lognormal', 'Weibull', 'Beta', 'Exponential', 'Gamma']:
                    raise NotImplementedError("Exit code: Unrecognized type of distribution."
                                              "Supported distributions: 'Uniform', 'Normal', 'Lognormal', 'Weibull', "
                                              "'Beta', 'Exponential', 'Gamma'. ")
        if self.pdf_params is None:
            raise NotImplementedError("Exit code: Distribution parameters not defined.")

        if self.pss_design is None:
            raise NotImplementedError("Exit code: pss design not defined.")
        elif self.pss_strata is None:
            raise NotImplementedError("Exit code: pss strata not defined.")
        else:
            if len(self.pss_design) != len(self.pss_strata):
                raise ValueError('Exit code: "pss design" and "pss strata" must be the same length.')

        # Every subdomain must produce the same total number of samples:
        # pss_strata[i] ** pss_design[i] must be constant over i.
        sample_check = np.zeros((len(self.pss_strata), len(self.pss_design)))
        for i in range(len(self.pss_strata)):
            for j in range(len(self.pss_design)):
                sample_check[i, j] = self.pss_strata[i] ** self.pss_design[j]

        if np.max(sample_check) != np.min(sample_check):
            raise ValueError('Exit code: All dimensions must have the same number of samples/strata.')

        if self.dimension is None:
            self.dimension = np.sum(self.pss_design)
        else:
            if self.dimension != np.sum(self.pss_design):
                raise NotImplementedError("Exit code: Incompatible dimensions.")

        import itertools
        from itertools import chain

        # Broadcast a single pdf type and/or a single parameter set to all dimensions.
        if len(self.pdf_type) == 1 and len(self.pdf_params) == self.dimension:
            self.pdf_type = list(itertools.repeat(self.pdf_type, self.dimension))
            self.pdf_type = list(chain.from_iterable(self.pdf_type))
        elif len(self.pdf_params) == 1 and len(self.pdf_type) == self.dimension:
            self.pdf_params = list(itertools.repeat(self.pdf_params, self.dimension))
            self.pdf_params = list(chain.from_iterable(self.pdf_params))
        elif len(self.pdf_params) == 1 and len(self.pdf_type) == 1:
            self.pdf_params = list(itertools.repeat(self.pdf_params, self.dimension))
            self.pdf_type = list(itertools.repeat(self.pdf_type, self.dimension))
            self.pdf_type = list(chain.from_iterable(self.pdf_type))
            self.pdf_params = list(chain.from_iterable(self.pdf_params))
        elif len(self.pdf_type) != len(self.pdf_params):
            raise NotImplementedError("Exit code: Incompatible dimensions.")
########################################################################################################################
########################################################################################################################
# Stratified Sampling (sts)
########################################################################################################################
class STS:
    # TODO: MDS - Add documentation to this subclass
    """
    Stratified sampling over the unit hypercube with an inverse-CDF transform.

    :param dimension: number of random variables (defaults to len(sts_design))
    :param pdf_type: marginal distribution name(s)
    :param pdf_params: marginal distribution parameter(s)
    :param sts_design: number of strata in each dimension
    :param pss_: when not None, input checking is skipped (the caller -- PSS --
                 has already validated the inputs)
    """

    def __init__(self, dimension=None, pdf_type=None, pdf_params=None, sts_design=None, pss_=None):
        self.dimension = dimension
        self.pdf_type = pdf_type
        self.pdf_params = pdf_params
        self.sts_design = sts_design
        if pss_ is None:
            self.init_sts()
        strata = Strata(nstrata=self.sts_design)
        self.origins = strata.origins
        self.widths = strata.widths
        self.weights = strata.weights
        self.samplesU01, self.samples = self.run_sts()

    def run_sts(self):
        """Draw one uniform point inside every stratum and map it through the inverse CDF."""
        n_strata, n_dims = self.origins.shape
        samples = np.empty([n_strata, n_dims], dtype=np.float32)
        # np.ndindex iterates in row-major order, i.e. the same order as the
        # equivalent nested for-loops, keeping the random stream unchanged.
        for row, col in np.ndindex(n_strata, n_dims):
            lower = self.origins[row, col]
            samples[row, col] = np.random.uniform(lower, lower + self.widths[row, col])
        samples_u_to_x = inv_cdf(samples, self.pdf_type, self.pdf_params)
        return samples, samples_u_to_x

    def init_sts(self):
        """Validate user input and broadcast a scalar pdf_type/pdf_params to all dimensions."""
        supported = ['Uniform', 'Normal', 'Lognormal', 'Weibull', 'Beta', 'Exponential', 'Gamma']
        if self.pdf_type is None:
            raise NotImplementedError("Exit code: Distribution not defined.")
        for marginal in self.pdf_type:
            if marginal not in supported:
                raise NotImplementedError("Exit code: Unrecognized type of distribution."
                                          "Supported distributions: 'Uniform', 'Normal', 'Lognormal', 'Weibull', "
                                          "'Beta', 'Exponential', 'Gamma'. ")
        if self.pdf_params is None:
            raise NotImplementedError("Exit code: Distribution parameters not defined.")
        if self.sts_design is None:
            raise NotImplementedError("Exit code: sts design not defined.")

        if self.dimension is None:
            self.dimension = len(self.sts_design)
        elif self.dimension != len(self.sts_design):
            raise NotImplementedError("Exit code: Incompatible dimensions.")

        # Broadcast a single pdf type and/or a single parameter set to all
        # dimensions (list * n repeats the single element n times).
        one_type = len(self.pdf_type) == 1
        one_params = len(self.pdf_params) == 1
        if one_type and len(self.pdf_params) == self.dimension:
            self.pdf_type = self.pdf_type * self.dimension
        elif one_params and len(self.pdf_type) == self.dimension:
            self.pdf_params = self.pdf_params * self.dimension
        elif one_params and one_type:
            self.pdf_params = self.pdf_params * self.dimension
            self.pdf_type = self.pdf_type * self.dimension
        elif len(self.pdf_type) != len(self.pdf_params):
            raise NotImplementedError("Exit code: Incompatible dimensions.")

    # TODO: Create a list that contains all element info - parent structure
    # e.g. SS_samples = [STS[j] for j in range(0,nsamples)]
    # hstack
########################################################################################################################
########################################################################################################################
# Class Strata
########################################################################################################################
class Strata:
    """
    Define a rectilinear stratification of the n-dimensional unit hypercube with N strata.

    :param nstrata: array-like
        An array of dimension 1 x n defining the number of strata in each of the n dimensions
        Creates an equal stratification with strata widths equal to 1/nstrata
        The total number of strata, N, is the product of the terms of nstrata
        Example -
        nstrata = [2, 3, 2] creates a 3d stratification with:
        2 strata in dimension 0 with stratum widths 1/2
        3 strata in dimension 1 with stratum widths 1/3
        2 strata in dimension 2 with stratum widths 1/2

    :param input_file: string
        File path to input file specifying stratum origins and stratum widths

    :param origins: array-like
        An array of dimension N x n specifying the origins of all strata
        The origins of the strata are the coordinates of the stratum orthotope nearest the global origin
        Example - A 2D stratification with 2 strata in each dimension
        origins = [[0, 0]
                   [0, 0.5]
                   [0.5, 0]
                   [0.5, 0.5]]

    :param widths: array-like
        An array of dimension N x n specifying the widths of all strata in each dimension
        Example - A 2D stratification with 2 strata in each dimension
        widths = [[0.5, 0.5]
                  [0.5, 0.5]
                  [0.5, 0.5]
                  [0.5, 0.5]]

    Created by: Michael D. Shields
    Last modified: 11/4/2017
    Last modified by: Michael D. Shields
    """

    def __init__(self, nstrata=None, input_file=None, origins=None, widths=None):
        """
        Construct the stratification from one of: [nstrata], an input file, or
        explicit [origins] and [widths]. See the class docstring for parameter details.
        """
        self.input_file = input_file
        self.nstrata = nstrata
        self.origins = origins
        self.widths = widths

        if self.nstrata is None:
            if self.input_file is None:
                if self.widths is None or self.origins is None:
                    sys.exit('Error: The strata are not fully defined. Must provide [nstrata], '
                             'input file, or [origins] and [widths]')
            else:
                # Read the strata from the specified input file.
                # See documentation for input file formatting: the left half of the
                # columns holds the origins, the right half the widths.
                array_tmp = np.loadtxt(input_file)
                self.origins = array_tmp[:, 0:array_tmp.shape[1] // 2]
                # Bug fix: this previously assigned to `self.width` (missing "s"),
                # leaving self.widths as None so the weights computation below
                # failed for file-defined stratifications.
                self.widths = array_tmp[:, array_tmp.shape[1] // 2:]

                # Check to see that the strata are space-filling
                space_fill = np.sum(np.prod(self.widths, 1))
                if 1 - space_fill > 1e-5:
                    sys.exit('Error: The stratum design is not space-filling.')
                if 1 - space_fill < -1e-5:
                    sys.exit('Error: The stratum design is over-filling.')

                # TODO: MDS - Add a check for disjointness of strata
        else:
            # Use nstrata to assign the origin and widths of a specified rectilinear stratification.
            self.origins = np.divide(self.fullfact(self.nstrata), self.nstrata)
            self.widths = np.divide(np.ones(self.origins.shape), self.nstrata)

        # Stratum probability weights are the stratum volumes.
        self.weights = np.prod(self.widths, axis=1)

    def fullfact(self, levels):
        """
        Create a general full-factorial design.
        (Appears to be adapted from pyDOE's ``fullfact`` -- TODO confirm attribution.)

        Parameters
        ----------
        levels : array-like
            An array of integers that indicate the number of levels of each input
            design factor.

        Returns
        -------
        mat : 2d-array
            The design matrix with coded levels 0 to k-1 for a k-level factor

        Example
        -------
        ::

            >>> fullfact([2, 3])
            array([[ 0.,  0.],
                   [ 1.,  0.],
                   [ 0.,  1.],
                   [ 1.,  1.],
                   [ 0.,  2.],
                   [ 1.,  2.]])
        """
        n = len(levels)                 # number of factors
        nb_lines = np.prod(levels)      # number of trial conditions
        H = np.zeros((nb_lines, n))
        level_repeat = 1
        range_repeat = np.prod(levels)
        for i in range(n):
            # Each factor cycles through its levels; earlier factors cycle fastest.
            range_repeat //= levels[i]
            lvl = []
            for j in range(levels[i]):
                lvl += [j] * level_repeat
            rng = lvl * range_repeat
            level_repeat *= levels[i]
            H[:, i] = rng
        return H
########################################################################################################################
########################################################################################################################
# Markov Chain Monte Carlo (MCMC)
########################################################################################################################
class MCMC:
    """Generate samples from an arbitrary probability density function using Markov Chain Monte Carlo.

    This class generates samples from an arbitrary user-specified distribution using Metropolis-Hastings(MH),
    Modified Metropolis-Hastings, or the Affine Invariant Ensemble Sampler with stretch moves.

    References:
    S.-K. Au and J. L. Beck, "Estimation of small failure probabilities in high dimensions by subset simulation,"
        Probabilistic Eng. Mech., vol. 16, no. 4, pp. 263-277, Oct. 2001.
    J. Goodman and J. Weare, "Ensemble samplers with affine invariance," Commun. Appl. Math. Comput. Sci., vol. 5,
        no. 1, pp. 65-80, 2010.

    Input:
    :param dimension: A scalar value defining the dimension of target density function. Default: 1
    :type dimension: int

    :param pdf_proposal_type: Type of proposal density function for MCMC. Only used with algorithm = 'MH' or 'MMH'
                    Options: 'Normal', 'Uniform'. Default: 'Uniform'
                    If dimension > 1 and algorithm = 'MMH', this may be input as a list to assign different
                    proposal densities to each dimension; a single string is broadcast to all dimensions.
    :type pdf_proposal_type: str or str list

    :param pdf_proposal_scale: Scale of the proposal distribution
                    For 'Uniform': proposal is Uniform in [x-pdf_proposal_scale/2, x+pdf_proposal_scale/2]
                    For 'Normal': proposal is Normal with standard deviation pdf_proposal_scale
                    For 'Stretch': sets the scale of the stretch density
                        g(z) = 1/sqrt(z) for z in [1/pdf_proposal_scale, pdf_proposal_scale]
                    Default: dimension x 1 list of ones (2 for 'Stretch')
    :type pdf_proposal_scale: float or float list

    :param pdf_target_type: Type of target density function for acceptance/rejection in MMH.
                    Options: 'marginal_pdf' (independent variables only), 'joint_pdf'.
                    Default: 'marginal_pdf' for MMH; 'joint_pdf' for MH and Stretch.
    :type pdf_target_type: str

    :param pdf_target: Target density function from which to draw random samples: a function, a list of
                    functions (one marginal per dimension when pdf_target_type='marginal_pdf'), or a string
                    naming a custom pdf defined in custom_pdf.py in the working directory.
                    Default: standard normal (marginal or multivariate, depending on pdf_target_type).
    :type pdf_target: function, function list, or str

    :param pdf_target_params: Parameters of the target pdf, passed as the second argument to each pdf function.
    :type pdf_target_params: list

    :param algorithm: 'MH', 'MMH' (default), or 'Stretch'.
    :type algorithm: str

    :param jump: Number of samples between accepted states of the Markov chain. Default: 1 (accept every state)
    :type jump: int

    :param nsamples: Number of samples to generate. No default: nsamples must be prescribed.
    :type nsamples: int

    :param seed: Seed of the Markov chain(s).
                    For 'MH'/'MMH': a single point of dimension (1 x dimension); default zeros.
                    For 'Stretch': an (N x dimension) array, N = ensemble size; no default.
    :type seed: float or numpy array

    :param nburn: Length of burn-in ('MH'/'MMH' only). Default: 0
    :type nburn: int

    Output:
    :return: MCMC.samples:
    :rtype: MCMC.samples: numpy array
    """

    # Authors: Mohit Chauhan, Dimitris Giovanis, Michael D. Shields
    # Updated: 4/26/18 by Michael D. Shields

    def __init__(self, dimension=None, pdf_proposal_type=None, pdf_proposal_scale=None, pdf_target_type=None,
                 pdf_target=None, pdf_target_params=None, algorithm=None, jump=None, nsamples=None, seed=None,
                 nburn=None):
        self.pdf_proposal_type = pdf_proposal_type
        self.pdf_proposal_scale = pdf_proposal_scale
        self.pdf_target_type = pdf_target_type
        self.pdf_target = pdf_target
        self.pdf_target_params = pdf_target_params
        self.algorithm = algorithm
        self.jump = jump
        self.nsamples = nsamples
        self.dimension = dimension
        self.seed = seed
        self.nburn = nburn
        self.init_mcmc()
        # Note: string comparisons throughout this class use '==' rather than 'is';
        # 'is' checks object identity and is unreliable for strings.
        if self.algorithm == 'Stretch':
            self.ensemble_size = len(self.seed)
        self.samples = self.run_mcmc()

    def run_mcmc(self):
        """Run the configured MCMC algorithm and return the (thinned, burned-in) samples."""
        rejects = 0

        # Array to store every generated state of the chain (before thinning).
        samples = np.zeros([self.nsamples * self.jump, self.dimension])

        ################################################################################################################
        # Classical Metropolis-Hastings Algorithm with symmetric proposal density
        if self.algorithm == 'MH':
            from numpy.random import normal, multivariate_normal, uniform
            samples[0, :] = self.seed
            pdf_ = self.pdf_target[0]

            for i in range(self.nsamples * self.jump - 1 + self.nburn):
                if self.pdf_proposal_type[0] == 'Normal':
                    if self.dimension == 1:
                        candidate = normal(samples[i, :], np.array(self.pdf_proposal_scale))
                    else:
                        if i == 0:
                            self.pdf_proposal_scale = np.diag(np.array(self.pdf_proposal_scale))
                        candidate = multivariate_normal(samples[i, :], np.array(self.pdf_proposal_scale))
                elif self.pdf_proposal_type[0] == 'Uniform':
                    # Bug fix: this branch previously compared the whole
                    # pdf_proposal_type *list* to the string 'Uniform', so it never
                    # executed and `candidate` was left unassigned.
                    candidate = uniform(low=samples[i, :] - np.array(self.pdf_proposal_scale) / 2,
                                        high=samples[i, :] + np.array(self.pdf_proposal_scale) / 2,
                                        size=self.dimension)

                p_proposal = pdf_(candidate, self.pdf_target_params)
                p_current = pdf_(samples[i, :], self.pdf_target_params)
                p_accept = p_proposal / p_current

                accept = np.random.random() < p_accept

                if accept:
                    samples[i + 1, :] = candidate
                else:
                    samples[i + 1, :] = samples[i, :]
                    rejects += 1

        ################################################################################################################
        # Modified Metropolis-Hastings Algorithm with symmetric proposal density
        elif self.algorithm == 'MMH':
            samples[0, :] = self.seed[0:]

            if self.pdf_target_type == 'marginal_pdf':
                # Componentwise accept/reject against each marginal pdf.
                for i in range(self.nsamples * self.jump - 1 + self.nburn):
                    for j in range(self.dimension):
                        pdf_ = self.pdf_target[j]

                        if self.pdf_proposal_type[j] == 'Normal':
                            candidate = np.random.normal(samples[i, j], self.pdf_proposal_scale[j])
                        elif self.pdf_proposal_type[j] == 'Uniform':
                            candidate = np.random.uniform(low=samples[i, j] - self.pdf_proposal_scale[j] / 2,
                                                          high=samples[i, j] + self.pdf_proposal_scale[j] / 2, size=1)

                        p_proposal = pdf_(candidate, self.pdf_target_params)
                        p_current = pdf_(samples[i, j], self.pdf_target_params)
                        p_accept = p_proposal / p_current

                        accept = np.random.random() < p_accept

                        if accept:
                            samples[i + 1, j] = candidate
                        else:
                            samples[i + 1, j] = samples[i, j]

            elif self.pdf_target_type == 'joint_pdf':
                # Componentwise proposals, but accept/reject against the joint pdf.
                pdf_ = self.pdf_target[0]

                for i in range(self.nsamples * self.jump - 1 + self.nburn):
                    candidate = list(samples[i, :])
                    current = list(samples[i, :])
                    for j in range(self.dimension):
                        if self.pdf_proposal_type[j] == 'Normal':
                            candidate[j] = np.random.normal(samples[i, j], self.pdf_proposal_scale[j])
                        elif self.pdf_proposal_type[j] == 'Uniform':
                            candidate[j] = np.random.uniform(low=samples[i, j] - self.pdf_proposal_scale[j] / 2,
                                                             high=samples[i, j] + self.pdf_proposal_scale[j] / 2,
                                                             size=1)

                        p_proposal = pdf_(candidate, self.pdf_target_params)
                        p_current = pdf_(current, self.pdf_target_params)
                        p_accept = p_proposal / p_current

                        accept = np.random.random() < p_accept

                        if accept:
                            current[j] = candidate[j]
                        else:
                            candidate[j] = current[j]

                    samples[i + 1, :] = current

        ################################################################################################################
        # Affine Invariant Ensemble Sampler with stretch moves
        # Reference: Goodman, J. and Weare, J., (2010) "Ensemble samplers with affine invariance." Communications in
        #            applied mathematics and computational science. 5: 65-80.
        elif self.algorithm == 'Stretch':
            samples[0:self.ensemble_size, :] = self.seed
            pdf_ = self.pdf_target[0]

            for i in range(self.ensemble_size - 1, self.nsamples * self.jump - 1):
                complementary_ensemble = samples[i - self.ensemble_size + 2:i + 1, :]
                S = random.choice(complementary_ensemble)
                # Stretch factor drawn from g(z) ~ 1/sqrt(z) on [1/a, a],
                # a = pdf_proposal_scale[0] (Goodman & Weare, 2010).
                s = (1 + (self.pdf_proposal_scale[0] - 1) * random.random()) ** 2 / self.pdf_proposal_scale[0]
                candidate = S + s * (samples[i - self.ensemble_size + 1, :] - S)

                p_proposal = pdf_(candidate, self.pdf_target_params)
                p_current = pdf_(samples[i - self.ensemble_size + 1, :], self.pdf_target_params)
                p_accept = s ** (self.dimension - 1) * p_proposal / p_current

                accept = np.random.random() < p_accept

                if accept:
                    samples[i + 1, :] = candidate
                else:
                    samples[i + 1, :] = samples[i - self.ensemble_size + 1, :]

        ################################################################################################################
        # Return the samples: discard burn-in and thin by `jump`.
        if self.algorithm in ('MMH', 'MH'):
            return samples[self.nburn:self.nsamples * self.jump + self.nburn:self.jump]
        else:
            # For Stretch, thinning is applied per whole-ensemble generation.
            output = np.zeros((self.nsamples, self.dimension))
            j = 0
            for i in range(self.jump * self.ensemble_size - self.ensemble_size, samples.shape[0],
                           self.jump * self.ensemble_size):
                output[j:j + self.ensemble_size, :] = samples[i:i + self.ensemble_size, :]
                j = j + self.ensemble_size
            return output

    # TODO: Add Gibbs Sampler
    # TODO: Add Affine Invariant with walk moves

    ####################################################################################################################
    # Check to ensure consistency of the user input and assign defaults
    def init_mcmc(self):
        """Check consistency of the user input and assign defaults."""
        if self.dimension is None:
            self.dimension = 1

        # Check nsamples
        if self.nsamples is None:
            raise NotImplementedError('Exit code: Number of samples not defined.')

        # Check algorithm first -- several defaults below depend on it.
        # (Bug fix: this check previously ran after the pdf_target_type validation,
        # so leaving algorithm=None always raised even though 'MMH' is the
        # documented default.)
        if self.algorithm is None:
            self.algorithm = 'MMH'
        elif self.algorithm not in ['MH', 'MMH', 'Stretch']:
            raise NotImplementedError('Exit code: Unrecognized MCMC algorithm. Supported algorithms: '
                                      'Metropolis-Hastings (MH), '
                                      'Modified Metropolis-Hastings (MMH), '
                                      'Affine Invariant Ensemble with Stretch Moves (Stretch).')

        # Check seed
        if self.seed is None:
            self.seed = np.zeros(self.dimension)
        if self.algorithm != 'Stretch':
            if len(self.seed) != self.dimension:
                raise NotImplementedError("Exit code: Incompatible dimensions in 'seed'.")
        else:
            if self.seed.shape[0] < 3:
                raise NotImplementedError("Exit code: Ensemble size must be > 2.")

        # Check jump
        if self.jump is None:
            self.jump = 1

        # Check pdf_proposal_type
        if self.pdf_proposal_type is None:
            self.pdf_proposal_type = 'Uniform'
        # If pdf_proposal_type is entered as a string, make it a list
        if isinstance(self.pdf_proposal_type, str):
            self.pdf_proposal_type = [self.pdf_proposal_type]
        for proposal in self.pdf_proposal_type:
            if proposal not in ['Uniform', 'Normal']:
                raise ValueError('Exit code: Unrecognized type for proposal distribution. Supported distributions: '
                                 'Uniform, '
                                 'Normal.')
        if self.algorithm == 'MH' and len(self.pdf_proposal_type) != 1:
            raise ValueError('Exit code: MH algorithm can only take one proposal distribution.')
        elif len(self.pdf_proposal_type) != self.dimension:
            if len(self.pdf_proposal_type) == 1:
                self.pdf_proposal_type = self.pdf_proposal_type * self.dimension
            else:
                raise NotImplementedError("Exit code: Incompatible dimensions in 'pdf_proposal_type'.")

        # Check pdf_proposal_scale
        if self.pdf_proposal_scale is None:
            # A stretch scale of 2 is the standard recommendation of Goodman & Weare (2010).
            self.pdf_proposal_scale = 2 if self.algorithm == 'Stretch' else 1
        if not isinstance(self.pdf_proposal_scale, list):
            self.pdf_proposal_scale = [self.pdf_proposal_scale]
        if len(self.pdf_proposal_scale) != self.dimension:
            if len(self.pdf_proposal_scale) == 1:
                self.pdf_proposal_scale = self.pdf_proposal_scale * self.dimension
            else:
                raise NotImplementedError("Exit code: Incompatible dimensions in 'pdf_proposal_scale'.")

        # Check pdf_target_type
        if self.pdf_target_type is None:
            # MMH defaults to componentwise (marginal) acceptance; MH and Stretch
            # always work with the joint pdf.
            self.pdf_target_type = 'marginal_pdf' if self.algorithm == 'MMH' else 'joint_pdf'
        if self.algorithm == 'Stretch':
            self.pdf_target_type = 'joint_pdf'
        if self.pdf_target_type not in ['joint_pdf', 'marginal_pdf']:
            raise ValueError('Exit code: Unrecognized type for target distribution. Supported distributions: '
                             'joint_pdf, '
                             'marginal_pdf.')

        # Check pdf_target
        if isinstance(self.pdf_target, str):
            # A string names a custom pdf defined in custom_pdf.py in the working directory.
            self.pdf_target = pdf(self.pdf_target)
        if self.pdf_target is None and self.algorithm == 'MMH':
            if self.dimension == 1 or self.pdf_target_type == 'marginal_pdf':
                def target(x, dummy):
                    return sp.norm.pdf(x)
                if self.dimension == 1:
                    self.pdf_target = [target]
                else:
                    self.pdf_target = [target] * self.dimension
            else:
                def target(x, dummy):
                    return sp.multivariate_normal.pdf(x, mean=np.zeros(self.dimension), cov=np.eye(self.dimension))
                self.pdf_target = [target]
        elif self.pdf_target is None:
            if self.dimension == 1:
                def target(x, dummy):
                    return sp.norm.pdf(x)
                self.pdf_target = [target]
            else:
                def target(x, dummy):
                    return sp.multivariate_normal.pdf(x, mean=np.zeros(self.dimension), cov=np.eye(self.dimension))
                self.pdf_target = [target]
        elif not isinstance(self.pdf_target, list):
            self.pdf_target = [self.pdf_target]

        # Check pdf_target_params
        if self.pdf_target_params is None:
            self.pdf_target_params = []
        if not isinstance(self.pdf_target_params, list):
            self.pdf_target_params = [self.pdf_target_params]

        # Check burn-in length
        if self.nburn is None:
            self.nburn = 0
########################################################################################################################
########################################################################################################################
# ADD ANY NEW METHOD HERE
######################################################################################################################## | 47.693191 | 121 | 0.521617 | 47,418 | 0.76055 | 0 | 0 | 0 | 0 | 0 | 0 | 31,709 | 0.508589 |
1a351fa5c4ad0647f5e47f61d98718d18c8f41de | 5,366 | py | Python | examples/python-api/simple_alu.py | ahmed-irfan/cosa2 | 200c7ce44aa57e2e3d55dddb22051896ad5ee81c | [
"BSD-3-Clause"
] | 26 | 2020-07-23T01:15:03.000Z | 2022-01-16T06:10:10.000Z | examples/python-api/simple_alu.py | ahmed-irfan/cosa2 | 200c7ce44aa57e2e3d55dddb22051896ad5ee81c | [
"BSD-3-Clause"
] | 132 | 2020-06-29T19:40:19.000Z | 2021-11-30T02:21:43.000Z | examples/python-api/simple_alu.py | ahmed-irfan/cosa2 | 200c7ce44aa57e2e3d55dddb22051896ad5ee81c | [
"BSD-3-Clause"
] | 16 | 2020-10-12T11:53:46.000Z | 2022-03-30T13:22:39.000Z | #!/usr/bin/env python3
import argparse
import pono
import smt_switch as ss
from smt_switch.primops import And, BVAdd, BVSub, Equal, Ite
from smt_switch.sortkinds import BOOL, BV
def build_simple_alu_fts(s: ss.SmtSolver) -> pono.Property:
    """Create a simple ALU transition system.

    :param s: an SmtSolver from smt_switch
    :return: a pono.Property asserting the two result registers always agree
    """
    # Model the design as a functional transition system over the given solver.
    alu = pono.FunctionalTransitionSystem(s)

    # Bit-vector sorts: a 1-bit configuration flag and 8-bit data words.
    flag_sort = s.make_sort(BV, 1)
    word_sort = s.make_sort(BV, 8)

    # State variables.
    cfg = alu.make_statevar('cfg', flag_sort)
    spec_res = alu.make_statevar('spec_res', word_sort)
    imp_res = alu.make_statevar('imp_res', word_sort)

    # Input variables.
    a = alu.make_inputvar('a', word_sort)
    b = alu.make_inputvar('b', word_sort)

    zero_flag = s.make_term(0, flag_sort)

    # cfg starts at 0 and keeps its value forever.
    alu.constrain_init(s.make_term(Equal, cfg, zero_flag))
    alu.assign_next(cfg, cfg)

    # The two result registers start out equal.
    alu.constrain_init(s.make_term(Equal, spec_res, imp_res))

    # Specification result is the sum: spec_res' = a + b
    alu.assign_next(spec_res, s.make_term(BVAdd, a, b))

    # Implementation result depends on the configuration:
    # imp_res' = (cfg == 0) ? a + b : a - b
    alu.assign_next(imp_res, s.make_term(Ite,
                                         s.make_term(Equal, cfg, zero_flag),
                                         s.make_term(BVAdd, a, b),
                                         s.make_term(BVSub, a, b)))

    # Property to check: implementation always matches the specification.
    return pono.Property(alu, s.make_term(Equal, spec_res, imp_res))
def k_induction_attempt():
    """Run plain k-induction on the ALU property (expected outcome: unknown)."""
    # Boolector-backed smt_switch solver, logging disabled.
    solver = ss.create_btor_solver(False)
    solver.set_opt('produce-models', 'true')
    solver.set_opt('incremental', 'true')

    prop = build_simple_alu_fts(solver)
    fts = prop.transition_system

    print('\n============== Running k-induction ==============')
    print('INIT\n\t{}'.format(fts.init))
    print('TRANS\n\t{}'.format(fts.trans))
    print('PROP\n\t{}'.format(prop.prop))

    # KInduction engine -- reusing the same solver (in future can change the solver).
    engine = pono.KInduction(prop, solver)
    result = engine.check_until(20)
    print(result)
    assert result is None, "Expecting k-induction not to prove property in 20 steps"
    print("KInduction returned unknown")
def interpolant_attempt():
    """Prove the ALU property with interpolant-based model checking (MathSAT)."""
    # MathSAT-backed solver plus interpolator; solver logging disabled.
    solver = ss.create_msat_solver(False)
    itp = ss.create_msat_interpolator()
    solver.set_opt('produce-models', 'true')
    solver.set_opt('incremental', 'true')

    prop = build_simple_alu_fts(solver)
    fts = prop.transition_system

    print('\n============== Running Interpolant-based Model Checking ==============')
    print('INIT\n\t{}'.format(fts.init))
    print('TRANS\n\t{}'.format(fts.trans))
    print('PROP\n\t{}'.format(prop.prop))

    # InterpolantMC engine over the solver / interpolator pair.
    engine = pono.InterpolantMC(prop, solver, itp)
    result = engine.check_until(20)
    print(result)
    assert result is True, "Expecting InterpolantMC to prove the property"
    print("InterpolantMC returned true")
def k_induction_attempt_inductive():
    """Strengthen the property by hand (conjoin cfg == 0) and prove it via k-induction."""
    # Boolector-backed smt_switch solver, logging disabled.
    solver = ss.create_btor_solver(False)
    solver.set_opt('produce-models', 'true')
    solver.set_opt('incremental', 'true')

    prop = build_simple_alu_fts(solver)
    fts = prop.transition_system

    # Index the state variables by name for convenient lookup below.
    states = {str(sv): sv for sv in fts.statevars}

    # Make the property inductive manually: add the invariant cfg == 0.
    cfg_is_zero = solver.make_term(Equal,
                                   states['cfg'],
                                   solver.make_term(0, solver.make_sort(BV, 1)))
    prop = pono.Property(fts, solver.make_term(And, cfg_is_zero, prop.prop))

    print('\n============== Running k-induction on inductively strengthened property ==============')
    print('INIT\n\t{}'.format(fts.init))
    print('TRANS\n\t{}'.format(fts.trans))
    print('PROP\n\t{}'.format(prop.prop))

    # KInduction engine -- reusing the same solver (in future can change the solver).
    engine = pono.KInduction(prop, solver)
    result = engine.check_until(20)
    print(result)
    assert result is True, "Expecting k-induction to prove the inductively strengthened property"
    print("KInduction returned true")
# Dispatch table mapping CLI approach names to their runner functions.
approaches = {
    'kind': k_induction_attempt,
    'interp': interpolant_attempt,
    'kind-manual': k_induction_attempt_inductive,
}


if __name__ == "__main__":
    cli = argparse.ArgumentParser(description='Pono SimpleALU example')
    # Valid choices come straight from the dispatch table (insertion order preserved).
    cli.add_argument('approach', choices=list(approaches),
                     help='Select the approach: k-induction, interpolant-based,'
                          ' or k-induction with a manually strengthened property')
    cli.add_argument('-v', '--verbosity', type=int, default=0)
    opts = cli.parse_args()
    pono.set_global_logger_verbosity(opts.verbosity)
    approaches[opts.approach]()
| 34.397436 | 101 | 0.62076 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,997 | 0.372158 |
1a3616b61cc48f65c6a7989293c9e978921dfdf8 | 55 | py | Python | lrs/admin.py | ELSUru/ADL_LRS | aabeb9cf3e56795b148f37d07e1bb2b41e61e470 | [
"Apache-2.0"
] | null | null | null | lrs/admin.py | ELSUru/ADL_LRS | aabeb9cf3e56795b148f37d07e1bb2b41e61e470 | [
"Apache-2.0"
] | null | null | null | lrs/admin.py | ELSUru/ADL_LRS | aabeb9cf3e56795b148f37d07e1bb2b41e61e470 | [
"Apache-2.0"
] | null | null | null | from util.util import autoregister
autoregister('lrs') | 18.333333 | 34 | 0.818182 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5 | 0.090909 |
1a3702be15ebcb4bd15bdff000d4d69a1406a580 | 2,279 | py | Python | cloudmesh/key/Key.py | wang542/cloudmesh-cloud | f09f823172afedf55c008a5094b005eef6697551 | [
"Apache-2.0"
] | null | null | null | cloudmesh/key/Key.py | wang542/cloudmesh-cloud | f09f823172afedf55c008a5094b005eef6697551 | [
"Apache-2.0"
] | null | null | null | cloudmesh/key/Key.py | wang542/cloudmesh-cloud | f09f823172afedf55c008a5094b005eef6697551 | [
"Apache-2.0"
] | null | null | null | # See also the methods already implemented we have in cm for ssh management
# I think you reimplemented things that already exists.
# see and inspect cloudmesh.common
import os
from os.path import expanduser
# see content of path_expand it does expanduser as far as I know
from cloudmesh.common.util import path_expand
from cloudmesh.management.configuration.SSHkey import SSHkey
from cloudmesh.mongo.DataBaseDecorator import DatabaseUpdate
from cloudmesh.common.debug import VERBOSE
from pprint import pprint
from cloudmesh.configuration.Config import Config
# noinspection PyPep8Naming
class Key(object):
    """Manage SSH public keys gathered from a directory, GitHub, or ~/.ssh."""

    @classmethod
    def get_from_dir(cls, directory=None, store=True):
        """Collect all public keys (files ending in ``.pub``) in *directory*.

        :param directory: directory to scan; defaults to ``~/.ssh``
        :param store: unused; kept for backward compatibility with callers
        :return: list of non-empty public-key strings found in the directory
        """
        directory = expanduser(path_expand(directory or "~/.ssh"))
        # Restrict the key directory to its owner. os.chmod works on both
        # POSIX and Windows (Windows honours only the read-only bit), unlike
        # the previous `os.system("chmod 700 $HOME /.ssh")`, which was broken:
        # the space made it chmod $HOME and a root-level /.ssh instead of
        # ~/.ssh, and it failed entirely on Windows.
        try:
            os.chmod(directory, 0o700)
        except OSError:
            # Best effort only -- scanning the directory can still proceed.
            pass
        files = [file for file in os.listdir(directory)
                 if file.lower().endswith(".pub")]
        d = []
        for file in files:
            path = os.path.join(directory, file)
            with open(path) as fd:
                for pubkey in map(str.strip, fd):
                    # skip empty lines
                    if not pubkey:
                        continue
                    d.append(pubkey)
        return d

    @DatabaseUpdate()
    def add(self, name, source):
        """Add a key to the database (persisted by the DatabaseUpdate decorator).

        key add [NAME] [--source=FILENAME]
        key add [NAME] [--source=git]
        key add [NAME] [--source=ssh]

        :param name: name under which the key is stored
        :param source: "git" fetches the user's keys from GitHub (the username
            is read from cloudmesh.profile.github); "ssh" reads the local ssh
            key; any other value (e.g. a filename) is not yet implemented.
        :return: list of keys to be stored
        :raises NotImplementedError: for unsupported sources
        """
        keys = None
        if source == "git":
            config = Config()
            username = config["cloudmesh.profile.github"]
            keys = SSHkey().get_from_git(username)
        elif source == "ssh":
            key = SSHkey(name=name)
            keys = [key]
        else:
            # source-as-filename is documented above but not implemented yet
            raise NotImplementedError

        return keys
if __name__ == "__main__":
    # Ad-hoc manual test: scan the default ~/.ssh directory for public keys.
    Key.get_from_dir(None, True)
| 33.028986 | 80 | 0.603335 | 1,627 | 0.71391 | 0 | 0 | 1,597 | 0.700746 | 0 | 0 | 776 | 0.3405 |
1a378a9835c7850c8d5fc052d1abc3beb861f804 | 1,901 | py | Python | code/DNN/dnn_regression-keras.py | Knowledge-Precipitation-Tribe/Neural-network | eac2e66cdde85b34ddf9313ce4d2b123cc1b8be8 | [
"MIT"
] | 3 | 2021-05-25T10:18:23.000Z | 2022-02-09T08:55:14.000Z | code/DNN/dnn_regression-keras.py | Knowledge-Precipitation-Tribe/Neural-network | eac2e66cdde85b34ddf9313ce4d2b123cc1b8be8 | [
"MIT"
] | null | null | null | code/DNN/dnn_regression-keras.py | Knowledge-Precipitation-Tribe/Neural-network | eac2e66cdde85b34ddf9313ce4d2b123cc1b8be8 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-#
'''
# Name: dnn_regression-keras
# Description:
# Author: super
# Date: 2020/6/2
'''
from HelperClass2.MnistImageDataReader import *
from keras.models import Sequential
from keras.layers import Dense
import matplotlib.pyplot as plt
import os
os.environ['KMP_DUPLICATE_LIB_OK']='True'
def load_data():
    """Read the ch09 regression dataset and return train/test/dev splits."""
    reader = DataReader_2_0("../data/ch09.train.npz", "../data/ch09.test.npz")
    reader.ReadData()
    # Normalisation is intentionally left out for this experiment.
    reader.Shuffle()
    reader.GenerateValidationSet()
    return (reader.XTrain, reader.YTrain,
            reader.XTest, reader.YTest,
            reader.XDev, reader.YDev)
def build_model():
    """Build a tiny 1-4-1 regression net: sigmoid hidden layer, linear output."""
    net = Sequential([
        Dense(4, activation='sigmoid', input_shape=(1, )),
        Dense(1, activation='linear'),
    ])
    net.compile(optimizer='Adam',
                loss='mean_squared_error')
    return net
def draw_train_history(history):
    """Plot training-vs-validation loss curves from a Keras History object."""
    # Plot order matters: it must match the legend labels below.
    for key in ('loss', 'val_loss'):
        plt.plot(history.history[key])
    plt.title('model loss')
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.legend(['train', 'validation'], loc='upper left')
    plt.show()
if __name__ == '__main__':
    x_train, y_train, x_test, y_test, x_val, y_val = load_data()
    # print(x_train.shape)
    # print(x_test.shape)
    # print(x_val.shape)
    model = build_model()
    # Validation data is passed so val_loss is recorded per epoch (plotted below).
    history = model.fit(x_train, y_train, epochs=50, batch_size=10, validation_data=(x_val, y_val))
    draw_train_history(history)
    # Final generalisation check on the held-out test set.
    loss = model.evaluate(x_test, y_test)
    print("test loss: {}".format(loss))
    weights = model.get_weights()
    print("weights: ", weights)
1a3797269cc9f510ddd449f32834670da5b034b5 | 24 | py | Python | milarun/models/ssd/__init__.py | laceyg/milabench | a314094a406c2e98a932f6d4f3a9588a991148d3 | [
"MIT"
] | 67 | 2020-09-22T10:17:53.000Z | 2022-02-16T10:24:17.000Z | milarun/models/ssd/__init__.py | laceyg/milabench | a314094a406c2e98a932f6d4f3a9588a991148d3 | [
"MIT"
] | 6 | 2020-07-02T08:58:39.000Z | 2021-02-01T20:31:28.000Z | milarun/models/ssd/__init__.py | laceyg/milabench | a314094a406c2e98a932f6d4f3a9588a991148d3 | [
"MIT"
] | 8 | 2020-06-19T17:16:19.000Z | 2022-03-31T19:34:49.000Z | from .train import main
| 12 | 23 | 0.791667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
1a385acf058d7c8902e6d45a5ac660ccee14a741 | 2,825 | py | Python | separator.py | TimBossuyt/GcodeAnalyzer | eb52e7e2360692919a5128802b189b06dfdb20d0 | [
"MIT"
] | 2 | 2021-03-31T18:13:34.000Z | 2021-03-31T18:26:03.000Z | separator.py | TimBossuyt/GcodeAnalyzer | eb52e7e2360692919a5128802b189b06dfdb20d0 | [
"MIT"
] | null | null | null | separator.py | TimBossuyt/GcodeAnalyzer | eb52e7e2360692919a5128802b189b06dfdb20d0 | [
"MIT"
] | null | null | null | # MIT License
# Copyright (c) 2020 Mitchell Lane
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import os
class Config:
    """One configuration section extracted from the bundle.

    fileName -- output file name derived from the section header.
    contents -- the section's body lines (header excluded).
    configType -- the text before ":" in the section header.
    """
    def __init__(self, fileName, contents, configType):
        self.fileName = fileName
        self.contents = contents
        self.configType = configType
# Split "config_bundle.ini" into one .ini file per "[type:name]" section.
with open("config_bundle.ini", "r") as bundleContents:
    # Skip the preamble: read until the first "[...]" section header.
    line = bundleContents.readline()
    while line and not line.startswith("["):
        line = bundleContents.readline()

    configurationsFound = []
    while line:
        # Strip the surrounding "[" and "]\n" from the header.
        # NOTE(review): assumes the header ends with "]\n"; a header on the
        # last line without a trailing newline would lose its final character.
        rawConfigHeader = line[1:-2]
        if rawConfigHeader == "presets":
            break
        print(line)
        # Header format is "type:name"; split only on the first ":".
        configHeaderComponents = rawConfigHeader.split(":", 1)
        configType = configHeaderComponents[0]
        fileName = (configHeaderComponents[1] + ".ini").replace(" ", "_")
        print("Found config section: " + configHeaderComponents[1])

        # Collect this section's body up to the next header (or EOF).
        line = bundleContents.readline()
        contents=[]
        while line and not line.startswith("["):
            contents.append(line)
            line = bundleContents.readline()
        configurationsFound.append(Config(fileName, contents, configType))
        print("//////////////////////////////////////////")

print("-----------------------------------\n" + "Found: " + str(len(configurationsFound)) + " configurations in total")

# Write each section into config_files/<name>.ini, dropping blank lines.
# NOTE(review): the "config_files" directory must already exist -- confirm.
outputDir = "config_files"
for configuration in configurationsFound:
    outputFileName = os.path.join(outputDir, configuration.fileName)
    print("Writing configuration to '" + outputFileName + "'")
    with open(outputFileName, 'w') as f:
        for configLine in configuration.contents:
            if configLine.rstrip():
                f.write(configLine)
print("All configuration written to seperate files")
| 32.471264 | 123 | 0.667611 | 172 | 0.060885 | 0 | 0 | 0 | 0 | 0 | 0 | 1,370 | 0.484956 |
1a39172f1ee4c2e6ed3de6f119bd4fc128421537 | 944 | py | Python | corpustools/funcload/io.py | PhonologicalCorpusTools/CorpusTools | ff5a7c06e2f7a478c5a239de7a78ef7eb5f4a45e | [
"BSD-3-Clause"
] | 97 | 2015-07-06T18:58:43.000Z | 2022-03-10T23:00:07.000Z | corpustools/funcload/io.py | PhonologicalCorpusTools/CorpusTools | ff5a7c06e2f7a478c5a239de7a78ef7eb5f4a45e | [
"BSD-3-Clause"
] | 443 | 2015-03-10T21:24:39.000Z | 2022-03-22T22:20:13.000Z | corpustools/funcload/io.py | PhonologicalCorpusTools/CorpusTools | ff5a7c06e2f7a478c5a239de7a78ef7eb5f4a45e | [
"BSD-3-Clause"
] | 22 | 2015-07-19T18:56:24.000Z | 2020-09-17T17:58:12.000Z | import csv
def save_minimal_pairs(output_filename, to_output, write_header=True):
    """Write minimal pairs to a tab-delimited file.

    :param output_filename: either a path string (the file is opened and
        closed here, UTF-8 with BOM) or an already-open file-like object
        (left open for the caller to manage).
    :param to_output: iterable of ``(_, _, ret_dict)`` triples where
        ``ret_dict`` maps ``(seg1, seg2)`` pairs to a set of word pairs,
        each word pair being ``((word1, trans1), (word2, trans2))``.
    :param write_header: whether to emit the column-header row first.
    """
    if isinstance(output_filename, str):
        outf = open(output_filename, mode='w', encoding='utf-8-sig', newline='')
        needs_closed = True
    else:
        outf = output_filename
        needs_closed = False
    # try/finally ensures a file we opened is closed even if writing raises
    # (previously an exception mid-write leaked the handle).
    try:
        writer = csv.writer(outf, delimiter='\t')
        if write_header:
            writer.writerow(['FIRST_SEGMENT', 'SECOND_SEGMENT',
                             'FIRST_WORD', 'FIRST_WORD_TRANSCRIPTION',
                             'SECOND_WORD', 'SECOND_WORD_TRANSCRIPTION'])
        for _, _, ret_dict in to_output:
            for seg_pair, word_pair_set in ret_dict.items():
                for word_pair in word_pair_set:
                    writer.writerow([seg_pair[0], seg_pair[1],
                                     word_pair[0][0], word_pair[0][1],
                                     word_pair[1][0], word_pair[1][1]])
    finally:
        if needs_closed:
            outf.close()
| 36.307692 | 80 | 0.581568 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 129 | 0.136653 |
1a394cf9c7eb99717e2514108e5f1a318701bbde | 666 | py | Python | src/modax/layers/network.py | GJBoth/modax | c7e1c128d4dd48b776f8ec4fa724c2e4b6e13c82 | [
"MIT"
] | 2 | 2021-12-10T14:36:37.000Z | 2022-02-10T11:47:03.000Z | src/modax/layers/network.py | GJBoth/modax | c7e1c128d4dd48b776f8ec4fa724c2e4b6e13c82 | [
"MIT"
] | null | null | null | src/modax/layers/network.py | GJBoth/modax | c7e1c128d4dd48b776f8ec4fa724c2e4b6e13c82 | [
"MIT"
] | 2 | 2020-12-22T14:49:13.000Z | 2021-04-09T08:52:08.000Z | from typing import Callable
from jax import lax
from flax import linen as nn
class MultiTaskDense(nn.Module):
    """A Dense layer applied independently per task.

    Holds one (in_features, features) kernel and one bias per task and
    applies them as a batched matmul over the leading task axis.
    Assumes inputs are shaped (n_tasks, batch, in_features) -- TODO confirm
    against callers.
    """

    features: int
    n_tasks: int
    kernel_init: Callable = nn.initializers.lecun_normal()
    bias_init: Callable = nn.initializers.zeros

    @nn.compact
    def __call__(self, inputs):
        in_features = inputs.shape[-1]
        w = self.param(
            "kernel", self.kernel_init, (self.n_tasks, in_features, self.features)
        )
        # Contract input axis 2 with kernel axis 1; axis 0 of both operands
        # is the shared (task) batch dimension.
        projected = lax.dot_general(
            inputs, w, dimension_numbers=(((2,), (1,)), ((0,), (0,)))
        )
        b = self.param("bias", self.bias_init, (self.n_tasks, 1, self.features))
        return projected + b
1a3b3a34411e2661693d22cef2b7c67cd304a5f8 | 14,658 | py | Python | tests/integrations/java/test_JDK__verify.py | pybee/briefcase | d7e9aa7bf15aa2abbc71e97aef9bea287129fdaa | [
"BSD-3-Clause"
] | 522 | 2015-07-28T16:06:18.000Z | 2019-03-25T17:16:55.000Z | tests/integrations/java/test_JDK__verify.py | pybee/briefcase | d7e9aa7bf15aa2abbc71e97aef9bea287129fdaa | [
"BSD-3-Clause"
] | 154 | 2015-09-17T02:50:55.000Z | 2019-03-22T07:10:34.000Z | tests/integrations/java/test_JDK__verify.py | pybee/briefcase | d7e9aa7bf15aa2abbc71e97aef9bea287129fdaa | [
"BSD-3-Clause"
] | 105 | 2015-09-25T08:43:26.000Z | 2019-03-25T15:59:27.000Z | import os
import shutil
import subprocess
import sys
from pathlib import Path
from unittest import mock
import pytest
from requests import exceptions as requests_exceptions
from briefcase.console import Log
from briefcase.exceptions import BriefcaseCommandError, MissingToolError, NetworkFailure
from briefcase.integrations.java import JDK
from tests.utils import FsPathMock
@pytest.fixture
def test_command(tmp_path):
    """A mocked briefcase command: tools under tmp_path, no JAVA_HOME set."""
    command = mock.MagicMock()
    command.logger = Log()
    command.tools_path = tmp_path / "tools"

    # Mock environ.get returning no explicit JAVA_HOME
    command.os.environ.get = mock.MagicMock(return_value="")

    return command
def test_macos_tool_java_home(test_command, capsys):
    """On macOS, the /usr/libexec/java_home utility is checked.

    The first check_output call probes java_home; the second validates the
    javac version found at the returned location.
    """
    # Mock being on macOS
    test_command.host_os = "Darwin"

    # Mock 2 calls to check_output.
    test_command.subprocess.check_output.side_effect = [
        "/path/to/java",
        "javac 1.8.0_144\n",
    ]

    # Create a JDK wrapper by verification
    jdk = JDK.verify(command=test_command)

    # The JDK should have the path returned by the tool
    assert jdk.java_home == Path("/path/to/java")

    test_command.subprocess.check_output.assert_has_calls(
        [
            # First call is to /usr/lib/java_home
            mock.call(
                ["/usr/libexec/java_home"],
                stderr=subprocess.STDOUT,
            ),
            # Second is a call to verify a valid Java version
            mock.call(
                [os.fsdecode(Path("/path/to/java/bin/javac")), "-version"],
                stderr=subprocess.STDOUT,
            ),
        ]
    )

    # No console output
    output = capsys.readouterr()
    assert output.out == ""
    assert output.err == ""
def test_macos_tool_failure(test_command, tmp_path, capsys):
    """On macOS, if the libexec tool fails, the Briefcase JDK is used."""
    # Mock being on macOS
    test_command.host_os = "Darwin"

    # Mock a failed call on the libexec tool
    test_command.subprocess.check_output.side_effect = subprocess.CalledProcessError(
        returncode=1, cmd="/usr/libexec/java_home"
    )

    # Create a directory to make it look like the Briefcase Java already exists.
    # (Its presence means verify() falls back without attempting a download.)
    (tmp_path / "tools" / "java" / "Contents" / "Home" / "bin").mkdir(parents=True)

    # Create a JDK wrapper by verification
    jdk = JDK.verify(command=test_command)

    # The JDK should have the briefcase JAVA_HOME
    assert jdk.java_home == tmp_path / "tools" / "java" / "Contents" / "Home"

    test_command.subprocess.check_output.assert_has_calls(
        [
            # First call is to /usr/lib/java_home
            mock.call(
                ["/usr/libexec/java_home"],
                stderr=subprocess.STDOUT,
            ),
        ]
    )

    # No console output
    output = capsys.readouterr()
    assert output.out == ""
    assert output.err == ""
def test_macos_provided_overrides_tool_java_home(test_command, capsys):
    """On macOS, an explicit JAVA_HOME overrides /usr/libexec/java_home.

    Only the javac version check runs; the libexec probe is never invoked.
    """
    # Mock being on macOS
    test_command.host_os = "Darwin"

    # Mock environ.get returning an explicit JAVA_HOME
    test_command.os.environ.get = mock.MagicMock(return_value="/path/to/java")

    # Mock return value from javac. libexec won't be invoked.
    test_command.subprocess.check_output.return_value = "javac 1.8.0_144\n"

    # Create a JDK wrapper by verification
    jdk = JDK.verify(command=test_command)

    # The JDK should have the path returned by the tool
    assert jdk.java_home == Path("/path/to/java")

    # A single call to check output. (A stray trailing comma previously made
    # this statement a pointless one-element tuple expression; removed.)
    test_command.subprocess.check_output.assert_called_once_with(
        [os.fsdecode(Path("/path/to/java/bin/javac")), "-version"],
        stderr=subprocess.STDOUT,
    )

    # No console output
    output = capsys.readouterr()
    assert output.out == ""
    assert output.err == ""
def test_valid_provided_java_home(test_command, capsys):
    """If a valid JAVA_HOME is provided, it is used."""
    # Mock environ.get returning an explicit JAVA_HOME
    test_command.os.environ.get = mock.MagicMock(return_value="/path/to/java")

    # Mock return value from javac.
    test_command.subprocess.check_output.return_value = "javac 1.8.0_144\n"

    # Create a JDK wrapper by verification
    jdk = JDK.verify(command=test_command)

    # The JDK should have the path returned by the tool
    assert jdk.java_home == Path("/path/to/java")

    # A single call to check output. (Stray trailing comma removed so this is
    # a plain call, not a one-element tuple expression.)
    test_command.subprocess.check_output.assert_called_once_with(
        [os.fsdecode(Path("/path/to/java/bin/javac")), "-version"],
        stderr=subprocess.STDOUT,
    )

    # No console output
    output = capsys.readouterr()
    assert output.out == ""
    assert output.err == ""
def test_invalid_jdk_version(test_command, tmp_path, capsys):
    """If the JDK pointed to by JAVA_HOME isn't a Java 8 JDK, the briefcase JDK
    is used."""
    # Mock environ.get returning an explicit JAVA_HOME
    test_command.os.environ.get = mock.MagicMock(return_value="/path/to/java")

    # Mock return value from javac reporting a non-Java-8 version.
    test_command.subprocess.check_output.return_value = "javac 14\n"

    # Create a directory to make it look like the Briefcase Java already exists.
    (tmp_path / "tools" / "java" / "bin").mkdir(parents=True)

    # Create a JDK wrapper by verification
    jdk = JDK.verify(command=test_command)

    # The JDK should have the briefcase JAVA_HOME
    assert jdk.java_home == tmp_path / "tools" / "java"

    # A single call was made to check javac. (Stray trailing comma removed.)
    test_command.subprocess.check_output.assert_called_once_with(
        [os.fsdecode(Path("/path/to/java/bin/javac")), "-version"],
        stderr=subprocess.STDOUT,
    )

    # No console output (because Briefcase JDK exists)
    output = capsys.readouterr()
    assert output.out == ""
    assert output.err == ""
def test_no_javac(test_command, tmp_path, capsys):
    """If the JAVA_HOME doesn't point to a location with a bin/javac, the
    briefcase JDK is used."""
    # Mock environ.get returning an explicit JAVA_HOME
    test_command.os.environ.get = mock.MagicMock(return_value="/path/to/nowhere")

    # Mock return value from javac failing because executable doesn't exist
    test_command.subprocess.check_output.side_effect = FileNotFoundError

    # Create a directory to make it look like the Briefcase Java already exists.
    (tmp_path / "tools" / "java" / "bin").mkdir(parents=True)

    # Create a JDK wrapper by verification
    jdk = JDK.verify(command=test_command)

    # The JAVA_HOME should point at the Briefcase-provided JDK
    assert jdk.java_home == tmp_path / "tools" / "java"

    # A single call was made to check javac. (Stray trailing comma removed.)
    test_command.subprocess.check_output.assert_called_once_with(
        [os.fsdecode(Path("/path/to/nowhere/bin/javac")), "-version"],
        stderr=subprocess.STDOUT,
    )

    # No console output (because Briefcase JDK exists)
    output = capsys.readouterr()
    assert output.out == ""
    assert output.err == ""
def test_javac_error(test_command, tmp_path, capsys):
    """If javac can't be executed, the briefcase JDK is used."""
    # Mock environ.get returning an explicit JAVA_HOME
    test_command.os.environ.get = mock.MagicMock(return_value="/path/to/java")

    # Mock javac failing with a non-zero exit status.
    test_command.subprocess.check_output.side_effect = subprocess.CalledProcessError(
        returncode=1, cmd="/path/to/java/bin/javac"
    )

    # Create a directory to make it look like the Briefcase Java already exists.
    (tmp_path / "tools" / "java" / "bin").mkdir(parents=True)

    # Create a JDK wrapper by verification
    jdk = JDK.verify(command=test_command)

    # The JDK should have the briefcase JAVA_HOME
    assert jdk.java_home == tmp_path / "tools" / "java"

    # A single call was made to check javac. (Stray trailing comma removed.)
    test_command.subprocess.check_output.assert_called_once_with(
        [os.fsdecode(Path("/path/to/java/bin/javac")), "-version"],
        stderr=subprocess.STDOUT,
    )

    # No console output (because Briefcase JDK exists)
    output = capsys.readouterr()
    assert output.out == ""
    assert output.err == ""
def test_unparseable_javac_version(test_command, tmp_path, capsys):
    """If the javac version can't be parsed, the briefcase JDK is used."""
    # Mock environ.get returning an explicit JAVA_HOME
    test_command.os.environ.get = mock.MagicMock(return_value="/path/to/java")

    # Mock javac emitting output that is not a version string.
    test_command.subprocess.check_output.return_value = "NONSENSE\n"

    # Create a directory to make it look like the Briefcase Java already exists.
    (tmp_path / "tools" / "java" / "bin").mkdir(parents=True)

    # Create a JDK wrapper by verification
    jdk = JDK.verify(command=test_command)

    # The JDK should have the briefcase JAVA_HOME
    assert jdk.java_home == tmp_path / "tools" / "java"

    # A single call was made to check javac. (Stray trailing comma removed.)
    test_command.subprocess.check_output.assert_called_once_with(
        [os.fsdecode(Path("/path/to/java/bin/javac")), "-version"],
        stderr=subprocess.STDOUT,
    )

    # No console output (because Briefcase JDK exists)
    output = capsys.readouterr()
    assert output.out == ""
    assert output.err == ""
@pytest.mark.parametrize(
    ("host_os, jdk_url, jhome"),
    [
        (
            "Darwin",
            "https://github.com/AdoptOpenJDK/openjdk8-binaries/releases/download/"
            "jdk8u242-b08/OpenJDK8U-jdk_x64_mac_hotspot_8u242b08.tar.gz",
            "java/Contents/Home",
        ),
        (
            "Linux",
            "https://github.com/AdoptOpenJDK/openjdk8-binaries/releases/download/"
            "jdk8u242-b08/OpenJDK8U-jdk_x64_linux_hotspot_8u242b08.tar.gz",
            "java",
        ),
        (
            "Windows",
            "https://github.com/AdoptOpenJDK/openjdk8-binaries/releases/download/"
            "jdk8u242-b08/OpenJDK8U-jdk_x64_windows_hotspot_8u242b08.zip",
            "java",
        ),
    ],
)
def test_successful_jdk_download(
    test_command, tmp_path, capsys, host_os, jdk_url, jhome
):
    """If needed, a JDK can be downloaded.

    Parametrized over host OSes; each case supplies the AdoptOpenJDK download
    URL and the JAVA_HOME sub-path inside the unpacked archive.
    """
    # Mock host OS
    test_command.host_os = host_os

    # Mock a JAVA_HOME that won't exist
    # This is only needed to make macOS *not* run /usr/libexec/java_home
    test_command.os.environ.get = mock.MagicMock(return_value="/does/not/exist")

    # Mock the cached download path
    # Consider to remove if block when we drop py3.7 support, only keep statements from else.
    # MagicMock below py3.8 doesn't has __fspath__ attribute.
    if sys.version_info < (3, 8):
        archive = FsPathMock("/path/to/download.zip")
    else:
        archive = mock.MagicMock()
        archive.__fspath__.return_value = "/path/to/download.zip"
    test_command.download_url.return_value = archive

    # Create a directory to make it look like Java was downloaded and unpacked.
    (tmp_path / "tools" / "jdk8u242-b08").mkdir(parents=True)

    # Invoke the verify call
    jdk = JDK.verify(command=test_command)

    assert jdk.java_home == tmp_path / "tools" / jhome

    # Console output contains a warning about the bad JDK location
    output = capsys.readouterr()
    assert output.err == ""
    assert "** WARNING: JAVA_HOME does not point to a Java 8 JDK" in output.out

    # Download was invoked
    test_command.download_url.assert_called_with(
        url=jdk_url,
        download_path=tmp_path / "tools",
    )
    # The archive was unpacked
    # TODO: Py3.6 compatibility; os.fsdecode not required in Py3.7
    test_command.shutil.unpack_archive.assert_called_with(
        "/path/to/download.zip", extract_dir=os.fsdecode(tmp_path / "tools")
    )
    # The original archive was deleted
    archive.unlink.assert_called_once_with()
def test_not_installed(test_command, tmp_path):
    """If the JDK isn't installed, and install isn't requested, an error is
    raised."""
    # Pretend we are on Linux so no macOS-specific probing happens.
    test_command.host_os = "Linux"

    # With install=False, verification must fail rather than download.
    with pytest.raises(MissingToolError):
        JDK.verify(command=test_command, install=False)

    # No download may have been attempted.
    test_command.download_url.assert_not_called()
def test_jdk_download_failure(test_command, tmp_path):
    """If an error occurs downloading the JDK, an error is raised.

    A ConnectionError from the download layer surfaces as NetworkFailure.
    """
    # Mock Linux as the host
    test_command.host_os = "Linux"

    # Mock a failure on download
    test_command.download_url.side_effect = requests_exceptions.ConnectionError

    # Invoking verify_jdk causes a network failure.
    with pytest.raises(NetworkFailure):
        JDK.verify(command=test_command)

    # That download was attempted
    test_command.download_url.assert_called_with(
        url="https://github.com/AdoptOpenJDK/openjdk8-binaries/releases/download/"
        "jdk8u242-b08/OpenJDK8U-jdk_x64_linux_hotspot_8u242b08.tar.gz",
        download_path=tmp_path / "tools",
    )

    # No attempt was made to unpack the archive
    assert test_command.shutil.unpack_archive.call_count == 0
def test_invalid_jdk_archive(test_command, tmp_path):
    """If the JDK download isn't a valid archive, raise an error.

    shutil.ReadError during unpacking surfaces as BriefcaseCommandError, and
    the bad archive is left on disk (not deleted).
    """
    # Mock Linux as the host
    test_command.host_os = "Linux"

    # Mock the cached download path
    # Consider to remove if block when we drop py3.7 support, only keep statements from else.
    # MagicMock below py3.8 doesn't has __fspath__ attribute.
    if sys.version_info < (3, 8):
        archive = FsPathMock("/path/to/download.zip")
    else:
        archive = mock.MagicMock()
        archive.__fspath__.return_value = "/path/to/download.zip"
    test_command.download_url.return_value = archive

    # Mock an unpack failure due to an invalid archive
    test_command.shutil.unpack_archive.side_effect = shutil.ReadError

    with pytest.raises(BriefcaseCommandError):
        JDK.verify(command=test_command)

    # The download occurred
    test_command.download_url.assert_called_with(
        url="https://github.com/AdoptOpenJDK/openjdk8-binaries/releases/download/"
        "jdk8u242-b08/OpenJDK8U-jdk_x64_linux_hotspot_8u242b08.tar.gz",
        download_path=tmp_path / "tools",
    )
    # An attempt was made to unpack the archive.
    # TODO: Py3.6 compatibility; os.fsdecode not required in Py3.7
    test_command.shutil.unpack_archive.assert_called_with(
        "/path/to/download.zip", extract_dir=os.fsdecode(tmp_path / "tools")
    )
    # The original archive was not deleted
    assert archive.unlink.call_count == 0
| 35.066986 | 93 | 0.68611 | 0 | 0 | 0 | 0 | 2,821 | 0.192455 | 0 | 0 | 6,459 | 0.440647 |
1a3d09039dd0883546bc6476b9cb21a1d00197ed | 1,094 | py | Python | Script Examples/selectionpickobject.py | chuongmep/CadPythonShell | c47d576596b1c2ffa054c8a17dafb2f4156f9a00 | [
"MIT"
] | 9 | 2022-03-15T17:19:34.000Z | 2022-03-19T17:13:33.000Z | Script Examples/selectionpickobject.py | atultegar/CadPythonShell | cef1055f39fcd76f6638900a7e1517942181648b | [
"MIT"
] | 2 | 2022-03-29T02:34:20.000Z | 2022-03-31T03:34:44.000Z | Script Examples/selectionpickobject.py | atultegar/CadPythonShell | cef1055f39fcd76f6638900a7e1517942181648b | [
"MIT"
] | 2 | 2022-03-16T14:10:09.000Z | 2022-03-19T17:13:10.000Z | import clr
import sys
sys.path.append('C:\Program Files (x86)\IronPython 2.7\Lib')
import os
import math
clr.AddReference('acmgd')
clr.AddReference('acdbmgd')
clr.AddReference('accoremgd')
# Import references from AutoCAD
from Autodesk.AutoCAD.Runtime import *
from Autodesk.AutoCAD.ApplicationServices import *
from Autodesk.AutoCAD.EditorInput import *
from Autodesk.AutoCAD.DatabaseServices import *
from Autodesk.AutoCAD.Geometry import *
# Handles on the active drawing, its editor, and its database.
doc = Application.DocumentManager.MdiActiveDocument
ed = doc.Editor
db = doc.Database

#Code Here :
objects = []
with doc.LockDocument():
    with doc.Database as db:
        with db.TransactionManager.StartTransaction() as t:
            # Open the block table for read and model space for write.
            acblkbl = t.GetObject(db.BlockTableId,OpenMode.ForRead)
            print(type(acblkbl))
            acblktblrec = t.GetObject(acblkbl[BlockTableRecord.ModelSpace],OpenMode.ForWrite)
            print(type(acblktblrec))
            # Prompt the user to pick a selection set in the editor.
            sel = doc.Editor.GetSelection()
            if(sel.Status== PromptStatus.OK):
                results = sel.Value
                # NOTE(review): this appends the *index* i, not the selected
                # entity; the final count is the same, but the list holds
                # indices -- confirm whether entities were intended.
                for i in range(len(results)):
                    if(results[i] != None) : objects.append(i)
                    else : pass
                # NOTE(review): message says "Exploded" but nothing is
                # exploded here; this is just the selected-object count.
                print("Count Object Exploded:",len(objects))
1a3d1f6c992c6dfd49b42ac641ef764ebd057319 | 11,751 | py | Python | Katna/config.py | viddik13/katna | 12256602a5fd24368ffffe2c1a82a46a49215c15 | [
"MIT"
] | 125 | 2019-08-22T06:53:55.000Z | 2022-03-24T05:53:41.000Z | Katna/config.py | viddik13/katna | 12256602a5fd24368ffffe2c1a82a46a49215c15 | [
"MIT"
] | 19 | 2020-02-13T07:14:59.000Z | 2021-12-01T15:13:33.000Z | Katna/config.py | viddik13/katna | 12256602a5fd24368ffffe2c1a82a46a49215c15 | [
"MIT"
] | 28 | 2019-09-03T07:00:29.000Z | 2021-12-30T04:20:14.000Z | """
.. module:: Katna.config
:platform: Platfrom Independent
:synopsis: This module defines some helpful configuration variables
"""
import os
# # Configuration parameters for Image class
class Image:
    """Configuration parameters for the Image (smart-crop) module."""

    # Factor by which the image size is reduced before processing.
    down_sample_factor = 8
    # Debug flag
    DEBUG = False
    # If no candidate crop is found, the crop height is reduced by
    # crop_height_reduction_factor_in_each_iteration and the search for
    # candidate crops is resumed.
    # While shrinking, crop height/width must not drop below
    # 1/min_image_to_crop_factor of the image height/width.
    min_image_to_crop_factor = 4
    crop_height_reduction_factor_in_each_iteration = 0.05
# # Configurations for Scoring crops for crop extractor
class CropScorer:
    """Weights and thresholds used when scoring candidate crops."""

    detail_weight = 0.2  # default weight value for detail parameter
    edge_radius = 0.4  # default edge radius
    edge_weight = -20  # default edge weight
    outside_importance = (
        -0.5
    )  # default value to set if the pixel is outside crop rectangle
    rule_of_thirds = True  # boolean to set rule of third condition check
    saliency_bias = 0.2  # bias color value for saliency(+- error value)
    saliency_weight = 1.3  # default weight value for saliency parameter (comment previously said "edge radius" by mistake)
    face_bias = 0.01  # bias color value for face(+- error value)
    face_weight = 3.4  # default weight value for face parameter
    rects_weight = 1  # default weight value for crop rectangles
# # Configurations for Text detection class
class TextDetector:
    """Configuration for the frozen-EAST text detection model."""

    # Min Confidence Threshold for Text detection model
    min_confidence = 0.9
    # Threshold for merging text detection boxes
    merge_threshold = 1
    # Name of Model files to be used for text detection
    frozen_weights = "frozen_east_text_detection.pb"
    # Location where model file will be downloaded
    cache_subdir = "models"
    # Output layer names queried from the network (sigmoid scores and
    # concatenated geometry, judging by the layer names).
    layerNames = ["feature_fusion/Conv_7/Sigmoid", "feature_fusion/concat_3"]
    # Download Link for Text detection model
    model_download_link = "https://github.com/oyyd/frozen_east_text_detection.pb/raw/master/frozen_east_text_detection.pb"
# # Configurations for Edge Feature class
class EdgeFeature:
    """Configuration for Canny edge-detection feature extraction."""

    # min edge threshold value (Canny hysteresis lower bound)
    min_val_threshold = 100
    # Max edge threshold value (Canny hysteresis upper bound)
    max_val_threshold = 200
    # aperture_size/size of Sobel kernel for canny edge detector
    ksize = 3
# # Configurations for Face detection Feature class
class FaceFeature:
    """Configuration for the face detector (res10 SSD Caffe model, per the
    file names below)."""

    # Model file name to be used for face detection
    model_file = "res10_300x300_ssd_iter_140000_fp16.caffemodel"
    # Model definition file name to be used for face detection
    prototxt_file = "deploy.prototxt"
    # Location where model file will be downloaded
    cache_subdir = "models"
    # Min Confidence Threshold for face detection model
    confidence = 0.5
    # Download Link for face detection model definition file
    prototxt_download_link = "https://raw.githubusercontent.com/opencv/opencv/master/samples/dnn/face_detector/deploy.prototxt"
    # Download Link for face detection model
    modelfile_download_link = "https://raw.githubusercontent.com/opencv/opencv_3rdparty/dnn_samples_face_detector_20180205_fp16/res10_300x300_ssd_iter_140000_fp16.caffemodel"
# # Configuration parameters for Video class
class Video:
    """Configuration parameters for the Video (key-frame extraction) module."""

    # Debug flag
    DEBUG = False
    # Videos shorter than this many seconds are rejected.
    min_video_duration = 5.0

    # Fraction of available memory that key-frame extraction may consume
    # (80% of available memory will be consumed).
    memory_consumption_threshold = 0.80
    # Assumed number of frames within which one candidate frame might be
    # available; used to estimate seconds to reach the threshold if all
    # frames are collected but not all are candidates (currently 1 in 5).
    assumed_no_of_frames_per_candidate_frame = 5
    # Videos longer than this many minutes are treated as large videos.
    video_split_threshold_in_minutes = 20

    # https://trac.ffmpeg.org/wiki/Encode/H.264
    # Keep this between 20 to 30 value
    video_compression_crf_parameter = 23
    video_compression_codec = "libx264"  # Currently "libx264 and is supported"
    compression_output_file_extension = "mp4"

    # Supported/valid video extensions supported by ffmpeg.
    # Regenerate the list on MacOSX or Linux with:
    # $ ffmpeg -demuxers -hide_banner | tail -n +5 | cut -d' ' -f4 | xargs -I{} ffmpeg -hide_banner -h demuxer={} | grep 'Common extensions' | cut -d' ' -f7 | tr ',' $'\n' | tr -d '.'
    video_extensions = [
        ".str", ".aa", ".aac", ".ac3", ".acm", ".adf", ".adp", ".dtk",
        ".ads", ".ss2", ".adx", ".aea", ".afc", ".aix", ".al", ".ape",
        ".apl", ".mac", ".aptx", ".aptxhd", ".aqt", ".ast", ".avi", ".avr",
        ".bfstm", ".bcstm", ".bit", ".bmv", ".brstm", ".cdg", ".cdxl", ".xl",
        ".c2", ".302", ".daud", ".str", ".dss", ".dts", ".dtshd", ".dv",
        ".dif", ".cdata", ".eac3", ".paf", ".fap", ".flm", ".flac", ".flv",
        ".fsb", ".g722", ".722", ".tco", ".rco", ".g723_1", ".g729", ".genh",
        ".gsm", ".h261", ".h26l", ".h264", ".264", ".avc", ".hevc", ".h265",
        ".265", ".idf", ".cgi", ".sf", ".ircam", ".ivr", ".flv", ".lvf",
        ".m4v", ".mkv", ".mk3d", ".mka", ".mks", ".mjpg", ".mjpeg", ".mpo",
        ".j2k", ".mlp", ".mov", ".mp4", ".m4a", ".3gp", ".3g2", ".mj2",
        ".mp2", ".mp3", ".m2a", ".mpa", ".mpc", ".mjpg", ".txt", ".mpl2",
        ".sub", ".msf", ".mtaf", ".ul", ".musx", ".mvi", ".mxg", ".v",
        ".nist", ".sph", ".nsp", ".nut", ".ogg", ".oma", ".omg", ".aa3",
        ".pjs", ".pvf", ".yuv", ".cif", ".qcif", ".rgb", ".rt", ".rsd",
        ".rsd", ".rso", ".sw", ".sb", ".smi", ".sami", ".sbc", ".msbc",
        ".sbg", ".scc", ".sdr2", ".sds", ".sdx", ".shn", ".vb", ".son",
        ".sln", ".mjpg", ".stl", ".sub", ".sub", ".sup", ".svag", ".tak",
        ".thd", ".tta", ".ans", ".art", ".asc", ".diz", ".ice", ".nfo",
        ".txt", ".vt", ".ty", ".ty+", ".uw", ".ub", ".v210", ".yuv10",
        ".vag", ".vc1", ".viv", ".idx", ".vpk", ".txt", ".vqf", ".vql",
        ".vqe", ".vtt", ".wsd", ".xmv", ".xvag", ".yop", ".y4m",
    ]
# Configuration parameters for mediapipe
class MediaPipe:
    """Namespace for mediapipe related configuration."""

    class AutoFlip:
        """Configuration of the mediapipe AutoFlip video re-framing graph."""

        # Rerun is required due to an autoflip issue mentioned here:
        # https://github.com/google/mediapipe/issues/497
        RERUN_LIMIT = 2
        # Models folder location
        MODELS_FOLDER_LOCATION = os.path.join(os.getcwd(), "mediapipe", "models")
        # pbtxt temp folder name
        TMP_PBTXT_FOLDER_NAME = "temp_pbtxt"
        TMP_PBTXT_FOLDER_PATH = os.path.join(os.getcwd(), TMP_PBTXT_FOLDER_NAME)
        # Default pbtxt and build cmd
        CONFIG_FILE_PBTXT = os.path.join(
            os.path.dirname(os.path.abspath(__file__)), "mediapipe_autoflip.pbtxt"
        )
        BUILD_CMD = "run_autoflip"
        # User friendly configuration key names. NOTE: "STABALIZATION" is a
        # historical misspelling kept for backward compatibility.
        ENFORCE_FEATURES_KEYNAME = "ENFORCE_FEATURES"
        STABALIZATION_THRESHOLD_KEYNAME = "STABALIZATION_THRESHOLD"
        BLUR_AREA_OPACITY_KEYNAME = "BLUR_AREA_OPACITY"
        # DEFAULT VALUES IN PBTXT
        DEFAULT_BLUR_AREA_OPACITY = 0.6
        DEFAULT_MOTION_STABALIZATION_THRESHOLD = 0.5
        DEFAULT_FEATURE_SIGNAL_VALUE = "false"
        # ENFORCE_FEATURES Keys
        _FACE_CORE_LANDMARKS = "FACE_CORE_LANDMARKS"
        _FACE_FULL = "FACE_FULL"
        _FACE_ALL_LANDMARKS = "FACE_ALL_LANDMARKS"
        _HUMAN = "HUMAN"
        _PET = "PET"
        _CAR = "CAR"
        _OBJECT = "OBJECT"
        # The variable names below must match the *_KEYNAME strings above for
        # set_conf (which uses setattr with the key name) to work smoothly.
        # ENFORCE_FEATURES list
        ENFORCE_FEATURES = {
            _FACE_CORE_LANDMARKS: False,
            _FACE_ALL_LANDMARKS: False,
            _FACE_FULL: False,
            _HUMAN: False,
            _PET: False,
            _CAR: False,
            _OBJECT: False,
        }
        # % AREA from center where most of the content is
        # usually applied when content is focused near center
        STABALIZATION_THRESHOLD = DEFAULT_MOTION_STABALIZATION_THRESHOLD
        # opacity of blur area
        BLUR_AREA_OPACITY = DEFAULT_BLUR_AREA_OPACITY

        @classmethod
        def get_pbtxt_mapping(cls):
            """Map the user-facing config keys to their pbtxt field names.

            :return: dictionary mapping config keys to pbtxt fields
            :rtype: dict
            """
            return {
                cls.ENFORCE_FEATURES_KEYNAME: "signal_settings",
                cls.STABALIZATION_THRESHOLD_KEYNAME: "motion_stabilization_threshold_percent",
                cls.BLUR_AREA_OPACITY_KEYNAME: "overlay_opacity",
            }

        @classmethod
        def get_conf(cls):
            """Gets the current config

            :return: dictionary containing the current config
            :rtype: dict
            """
            return {
                cls.ENFORCE_FEATURES_KEYNAME: cls.ENFORCE_FEATURES,
                cls.STABALIZATION_THRESHOLD_KEYNAME: cls.STABALIZATION_THRESHOLD,
                cls.BLUR_AREA_OPACITY_KEYNAME: cls.BLUR_AREA_OPACITY,
            }

        @classmethod
        def set_conf(cls, config):
            """Sets the config passed

            :param config: The configuration to set.
            :type config: dict
            :raises Exception: if ``config`` contains an unknown key.
            """
            # The set of valid keys never changes, so compute it once instead
            # of calling get_conf() on every loop iteration.
            valid_keys = cls.get_conf().keys()
            for attr in config.keys():
                if attr not in valid_keys:
                    raise Exception(
                        " Invalid configuration. Use get_conf method to see existing configuration or refer documentation."
                    )
                if attr == cls.ENFORCE_FEATURES_KEYNAME:
                    # Merge with the current mapping so features that are not
                    # mentioned keep their existing values.
                    updated_attr_dict = {**getattr(cls, attr), **config[attr]}
                    setattr(cls, attr, updated_attr_dict)
                else:
                    setattr(cls, attr, config[attr])
class ImageSelector:
    """Thresholds used when filtering candidate frames by image quality."""

    # Setting for optimum Brightness values (percent range a frame should
    # fall inside to be considered usable).
    min_brightness_value = 10.0
    max_brightness_value = 90.0
    # Step for relaxing/scanning the brightness window — assumes the selector
    # widens the range in these increments; TODO confirm against the selector
    # implementation.
    brightness_step = 2.0
    # Setting for optimum Contrast/Entropy values of a frame.
    min_entropy_value = 1.0
    max_entropy_value = 10.0
    # Step for relaxing/scanning the entropy window — TODO confirm.
    entropy_step = 0.5
class FrameExtractor:
    """Settings controlling candidate-frame extraction from videos."""

    # Setting local maxima criteria: keep only frames at local maxima of the
    # frame-difference signal.
    USE_LOCAL_MAXIMA = True
    # Length of sliding window used when taking frame differences.
    len_window = 20
    # Chunk size of Images to be processed at a time in memory
    max_frames_in_chunk = 500
    # Type of smoothening window from 'flat', 'hanning', 'hamming',
    # 'bartlett', 'blackman'; a flat window will produce a moving average
    # smoothing.
    window_type = "hanning"
| 28.521845 | 183 | 0.562676 | 11,249 | 0.95728 | 0 | 0 | 1,561 | 0.13284 | 0 | 0 | 5,815 | 0.494852 |
1a3d73a6c52da2deb3d1d2f1db4c3862bf7713d4 | 350 | py | Python | functions/closeAll.py | chiluf/visvis.dev | 373846ea25044b7ca50f44c63dab4248e14deacd | [
"BSD-3-Clause"
] | null | null | null | functions/closeAll.py | chiluf/visvis.dev | 373846ea25044b7ca50f44c63dab4248e14deacd | [
"BSD-3-Clause"
] | null | null | null | functions/closeAll.py | chiluf/visvis.dev | 373846ea25044b7ca50f44c63dab4248e14deacd | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (C) 2012, Almar Klein
#
# Visvis is distributed under the terms of the (new) BSD License.
# The full license can be found in 'license.txt'.
import visvis as vv
def closeAll():
    """ closeAll()

    Closes all figures.
    """
    # Iterate over a snapshot of the registered figures: fig.Destroy() is
    # expected to unregister the figure from BaseFigure._figures, and
    # mutating a dict while iterating over its values() view raises
    # RuntimeError on Python 3.
    for fig in list(vv.BaseFigure._figures.values()):
        fig.Destroy()
| 19.444444 | 65 | 0.614286 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 227 | 0.648571 |
1a3e656693ac2d53648f435ad47a133f2ee184fd | 15,423 | py | Python | gtrace/optics/geometric.py | terrencetec/gtrace | e471aa534e5b0fa4cfd7a79f83641b534e276b67 | [
"BSD-2-Clause"
] | 1 | 2021-03-25T04:08:47.000Z | 2021-03-25T04:08:47.000Z | gtrace/optics/geometric.py | terrencetec/gtrace | e471aa534e5b0fa4cfd7a79f83641b534e276b67 | [
"BSD-2-Clause"
] | 1 | 2021-04-06T07:03:07.000Z | 2021-04-06T07:03:07.000Z | gtrace/optics/geometric.py | terrencetec/gtrace | e471aa534e5b0fa4cfd7a79f83641b534e276b67 | [
"BSD-2-Clause"
] | 1 | 2021-03-24T14:16:45.000Z | 2021-03-24T14:16:45.000Z | #{{{ Import
import numpy as np
pi = np.pi
#}}}
#{{{ Snell's Law
def deflection_angle(theta, n1, n2, deg=True):
    """Compute the refraction (deflection) angle from Snell's law.

    Parameters
    ----------
    theta : float
        Angle of incidence.
    n1 : float
        Refractive index of the first medium.
    n2 : float
        Refractive index of the second medium.
    deg : boolean, optional
        True if ``theta`` is given (and the result returned) in degrees;
        otherwise radians are used throughout.

    Returns
    -------
    float
        The deflection angle, in the same unit as ``theta``.
    """
    # Conversion factor to radians (identity when already in radians).
    factor = np.pi / 180.0 if deg else 1.0
    incidence = theta * factor
    # Snell's law: n1*sin(theta1) == n2*sin(theta2).
    return np.arcsin(n1 * np.sin(incidence) / n2) / factor
#}}}
#{{{ Geometry utilities
#{{{ line_plane_intersection
def line_plane_intersection(pos,
                            dirVect,
                            plane_center,
                            normalVector,
                            diameter):
    '''
    Compute the intersection point between a line and a finite plane
    (a segment of length ``diameter`` in 2D).

    Parameters
    ----------
    pos : array
        The position of the end point of the line.
    dirVect : array
        The directional vector specifying the line.
    plane_center : array
        The position of the center of the plane.
    normalVector : array
        The normal vector of the plane.
    diameter : float
        The diameter of the plane.

    Returns
    -------
    dict
        Dictionary with the following keys:
        "Intersection Point": numpy array with the intersection coordinates.
        "isHit": whether the line actually intersects the plane.
        "distance": distance from the line origin to the intersection point.
        "distance from center": signed distance between the plane center and
        the intersection point.
    '''
    # Coerce everything to float ndarrays.
    pos = np.array(pos, dtype=np.float64)
    dirVect = np.array(dirVect, dtype=np.float64)
    plane_center = np.array(plane_center, dtype=np.float64)
    normalVector = np.array(normalVector, dtype=np.float64)
    diameter = float(diameter)

    # Unit vector lying in the plane (the normal rotated by 90 degrees).
    in_plane = np.array([-normalVector[1], normalVector[0]])
    in_plane = in_plane / np.linalg.norm(in_plane)
    dirVect = dirVect / np.linalg.norm(dirVect)

    # A line (anti)parallel to the plane never crosses it.
    if np.abs(np.dot(dirVect, in_plane)) > 1 - 1e-10:
        return {'Intersection Point': np.array((0., 0.)), 'isHit': False,
                'distance': 0.0,
                'distance from center': 0.0}

    # Solve pos + t*dirVect == plane_center + s*in_plane for (t, s):
    # t is the travel along the line, s the offset along the plane.
    coeff = np.vstack((dirVect, -in_plane)).T
    t, s = np.linalg.solve(coeff, plane_center - pos)
    crossing = pos + t * dirVect

    # Reject hits outside the plane extent, behind the line origin, or
    # arriving from the back side of the plane.
    off_center = np.abs(s)
    hit = not (off_center > diameter / 2.0
               or t < 0.
               or np.dot(dirVect, normalVector) > 0.)

    return {'Intersection Point': crossing, 'isHit': hit,
            'distance': np.abs(t),
            'distance from center': s}
#}}}
#{{{ line_arc_intersection
def line_arc_intersection(pos,
                          dirVect,
                          chord_center,
                          chordNormVect,
                          invROC,
                          diameter,
                          verbose=False):
    '''
    Compute the intersection point between a line
    and an arc.

    Parameters
    ----------
    pos : array
        Origin of the line.
    dirVect : array
        Direction of the line.
    chord_center : array
        The center of the chord made by the arc.
    chordNormVect : array
        Normal vector of the chord.
    invROC : float
        Inverse of the ROC of the arc. Positive for concave surface.
    diameter : float
        Length of the chord.
    verbose : boolean, optional
        Prints useful information.

    Returns
    -------
    dict
        The returned value is a dictionary with the following keys:
        "Intersection Point": numpy array of the coordinates of the intersection point.
        "isHit": A boolean value of whether the line intersects with the plane or not.
        "distance": Distance between the origin of the line and the intersection point.
        "localNormVect": localNormVect,
        "localNormAngle": localNormAngle.
        On a miss, only {"isHit": False} is returned (except in the
        nearly-flat fallback, which returns the full plane-intersection
        dict).
    '''
    #Make sure the inputs are ndarrays
    pos = np.array(pos, dtype=np.float64)
    dirVect = np.array(dirVect, dtype=np.float64)
    chord_center = np.array(chord_center, dtype=np.float64)
    chordNormVect = np.array(chordNormVect, dtype=np.float64)
    invROC = float(invROC)
    diameter = float(diameter)

    #Normalize
    dirVect = dirVect/np.linalg.norm(dirVect)
    chordNormVect = chordNormVect/np.linalg.norm(chordNormVect)

    #Check if the ROC is too large.
    # Nearly flat arc: fall back to the plane intersection and report the
    # chord normal as the local normal.
    if np.abs(invROC) < 1e-5:
        #It is almost a plane
        ans = line_plane_intersection(pos, dirVect, chord_center, chordNormVect, diameter)
        localNormVect = chordNormVect
        localNormAngle = np.mod(np.arctan2(localNormVect[1],
                                           localNormVect[0]), 2*pi)
        ans['localNormVect'] = localNormVect
        ans['localNormAngle'] = localNormAngle
        return ans

    ROC = 1/invROC

    #Compute the center of the arc
    # theta is the half-opening angle of the arc as seen from its center.
    theta = np.arcsin(diameter/(2*ROC))
    l = ROC*np.cos(theta)
    arc_center = chord_center + chordNormVect*l

    #For convex surface, pos has to be outside the circle.
    if ROC < 0 and np.linalg.norm(pos - arc_center) < np.abs(ROC):
        if verbose:
            print('The line does not hit the arc.')
        return {'isHit': False}

    #First, decompose the vector connecting from the arc_center
    #to pos into the components parallel to the line and orthogonal to it.
    # s is the component in the orthogonal direction and t is the one along
    #the line.

    #A vector orthogonal to the line
    k = np.array([-dirVect[1], dirVect[0]])
    #Solve the equation to decompose the vector pos-arc_center
    M = np.vstack((k, -dirVect)).T
    ans = np.linalg.solve(M, pos - arc_center)
    s = ans[0]
    # t (the component along the line) is not used below.
    t = ans[1]

    # |s| is the perpendicular distance from the circle center to the line;
    # if it exceeds the radius the line misses the circle entirely.
    if np.abs(s) > np.abs(ROC):
        if verbose:
            print('The line does not hit the arc.')
        return {'isHit': False}

    #Compute two cross points
    #Work with the chord formed by the line and the circle.
    #d is half the length of the chord.
    d = np.sqrt(ROC**2 - s**2)

    # Pick the crossing on the correct side: for a concave surface (ROC > 0)
    # the far crossing, for a convex one the near crossing. The local normal
    # points from the surface toward the incoming beam side.
    if ROC > 0:
        intersection_point = k*s+arc_center + d*dirVect
        localNormVect = arc_center - intersection_point
    else:
        intersection_point = k*s+arc_center - d*dirVect
        localNormVect = intersection_point - arc_center

    #Check if dirVect and the vector connecting from pos to intersection_point
    #are pointing the same direction.
    if np.dot(dirVect, intersection_point - pos) < 0:
        if verbose:
            print('The line does not hit the arc.')
        return {'isHit': False}

    #Normalize
    localNormVect = localNormVect/np.linalg.norm(localNormVect)
    localNormAngle = np.mod(np.arctan2(localNormVect[1],
                                       localNormVect[0]), 2*pi)

    #Check if the intersection point is within the
    #diameter
    v0 = - np.sign(ROC) * chordNormVect*(1-1e-16) #(1-1e-16) is necessary to avoid rounding error
    v1 = intersection_point - arc_center
    v1 = v1/np.linalg.norm(v1)*(1-1e-16)

    # Angle between the arc apex direction and the hit point must not exceed
    # the half-opening angle.
    if np.arccos(np.dot(v0,v1)) > np.abs(theta):
        if verbose:
            print('The line does not hit the arc.')
        return {'isHit': False}

    distance = np.linalg.norm(intersection_point - pos)

    return {'Intersection Point': intersection_point, 'isHit': True,
            'distance': distance, 'localNormVect': localNormVect,
            'localNormAngle': localNormAngle}
#}}}
#{{{ vector_rotation_2D
def vector_rotation_2D(vect, angle):
    """Rotate a 2D vector counterclockwise by an angle.

    Parameters
    ----------
    vect : array
        A 2D vector.
    angle : float
        Angle of rotation in radians.

    Returns
    -------
    array
        The rotated vector.
    """
    vect = np.array(vect)
    angle = float(angle)
    # Standard 2D rotation matrix.
    c, s = np.cos(angle), np.sin(angle)
    rotation = np.array([[c, -s],
                         [s, c]])
    return np.dot(rotation, vect)
#}}}
def vector_normalize(vect):
    '''
    Return the unit vector pointing in the same direction as ``vect``.

    Parameters
    ----------
    vect : array
        The vector to be normalized.

    Returns
    -------
    array
        The normalized (unit-length) vector.
    '''
    magnitude = np.linalg.norm(vect)
    return vect / magnitude
#{{{ normSpheric
def normSpheric(normAngle, invROC, dist_from_center):
    '''
    Local normal angle of a spheric mirror at a distance from its center.

    Parameters
    ----------
    normAngle : float
        The angle formed by the normal vector of the mirror
        at the center and the x-axis.
    invROC : float
        1/R, where R is the ROC of the mirror.
    dist_from_center : float
        Signed distance from the mirror center to the point where the local
        normal is requested. For a mirror facing +x (normal pointing toward
        positive x), the sign is positive for points with positive y and
        negative for points with negative y.

    Returns
    -------
    float
        The local normal angle, wrapped into [0, 2*pi).
    '''
    two_pi = 2 * np.pi
    base = np.mod(normAngle, two_pi)
    # The surface tilts by arcsin(-d/R) as we move away from the center.
    tilt = np.arcsin(-dist_from_center * invROC)
    return np.mod(tilt + base, two_pi)
#}}}
#{{{ reflection and deflection angle
def refl_defl_angle(beamAngle, normAngle, n1, n2, invROC=None):
    '''
    Returns a tuple of reflection and deflection angles, optionally with
    the ABCD matrices of the surface.

    Parameters
    ----------
    beamAngle : float
        The angle formed by the propagation direction vector
        of the incident beam and the x-axis.
    normAngle : float
        The angle formed by the normal vector of the surface
        and the x-axis.
    n1 : float
        Index of refraction of the incident side medium.
    n2 : float
        Index of refraction of the transmission side medium.
    invROC : float or None, optional
        Inverse of the radius of curvature. When given, ABCD matrices are
        also computed and returned.

    Returns
    -------
    6-tuple or 2-tuple
        (reflAngle, deflAngle, Mrx, Mry, Mtx, Mty) if invROC is given,
        otherwise (reflAngle, deflAngle).
    '''
    beamAngle = np.mod(beamAngle, 2*np.pi)
    normAngle = np.mod(normAngle, 2*np.pi)
    # Signed angle of incidence, measured from the surface normal.
    incidentAngle = np.mod(beamAngle - normAngle, 2*np.pi) - np.pi
    reflAngle = np.mod(normAngle - incidentAngle, 2*np.pi)
    # Snell's law for the refracted direction.
    deflAngle = np.arcsin(n1*np.sin(incidentAngle)/n2)
    deflAngle = np.mod(deflAngle + np.pi + normAngle, 2*np.pi)

    # Idiom fix: was `if not invROC == None:` (PEP 8 E711).
    if invROC is None:
        return (reflAngle, deflAngle)

    # Calculate ABCD matrices.
    # Absolute value of the incident angle.
    theta1 = np.abs(incidentAngle)
    # For reflection (tangential/x and sagittal/y planes).
    Mrx = np.array([[1., 0.], [-2*n1*invROC/np.cos(theta1), 1.]])
    Mry = np.array([[1., 0.], [-2*n1*invROC*np.cos(theta1), 1.]])
    # For transmission.
    theta2 = np.arcsin(n1*np.sin(theta1)/n2)
    nex = (n2*np.cos(theta2)-n1*np.cos(theta1))/(np.cos(theta1)*np.cos(theta2))
    Mtx = np.array([[np.cos(theta2)/np.cos(theta1), 0.],
                    [nex*invROC, np.cos(theta1)/np.cos(theta2)]])
    ney = n2*np.cos(theta2)-n1*np.cos(theta1)
    Mty = np.array([[1., 0.],[ney*invROC, 1.]])
    return (reflAngle, deflAngle, Mrx, Mry, Mtx, Mty)
#}}}
#{{{ reflection and deflection angle for cylindrical surface
def cyl_refl_defl_angle(beamAngle, normAngle, n1, n2, invROC=None, curve_direction='h'):
    '''
    Returns reflection and deflection angles for incidence of a beam on a
    cylindrical surface, optionally with the ABCD matrices.

    Parameters
    ----------
    beamAngle : float
        The angle formed by the propagation direction vector
        of the incident beam and the x-axis.
    normAngle : float
        The angle formed by the normal vector of the surface
        and the x-axis.
    n1 : float
        Index of refraction of the incident side medium.
    n2 : float
        Index of refraction of the transmission side medium.
    invROC : float or None, optional
        Inverse of the radius of curvature. When given, ABCD matrices are
        also computed and returned.
    curve_direction : str, optional
        Direction of curvature. Either 'h' or 'v'. The surface is flat in
        the other direction, so the corresponding reflection matrix is the
        identity.

    Returns
    -------
    6-tuple or 2-tuple
        (reflAngle, deflAngle, Mrx, Mry, Mtx, Mty) if invROC is given,
        otherwise (reflAngle, deflAngle).
    '''
    beamAngle = np.mod(beamAngle, 2*np.pi)
    normAngle = np.mod(normAngle, 2*np.pi)
    # Signed angle of incidence, measured from the surface normal.
    incidentAngle = np.mod(beamAngle - normAngle, 2*np.pi) - np.pi
    reflAngle = np.mod(normAngle - incidentAngle, 2*np.pi)
    deflAngle = np.arcsin(n1*np.sin(incidentAngle)/n2)
    deflAngle = np.mod(deflAngle + np.pi + normAngle, 2*np.pi)

    # Idiom fix: was `if not invROC == None:` (PEP 8 E711).
    if invROC is None:
        return (reflAngle, deflAngle)

    # Calculate ABCD matrices.
    # Absolute value of the incident angle.
    theta1 = np.abs(incidentAngle)
    # For reflection: the cylinder only focuses along its curved direction.
    if curve_direction == 'h':
        Mrx = np.array([[1., 0.], [-2*n1*invROC/np.cos(theta1), 1.]])
        Mry = np.array([[1., 0.], [0., 1.]])
    else:
        Mrx = np.array([[1., 0.], [0., 1.]])
        Mry = np.array([[1., 0.], [-2*n1*invROC*np.cos(theta1), 1.]])
    # For transmission.
    # NOTE(review): the transmission matrices apply the curvature in both
    # directions (same as the spherical case), unlike the reflection
    # matrices above — confirm this is intended.
    theta2 = np.arcsin(n1*np.sin(theta1)/n2)
    nex = (n2*np.cos(theta2)-n1*np.cos(theta1))/(np.cos(theta1)*np.cos(theta2))
    Mtx = np.array([[np.cos(theta2)/np.cos(theta1), 0.],
                    [nex*invROC, np.cos(theta1)/np.cos(theta2)]])
    ney = n2*np.cos(theta2)-n1*np.cos(theta1)
    Mty = np.array([[1., 0.],[ney*invROC, 1.]])
    return (reflAngle, deflAngle, Mrx, Mry, Mtx, Mty)
#}}}
#}}}
#{{{ VariCAD utility functions
def vc_deflect(theta, theta1, n1, n2):
    '''
    Deflection angle helper function for VariCAD.

    Parameters
    ----------
    theta : float
        Angle of the surface measured from right.
    theta1 : float
        Angle of the incident beam measured from right.
    n1 : float
        Index of refraction of the incident side medium.
    n2 : float
        Index of refraction of the transmission side medium.

    Returns
    -------
    phi2 : float
        Angle of the deflected beam measured from right.
    '''
    # Convert theta and theta1 to 0-360 format.
    if theta < 0:
        theta = 360.0 + theta
    if theta > 180:
        theta = theta -180.0
    if theta1 < 0:
        theta1 = 360.0 + theta1
    # Determine the incident angle (measured from the surface normal).
    phi = abs(theta - theta1)
    phi1 = 90.0-np.arcsin(np.abs(np.sin(pi*phi/180.0)))*180.0/pi
    # Calculate deflection angle via Snell's law (degrees).
    phi2 = deflection_angle(phi1, n1, n2)
    # Convert back to the 0-360 angle convention: s1/s2 pick the quadrant
    # of the incoming beam relative to the surface.
    s1 = np.sign(np.sin(pi*(theta1 - theta)/180.0))
    s2 = -np.sign(np.cos(pi*(theta1 - theta)/180.0))
    phi2 = theta + s1*90 + s1*s2*phi2
    return phi2
def vc_reflect(theta, theta1):
    """Reflection angle helper function for VariCAD.

    Parameters
    ----------
    theta : float
        Angle of the surface measured from right.
    theta1 : float
        Angle of the incident beam measured from right.

    Returns
    -------
    float
        Angle of the reflected beam measured from right.
    """
    # Fold the surface angle into the 0-180 range and the beam angle into
    # the 0-360 range.
    if theta < 0:
        theta += 360.0
    if theta > 180:
        theta -= 180.0
    if theta1 < 0:
        theta1 += 360.0
    # Mirror the beam angle about the surface angle.
    return theta - (theta1 - theta)
#}}}
| 28.774254 | 108 | 0.607145 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7,713 | 0.500097 |
1a4315521e410535022480f1787d8082ba26bce9 | 24 | py | Python | tpau_gtfsutilities/gtfs/process/__init__.py | anniekfifer/tpau-gtfsutils | a022d4c8465b7f736023ecc294ff0d7d0201b0e9 | [
"BSD-3-Clause"
] | 3 | 2019-09-25T10:05:42.000Z | 2019-11-26T13:30:29.000Z | tpau_gtfsutilities/gtfs/process/__init__.py | anniekfifer/tpau-gtfsutils | a022d4c8465b7f736023ecc294ff0d7d0201b0e9 | [
"BSD-3-Clause"
] | null | null | null | tpau_gtfsutilities/gtfs/process/__init__.py | anniekfifer/tpau-gtfsutils | a022d4c8465b7f736023ecc294ff0d7d0201b0e9 | [
"BSD-3-Clause"
] | null | null | null | from . import preprocess | 24 | 24 | 0.833333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
1a43602ede101f6065f899eb4ae9c5972b6d4f2d | 865 | py | Python | setup.py | mlab-upenn/pyEp | 14435158bba4c11df43dfac6b662e81d7d0029b9 | [
"MIT"
] | 11 | 2018-06-20T16:09:50.000Z | 2021-06-28T18:48:01.000Z | setup.py | mlab-upenn/pyEp | 14435158bba4c11df43dfac6b662e81d7d0029b9 | [
"MIT"
] | 4 | 2018-05-09T18:14:52.000Z | 2018-08-21T13:59:52.000Z | setup.py | mlab-upenn/pyEp | 14435158bba4c11df43dfac6b662e81d7d0029b9 | [
"MIT"
] | 2 | 2020-02-16T07:52:45.000Z | 2021-09-19T05:19:41.000Z | from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
# Directory containing this setup.py (the project root).
here = path.abspath(path.dirname(__file__))

# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
    long_description = f.read()

setup(
    name='pyEp',
    version='0.9.4.4',
    description='pyEp: EnergyPlus cosimulation in Python',
    long_description=long_description,
    url='',
    author='Derek Nong',
    author_email='dnong@sas.upenn.edu',
    license='MIT',
    # Trove classifiers; see https://pypi.org/classifiers/
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.5'
    ],
    keywords='EnergyPlus simulation',
    packages=['pyEp'],
    package_dir={'pyEp': 'pyEp'},
    # Ship non-Python data files declared for the package as well.
    include_package_data=True
)
1a44e0b322e41eca3cc5fe2f6db02f126c139edf | 1,918 | py | Python | python_code/vnev/Lib/site-packages/jdcloud_sdk/services/jcq/models/Subscription.py | Ureimu/weather-robot | 7634195af388538a566ccea9f8a8534c5fb0f4b6 | [
"MIT"
] | 14 | 2018-04-19T09:53:56.000Z | 2022-01-27T06:05:48.000Z | python_code/vnev/Lib/site-packages/jdcloud_sdk/services/jcq/models/Subscription.py | Ureimu/weather-robot | 7634195af388538a566ccea9f8a8534c5fb0f4b6 | [
"MIT"
] | 15 | 2018-09-11T05:39:54.000Z | 2021-07-02T12:38:02.000Z | python_code/vnev/Lib/site-packages/jdcloud_sdk/services/jcq/models/Subscription.py | Ureimu/weather-robot | 7634195af388538a566ccea9f8a8534c5fb0f4b6 | [
"MIT"
] | 33 | 2018-04-20T05:29:16.000Z | 2022-02-17T09:10:05.000Z | # coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class Subscription(object):
    """Value object describing a JCQ consumer-group subscription."""

    def __init__(self, consumerGroupId=None, endPoint=None, messageInvisibleTimeInSeconds=None, subscriptionType=None, tags=None, dlqEnable=None, maxRetryTimes=None, createTime=None, lastUpdateTime=None, consumerNumbers=None):
        """
        :param consumerGroupId: (Optional) consumerGroupId
        :param endPoint: (Optional) endPoint
        :param messageInvisibleTimeInSeconds: (Optional) messageInvisibleTimeInSeconds
        :param subscriptionType: (Optional) subscriptionType
        :param tags: (Optional) tags
        :param dlqEnable: (Optional) whether the dead-letter queue is enabled
        :param maxRetryTimes: (Optional) maximum number of delivery retries
        :param createTime: (Optional) creation time
        :param lastUpdateTime: (Optional) last update time
        :param consumerNumbers: (Optional) number of online consumers
        """
        self.consumerGroupId = consumerGroupId
        self.endPoint = endPoint
        self.messageInvisibleTimeInSeconds = messageInvisibleTimeInSeconds
        self.subscriptionType = subscriptionType
        self.tags = tags
        self.dlqEnable = dlqEnable
        self.maxRetryTimes = maxRetryTimes
        self.createTime = createTime
        self.lastUpdateTime = lastUpdateTime
        self.consumerNumbers = consumerNumbers
| 41.695652 | 226 | 0.728363 | 1,303 | 0.660081 | 0 | 0 | 0 | 0 | 0 | 0 | 1,253 | 0.634752 |
1a45326720b16c2af69d18465b3b98b47482317d | 451 | py | Python | aggregator/migrations/0033_auto_20190118_1735.py | dipapaspyros/bdo_platform | 336de07c6ed14290c54f2154117dbf90a187e4ea | [
"MIT"
] | 2 | 2018-02-07T10:26:28.000Z | 2018-09-21T09:12:58.000Z | aggregator/migrations/0033_auto_20190118_1735.py | dipapaspyros/bdo_platform | 336de07c6ed14290c54f2154117dbf90a187e4ea | [
"MIT"
] | 5 | 2018-09-21T10:40:44.000Z | 2019-04-06T10:59:57.000Z | aggregator/migrations/0033_auto_20190118_1735.py | dipapaspyros/bdo_platform | 336de07c6ed14290c54f2154117dbf90a187e4ea | [
"MIT"
] | 3 | 2019-06-09T15:42:02.000Z | 2022-02-14T19:50:33.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-01-18 15:35
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration for the 'aggregator' app."""

    # Depends on the previous migration so the migration graph stays linear.
    dependencies = [
        ('aggregator', '0032_auto_20190118_1720'),
    ]

    # NOTE: the generated RenameField operation was deliberately commented
    # out, so applying this migration is a no-op on the database schema.
    operations = [
        # migrations.RenameField(
        #     model_name='dataset',
        #     old_name='spatiaNorth',
        #     new_name='spatialNorth',
        # ),
    ]
| 21.47619 | 50 | 0.605322 | 303 | 0.67184 | 0 | 0 | 0 | 0 | 0 | 0 | 221 | 0.490022 |
1a4560e6c15b32f07d918f4f5ee71a7ff0eeadff | 1,793 | py | Python | src/lead2gold/tools/emd.py | plachta11b/lead2gold | 96a0b2fda190d6ab6045b6045d5c621cea119306 | [
"MIT"
] | null | null | null | src/lead2gold/tools/emd.py | plachta11b/lead2gold | 96a0b2fda190d6ab6045b6045d5c621cea119306 | [
"MIT"
] | null | null | null | src/lead2gold/tools/emd.py | plachta11b/lead2gold | 96a0b2fda190d6ab6045b6045d5c621cea119306 | [
"MIT"
] | null | null | null | import ntpath
from lead2gold.tools.tool import Tool
from lead2gold.util import pwm2consensus
from lead2gold.util import sequence2pwm
from lead2gold.motif import Motif
class EMD(Tool):
    """Class implementing an EMD search tool motif convertor."""

    toolName = "EMD"

    def __init__(self):
        """Initialize all class attributes with their default values."""
        # Fix: was `super(self.__class__, self)`, which recurses forever if
        # this class is ever subclassed; name the class explicitly.
        super(EMD, self).__init__(self.toolName)

    def parse(self, motif_file, type=None):
        """Parse one or more EMD motifs from a file.

        Args:
            motif_file: open file(-like) object containing one or more EMD
                motifs.
            type: unused; kept for interface compatibility.

        Returns:
            list[Motif]: one Motif per "Motif ..." section found.
        """
        # (An unused `ntpath.basename(motif_file.name)` computation was
        # removed; it also broke parsing of streams without a .name.)

        def get_section(line, section, order):
            # Classify a line: near-empty lines end a section ("stop"),
            # "Motif " headers start a new motif ("name"); anything else
            # continues the current section.
            if len(line) < 2:
                return "stop", 1
            if "Motif " in line[0:8]:
                return "name", 2
            return section, order

        def get_template():
            # Fresh accumulator for the lines of one motif.
            return {
                "start": [],
                "stop": [],
                "name": []
            }

        motifs = []
        section = "start"
        order = 0
        t_motif = get_template()
        for line in motif_file:
            clean_line = line.strip()
            section, order_new = get_section(line, section, order)
            # A drop in the section order means the previous motif block
            # ended; convert it and start accumulating a new one.
            if order_new < order:
                motifs.append(self._parse_motif(t_motif))
                t_motif = get_template()
            order = order_new
            t_motif[section].append(clean_line)
        motifs.append(self._parse_motif(t_motif))
        return list(filter(None, motifs))

    def _parse_motif(self, t_motif):
        """Convert one accumulated motif section into a Motif object."""
        if not t_motif["name"]:
            # No motif header collected (e.g. trailing blank lines at the
            # end of the file); parse() filters these None entries out.
            # Previously this raised IndexError on pop(0).
            return None
        name = t_motif["name"].pop(0)
        sequences = []
        for row in t_motif["name"]:
            row_values = row.split()
            # Site lines have exactly 4 whitespace-separated fields; the
            # first one is the aligned sequence.
            if len(row_values) == 4:
                sequences.append(row_values[0])
        counters, _ = sequence2pwm(sequences)
        motif = Motif(identifier=name, counters=counters)
        consensus = pwm2consensus(motif.get_PPM())
        motif.set_number_of_sites(len(sequences))
        motif.set_alternate_name(consensus)
        return motif
| 21.60241 | 71 | 0.693809 | 1,623 | 0.905187 | 0 | 0 | 0 | 0 | 0 | 0 | 355 | 0.197992 |
1a46c7e94441079b15b76f478077729aeb904fff | 1,915 | py | Python | plaso/formatters/manager.py | pyllyukko/plaso | 7533db2d1035ca71d264d6281ebd5db2d073c587 | [
"Apache-2.0"
] | 1,253 | 2015-01-02T13:58:02.000Z | 2022-03-31T08:43:39.000Z | plaso/formatters/manager.py | pyllyukko/plaso | 7533db2d1035ca71d264d6281ebd5db2d073c587 | [
"Apache-2.0"
] | 3,388 | 2015-01-02T11:17:58.000Z | 2022-03-30T10:21:45.000Z | plaso/formatters/manager.py | pyllyukko/plaso | 7533db2d1035ca71d264d6281ebd5db2d073c587 | [
"Apache-2.0"
] | 376 | 2015-01-20T07:04:54.000Z | 2022-03-04T23:53:00.000Z | # -*- coding: utf-8 -*-
"""Manages custom event formatter helpers."""
class FormattersManager(object):
  """Custom event formatter helpers manager."""

  # Registry of helper instances, keyed by lower-case identifier.
  _custom_formatter_helpers = {}

  @classmethod
  def GetEventFormatterHelper(cls, identifier):
    """Retrieves a custom event formatter helper.

    Args:
      identifier (str): identifier (case-insensitive).

    Returns:
      CustomEventFormatterHelper: custom event formatter or None if not
          available.
    """
    return cls._custom_formatter_helpers.get(identifier.lower())

  @classmethod
  def RegisterEventFormatterHelper(cls, formatter_helper_class):
    """Registers a custom event formatter helper.

    The custom event formatter helpers are identified based on their lower
    case identifier.

    Args:
      formatter_helper_class (type): class of the custom event formatter
          helper.

    Raises:
      KeyError: if a custom formatter helper is already set for the
          corresponding identifier.
    """
    identifier = formatter_helper_class.IDENTIFIER.lower()
    if identifier in cls._custom_formatter_helpers:
      raise KeyError((
          'Custom event formatter helper already set for identifier: '
          '{0:s}.').format(formatter_helper_class.IDENTIFIER))

    cls._custom_formatter_helpers[identifier] = formatter_helper_class()

  @classmethod
  def RegisterEventFormatterHelpers(cls, formatter_helper_classes):
    """Registers custom event formatter helpers.

    Args:
      formatter_helper_classes (list[type]): classes of the custom event
          formatter helpers.

    Raises:
      KeyError: if a custom formatter helper is already set for the
          corresponding data type.
    """
    for helper_class in formatter_helper_classes:
      cls.RegisterEventFormatterHelper(helper_class)
| 30.887097 | 80 | 0.723238 | 1,842 | 0.96188 | 0 | 0 | 1,716 | 0.896084 | 0 | 0 | 1,107 | 0.578068 |
1a47e16bbc1c9a27bb44a250b95b6dc46f70cbad | 2,814 | py | Python | scvae/analyses/metrics/summary.py | chgroenbech/deep-learning-for-single-cell-transcriptomics | d6148efabfb12eda8bd1b895e1bb72f592e39ab0 | [
"Apache-2.0"
] | 46 | 2019-06-05T14:17:12.000Z | 2022-02-02T22:15:52.000Z | scvae/analyses/metrics/summary.py | chgroenbech/deep-learning-for-single-cell-transcriptomics | d6148efabfb12eda8bd1b895e1bb72f592e39ab0 | [
"Apache-2.0"
] | 12 | 2019-07-17T05:24:15.000Z | 2021-08-17T23:02:06.000Z | scvae/analyses/metrics/summary.py | chgroenbech/deep-learning-for-single-cell-transcriptomics | d6148efabfb12eda8bd1b895e1bb72f592e39ab0 | [
"Apache-2.0"
] | 13 | 2017-03-03T02:56:20.000Z | 2019-04-17T18:13:42.000Z | # ======================================================================== #
#
# Copyright (c) 2017 - 2020 scVAE authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ======================================================================== #
import numpy
from scvae.data.sparse import sparsity
from scvae.data.utilities import standard_deviation
# Above this element count, statistics are computed in batches (see
# `summary_statistics`, which switches to batch_size=1000) to bound memory
# use.
MAXIMUM_NUMBER_OF_VALUES_FOR_NORMAL_STATISTICS_COMPUTATION = 5e8
def summary_statistics(x, name="", tolerance=1e-3, skip_sparsity=False):
    """Compute basic summary statistics for an array-like data set.

    Very large inputs are processed in batches to limit memory use. The
    returned dictionary has the keys expected by
    ``format_summary_statistics``.
    """
    # Fall back to batched computation for very large inputs.
    batch_size = (
        1000
        if x.size > MAXIMUM_NUMBER_OF_VALUES_FOR_NORMAL_STATISTICS_COMPUTATION
        else None
    )

    mean_value = x.mean()
    std_value = standard_deviation(x, ddof=1, batch_size=batch_size)

    if skip_sparsity:
        sparsity_value = numpy.nan
    else:
        sparsity_value = sparsity(x, tolerance=tolerance, batch_size=batch_size)

    return {
        "name": name,
        "mean": mean_value,
        "standard deviation": std_value,
        "minimum": x.min(),
        "maximum": x.max(),
        # Index-of-dispersion: variance relative to the mean.
        "dispersion": std_value**2 / mean_value,
        "sparsity": sparsity_value
    }
def format_summary_statistics(statistics_sets, name="Data set"):
    """Render summary-statistics dictionaries as an aligned text table.

    ``statistics_sets`` may be a single dictionary (as produced by
    ``summary_statistics``) or a list of them; the first column is padded
    to fit the longest name.
    """
    if not isinstance(statistics_sets, list):
        statistics_sets = [statistics_sets]

    # Width of the first column: the header label or the longest set name.
    name_width = max(
        [len(name)]
        + [len(statistics_set["name"]) for statistics_set in statistics_sets]
    )

    rows = ["  ".join([
        "{:{}}".format(name, name_width),
        " mean ", "std. dev. ", "dispersion",
        " minimum ", " maximum ", "sparsity"
    ])]

    for statistics_set in statistics_sets:
        rows.append("  ".join([
            "{:{}}".format(statistics_set["name"], name_width),
            "{:<9.5g}".format(statistics_set["mean"]),
            "{:<9.5g}".format(statistics_set["standard deviation"]),
            "{:<9.5g}".format(statistics_set["dispersion"]),
            "{:<11.5g}".format(statistics_set["minimum"]),
            "{:<11.5g}".format(statistics_set["maximum"]),
            "{:<7.5g}".format(statistics_set["sparsity"]),
        ]))

    return "\n".join(rows)
| 29.93617 | 77 | 0.613717 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,042 | 0.370291 |
1a48ff01cabe164e72d8f1b64353d48a4be49d51 | 5,137 | py | Python | template.py | gtback/kuberwatcher | 697b99d47b764dc59f8a46a2077693d25561a106 | [
"Apache-2.0"
] | 23 | 2018-06-29T09:12:40.000Z | 2021-12-02T23:32:55.000Z | template.py | gtback/kuberwatcher | 697b99d47b764dc59f8a46a2077693d25561a106 | [
"Apache-2.0"
] | 8 | 2018-07-12T10:02:23.000Z | 2022-01-11T10:36:00.000Z | template.py | gtback/kuberwatcher | 697b99d47b764dc59f8a46a2077693d25561a106 | [
"Apache-2.0"
] | 6 | 2018-07-17T14:07:18.000Z | 2021-11-16T23:34:46.000Z | template_open = '{{#ctx.payload.aggregations.result.hits.hits.0._source}}'
template_close = template_open.replace('{{#','{{/')
kibana_url = (
"{{ctx.metadata.kibana_url}}/app/kibana#/discover?"
"_a=(columns:!(_source),filters:!(('$state':(store:appState),meta:(alias:!n,disabled:!f,"
"index:'metricbeat-*',key:query,negate:!f,type:custom,value:''),"
"query:(bool:(must:!((regexp:(kubernetes.pod.name:'{{ctx.metadata.regex}}')),"
"(match:(metricset.name:'state_pod')),"
"(match:(kubernetes.namespace:{{ctx.metadata.namespace}}))))))),"
"index:'metricbeat-*',"
"interval:auto,query:(language:lucene,query:''),"
"regexp:(language:lucene,query:'kubernetes.pod.name:test-nginx-%5B%5E-%5D%20-%5B%5E-%5D%20'),"
"sort:!('@timestamp',desc),time:(from:now%2FM,mode:quick,to:now%2FM))"
"&_g=(refreshInterval:(display:Off,pause:!f,value:0),"
"time:(from:now-15m,mode:quick,to:now))"
)
watch_url = "{{ctx.metadata.kibana_url}}/app/management/insightsAndAlerting/watcher/watches/watch/{{ctx.metadata.name}}/status"
slack_alert_template = "{template_open}*<{kibana_url}|{{{{ctx.metadata.name}}}}>* has `{{{{ctx.payload.aggregations.pods.value}}}}` not ready pod(s) <{watch_url}|[ack]>{{{{#ctx.metadata.docs}}}} <{{{{.}}}}|[docs]>{{{{/ctx.metadata.docs}}}}{template_close}".format(**locals())
email_alert_template = "{template_open}<a href=\"{kibana_url}\">{{{{ctx.metadata.name}}}}</a> has {{{{ctx.payload.aggregations.pods.value}}}} not ready pod(s) <a href=\"{watch_url}\">[ack]</a>{{{{#ctx.metadata.docs}}}} <a href=\"{{{{.}}}}\">[docs]</a>{{{{/ctx.metadata.docs}}}}{template_close}".format(**locals())
# Elasticsearch Watcher watch body for per-pod "not ready" alerting.
# The empty metadata fields are presumably filled in per watch by the caller
# before the watch is uploaded — TODO confirm against the code that uses this.
k8s_template = {
    "metadata": {
        "name": "",
        "namespace": "",
        "regex": "",
        "kibana_url": "",
        "kibana_dashboard": "",
        "docs": "",
        "xpack" : {
            "type" : "json"
        },
    },
    "trigger": {
        "schedule": {
            "interval": ""
        }
    },
    "input": {
        "search": {
            "request": {
                "search_type": "query_then_fetch",
                "indices": [
                    "metricbeat-*"
                ],
                "rest_total_hits_as_int": True,
                "body": {
                    "aggs": {
                        # Latest raw document, used by the alert templates.
                        "result": {
                            "top_hits": {
                                "size": 1
                            }
                        },
                        # Number of distinct pods matched by the query.
                        "pods": {
                            "cardinality": {
                                "field": "kubernetes.pod.name"
                            }
                        },
                        # Pods with at least 12 matching samples in the window
                        # (threshold semantics — confirm against the watch query).
                        "not_ready": {
                            "terms": {
                                "field": "kubernetes.pod.name",
                                "min_doc_count": 12,
                                "size": 100
                            }
                        }
                    },
                    "query": {
                        "bool": {
                            # must/must_not are left empty here; presumably
                            # populated per watch by the caller.
                            "must_not": [],
                            "must": [],
                            "filter": [
                                {
                                    "range": {
                                        "@timestamp": {
                                            "gte": "now-{{ctx.metadata.window}}"
                                        }
                                    }
                                }
                            ]
                        }
                    }
                }
            }
        }
    },
    "condition": {},
    "actions": {
        # Both actions are throttled to fire at most once per 5 minutes.
        "email_admin": {
            "throttle_period_in_millis": 300000,
            "email": {
                "profile": "standard",
                "subject": "{{#ctx.payload.aggregations.result.hits.hits.0._source}}{{ctx.metadata.name}} has {{ctx.payload.aggregations.pods.value}} not ready pod(s){{/ctx.payload.aggregations.result.hits.hits.0._source}}",
                "body": {
                    "html": email_alert_template
                }
            }
        },
        "notify-slack": {
            "throttle_period_in_millis": 300000,
            "slack": {
                "message": {
                    "text": slack_alert_template
                }
            }
        }
    }
}
# Watchdog watch: alerts when NO state_pod metricbeat documents arrived in the
# last 5 minutes (i.e. the metrics pipeline itself is down).
metricbeat_template = {
    "metadata": {
        "window": "300s",
        # (sic) "recieved" typo kept: this string is the alert subject text.
        "subject": "No metricbeat data has been recieved in the last 5 minutes!"
    },
    "trigger": {
        "schedule": {
            "interval": "60s"
        }
    },
    "input": {
        "search": {
            "request": {
                "search_type": "query_then_fetch",
                "indices": [
                    "metricbeat-*"
                ],
                "rest_total_hits_as_int": True,
                "body": {
                    "query": {
                        "bool": {
                            "must": [
                                {
                                    "match": {
                                        "metricset.name": "state_pod"
                                    }
                                }
                            ],
                            "filter": [
                                {
                                    "range": {
                                        "@timestamp": {
                                            "gte": "now-{{ctx.metadata.window}}"
                                        }
                                    }
                                }
                            ]
                        }
                    }
                }
            }
        }
    },
    # Fire only when the query matched zero documents in the window.
    "condition": {
        "compare": {
            "ctx.payload.hits.total": {
                "eq": 0
            }
        }
    },
    "actions": {
        # Both actions throttled to at most one alert per 5 minutes.
        "email_admin": {
            "throttle_period_in_millis": 300000,
            "email": {
                "profile": "standard",
                "subject": "{{ctx.metadata.subject}}",
                "body": {
                    "text": "{{ctx.metadata.message}}"
                }
            }
        },
        "notify-slack": {
            "throttle_period_in_millis": 300000,
            "slack": {
                "message": {
                    "text": "{{ctx.metadata.message}}"
                }
            }
        }
    }
}
| 28.538889 | 313 | 0.451041 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,879 | 0.560444 |
1a4b282b49de9a38d9c3f3b091630a0e74fa7af6 | 2,504 | py | Python | preset.py | IkhwanFikri1997/Exam-Schedule-Generation | 33b8a7faf714cd05552faa5b8ed2a3717eafbb72 | [
"MIT"
] | null | null | null | preset.py | IkhwanFikri1997/Exam-Schedule-Generation | 33b8a7faf714cd05552faa5b8ed2a3717eafbb72 | [
"MIT"
] | null | null | null | preset.py | IkhwanFikri1997/Exam-Schedule-Generation | 33b8a7faf714cd05552faa5b8ed2a3717eafbb72 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
ZetCode wxPython tutorial
In this example, we create a wx.ListBox widget.
author: Jan Bodnar
website: www.zetcode.com
last modified: July 2020
"""
import wx
class Example(wx.Frame):
    """Demo frame showing a wx.ListBox with New/Rename/Delete/Clear buttons."""

    def __init__(self, *args, **kw):
        super(Example, self).__init__(*args, **kw)
        self.InitUI()

    def InitUI(self):
        """Build the widget tree: a list box on the left, buttons on the right."""
        panel = wx.Panel(self)
        hbox = wx.BoxSizer(wx.HORIZONTAL)
        self.listbox = wx.ListBox(panel)
        # NOTE(review): wx.ID_ANY (-1) is passed as the sizer *proportion*
        # here (tutorial quirk) — confirm the intended stretch factor.
        hbox.Add(self.listbox, wx.ID_ANY, wx.EXPAND | wx.ALL, 20)
        btnPanel = wx.Panel(panel)
        vbox = wx.BoxSizer(wx.VERTICAL)
        newBtn = wx.Button(btnPanel, wx.ID_ANY, 'New', size=(90, 30))
        renBtn = wx.Button(btnPanel, wx.ID_ANY, 'Rename', size=(90, 30))
        delBtn = wx.Button(btnPanel, wx.ID_ANY, 'Delete', size=(90, 30))
        clrBtn = wx.Button(btnPanel, wx.ID_ANY, 'Clear', size=(90, 30))
        self.Bind(wx.EVT_BUTTON, self.NewItem, id=newBtn.GetId())
        self.Bind(wx.EVT_BUTTON, self.OnRename, id=renBtn.GetId())
        self.Bind(wx.EVT_BUTTON, self.OnDelete, id=delBtn.GetId())
        self.Bind(wx.EVT_BUTTON, self.OnClear, id=clrBtn.GetId())
        # Double-clicking an item also triggers a rename.
        self.Bind(wx.EVT_LISTBOX_DCLICK, self.OnRename)
        vbox.Add((-1, 20))
        vbox.Add(newBtn)
        vbox.Add(renBtn, 0, wx.TOP, 5)
        vbox.Add(delBtn, 0, wx.TOP, 5)
        vbox.Add(clrBtn, 0, wx.TOP, 5)
        btnPanel.SetSizer(vbox)
        hbox.Add(btnPanel, 0.6, wx.EXPAND | wx.RIGHT, 20)
        panel.SetSizer(hbox)
        self.SetTitle('wx.ListBox')
        self.Centre()

    def NewItem(self, event):
        """Prompt for a new item and append it (ignores empty input)."""
        text = wx.GetTextFromUser('Enter a new item', 'Insert dialog')
        if text != '':
            self.listbox.Append(text)

    def OnRename(self, event):
        """Prompt to rename the selected item, keeping its position."""
        sel = self.listbox.GetSelection()
        # BUGFIX: without this guard, pressing Rename (or double-clicking an
        # empty area) with no selection called GetString(-1) and crashed.
        # Mirrors the guard already present in OnDelete.
        if sel == -1:
            return
        text = self.listbox.GetString(sel)
        renamed = wx.GetTextFromUser('Rename item', 'Rename dialog', text)
        if renamed != '':
            self.listbox.Delete(sel)
            item_id = self.listbox.Insert(renamed, sel)
            self.listbox.SetSelection(item_id)

    def OnDelete(self, event):
        """Delete the selected item, if any."""
        sel = self.listbox.GetSelection()
        if sel != -1:
            self.listbox.Delete(sel)

    def OnClear(self, event):
        """Remove all items from the list box."""
        self.listbox.Clear()
def main():
    """Create the application object, show the demo frame and run the loop."""
    app = wx.App()
    frame = Example(None)
    frame.Show()
    app.MainLoop()


if __name__ == '__main__':
    main()
1a4b926977f46de6f98472226aaa5d3b9b4737c2 | 11,210 | py | Python | CrySPY/interface/QE/collect_qe.py | sgbaird/CrySPY | fc24b312c999aebf6a8a2a56c8d9f17357a277a9 | [
"MIT"
] | 57 | 2018-01-13T13:01:48.000Z | 2022-03-26T14:25:38.000Z | CrySPY/interface/QE/collect_qe.py | sgbaird/CrySPY | fc24b312c999aebf6a8a2a56c8d9f17357a277a9 | [
"MIT"
] | 9 | 2018-10-27T09:07:06.000Z | 2022-03-26T15:12:19.000Z | CrySPY/interface/QE/collect_qe.py | sgbaird/CrySPY | fc24b312c999aebf6a8a2a56c8d9f17357a277a9 | [
"MIT"
] | 30 | 2017-08-18T07:31:31.000Z | 2022-03-26T14:37:27.000Z | '''
Collect results in Quantum ESPRESSO
'''
import sys
import numpy as np
from pymatgen.core import Structure
from . import structure as qe_structure
from ... import utility
from ...IO import pkl_data
from ...IO import read_input as rin
def collect_qe(current_id, work_path):
    """Collect results of a finished Quantum ESPRESSO run for one structure.

    Reads rin.qe_outfile under `work_path` and returns a tuple
    (opt_struc, energy, magmom, check_opt):
      - opt_struc: final structure built by qe_structure (None on any failure)
      - energy: total energy converted to eV/atom (np.nan on failure)
      - magmom: "total magnetization" value (muB column) or np.nan if absent
      - check_opt: 'done' if 'End final coordinates' was found, 'not_yet'
        otherwise, 'no_file' if the output could not be read
    """
    # ---------- check optimization in previous stage
    try:
        with open(work_path+rin.qe_outfile, 'r') as fpout:
            lines = fpout.readlines()
        check_opt = 'not_yet'
        for line in lines:
            if 'End final coordinates' in line:
                check_opt = 'done'
    except Exception as e:
        print(e)
        check_opt = 'no_file'
    # ---------- obtain energy and magmom
    try:
        with open(work_path+rin.qe_outfile, 'r') as fpout:
            lines = fpout.readlines()
        energy = np.nan
        # Scan backwards: the last '!' line holds the final total energy.
        for line in reversed(lines):
            if line.startswith('!'):
                energy = float(line.split()[-2]) # in Ry
                energy = energy * utility.ry2ev / float(rin.natot) # Ry/cell --> eV/atom
                break
        magmom = np.nan # implemented by H. Sawahata 2020/10/04
        for line in reversed(lines):
            if line.find("total magnetization") >= 0:
                muB = line.split()
                magmom = float(muB[3])
                break
    except Exception as e:
        energy = np.nan # error
        magmom = np.nan # error
        print(e)
        print(' Structure ID {0}, could not obtain energy from {1}'.format(
            current_id, rin.qe_outfile))
    # ---------- collect the last structure
    try:
        # Fall back to the input file when the output lacks cell/coords
        # (e.g. fixed-cell runs never print CELL_PARAMETERS).
        lines_cell = qe_structure.extract_cell_parameters(
            work_path+rin.qe_outfile)
        if lines_cell is None:
            lines_cell = qe_structure.extract_cell_parameters(
                work_path+rin.qe_infile)
        lines_atom = qe_structure.extract_atomic_positions(
            work_path+rin.qe_outfile)
        if lines_atom is None:
            lines_atom = qe_structure.extract_atomic_positions(
                work_path+rin.qe_infile)
        opt_struc = qe_structure.from_lines(lines_cell, lines_atom)
        # ------ opt_qe-structure: append this structure to the running log
        with open('./data/opt_qe-structure', 'a') as fstruc:
            fstruc.write('# ID {0:d}\n'.format(current_id))
        qe_structure.write(opt_struc, './data/opt_qe-structure', mode='a')
    except Exception as e:
        print(e)
        opt_struc = None
    # ---------- check: a structure without an energy (or vice versa) is
    # treated as a failed run so callers see a consistent (None, nan) pair.
    if np.isnan(energy):
        opt_struc = None
    if opt_struc is None:
        energy = np.nan
        magmom = np.nan
    # ---------- return
    return opt_struc, energy, magmom, check_opt
def get_energy_step_qe(energy_step_data, current_id, work_path):
    '''
    Get per-SCF-step energies in eV/atom from a QE output file and append
    them (as one stage) to energy_step_data, which is also persisted.

    energy_step_data[ID][stage][step]
    energy_step_data[ID][0] <-- stage 1
    energy_step_data[ID][1] <-- stage 2
    '''
    try:
        # ---------- read output file
        with open(work_path+rin.qe_outfile, 'r') as f:
            lines = f.readlines()
        # ---------- get energy step: every '!' line is one converged step
        energy_step = []
        final_flag = False # End final coordinates
        vc_flag = False # vc-relax
        for line in lines:
            if line.startswith('!'):
                energy_step.append(line.split()[4])
            # ------ check opt and vc-relax
            if 'End final coordinates' in line:
                final_flag = True
            if 'CELL_PARAMETERS' in line:
                vc_flag = True
        # ------ delete last energy (after End final coordinates):
        # vc-relax repeats the final SCF once more, which would duplicate it
        if final_flag and vc_flag:
            energy_step.pop(-1)
        # ------ list --> array, Ry/cell --> eV/atom
        if not energy_step:
            energy_step = None # if empty
            print('#### ID: {0}: failed to parse energy_step\n'.format(
                current_id), file=sys.stderr)
        else:
            energy_step = utility.ry2ev / rin.natot * np.array(energy_step,
                                                               dtype='float')
    except Exception as e:
        energy_step = None
        print(e, '#### ID: {0}: failed to parse energy_step\n'.format(
            current_id), file=sys.stderr)
    # ---------- append energy_step (one entry per stage, None on failure)
    if energy_step_data.get(current_id) is None:
        energy_step_data[current_id] = [] # initialize
    energy_step_data[current_id].append(energy_step)
    # ---------- save energy_step_data
    pkl_data.save_energy_step(energy_step_data)
    # ---------- return
    return energy_step_data
def get_struc_step_qe(struc_step_data, current_id, work_path):
    '''
    Get per-optimization-step structures and append them (as one stage)
    to struc_step_data, which is also persisted.

    # ---------- args
    struc_step_data: (dict) the key is structure ID

    struc_step_data[ID][stage][step]
    struc_step_data[ID][0] <-- stage 1
    struc_step_data[ID][1] <-- stage 2
    '''
    try:
        struc_step = []
        # ------ init struc from pwscf.in
        _extract_struc_qe(work_path+rin.qe_infile, struc_step)
        # ------ struc step from pwscf.out
        _extract_struc_qe(work_path+rin.qe_outfile, struc_step)
        # ------ delete last structure due to duplication
        # (QE repeats the final geometry after 'End final coordinates')
        struc_step.pop(-1)
    except Exception as e:
        struc_step = None
        print(e ,'#### ID: {0}: failed to parse in struc_step\n'.format(
            current_id), file=sys.stderr)
    # ---------- append struc_step_data (None marks a failed parse)
    if struc_step_data.get(current_id) is None:
        struc_step_data[current_id] = [] # initialize
    struc_step_data[current_id].append(struc_step)
    # ---------- save struc_step_data
    pkl_data.save_struc_step(struc_step_data)
    # ---------- return
    return struc_step_data
def _extract_struc_qe(filename, struc_step):
    """Parse CELL_PARAMETERS / ATOMIC_POSITIONS sections from a QE file and
    append one pymatgen Structure per geometry to `struc_step` (in place).

    For fixed-cell runs (no CELL_PARAMETERS in the file) the lattice of the
    first structure already in `struc_step` is reused, so the input-file
    structure must be extracted before the output file.
    """
    # ---------- read a file
    with open(filename, 'r') as f:
        lines = f.readlines()
    # ---------- extract struc
    read_cell = False
    read_coords = False
    vc_flag = False # in case of vc-relax
    for line in lines:
        # ------ cell part: collect the 3 lattice rows after the header
        if read_cell:
            lattice.append(line.split())
            if len(lattice) == 3:
                read_cell = False
                lattice = np.array(lattice, dtype='float')
        if 'CELL_PARAMETERS' in line:
            read_cell = True
            vc_flag = True
            lattice = []
        # ------ coords part: collect rin.natot species/coordinate rows
        if read_coords:
            lsplit = line.split()
            species.append(lsplit[0])
            coords.append(lsplit[1:])
            if len(coords) == rin.natot:
                read_coords = False
                coords = np.array(coords, dtype='float')
                # ---- gen struc
                if not vc_flag: # empty lattice, use init lattice
                    lattice = struc_step[0].lattice
                struc = Structure(lattice, species, coords)
                struc_step.append(struc)
        if 'ATOMIC_POSITIONS' in line:
            read_coords = True
            species = []
            coords = []
def get_force_step_qe(force_step_data, current_id, work_path):
    '''
    Get per-step atomic forces in eV/angstrom and append them (as one
    stage) to force_step_data, which is also persisted.

    # ---------- args
    force_step_data: (dict) the key is structure ID

    force_step_data[ID][stage][step]
    force_step_data[ID][0] <-- stage 1
    force_step_data[ID][1] <-- stage 2
    '''
    try:
        # ---------- read output file
        with open(work_path+rin.qe_outfile, 'r') as f:
            lines = f.readlines()
        # ---------- get force step
        force_step = []
        read_force = False
        final_flag = False # End final coordinates
        vc_flag = False # in case of vc-relax
        for line in lines:
            # The first force row marks the start of one natot-row block.
            if 'atom 1 type 1 force' in line:
                read_force = True
                force = []
            if read_force:
                # Columns 7+ are the x/y/z force components (Ry/bohr).
                force.append(line.split()[6:])
                if len(force) == rin.natot:
                    read_force = False
                    force_step.append(utility.ry2ev / utility.bohr2ang * np.array(
                        force, dtype='float'))
            # ------ check opt and vc-relax
            if 'End final coordinates' in line:
                final_flag = True
            if 'CELL_PARAMETERS' in line:
                vc_flag = True
        # ------ delete last energy (after End final coordinates):
        # vc-relax repeats the final step, which would duplicate it
        if final_flag and vc_flag:
            force_step.pop(-1)
        # ------ if empty
        if len(force_step) == 0:
            force_step = None
            print('#### ID: {0}: failed to parse force_step\n'.format(
                current_id), file=sys.stderr)
    except Exception as e:
        force_step = None
        print(e, '#### ID: {0}: failed to parse in force_step\n'.format(
            current_id), file=sys.stderr)
    # ---------- append force_step
    if force_step_data.get(current_id) is None:
        force_step_data[current_id] = [] # initialize
    force_step_data[current_id].append(force_step)
    # ---------- save force_step_data
    pkl_data.save_force_step(force_step_data)
    # ---------- return
    return force_step_data
def get_stress_step_qe(stress_step_data, current_id, work_path):
    '''
    Get per-step stress tensors in eV/ang**3 and append them (as one
    stage) to stress_step_data, which is also persisted.

    # ---------- args
    stress_step_data: (dict) the key is structure ID

    stress_step_data[ID][stage][step]
    stress_step_data[ID][0] <-- stage 1
    stress_step_data[ID][1] <-- stage 2
    '''
    try:
        # ---------- read output file
        with open(work_path+rin.qe_outfile, 'r') as f:
            lines = f.readlines()
        # ---------- get stress step
        stress_step = []
        read_stress = False
        final_flag = False # End final coordinates
        vc_flag = False # in case of vc-relax
        for line in lines:
            if read_stress:
                # Columns 4-6 are the kbar representation of one tensor row.
                stress.append(line.split()[3:])
                if len(stress) == 3:
                    read_stress = False
                    stress_step.append(utility.kbar2ev_ang3 * np.array(
                        stress, dtype='float'))
            # A 3x3 stress block follows this header line.
            if 'total stress (Ry/bohr**3)' in line:
                read_stress = True
                stress = []
            # ------ check opt and vc-relax
            if 'End final coordinates' in line:
                final_flag = True
            if 'CELL_PARAMETERS' in line:
                vc_flag = True
        # ------ delete last energy (after End final coordinates):
        # vc-relax repeats the final step, which would duplicate it
        if final_flag and vc_flag:
            stress_step.pop(-1)
        # ------ if empty
        if len(stress_step) == 0:
            stress_step = None
            print('#### ID: {0}: failed to parse stress_step\n'.format(
                current_id), file=sys.stderr)
    except Exception as e:
        stress_step = None
        print(e, '#### ID: {0}: failed to parse in stress_step\n'.format(
            current_id), file=sys.stderr)
    # ---------- append stress_step
    if stress_step_data.get(current_id) is None:
        stress_step_data[current_id] = [] # initialize
    stress_step_data[current_id].append(stress_step)
    # ---------- save stress_step_data
    pkl_data.save_stress_step(stress_step_data)
    # ---------- return
    return stress_step_data
| 34.072948 | 91 | 0.550937 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,280 | 0.292596 |
1a4c10e57be9c1ea98e7ba154fb2b42115d10876 | 778 | py | Python | jobs/scripts/index_jobs.py | soheltarir/django-es-test | 2cdce24fb0288f16f2526b38d139359dbe8472f5 | [
"MIT"
] | 5 | 2019-12-01T13:06:55.000Z | 2020-01-22T04:21:54.000Z | jobs/scripts/index_jobs.py | soheltarir/django-es-test | 2cdce24fb0288f16f2526b38d139359dbe8472f5 | [
"MIT"
] | 2 | 2020-06-06T00:15:11.000Z | 2022-02-10T11:24:31.000Z | jobs/scripts/index_jobs.py | soheltarir/django-elastic-postgres | 2cdce24fb0288f16f2526b38d139359dbe8472f5 | [
"MIT"
] | null | null | null | import json
from django.db import connection
from elasticsearch import Elasticsearch
from jobs.models import Job
# Module-level Elasticsearch client used by run(); expects a node on localhost:9200.
es_client = Elasticsearch('http://localhost:9200')
def run():
    """Create the 'jobs' index, apply its mapping, and index every Job row."""
    # Create the target index before pushing any documents.
    es_client.indices.create(index='jobs')
    # Apply the document mapping from the bundled JSON schema.
    with open("jobs/job.json", "r") as schema_file:
        mapping = json.load(schema_file)
    es_client.indices.put_mapping(index='jobs', doc_type='job', body=mapping)
    # Serialize each job through the get_job_data() SQL function and index it.
    cursor = connection.cursor()
    for pk in Job.objects.values_list('id', flat=True):
        cursor.execute("SELECT get_job_data({});".format(pk))
        row = cursor.fetchone()
        es_client.index(index='jobs', doc_type='job', body=row[0])
        print("Indexed job {}".format(pk))
| 27.785714 | 87 | 0.673522 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 158 | 0.203085 |
1a4cc0f2953e4278fff2eee0c3cc81e9ff8d35c1 | 2,543 | py | Python | dealconvert/formats/bri.py | michzimny/deal-convert | 76179bc58e50cd2e4d61ad782b4e65ae27f3173b | [
"BSD-2-Clause"
] | null | null | null | dealconvert/formats/bri.py | michzimny/deal-convert | 76179bc58e50cd2e4d61ad782b4e65ae27f3173b | [
"BSD-2-Clause"
] | null | null | null | dealconvert/formats/bri.py | michzimny/deal-convert | 76179bc58e50cd2e4d61ad782b4e65ae27f3173b | [
"BSD-2-Clause"
] | null | null | null | import warnings
from . import DealFormat
from .. import dto
class BRIFormat(DealFormat):
    """Reader/writer for the fixed-width .bri deal format.

    Each deal occupies a 128-character record: 39 two-digit card numbers
    (13 cards for each of three hands — the fourth hand is derived via
    fill_west()), 32 spaces of padding and 18 NUL bytes. Board numbers are
    not stored; they are assumed to run consecutively from 1.
    """

    number_warning = '.bri file format assumes consequent deal numbers from 1'

    @property
    def suffix(self):
        """File extension handled by this format."""
        return '.bri'

    def parse_content(self, content):
        """Parse consecutive 128-byte records from `content` into Deal objects."""
        warnings.warn(self.number_warning)
        dealset = []
        number = 1
        while True:
            deal_str = content.read(128).strip()
            if len(deal_str) > 0:
                if len(deal_str) < 78:
                    # BUGFIX: was `warning.warn(...)`, which raised NameError
                    # (the module is imported as `warnings`).
                    warnings.warn('truncated .bri input: %s' % (deal_str))
                    break
                else:
                    deal_obj = dto.Deal()
                    deal_obj.number = number
                    deal_obj.dealer = deal_obj.get_dealer(number)
                    deal_obj.vulnerable = deal_obj.get_vulnerability(number)
                    deal_obj.hands = self.parse_hands(deal_str)
                    dealset.append(deal_obj)
                    number += 1
            else:
                break
        return dealset

    def parse_hands(self, deal_str):
        """Decode 39 two-digit card numbers (1-52) into three hands.

        Raises RuntimeError on non-numeric or out-of-range card values.
        """
        deal_obj = dto.Deal()
        try:
            deal = [int(deal_str[i*2:(i+1)*2], 10) for i in range(0, 39)]
            if max(deal) > 52:
                raise RuntimeError(
                    'invalid card in .bri file: %d' % (max(deal)))
            for hand in range(0, 3):
                for card in deal[13*hand:13*(hand+1)]:
                    card = card - 1
                    # BUGFIX: floor division — plain `/` yields a float index
                    # under Python 3 (identical result under Python 2).
                    suit = card // 13
                    card = card % 13
                    deal_obj.hands[hand][suit].append(self.cards[card])
            deal_obj.fill_west()
        except ValueError:
            raise RuntimeError('invalid card in .bri file: %s' % (deal_str))
        return deal_obj.hands

    def output_content(self, out_file, dealset):
        """Write each deal as one 128-character record (78 digits + padding)."""
        warnings.warn(self.number_warning)
        for deal in dealset:
            deal_str = self.single_deal_output(deal)
            deal_str += ' ' * 32
            deal_str += chr(0) * 18
            out_file.write(deal_str)

    def single_deal_output(self, deal):
        """Encode the first three hands back into 78 digits (two per card)."""
        deal_str = ''
        for hand in deal.hands[0:3]:
            for i, suit in enumerate(hand):
                for card in suit:
                    try:
                        deal_str += '%02d' % (self.cards.index(card) + 13*i + 1)
                    except ValueError:
                        raise RuntimeError(
                            'invalid card character: %s in board %d' % (card, deal.number))
        return deal_str
| 35.319444 | 91 | 0.50177 | 2,480 | 0.975226 | 0 | 0 | 53 | 0.020842 | 0 | 0 | 202 | 0.079434 |
1a4dc0ed40e0f9a9a400f39481d67c44613f9b9c | 1,766 | py | Python | src/test_sudoku_solver.py | tillschallau/sudoku-solver | c1de723b5c61776b9194abd9b93faf56c7ed9039 | [
"MIT"
] | null | null | null | src/test_sudoku_solver.py | tillschallau/sudoku-solver | c1de723b5c61776b9194abd9b93faf56c7ed9039 | [
"MIT"
] | null | null | null | src/test_sudoku_solver.py | tillschallau/sudoku-solver | c1de723b5c61776b9194abd9b93faf56c7ed9039 | [
"MIT"
] | null | null | null | import src.sudoku_solver as sudoku_solver
from src.sudoku import Sudoku
# A fully and correctly solved 9x9 grid (rows listed top to bottom).
correct_sudoku = Sudoku([[9, 5, 7, 6, 1, 3, 2, 8, 4], [4, 8, 3, 2, 5, 7, 1, 9, 6], [6, 1, 2, 8, 4, 9, 5, 3, 7],
                         [1, 7, 8, 3, 6, 4, 9, 5, 2], [5, 2, 4, 9, 7, 1, 3, 6, 8], [3, 6, 9, 5, 2, 8, 7, 4, 1],
                         [8, 4, 5, 7, 9, 2, 6, 1, 3], [2, 9, 1, 4, 3, 6, 8, 7, 5], [7, 3, 6, 1, 8, 5, 4, 2, 9]])
# An unsolved puzzle; 0 marks an empty cell.
starting_sudoku = Sudoku([[0, 0, 0, 0, 0, 0, 2, 0, 0], [0, 8, 0, 0, 0, 7, 0, 9, 0], [6, 0, 2, 0, 0, 0, 5, 0, 0],
                          [0, 7, 0, 0, 6, 0, 0, 0, 0], [0, 0, 0, 9, 0, 1, 0, 0, 0], [0, 0, 0, 0, 2, 0, 0, 4, 0],
                          [0, 0, 5, 0, 0, 0, 6, 0, 3], [0, 9, 0, 4, 0, 0, 0, 7, 0], [0, 0, 6, 0, 0, 0, 0, 0, 0]])
# Same puzzle with the first row partially filled — presumably used to test
# "current cell" tracking in the solver (confirm against the test functions).
starting_sudoku_current_cell_test = Sudoku([[1, 3, 4, 5, 6, 7, 2, 0, 0], [0, 8, 0, 0, 0, 7, 0, 9, 0],
                                            [6, 0, 2, 0, 0, 0, 5, 0, 0],
                                            [0, 7, 0, 0, 6, 0, 0, 0, 0], [0, 0, 0, 9, 0, 1, 0, 0, 0],
                                            [0, 0, 0, 0, 2, 0, 0, 4, 0],
                                            [0, 0, 5, 0, 0, 0, 6, 0, 3], [0, 9, 0, 4, 0, 0, 0, 7, 0],
                                            [0, 0, 6, 0, 0, 0, 0, 0, 0]])
# Variant with the first two rows saturated with (invalid) 1s — presumably to
# force the "current cell" past the first rows; not a solvable grid.
starting_sudoku_current_cell_test2 = Sudoku([[1, 1, 1, 1, 1, 1, 2, 1, 1], [1, 8, 1, 1, 1, 7, 1, 9, 1],
                                             [6, 0, 2, 0, 0, 0, 5, 0, 0], [0, 7, 0, 0, 6, 0, 0, 0, 0],
                                             [0, 0, 0, 9, 0, 1, 0, 0, 0], [0, 0, 0, 0, 2, 0, 0, 4, 0],
                                             [0, 0, 5, 0, 0, 0, 6, 0, 3], [0, 9, 0, 4, 0, 0, 0, 7, 0],
                                             [0, 0, 6, 0, 0, 0, 0, 0, 0]])
| 73.583333 | 113 | 0.286523 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
1a4e4f913b585a4e44da7276a8b5f88fb26ad331 | 599 | py | Python | maquinaria/alquileres/serializers/alquileres.py | CFredy9/Maquinaria | 7b55d2e882702b62cdbb11f9684ab54d9b485199 | [
"MIT"
] | null | null | null | maquinaria/alquileres/serializers/alquileres.py | CFredy9/Maquinaria | 7b55d2e882702b62cdbb11f9684ab54d9b485199 | [
"MIT"
] | null | null | null | maquinaria/alquileres/serializers/alquileres.py | CFredy9/Maquinaria | 7b55d2e882702b62cdbb11f9684ab54d9b485199 | [
"MIT"
] | null | null | null | """Serializers Alquileres"""
#Django REST Framework
from rest_framework import serializers
#Model
from maquinaria.alquileres.models import Alquiler
from maquinaria.maquinas.models import Maquina
class AlquilerModelSerializer(serializers.ModelSerializer):
    """Model serializer for Alquiler (rental) records."""

    class Meta:
        """Serializer configuration: model and exposed fields."""
        model = Alquiler
        fields = (
            'id', 'cliente',
            'maquina', 'fecha_inicio',
            'fecha_final', 'precio_alquiler'
        )
class Update(serializers.Serializer):
    """Serializer whose save() marks a machine as unavailable (estado=False)."""

    def save(self):
        # NOTE(review): the machine id is hardcoded to 1 and no serializer
        # data is used — looks like a placeholder; confirm the intended
        # machine should come from validated_data or the view.
        maquina=Maquina.objects.get(id=1)
        maquina.estado=False
        maquina.save()
| 17.617647 | 59 | 0.724541 | 384 | 0.641068 | 0 | 0 | 0 | 0 | 0 | 0 | 172 | 0.287145 |
1a4f33963cc653151cea3eb94ee867a8bc500078 | 660 | py | Python | test_RTC_DS1307.py | LeMaker/LeScratch | 0dde167925afe40cf63cf8ccba13321761494c25 | [
"Apache-2.0"
] | 4 | 2015-06-23T17:57:47.000Z | 2016-02-15T12:52:46.000Z | test_RTC_DS1307.py | LeMaker/LeScratch | 0dde167925afe40cf63cf8ccba13321761494c25 | [
"Apache-2.0"
] | 1 | 2021-08-18T03:17:45.000Z | 2021-08-18T03:17:45.000Z | test_RTC_DS1307.py | LeMaker/LeScratch | 0dde167925afe40cf63cf8ccba13321761494c25 | [
"Apache-2.0"
] | 4 | 2015-07-13T14:43:24.000Z | 2015-12-25T09:14:50.000Z | #!/usr/bin/env python
#
# Test RTC_DS1307
import sys
import time
import datetime
import RTC_DS1307
# Main Program (Python 2 script): sync the DS1307 RTC to system time, then
# periodically print both clocks for comparison.
print "Program Started at:"+ time.strftime("%Y-%m-%d %H:%M:%S")
# NOTE(review): `filename` is built but never used — looks like leftover from
# a logging feature; confirm before removing.
filename = time.strftime("%Y-%m-%d%H:%M:%SRTCTest") + ".txt"
starttime = datetime.datetime.utcnow()
# DS1307 on I2C bus 2 at the chip's standard address 0x68.
ds1307 = RTC_DS1307.RTC_DS1307(2, 0x68)
ds1307.write_now()
# Main Loop - prints values of all clocks
# NOTE(review): the original comment said "sleeps 10 minutes" but the loop
# sleeps 10 seconds; `deltatime` below is also computed but never printed.
while True:
    currenttime = datetime.datetime.utcnow()
    deltatime = currenttime - starttime
    print ""
    print "LeMaker Guitar=\t" + time.strftime("%Y-%m-%d %H:%M:%S")
    print "DS1307=\t\t%s" % ds1307.read_datetime()
    time.sleep(10.0)
| 18.333333 | 75 | 0.692424 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 254 | 0.384848 |
1a4fe40aa6eef969719ab20b34d1e9156144719c | 4,536 | py | Python | VS State and Virtual IP Info/avi_virtual_service_info.py | jagmeetsingh91/AviSDK-Scripts | 371c9dadc561efe5087e57beac8b24191d48834d | [
"Apache-2.0"
] | null | null | null | VS State and Virtual IP Info/avi_virtual_service_info.py | jagmeetsingh91/AviSDK-Scripts | 371c9dadc561efe5087e57beac8b24191d48834d | [
"Apache-2.0"
] | null | null | null | VS State and Virtual IP Info/avi_virtual_service_info.py | jagmeetsingh91/AviSDK-Scripts | 371c9dadc561efe5087e57beac8b24191d48834d | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#
# Created on Nov 14, 2017
# @author: aziz@avinetworks.com, jagmeet@avinetworks.com
#
# AVISDK based Script to get the status and configuration information of the Virtual Services
#
# Requires AVISDK ("pip install avisdk") and PrettyTable ("pip install PrettyTable")
# Usage:- python avi_virtual_service_info.py -c <Controller-IP> -u <user-name> -p <password>
# Note:- This script works for Avi Controler version 17.1.1 onwards
import json
import argparse
from avi.sdk.avi_api import ApiSession
from requests.packages import urllib3
from prettytable import PrettyTable
from prettytable import ALL as ALL
urllib3.disable_warnings()
def get_vs_list(api, api_version):
    """Return the UUIDs of all VirtualServices known to the controller."""
    response = api.get('virtualservice', api_version=api_version)
    return [vs['uuid'] for vs in response.json()['results']]
def get_vs_oper_info(api, api_version, vs_list):
    """Group per-VirtualService inventory records by operational state.

    Returns a dict mapping an oper state (e.g. "OPER_UP") to a list of
    summary dicts holding state/name/uuid plus numbered vip_N and dns_N
    entries copied from the inventory's config section.
    """
    state_to_vs = {}
    for uuid in vs_list:
        inventory = api.get('virtualservice-inventory/%s' % uuid,
                            api_version=api_version).json()
        state = inventory['runtime']['oper_status']['state']
        summary = {
            "state": state,
            "name": inventory['config']['name'],
            "uuid": inventory['config']['uuid'],
        }
        # Flatten the VIP and DNS lists into numbered keys (vip_1, dns_1, ...).
        for idx, vip in enumerate(inventory['config']['vip'], 1):
            summary["vip_" + str(idx)] = vip
        for idx, dns in enumerate(inventory['config']['dns_info'], 1):
            summary["dns_" + str(idx)] = dns
        state_to_vs.setdefault(state, []).append(summary)
    return state_to_vs
def main():
#Getting Required Args
parser = argparse.ArgumentParser(description="AVISDK based Script to get the status and configuration"+
" information of the Virtual Services")
parser.add_argument("-u", "--username", required=True, help="Login username")
parser.add_argument("-p", "--password", required=True, help="Login password")
parser.add_argument("-c", "--controller", required=True, help="Controller IP address")
parser.add_argument("-t", "--tenant", required=False, help="Tenant Name")
parser.add_argument("-a", "--api_version", required=False, help="Tenant Name")
args = parser.parse_args()
user = args.username
host = args.controller
password = args.password
if args.tenant:
tenant=args.tenant
else:
tenant="*"
if args.api_version:
api_version=args.api_version
else:
api_version="17.1.1"
#Getting API session for the intended Controller.
api = ApiSession.get_session(host, user, password, tenant=tenant, api_version=api_version)
#Getting the list of VirtualService(s).
vs_list = get_vs_list(api, api_version)
#Getting VS information
oper_dict = get_vs_oper_info(api, api_version, vs_list)
#print "Final Oper Dict:" + str(oper_dict)
for state, vs in oper_dict.iteritems():
print("VS in State:%s [%s]" % (state, len(vs)))
table = PrettyTable(hrules=ALL)
table.field_names = ["VS Name","VIP_ID", "VIP_Address", "DNS_INFO"]
for vss in vs:
vips = list()
dns_info = list()
vip_count = 0
dns_count = 0
if 'vip_1' in vss.keys():
vips = [value for key, value in vss.iteritems() if 'vip' in key.lower()]
vip_count = len(vips)
if 'dns_1' in vss.keys():
dns_info = [value for key, value in vss.iteritems() if 'dns' in key.lower()]
dns_count = len(dns_info)
vs_name = vss['name']
vip_ids = ''
vips_list = ''
dns_list = ''
for vip in vips:
vip_ids += vip['vip_id'] + "\n"
vips_list += vip['ip_address']['addr']
if vip.get('floating_ip', None):
vips_list += '- ' + vip['floating_ip']['addr']
vips_list+='\n'
for dns in dns_info:
dns_list += dns['fqdn'] + "\n"
table.add_row([vs_name, vip_ids[:-1], vips_list[:-1], dns_list[:-1]])
print table
print "\n"
if __name__ == "__main__":
main()
| 37.8 | 111 | 0.592813 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,372 | 0.302469 |
1a5078f614596e83a998507f278a0b9bd0a27b7f | 1,035 | py | Python | tomograph/transform.py | fkokosinski/tomograph | 4e988e37441efb94d7010e3f1e95aa8519a5a686 | [
"MIT"
] | 4 | 2019-06-22T22:33:52.000Z | 2021-04-21T09:17:26.000Z | tomograph/transform.py | fkokosinski/tomograph | 4e988e37441efb94d7010e3f1e95aa8519a5a686 | [
"MIT"
] | null | null | null | tomograph/transform.py | fkokosinski/tomograph | 4e988e37441efb94d7010e3f1e95aa8519a5a686 | [
"MIT"
] | null | null | null | import numpy as np
def projective(coords):
    """Append a homogeneous coordinate (w=1) to each 2D cartesian point."""
    point_count = np.shape(coords)[0]
    w_column = np.full((point_count, 1), 1)
    return np.append(coords, w_column, axis=1)
def cartesian(coords):
    """Drop the homogeneous coordinate, keeping the leading x/y columns."""
    xy_columns = coords[:, 0:2]
    return xy_columns
def translate(x, y):
    """Return the 3x3 homogeneous translation matrix for offsets (x, y)."""
    top_row = [1, 0, x]
    middle_row = [0, 1, y]
    bottom_row = [0, 0, 1]
    return np.array([top_row, middle_row, bottom_row])
def rotate(a):
    """Return the 3x3 homogeneous rotation matrix for angle `a` (radians)."""
    cos_a = np.cos(a)
    sin_a = np.sin(a)
    return np.array([
        [cos_a, -sin_a, 0],
        [sin_a, cos_a, 0],
        [0, 0, 1]
    ])
def transform_list(coords, matrix):
    """Apply one transformation matrix to a row-per-point coordinate array."""
    transformed_columns = matrix.dot(coords.T)
    return transformed_columns.T
def transform_apply(coords, transforms):
    """Lift coords to homogeneous form, apply each transform in order, and
    project the result back to cartesian coordinates."""
    homogeneous = projective(coords)
    for matrix in transforms:
        homogeneous = transform_list(homogeneous, matrix)
    return cartesian(homogeneous)
| 21.5625 | 70 | 0.593237 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 315 | 0.304348 |
1a50ac06cbf925305c38c7dc470aa1b2a01208f1 | 316 | py | Python | tvl_backends/tvl-backends-nvdec/tests/test_nvdec.py | ashwhall/tvl | 78fa8d2908d8eac8a032273d3142ab530cee1a33 | [
"Apache-2.0"
] | 21 | 2019-02-28T02:58:21.000Z | 2021-06-02T03:36:34.000Z | tvl_backends/tvl-backends-nvdec/tests/test_nvdec.py | ashwhall/tvl | 78fa8d2908d8eac8a032273d3142ab530cee1a33 | [
"Apache-2.0"
] | 25 | 2019-02-22T11:39:34.000Z | 2021-06-02T00:12:26.000Z | tvl_backends/tvl-backends-nvdec/tests/test_nvdec.py | ashwhall/tvl | 78fa8d2908d8eac8a032273d3142ab530cee1a33 | [
"Apache-2.0"
] | 6 | 2019-05-31T02:06:14.000Z | 2021-07-14T06:27:17.000Z | import torch
from tvl_backends.nvdec import nv12_to_rgb
def test_nv12_to_rgb():
    # 4K (UHD) frame dimensions.
    w = 3840
    h = 2160
    # NV12 stores 1.5 bytes per pixel (full-res luma + subsampled chroma);
    # the buffer lives on the first CUDA device, so this test needs a GPU.
    nv12 = torch.empty(int(w * h * 1.5), device='cuda:0', dtype=torch.uint8)
    # Repeat 100 times with fresh random data as a light stress test; each
    # conversion must yield a 3-channel HxW RGB tensor.
    for i in range(100):
        nv12.random_(0, 256)
        rgb = nv12_to_rgb(nv12, h, w)
        assert rgb.shape == (3, h, w)
| 22.571429 | 76 | 0.60443 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 | 0.025316 |
1a50bdf63d5b3a9d4ad0df4d1fa835d50376c8f8 | 3,289 | py | Python | agent/dxagent.py | Advanced-Observability/dxagent | 552c50f465e3389a2784d9a459308693ce29315a | [
"MIT"
] | 3 | 2021-11-13T16:41:00.000Z | 2022-02-01T12:04:39.000Z | agent/dxagent.py | Advanced-Observability/dxagent | 552c50f465e3389a2784d9a459308693ce29315a | [
"MIT"
] | null | null | null | agent/dxagent.py | Advanced-Observability/dxagent | 552c50f465e3389a2784d9a459308693ce29315a | [
"MIT"
] | 1 | 2021-06-19T13:45:16.000Z | 2021-06-19T13:45:16.000Z | """
dxagent.py
This file contains the core of dxagent
@author: K.Edeline
"""
import sched
import time
import signal
import importlib
from .constants import AGENT_INPUT_PERIOD
from .core.ios import IOManager
from .core.daemon import Daemon
from .input.sysinfo import SysInfo
from .input.bm_input import BMWatcher
from .input.vm_input import VMWatcher
from .input.vpp_input import VPPWatcher
from .assurance.health import HealthEngine
from .gnmi.exporter import DXAgentExporter
class DXAgent(Daemon, IOManager):
   """
   DXAgent

   Core agent daemon: periodically reads baremetal/VM/VPP inputs, runs the
   health engine over them, and publishes results to shared memory (for
   dxtop) and optionally to a gNMI exporter.
   """

   def __init__(self, parse_args=True):
      # Daemonize with a fixed pidfile/log location; input_rate drives how
      # often process() is rescheduled.
      Daemon.__init__(self, pidfile='/var/run/dxagent.pid',
                      stdout='/var/log/dxagent.log',
                      stderr='/var/log/dxagent.log',
                      name='dxagent',
                      input_rate=AGENT_INPUT_PERIOD)
      IOManager.__init__(self, child=self, parse_args=parse_args)
      self.load_ios()
      if not parse_args:
         return

   def _init(self):
      """Build runtime state: buffers, watchers, health engine, exporter."""
      self.sysinfo = SysInfo()
      self.scheduler = sched.scheduler()
      # ringbuffers are stored here
      self._data = {}
      # SharedMemory with dxtop.
      # Drop privileges to avoid dxtop root requirements
      # (import is deferred so the module is only loaded when shm is enabled)
      if not self.args.disable_shm:
         mod = importlib.import_module("agent.core.shareablebuffer")
         with self.drop():
            self.sbuffer = getattr(mod, "ShareableBuffer")(create=True)
      # watchers: each one fills self._data from its own input source.
      self.bm_watcher = BMWatcher(self._data, self.info, self)
      self.vm_watcher = VMWatcher(self._data, self.info, self)
      self.vpp_watcher = VPPWatcher(self._data, self.info, self)
      # health engine
      self.engine = HealthEngine(self._data, self.info, self)
      # exporter (gnmi_target presumably set by IOManager args — confirm)
      if self.gnmi_target:
         self.exporter = DXAgentExporter(self._data, self.info, self,
                                         target_url=self.gnmi_target)
         self.exporter.run()
      # catch signal for cleanup
      signal.signal(signal.SIGTERM, self.exit)

   def _input(self):
      """Poll all watchers once, updating self._data in place."""
      self.bm_watcher.input()
      self.vm_watcher.input()
      self.vpp_watcher.input()

   def process(self):
      """
      read input data, process and write it to shmem.
      re-schedule itself.

      """
      # fetch input
      self._input()
      # compute metrics&symptoms from input
      self.engine.update_health()
      # write to shmem ("stats" entries are only exported in verbose mode)
      if not self.args.disable_shm:
         skip=["stats"] if not self.args.verbose else []
         self.sbuffer.write(self._data, skip=skip, info=self.info)
      #self.info(list(self.exporter._iterate_data()))
      self.scheduler.enter(AGENT_INPUT_PERIOD,0,self.process)

   def exit(self, signum=None, stackframe=None):
      """
      cleanup before exiting (also installed as the SIGTERM handler)
      """
      self.running = False
      # give the run() loop one period to observe running=False
      time.sleep(AGENT_INPUT_PERIOD)
      self.bm_watcher.exit()
      self.vm_watcher.exit()
      self.vpp_watcher.exit()
      if not self.args.disable_shm:
         self.sbuffer.unlink()
         del self.sbuffer

   def run(self):
      """
      main function: initialize, kick off process(), and drive the scheduler
      until exit() clears self.running
      """
      self._init()
      self.running = True
      self.info(self.sysinfo)
      self.process()

      while self.running:
         self.scheduler.run(blocking=False)
         time.sleep(AGENT_INPUT_PERIOD)
| 25.897638 | 73 | 0.631803 | 2,802 | 0.851931 | 0 | 0 | 0 | 0 | 0 | 0 | 686 | 0.208574 |
1a5399afafee0730303e2ab23186b2faba652525 | 2,410 | py | Python | app/models.py | SFC-foundations/SFC-website | 59f001bac558c0fd9b3eb17e41cc054b4de59e04 | [
"MIT"
] | null | null | null | app/models.py | SFC-foundations/SFC-website | 59f001bac558c0fd9b3eb17e41cc054b4de59e04 | [
"MIT"
] | null | null | null | app/models.py | SFC-foundations/SFC-website | 59f001bac558c0fd9b3eb17e41cc054b4de59e04 | [
"MIT"
] | null | null | null | from django.db import models
from django.utils import timezone
import os
#BLOGS
class BlogPost(models.Model):
    """A blog article with a cover image, preview text and full body."""
    author = models.CharField(max_length=200)
    role = models.CharField(max_length=200)
    image = models.ImageField(upload_to='blogMedia/')
    title = models.CharField(max_length=200)
    displayText = models.TextField()
    body = models.TextField()
    date = models.DateTimeField(default=timezone.now)

    def delete(self, *args, **kwargs):
        """Remove the cover-image file from storage, then delete the row.

        Accepts and forwards Django's ``delete(using=None,
        keep_parents=False)`` arguments, which the previous
        zero-argument override silently broke.
        """
        # NOTE(review): the file is removed before the DB delete; if the
        # DB delete fails, the image is already gone.
        self.image.storage.delete(str(self.image))
        super().delete(*args, **kwargs)

    def __str__(self):
        return self.title + ' | ' + self.author
class BlogPostComment(models.Model):
    """A reader comment attached to a BlogPost; hidden until approved."""
    post = models.ForeignKey(BlogPost, related_name='comments', on_delete=models.CASCADE)
    author = models.CharField(max_length=200)
    comment = models.TextField()
    date = models.DateTimeField(auto_now_add=True)
    approved = models.BooleanField(default=False)

    def __str__(self):
        # "Post title | author: comment" for the admin list view.
        return '{} | {}: {}'.format(self.post.title, self.author, self.comment)
#News
class New(models.Model):
    """A news article sourced from an external publication."""
    # NOTE(review): upload_to lacks the trailing slash the blog model
    # uses ('blogMedia/'); left unchanged so existing file paths stay
    # stable.
    image = models.ImageField(upload_to='newsMedia')
    title = models.CharField(max_length=200)
    subtitle = models.CharField(max_length=200)
    source = models.CharField(max_length=500)
    date = models.DateTimeField(auto_now_add=True)
    body = models.TextField()

    def delete(self, *args, **kwargs):
        """Remove the stored image file, then delete the row.

        Accepts and forwards Django's ``delete(using=None,
        keep_parents=False)`` arguments, which the previous
        zero-argument override broke.
        """
        self.image.storage.delete(str(self.image))
        super().delete(*args, **kwargs)

    def __str__(self):
        return str(self.title) + ' | ' + str(self.date)
#EVENT
class Event(models.Model):
    """An event report with a banner image and up to four extra photos."""
    name = models.CharField(max_length=200)
    date = models.DateTimeField(auto_now_add=True)
    title = models.CharField(max_length=200)
    bannerimage = models.ImageField(upload_to='eventMedia')
    image1 = models.ImageField(upload_to='eventMedia', blank=True)
    image2 = models.ImageField(upload_to='eventMedia', blank=True)
    image3 = models.ImageField(upload_to='eventMedia', blank=True)
    image4 = models.ImageField(upload_to='eventMedia', blank=True)
    body = models.TextField()

    def delete(self, *args, **kwargs):
        """Delete every stored image file, then the row.

        Fixes two defects of the old override: it did not accept
        Django's ``delete(using, keep_parents)`` arguments, and it
        called ``storage.delete('')`` for blank optional images, which
        points the storage backend at the media root itself.
        """
        for image_field in (self.bannerimage, self.image1, self.image2,
                            self.image3, self.image4):
            if image_field:  # skip blank optional images
                image_field.storage.delete(str(image_field))
        super().delete(*args, **kwargs)

    def __str__(self):
        return str(self.name)
1a540e8d050b4c621923140a81a1cbce0129a5cb | 1,794 | py | Python | script/generate_default_repositories.py | peyanski/documentation | 548f91b4391c9ebc7a12a285ea667f64e961760f | [
"MIT"
] | null | null | null | script/generate_default_repositories.py | peyanski/documentation | 548f91b4391c9ebc7a12a285ea667f64e961760f | [
"MIT"
] | null | null | null | script/generate_default_repositories.py | peyanski/documentation | 548f91b4391c9ebc7a12a285ea667f64e961760f | [
"MIT"
] | 1 | 2021-08-15T02:57:02.000Z | 2021-08-15T02:57:02.000Z | import requests
import json
import os
from github import Github
BASE = """---
id: default_repositories
title: Default repositories
description: "Default repositories in HACS"
---
<!-- The content of this file is autogenerated during build with script/generate_default_repositories.py -->
"""
# Build the HACS "default repositories" markdown page: the curated lists
# fetched from hacs/default plus every repo of the three default orgs,
# minus the blacklist.
github = Github(os.environ['TOKEN'])
integration_org = github.get_organization("custom-components")
plugin_org = github.get_organization("custom-cards")
theme_org = github.get_organization("home-assistant-community-themes")

# Blacklisted repositories are excluded from every category below.
blacklist = requests.get('https://raw.githubusercontent.com/hacs/default/master/blacklist')
blacklist = json.loads(blacklist.text.lower())

for category in ["integration", "plugin", "appdaemon", "python_script", "theme"]:
    response = requests.get(f'https://raw.githubusercontent.com/hacs/default/master/{category}')
    repos = json.loads(response.text.lower())
    if category == "integration":
        for repo in list(integration_org.get_repos()):
            repos.append(repo.full_name.lower())
    elif category == "plugin":
        for repo in list(plugin_org.get_repos()):
            repos.append(repo.full_name.lower())
    elif category == "theme":
        for repo in list(theme_org.get_repos()):
            repos.append(repo.full_name.lower())
    # BUG FIX: the old code called repos.remove() while iterating repos,
    # which skips the element that follows every removal, so some
    # blacklisted repositories survived. Rebuild the list instead.
    repos = [repo for repo in repos if repo not in blacklist]
    title = category.replace("_", " ").title() + 's' if category != 'appdaemon' else 'AppDaemon Apps'
    BASE += f"\n## {title}\n\n"
    BASE += f"_{len(repos)} Repositories in total._\n\n"
    for repo in sorted(repos):
        BASE += f"<p className='defaultrepo'><a href='https://github.com/{repo}' target='_blank'>{repo}</a></p>\n"

with open("documentation/default_repositories.md", "w") as mdfile:
    mdfile.write(BASE)
1a5498f81765fb1eac207c52c6344cd3eedbeb35 | 164 | py | Python | jp.atcoder/abc005/abc005_2/26220615.py | kagemeka/atcoder-submissions | 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | [
"MIT"
] | 1 | 2022-02-09T03:06:25.000Z | 2022-02-09T03:06:25.000Z | jp.atcoder/abc005/abc005_2/26220615.py | kagemeka/atcoder-submissions | 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | [
"MIT"
] | 1 | 2022-02-05T22:53:18.000Z | 2022-02-09T01:29:30.000Z | jp.atcoder/abc005/abc005_2/26220615.py | kagemeka/atcoder-submissions | 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | [
"MIT"
] | null | null | null | import sys
import typing
def main() -> None:
    """Read an integer count, then the integers themselves from stdin,
    and print the smallest one.

    The return annotation used to be typing.NoReturn, which is wrong:
    NoReturn means "never returns normally", but this function does.
    """
    n = int(input())  # element count; not otherwise needed below
    t = list(map(int, sys.stdin.read().split()))
    print(min(t))


main()
| 13.666667 | 47 | 0.536585 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
1a54b28acedd9ff633d1db4868301520a6ba9dcb | 748 | py | Python | List Events.py | hcaushi/higgs-hunters | f433a71ab01470fb6e72ebd8b69e697e77ae3c94 | [
"MIT"
] | null | null | null | List Events.py | hcaushi/higgs-hunters | f433a71ab01470fb6e72ebd8b69e697e77ae3c94 | [
"MIT"
] | null | null | null | List Events.py | hcaushi/higgs-hunters | f433a71ab01470fb6e72ebd8b69e697e77ae3c94 | [
"MIT"
] | null | null | null | import csv
import sys
#This program was written in Python 3.6.3 by Henry Caushi. You are free to use it for any reason, without my permission, without having to inform myself or anyone else
#This program was was written to aid other programs, by providing a list of all event IDs so that they appear only once
# List of all event IDs, each appearing exactly once (input order kept).
list_ids = []
filename = "Higgs_Hunters_data_ALL.csv"

# Read the data file and collect distinct event IDs from column 3.
# BUG FIX: the original line had a syntax error -- open(filename+,"r").
with open(filename, "r") as f:
    reader = csv.reader(f)
    seen = set()  # O(1) membership instead of scanning list_ids per row
    for row in reader:
        if row[3] not in seen:
            seen.add(row[3])
            list_ids.append(row[3])

# Open a new file, and dump the event IDs one per line.
with open("List IDs.txt", "w") as f:
    for event_id in list_ids:
        f.write(event_id + "\n")
| 27.703704 | 167 | 0.713904 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 487 | 0.65107 |
1a550f338065214a5625283d1ea0bc348f1499f6 | 268 | py | Python | custom_latex_cell_style/scenario2/ipython_nbconvert_config.py | isabella232/nbconvert-examples | 039724f4251cc8183f85534785fbee14809248ac | [
"BSD-3-Clause"
] | 120 | 2015-09-26T22:16:59.000Z | 2022-03-14T19:58:46.000Z | custom_latex_cell_style/scenario2/ipython_nbconvert_config.py | tarkantemizoz/nbconvert-examples | 039724f4251cc8183f85534785fbee14809248ac | [
"BSD-3-Clause"
] | 12 | 2015-09-23T19:52:38.000Z | 2021-08-04T23:30:37.000Z | custom_latex_cell_style/scenario2/ipython_nbconvert_config.py | tarkantemizoz/nbconvert-examples | 039724f4251cc8183f85534785fbee14809248ac | [
"BSD-3-Clause"
] | 82 | 2015-12-11T22:04:01.000Z | 2021-12-08T07:09:31.000Z | c = get_config()
#Export all the notebooks in the current directory to LaTeX, then
#post-process the result into a PDF (the old comment's "sphinx_howto"
#was stale -- the configured exporter below is 'latex').
c.NbConvertApp.notebooks = ['*.ipynb']  # glob: every notebook here
c.NbConvertApp.export_format = 'latex'
c.NbConvertApp.postprocessor_class = 'PDF'  # compile the .tex into a PDF
c.Exporter.template_file = 'custom_article.tplx'  # custom cell-style template
| 29.777778 | 78 | 0.779851 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 120 | 0.447761 |
1a577854087d8ed49d83c5bd1d4c0683315c68ab | 102 | py | Python | setup.py | puhoy/django-s3file | 6293e991feae77d7e18939770eecd264047bd7c3 | [
"MIT"
] | null | null | null | setup.py | puhoy/django-s3file | 6293e991feae77d7e18939770eecd264047bd7c3 | [
"MIT"
] | null | null | null | setup.py | puhoy/django-s3file | 6293e991feae77d7e18939770eecd264047bd7c3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from setuptools import setup
setup(name='django-s3file', use_scm_version=True)
| 20.4 | 49 | 0.784314 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 36 | 0.352941 |
1a57db3282ce89ea3a9210d6d6b1d1169726fdcf | 668 | py | Python | hard/python3/c0084_440_k-th-smallest-in-lexicographical-order/00_leetcode_0084.py | drunkwater/leetcode | 8cc4a07763e71efbaedb523015f0c1eff2927f60 | [
"Ruby"
] | null | null | null | hard/python3/c0084_440_k-th-smallest-in-lexicographical-order/00_leetcode_0084.py | drunkwater/leetcode | 8cc4a07763e71efbaedb523015f0c1eff2927f60 | [
"Ruby"
] | null | null | null | hard/python3/c0084_440_k-th-smallest-in-lexicographical-order/00_leetcode_0084.py | drunkwater/leetcode | 8cc4a07763e71efbaedb523015f0c1eff2927f60 | [
"Ruby"
] | 3 | 2018-02-09T02:46:48.000Z | 2021-02-20T08:32:03.000Z | # DRUNKWATER TEMPLATE(add description and prototypes)
# Question Title and Description on leetcode.com
# Function Declaration and Function Prototypes on leetcode.com
#440. K-th Smallest in Lexicographical Order
#Given integers n and k, find the lexicographically k-th smallest integer in the range from 1 to n.
#Note: 1 ≤ k ≤ n ≤ 109.
#Example:
#Input:
#n: 13 k: 2
#Output:
#10
#Explanation:
#The lexicographical order is [1, 10, 11, 12, 13, 2, 3, 4, 5, 6, 7, 8, 9], so the second smallest number is 10.
#class Solution:
# def findKthNumber(self, n, k):
# """
# :type n: int
# :type k: int
# :rtype: int
# """
# Time Is Money | 27.833333 | 111 | 0.657186 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 651 | 0.965875 |
1a587cc9f646db45b49c21cdf1f8d366ab98bdae | 1,055 | py | Python | 0015_3Sum.py | taro-masuda/leetcode | 39739e9fec7c66513b114c740ef982ccc09dc39f | [
"MIT"
] | null | null | null | 0015_3Sum.py | taro-masuda/leetcode | 39739e9fec7c66513b114c740ef982ccc09dc39f | [
"MIT"
] | null | null | null | 0015_3Sum.py | taro-masuda/leetcode | 39739e9fec7c66513b114c740ef982ccc09dc39f | [
"MIT"
] | 1 | 2020-03-18T05:23:40.000Z | 2020-03-18T05:23:40.000Z | class Solution:
def twoSum(self, nums: List[int], target: int) -> List[List[int]]:
complement = {}
out = []
for i,n in enumerate(nums):
complement[target-n] = i
for i,n in enumerate(nums):
idx = complement.get(n, None)
if idx != None and idx != i:
out.append([nums[idx], nums[i]])
return out
def threeSum(self, nums: List[int]) -> List[List[int]]:
if len(nums) < 3:
return []
nums.sort()
out = []
if set(nums) == {0}:
return [[0,0,0]]
i = 0
while len(nums) >= 3:
l_twosum = self.twoSum(nums[1:], -nums[0])
if l_twosum != None:
for l in l_twosum:
l.append(nums[0])
out.append(l)
nums.pop(0)
for i,l in enumerate(out):
out[i] = sorted(l)
out = list(map(list, set(map(tuple, out))))
return out
| 28.513514 | 70 | 0.422749 | 1,054 | 0.999052 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
1a58e9cd302698322742d260b397d2fbee2e8755 | 1,255 | py | Python | app/handlers.py | zjurelinac/Bitboard | 4e17d04dff92e304e45a5416d7c2f2d927974c52 | [
"MIT"
] | null | null | null | app/handlers.py | zjurelinac/Bitboard | 4e17d04dff92e304e45a5416d7c2f2d927974c52 | [
"MIT"
] | null | null | null | app/handlers.py | zjurelinac/Bitboard | 4e17d04dff92e304e45a5416d7c2f2d927974c52 | [
"MIT"
] | null | null | null | import logging
import traceback
import peewee
from flask import request
from east.exceptions import *
from app import app, db
class DummyLogger:
    """No-op stand-in for a real logger; swallows every call.

    Swap in the logging.getLogger line below to get real output.
    """

    def log(self, *args):
        pass

    def error(self, *args):
        pass


logger = DummyLogger()
# logger = logging.getLogger(__name__)
@app.errorhandler(BaseAPIException)
def handle_api_errors(e):
    """Turn any domain BaseAPIException into its canned HTTP response."""
    logger.error('API Exception <%s>:: %s', e.name, e.description)
    db.rollback()  # drop any half-finished transaction before responding
    return e.make_response()
@app.errorhandler(peewee.DoesNotExist)
def handle_peewee_doesnotexist(e):
    """Map a peewee DoesNotExist lookup failure to the API's not-found error."""
    # Lazy %-style args match the other handlers in this module and skip
    # formatting entirely when the log level is disabled.
    logger.error('DoesNotExist: %s', e)
    db.rollback()
    return DoesNotExistError(str(e)).make_response()
@app.errorhandler(404)
def handle_404_error(e):
    """Answer unknown routes with the API's structured 404 response."""
    logger.error(str(e))
    return APIRouteDoesNotExist().make_response()
@app.errorhandler(405)
def handle_405_error(e):
    """Answer unsupported HTTP methods with the API's structured 405 response."""
    logger.error(str(e))
    return APIMethodNotAllowed('Requested route does not support this method [%s].' % request.method).make_response()
@app.errorhandler(Exception)
def handle_generic_exception(e):
    """Last-resort handler: log the traceback, roll back, answer generically."""
    logger.error('Generic <%s>:: %s', e.__class__.__name__, e)
    logger.error(traceback.format_exc())
    db.rollback()
    return BaseAPIException(e.__class__.__name__, str(e)).make_response()
| 23.679245 | 117 | 0.718725 | 98 | 0.078088 | 0 | 0 | 929 | 0.740239 | 0 | 0 | 164 | 0.130677 |
1a5e0de647ef014672a59f0788a74144694185e3 | 28,438 | py | Python | adapter.py | JuliaChae/Waymo-Kitti-Adapter | 8b4ed7a183331169be72e0d147f289ef449728f1 | [
"MIT"
] | 4 | 2020-10-22T20:41:05.000Z | 2021-12-07T02:58:56.000Z | adapter.py | JuliaChae/Waymo-Kitti-Adapter | 8b4ed7a183331169be72e0d147f289ef449728f1 | [
"MIT"
] | null | null | null | adapter.py | JuliaChae/Waymo-Kitti-Adapter | 8b4ed7a183331169be72e0d147f289ef449728f1 | [
"MIT"
] | 1 | 2022-02-07T21:24:07.000Z | 2022-02-07T21:24:07.000Z | import argparse
import os
import math
# import time
import numpy as np
import cv2
import matplotlib.pyplot as plt
import tensorflow as tf
import progressbar
from waymo_open_dataset.utils import range_image_utils
from waymo_open_dataset.utils import transform_utils
from waymo_open_dataset.utils import test_utils
from waymo_open_dataset.utils import box_utils
from waymo_open_dataset import dataset_pb2 as open_dataset
from adapter_lib import *
import pdb
############################Config###########################################
# path to waymo dataset "folder" (all .tfrecord files in that folder will
# be converted)
DATA_PATH = '/media/trail/harddrive/datasets/Waymo/original/validation'
# path to save kitti dataset
KITTI_PATH = '/media/trail/harddrive/datasets/Waymo/waymo/validation'
# location filter, use this to convert your preferred location
LOCATION_FILTER = False
LOCATION_NAME = ['location_sf']
# max indexing length
INDEX_LENGTH = 15
# as name
IMAGE_FORMAT = 'png'
# do not change
LABEL_PATH = KITTI_PATH + '/label_0'
LABEL_ALL_PATH = KITTI_PATH + '/label_all'
IMAGE_PATH = KITTI_PATH + '/image_0'
CALIB_PATH = KITTI_PATH + '/calib'
LIDAR_PATH = KITTI_PATH + '/velodyne'
IMG_CALIB_PATH = KITTI_PATH + '/img_calib'
###############################################################################
class Adapter:
def __init__(self):
self.__lidar_list = ['_FRONT', '_FRONT_RIGHT',
'_FRONT_LEFT', '_SIDE_RIGHT', '_SIDE_LEFT']
self.__type_list = ['UNKNOWN', 'VEHICLE',
'PEDESTRIAN', 'SIGN', 'CYCLIST']
self.__file_names = []
self.T_front_cam_to_ref = []
self.T_vehicle_to_front_cam = []
    def cvt(self, args, folder, start_ind):
        """ convert dataset from Waymo to KITTI

        Walks every frame of every .tfrecord in DATA_PATH/<folder>, keeps
        one frame out of every args.keyframe, and writes image, calib,
        lidar, label and image-calib files numbered consecutively
        starting at start_ind.

        :param args: parsed CLI namespace (keyframe, camera_type, test)
        :param folder: sub-directory of DATA_PATH to convert
        :param start_ind: first output frame index to use
        :return: the next unused frame index (so callers can chain folders)
        """
        self.start_ind = start_ind
        self.get_file_names(DATA_PATH + '/' + folder)
        print("Converting ..." + folder)
        self.create_folder(args.camera_type)
        bar = progressbar.ProgressBar(maxval=len(self.__file_names) + 1,
                                      widgets=[progressbar.Percentage(), ' ',
                                               progressbar.Bar(
                                                   marker='>', left='[', right=']'), ' ',
                                               progressbar.ETA()])
        tf.enable_eager_execution()
        file_num = 1
        frame_num = 0                  # frames seen (drives keyframe sampling)
        frame_name = self.start_ind    # output index of the next saved frame
        label_exists = False
        print("start converting ...")
        bar.start()
        for file_idx, file_name in enumerate(self.__file_names):
            print('File {}/{}'.format(file_idx, len(self.__file_names)))
            dataset = tf.data.TFRecordDataset(file_name, compression_type='')
            for data in dataset:
                frame = open_dataset.Frame()
                frame.ParseFromString(bytearray(data.numpy()))
                if (frame_num % args.keyframe) == 0:
                    if LOCATION_FILTER == True and frame.context.stats.location not in LOCATION_NAME:
                        continue
                    # Probe pass: does this frame have any label for the
                    # requested camera? (check_label_exists=True writes only
                    # if labels exist.)
                    if args.test == False:
                        label_exists = self.save_label(frame, frame_name, args.camera_type, False, True)
                    # Normal mode skips frames without labels; in test mode
                    # label_exists is never refreshed (stays False), so no
                    # frame is skipped. Skipped frames still advance
                    # frame_num for keyframe sampling.
                    if args.test == label_exists:
                        frame_num += 1
                        continue
                    self.save_calib(frame, frame_name)
                    self.save_label(
                        frame, frame_name, args.camera_type)
                    self.save_image(frame, frame_name, args.camera_type)
                    self.save_lidar(frame, frame_name)
                    self.save_image_calib(frame, frame_name)
                    # print("image:{}\ncalib:{}\nlidar:{}\nlabel:{}\n".format(str(s1-e1),str(s2-e2),str(s3-e3),str(s4-e4)))
                    frame_name += 1
                frame_num += 1
            bar.update(file_num)
            file_num += 1
        bar.finish()
        print("\nfinished ...")
        return frame_name
    def save_image(self, frame, frame_num, cam_type):
        """ parse and save the images in png format
        :param frame: open dataset frame proto
        :param frame_num: the current frame number
        :param cam_type: camera index as a string ('0'..'4') or 'all'
        :return:
        """
        for img in frame.images:
            # Waymo camera names are 1-based; CLI camera indices are 0-based.
            if cam_type == 'all' or cam_type == str(img.name - 1):
                img_path = IMAGE_PATH + '/' + \
                    str(frame_num).zfill(INDEX_LENGTH) + '.' + IMAGE_FORMAT
                # cv2.imdecode returns BGR; the conversion below swaps the
                # channel order before saving through matplotlib (note:
                # COLOR_RGB2BGR and COLOR_BGR2RGB are the same constant).
                img = cv2.imdecode(np.frombuffer(
                    img.image, np.uint8), cv2.IMREAD_COLOR)
                rgb_img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
                plt.imsave(img_path, rgb_img, format=IMAGE_FORMAT)
def save_calib(self, frame, frame_num, kitti_format=True):
""" parse and save the calibration data
:param frame: open dataset frame proto
:param frame_num: the current frame number
:return:
"""
fp_calib = open(CALIB_PATH + '/' +
str(frame_num).zfill(INDEX_LENGTH) + '.txt', 'w+')
self.T_front_cam_to_ref = np.array([
[0.0, -1.0, 0.0],
[0.0, 0.0, -1.0],
[1.0, 0.0, 0.0]
])
camera_calib = []
R0_rect = ["%e" % i for i in np.eye(3).flatten()]
Tr_velo_to_cam = []
calib_context = ''
for camera in frame.context.camera_calibrations:
tmp = np.array(camera.extrinsic.transform).reshape(4, 4)
tmp = self.cart_to_homo(self.T_front_cam_to_ref) @ np.linalg.inv(tmp)
Tr_velo_to_cam.append(["%e" % i for i in tmp[:3,:].reshape(12)])
for cam in frame.context.camera_calibrations:
tmp = np.zeros((3, 4))
tmp[0, 0] = cam.intrinsic[0]
tmp[1, 1] = cam.intrinsic[1]
tmp[0, 2] = cam.intrinsic[2]
tmp[1, 2] = cam.intrinsic[3]
tmp[2, 2] = 1
tmp = list(tmp.reshape(12))
tmp = ["%e" % i for i in tmp]
camera_calib.append(tmp)
T_front_cam_to_vehicle = np.array(frame.context.camera_calibrations[0].extrinsic.transform).reshape(4, 4)
self.T_vehicle_to_front_cam = np.linalg.inv(T_front_cam_to_vehicle)
for i in range(5):
calib_context += "P" + str(i) + ": " + \
" ".join(camera_calib[i]) + '\n'
calib_context += "R0_rect" + ": " + " ".join(R0_rect) + '\n'
for i in range(5):
calib_context += "Tr_velo_to_cam_" + \
str(i) + ": " + " ".join(Tr_velo_to_cam[i]) + '\n'
calib_context += "timestamp_micros: " + \
str(frame.timestamp_micros) + '\n'
calib_context += "context_name: " + str(frame.context.name) + '\n'
fp_calib.write(calib_context)
fp_calib.close()
def save_lidar(self, frame, frame_num):
""" parse and save the lidar data in psd format
:param frame: open dataset frame proto
:param frame_num: the current frame number
:return:
"""
range_images, range_image_top_pose = self.parse_range_image_and_camera_projection(
frame)
points, intensity = self.convert_range_image_to_point_cloud(
frame,
range_images,
range_image_top_pose)
points_all = np.concatenate(points, axis=0)
intensity_all = np.concatenate(intensity, axis=0)
point_cloud = np.column_stack((points_all, intensity_all))
pc_path = LIDAR_PATH + '/' + \
str(frame_num).zfill(INDEX_LENGTH) + '.bin'
point_cloud.tofile(pc_path)
    def save_label(self, frame, frame_num, cam_type, kitti_format=False, check_label_exists = False):
        """ parse and save the label data in .txt format

        Writes one KITTI-style line per 3D laser label that also has a
        projected 2D box on some camera. Columns: type, truncated,
        occluded, alpha, 2D bbox (4 values), height/width/length, x/y/z,
        rotation_y, plus two extra Waymo fields (lidar point count and
        detection difficulty). label_all additionally appends the camera
        index.

        :param frame: open dataset frame proto
        :param frame_num: the current frame number
        :param cam_type: camera index string ('0'..'4'); only matching
            labels go to LABEL_PATH (all labels go to LABEL_ALL_PATH)
        :param kitti_format: unused -- the related conversion code below
            was disabled by turning it into a string literal
        :param check_label_exists: when True, keep box centers in the
            vehicle frame instead of transforming into the camera frame
            (used by cvt() as a cheap "are there labels?" probe)
        :return: True if at least one label matched cam_type and files
            were written; False otherwise (nothing is written)
        """
        # get point cloud in the frame
        range_images, range_image_top_pose = self.parse_range_image_and_camera_projection(
            frame)
        points, intensity = self.convert_range_image_to_point_cloud(
            frame,
            range_images,
            range_image_top_pose)
        points_all = tf.convert_to_tensor(
            np.concatenate(points, axis=0), dtype=np.float32)
        # preprocess bounding box data: map "<laser label id><camera suffix>"
        # to its projected 2D box and 0-based camera index
        id_to_bbox = dict()
        id_to_name = dict()
        for labels in frame.projected_lidar_labels:
            name = labels.name
            for label in labels.labels:
                bbox = [label.box.center_x - label.box.length / 2, label.box.center_y - label.box.width / 2,
                        label.box.center_x + label.box.length / 2, label.box.center_y + label.box.width / 2]
                id_to_bbox[label.id] = bbox
                id_to_name[label.id] = name - 1
        Tr_velo_to_cam = []
        recorded_label = []
        label_lines = ''
        label_all_lines = ''
        # NOTE: dead code below was disabled by turning it into a string.
        """
        if kitti_format:
            for camera in frame.context.camera_calibrations:
                tmp = np.array(camera.extrinsic.transform).reshape(4, 4)
                tmp = np.linalg.inv(tmp)
                axes_transformation = np.array([[0, -1, 0, 0],
                                                [0, 0, -1, 0],
                                                [1, 0, 0, 0],
                                                [0, 0, 0, 1]])
                tmp = np.matmul(axes_transformation, tmp)
                Tr_velo_to_cam.append(tmp)
        """
        for obj in frame.laser_labels:
            # calculate bounding box: find the first camera this laser label
            # was projected onto (skip labels visible on no camera)
            bounding_box = None
            name = None
            id = obj.id  # NOTE: shadows the id() builtin locally
            for lidar in self.__lidar_list:
                if id + lidar in id_to_bbox:
                    bounding_box = id_to_bbox.get(id + lidar)
                    name = str(id_to_name.get(id + lidar))
                    break
            if bounding_box == None or name == None:
                continue
            # count lidar points inside the 3D box (extra KITTI column)
            box = tf.convert_to_tensor(
                [obj.box.center_x, obj.box.center_y, obj.box.center_z, obj.box.length, obj.box.width, obj.box.height, obj.box.heading], dtype=np.float32)
            box = tf.reshape(box, (1, 7))
            num_points = box_utils.compute_num_points_in_box_3d(
                points_all, box)
            num_points = num_points.numpy()[0]
            detection_difficulty = obj.detection_difficulty_level
            my_type = self.__type_list[obj.type]
            truncated = 0
            occluded = 0
            height = obj.box.height
            width = obj.box.width
            length = obj.box.length
            x = obj.box.center_x
            y = obj.box.center_y
            # KITTI expects the bottom-center of the box, not its centroid.
            z = obj.box.center_z - height/2
            if check_label_exists == False:
                # vehicle frame -> front-camera frame -> KITTI reference
                # frame (matrices cached by save_calib, which cvt() runs
                # before this full-label pass)
                pt_ref = self.cart_to_homo(self.T_front_cam_to_ref) @ self.T_vehicle_to_front_cam @ np.array([x,y,z,1]).reshape((4,1))
                x, y, z, _ = pt_ref.flatten().tolist()
            rotation_y = -obj.box.heading - np.pi/2
            # observation angle alpha derived from rotation_y and viewing ray
            beta = math.atan2(x, z)
            alpha = (rotation_y + beta - math.pi / 2) % (2 * math.pi)
            # save the labels
            line = my_type + ' {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {}\n'.format(round(truncated, 2),
                                                                                         occluded,
                                                                                         round(
                                                                                             alpha, 2),
                                                                                         round(
                                                                                             bounding_box[0], 2),
                                                                                         round(
                                                                                             bounding_box[1], 2),
                                                                                         round(
                                                                                             bounding_box[2], 2),
                                                                                         round(
                                                                                             bounding_box[3], 2),
                                                                                         round(
                                                                                             height, 2),
                                                                                         round(
                                                                                             width, 2),
                                                                                         round(
                                                                                             length, 2),
                                                                                         round(
                                                                                             x, 2),
                                                                                         round(
                                                                                             y, 2),
                                                                                         round(
                                                                                             z, 2),
                                                                                         round(
                                                                                             rotation_y, 2),
                                                                                         num_points,
                                                                                         detection_difficulty)
            line_all = line[:-1] + ' ' + name + '\n'
            # store the label
            label_all_lines += line_all
            if (name == cam_type):
                label_lines += line
                recorded_label.append(line)
        if len(recorded_label) == 0:
            return False
        else:
            fp_label_all = open(LABEL_ALL_PATH + '/' +
                                str(frame_num).zfill(INDEX_LENGTH) + '.txt', 'w+')
            fp_label = open(LABEL_PATH + '/' +
                            str(frame_num).zfill(INDEX_LENGTH) + '.txt', 'w+')
            fp_label.write(label_lines)
            fp_label.close()
            fp_label_all.write(label_all_lines)
            fp_label_all.close()
            return True
def save_image_calib(self, frame, frame_num):
fp_image_calib = open(IMG_CALIB_PATH + '/' +
str(frame_num).zfill(INDEX_LENGTH) + '.txt', 'w+')
camera_calib = []
pose = []
velocity = []
timestamp = []
shutter = []
trigger_time = []
readout_done_time = []
calib_context = ''
for camera in frame.images:
tmp = np.array(camera.pose.transform).reshape((16,))
pose.append(["%e" % i for i in tmp])
tmp = np.zeros(6)
tmp[0] = camera.velocity.v_x
tmp[1] = camera.velocity.v_y
tmp[2] = camera.velocity.v_z
tmp[3] = camera.velocity.w_x
tmp[4] = camera.velocity.w_y
tmp[5] = camera.velocity.w_z
velocity.append(["%e" % i for i in tmp])
timestamp.append(camera.pose_timestamp)
shutter.append(camera.shutter)
trigger_time.append(camera.camera_trigger_time)
readout_done_time.append(camera.camera_readout_done_time)
for i in range(5):
calib_context += "Pose_" + str(i) + ": " + \
" ".join(pose[i]) + '\n'
for i in range(5):
calib_context += "Velocity_" + str(i) + ": " + \
" ".join(velocity[i]) + '\n'
for i in range(5):
calib_context += "Timestamp_" + str(i) + ": " + \
" ".join(velocity[i]) + '\n'
for i in range(5):
calib_context += "Shutter_" + str(i) + ": " + \
" ".join(velocity[i]) + '\n'
for i in range(5):
calib_context += "Trigger_" + str(i) + ": " + \
" ".join(velocity[i]) + '\n'
for i in range(5):
calib_context += "Readout_" + str(i) + ": " + \
" ".join(velocity[i]) + '\n'
fp_image_calib.write(calib_context)
fp_image_calib.close()
def get_file_names(self, folder):
for i in os.listdir(folder):
if i.split('.')[-1] == 'tfrecord':
self.__file_names.append(folder + '/' + i)
def cart_to_homo(self, mat):
ret = np.eye(4)
if mat.shape == (3, 3):
ret[:3, :3] = mat
elif mat.shape == (3, 4):
ret[:3, :] = mat
else:
raise ValueError(mat.shape)
return ret
def create_folder(self, cam_type):
if not os.path.exists(KITTI_PATH):
os.mkdir(KITTI_PATH)
if not os.path.exists(CALIB_PATH):
os.mkdir(CALIB_PATH)
if not os.path.exists(LIDAR_PATH):
os.mkdir(LIDAR_PATH)
if not os.path.exists(LABEL_ALL_PATH):
os.mkdir(LABEL_ALL_PATH)
if not os.path.exists(IMG_CALIB_PATH):
os.mkdir(IMG_CALIB_PATH)
if not os.path.exists(IMAGE_PATH):
os.mkdir(IMAGE_PATH)
if not os.path.exists(LABEL_PATH):
os.mkdir(LABEL_PATH)
def extract_intensity(self, frame, range_images, lidar_num):
""" extract the intensity from the original range image
:param frame: open dataset frame proto
:param frame_num: the current frame number
:param lidar_num: the number of current lidar
:return:
"""
intensity_0 = np.array(range_images[lidar_num][0].data).reshape(-1, 4)
intensity_0 = intensity_0[:, 1]
intensity_1 = np.array(range_images[lidar_num][
1].data).reshape(-1, 4)[:, 1]
return intensity_0, intensity_1
    def image_show(self, data, name, layout, cmap=None):
        """Show an image.

        :param data: JPEG-encoded image bytes (decoded via TF)
        :param name: subplot title
        :param layout: (rows, cols, index) tuple unpacked into plt.subplot
        :param cmap: optional matplotlib colormap name
        """
        plt.subplot(*layout)
        plt.imshow(tf.image.decode_jpeg(data), cmap=cmap)
        plt.title(name)
        plt.grid(False)
        plt.axis('off')
    def parse_range_image_and_camera_projection(self, frame):
        """Parse range images and camera projections given a frame.

        Decompresses every laser's ZLIB range image into a MatrixFloat
        proto and caches the result on self.__range_images (first return,
        plus the second return when present). Camera-projection parsing
        is currently commented out.

        Args:
          frame: open dataset frame proto
        Returns:
          range_images: A dict of {laser_name,
            [range_image_first_return, range_image_second_return]}.
          range_image_top_pose: range image pixel pose for top lidar.
        """
        # NOTE(review): range_image_top_pose is only bound when the TOP
        # laser is present -- every Waymo frame appears to include it, but
        # a frame without it would raise UnboundLocalError here; confirm.
        self.__range_images = {}
        # camera_projections = {}
        # range_image_top_pose = None
        for laser in frame.lasers:
            if len(laser.ri_return1.range_image_compressed) > 0:
                range_image_str_tensor = tf.decode_compressed(
                    laser.ri_return1.range_image_compressed, 'ZLIB')
                ri = open_dataset.MatrixFloat()
                ri.ParseFromString(bytearray(range_image_str_tensor.numpy()))
                self.__range_images[laser.name] = [ri]
                # Only the top lidar carries a per-pixel pose image.
                if laser.name == open_dataset.LaserName.TOP:
                    range_image_top_pose_str_tensor = tf.decode_compressed(
                        laser.ri_return1.range_image_pose_compressed, 'ZLIB')
                    range_image_top_pose = open_dataset.MatrixFloat()
                    range_image_top_pose.ParseFromString(
                        bytearray(range_image_top_pose_str_tensor.numpy()))
                # camera_projection_str_tensor = tf.decode_compressed(
                #     laser.ri_return1.camera_projection_compressed, 'ZLIB')
                # cp = open_dataset.MatrixInt32()
                # cp.ParseFromString(bytearray(camera_projection_str_tensor.numpy()))
                # camera_projections[laser.name] = [cp]
            if len(laser.ri_return2.range_image_compressed) > 0:
                range_image_str_tensor = tf.decode_compressed(
                    laser.ri_return2.range_image_compressed, 'ZLIB')
                ri = open_dataset.MatrixFloat()
                ri.ParseFromString(bytearray(range_image_str_tensor.numpy()))
                self.__range_images[laser.name].append(ri)
                #
                # camera_projection_str_tensor = tf.decode_compressed(
                #     laser.ri_return2.camera_projection_compressed, 'ZLIB')
                # cp = open_dataset.MatrixInt32()
                # cp.ParseFromString(bytearray(camera_projection_str_tensor.numpy()))
                # camera_projections[laser.name].append(cp)
        return self.__range_images, range_image_top_pose
    def plot_range_image_helper(self, data, name, layout, vmin=0, vmax=1, cmap='gray'):
        """Plots range image.

        Args:
          data: range image data (2D numpy array)
          name: the image title
          layout: plt layout, a (rows, cols, index) tuple
          vmin: minimum value of the passed data
          vmax: maximum value of the passed data
          cmap: color map
        """
        plt.subplot(*layout)
        plt.imshow(data, cmap=cmap, vmin=vmin, vmax=vmax)
        plt.title(name)
        plt.grid(False)
        plt.axis('off')
def get_range_image(self, laser_name, return_index):
"""Returns range image given a laser name and its return index."""
return self.__range_images[laser_name][return_index]
    def show_range_image(self, range_image, layout_index_start=1):
        """Shows range image.

        Renders three stacked subplots: range, intensity and elongation.

        Args:
          range_image: the range image data from a given lidar of type MatrixFloat.
          layout_index_start: layout offset
        """
        range_image_tensor = tf.convert_to_tensor(range_image.data)
        range_image_tensor = tf.reshape(
            range_image_tensor, range_image.shape.dims)
        lidar_image_mask = tf.greater_equal(range_image_tensor, 0)
        # Negative entries mean "no return"; push them far beyond vmax so
        # they render as background.
        range_image_tensor = tf.where(lidar_image_mask, range_image_tensor,
                                      tf.ones_like(range_image_tensor) * 1e10)
        range_image_range = range_image_tensor[..., 0]
        range_image_intensity = range_image_tensor[..., 1]
        range_image_elongation = range_image_tensor[..., 2]
        self.plot_range_image_helper(range_image_range.numpy(), 'range',
                                     [8, 1, layout_index_start], vmax=75, cmap='gray')
        self.plot_range_image_helper(range_image_intensity.numpy(), 'intensity',
                                     [8, 1, layout_index_start + 1], vmax=1.5, cmap='gray')
        self.plot_range_image_helper(range_image_elongation.numpy(), 'elongation',
                                     [8, 1, layout_index_start + 2], vmax=1.5, cmap='gray')
    def convert_range_image_to_point_cloud(self, frame, range_images, range_image_top_pose, ri_index=0):
        """Convert range images to point cloud.

        Args:
          frame: open dataset frame
          range_images: A dict of {laser_name,
            [range_image_first_return, range_image_second_return]}.
          range_image_top_pose: range image pixel pose for top lidar.
          ri_index: 0 for the first return, 1 for the second return.
        Returns:
          points: {[N, 3]} list of 3d lidar points of length 5 (number of lidars).
          intensity: {[N, 1]} list of intensity of length 5 (number of lidars).
        """
        # Sort by laser name so the output lists have a stable laser order.
        calibrations = sorted(
            frame.context.laser_calibrations, key=lambda c: c.name)
        # lasers = sorted(frame.lasers, key=lambda laser: laser.name)
        points = []
        # cp_points = []
        intensity = []
        frame_pose = tf.convert_to_tensor(
            np.reshape(np.array(frame.pose.transform), [4, 4]))
        # [H, W, 6]
        range_image_top_pose_tensor = tf.reshape(
            tf.convert_to_tensor(range_image_top_pose.data),
            range_image_top_pose.shape.dims)
        # [H, W, 3, 3]
        range_image_top_pose_tensor_rotation = transform_utils.get_rotation_matrix(
            range_image_top_pose_tensor[...,
                                        0], range_image_top_pose_tensor[..., 1],
            range_image_top_pose_tensor[..., 2])
        range_image_top_pose_tensor_translation = range_image_top_pose_tensor[
            ..., 3:]
        range_image_top_pose_tensor = transform_utils.get_transform(
            range_image_top_pose_tensor_rotation,
            range_image_top_pose_tensor_translation)
        for c in calibrations:
            range_image = range_images[c.name][ri_index]
            # Derive beam inclinations from min/max when the calibration
            # does not list them explicitly.
            if len(c.beam_inclinations) == 0:
                beam_inclinations = range_image_utils.compute_inclination(
                    tf.constant([c.beam_inclination_min,
                                 c.beam_inclination_max]),
                    height=range_image.shape.dims[0])
            else:
                beam_inclinations = tf.constant(c.beam_inclinations)
            beam_inclinations = tf.reverse(beam_inclinations, axis=[-1])
            extrinsic = np.reshape(np.array(c.extrinsic.transform), [4, 4])
            range_image_tensor = tf.reshape(
                tf.convert_to_tensor(range_image.data), range_image.shape.dims)
            pixel_pose_local = None
            frame_pose_local = None
            # Only the top lidar has per-pixel poses (rolling shutter).
            if c.name == open_dataset.LaserName.TOP:
                pixel_pose_local = range_image_top_pose_tensor
                pixel_pose_local = tf.expand_dims(pixel_pose_local, axis=0)
                frame_pose_local = tf.expand_dims(frame_pose, axis=0)
            # Channel 0 is range; non-positive values mean "no return".
            range_image_mask = range_image_tensor[..., 0] > 0
            range_image_cartesian = range_image_utils.extract_point_cloud_from_range_image(
                tf.expand_dims(range_image_tensor[..., 0], axis=0),
                tf.expand_dims(extrinsic, axis=0),
                tf.expand_dims(tf.convert_to_tensor(
                    beam_inclinations), axis=0),
                pixel_pose=pixel_pose_local,
                frame_pose=frame_pose_local)
            range_image_cartesian = tf.squeeze(range_image_cartesian, axis=0)
            points_tensor = tf.gather_nd(range_image_cartesian,
                                         tf.where(range_image_mask))
            intensity_tensor = tf.gather_nd(range_image_tensor,
                                            tf.where(range_image_mask))
            # cp = camera_projections[c.name][0]
            # cp_tensor = tf.reshape(tf.convert_to_tensor(cp.data), cp.shape.dims)
            # cp_points_tensor = tf.gather_nd(cp_tensor, tf.where(range_image_mask))
            points.append(points_tensor.numpy())
            # cp_points.append(cp_points_tensor.numpy())
            # Channel 1 of the range image is intensity.
            intensity.append(intensity_tensor.numpy()[:, 1])
        return points, intensity
def rgba(self, r):
"""Generates a color based on range.
Args:
r: the range value of a given point.
Returns:
The color for a given range
"""
c = plt.get_cmap('jet')((r % 20.0) / 20.0)
c = list(c)
c[-1] = 0.5 # alpha
return c
def plot_image(self, camera_image):
"""Plot a cmaera image."""
plt.figure(figsize=(20, 12))
plt.imshow(tf.image.decode_jpeg(camera_image.image))
plt.grid("off")
def plot_points_on_image(self, projected_points, camera_image, rgba_func, point_size=5.0):
"""Plots points on a camera image.
Args:
projected_points: [N, 3] numpy array. The inner dims are
[camera_x, camera_y, range].
camera_image: jpeg encoded camera image.
rgba_func: a function that generates a color from a range value.
point_size: the point size.
"""
self.plot_image(camera_image)
xs = []
ys = []
colors = []
for point in projected_points:
xs.append(point[0]) # width, col
ys.append(point[1]) # height, row
colors.append(rgba_func(point[2]))
plt.scatter(xs, ys, c=colors, s=point_size, edgecolors="none")
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Save Waymo dataset into Kitti format')
    parser.add_argument('--keyframe',
                        type=int,
                        default=10,
                        # Help text previously claimed "Default is 1"; the
                        # actual default is 10.
                        help='Saves every specified # of scenes. Default is 10, i.e. every 10th scene is saved.')
    parser.add_argument('--camera_type',
                        type=str,
                        default="0",
                        help='Select camera views to save. Input argument from 0 to 4 or all')
    parser.add_argument('--start_ind',
                        type=int,
                        default=0,
                        help='File number starts counting from this index')
    parser.add_argument('--test',
                        # ``type=bool`` treats ANY non-empty string (even
                        # "False") as True; parse truthy strings explicitly.
                        type=lambda s: s.lower() in ('1', 'true', 'yes'),
                        default=False,
                        help='if true, does not save any ground truth data')
    args = parser.parse_args()

    start_ind = args.start_ind
    path, dirs, files = next(os.walk(DATA_PATH))
    dirs.sort()
    # Convert each scene directory in order, chaining the file index so the
    # output numbering is continuous across directories.
    for directory in dirs:
        adapter = Adapter()
        last_ind = adapter.cvt(args, directory, start_ind)
        start_ind = last_ind
| 41.88218 | 153 | 0.550601 | 25,923 | 0.911562 | 0 | 0 | 0 | 0 | 0 | 0 | 6,823 | 0.239925 |
1a5ec4950e6cf0d029663368b47eef30213bdc82 | 1,924 | py | Python | renku/cli/_providers/__init__.py | vigsterkr/renku-python | fb443d752436478a2903c4403ba6c702949b1b59 | [
"Apache-2.0"
] | null | null | null | renku/cli/_providers/__init__.py | vigsterkr/renku-python | fb443d752436478a2903c4403ba6c702949b1b59 | [
"Apache-2.0"
] | null | null | null | renku/cli/_providers/__init__.py | vigsterkr/renku-python | fb443d752436478a2903c4403ba6c702949b1b59 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright 2019 - Swiss Data Science Center (SDSC)
# A partnership between École Polytechnique Fédérale de Lausanne (EPFL) and
# Eidgenössische Technische Hochschule Zürich (ETHZ).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Third party data registry integration."""
from urllib.parse import urlparse
from renku.cli._providers.zenodo import ZenodoProvider
from renku.utils.doi import is_doi
class ProviderFactory:
    """Create a provider type from URI."""

    PROVIDERS = {'zenodo': ZenodoProvider}

    @staticmethod
    def from_uri(uri):
        """Get provider type based on uri."""
        is_doi_ = is_doi(uri)
        if is_doi_ is False:
            # Not a DOI: the URI must at least parse as a plain URL
            # (scheme + host, and no URL parameters).
            url = urlparse(uri)
            if not (url.scheme and url.netloc and url.params == ''):
                return None, 'Cannot parse URL.'

        provider = ZenodoProvider(is_doi=is_doi_) if 'zenodo' in uri else None

        if is_doi_ and provider is None:
            # Extract the DOI provider name (e.g. "zenodo" from
            # "10.5281/zenodo.1234") for the error message.
            doi_provider_name = uri.split('/')[1].split('.')[0]
            return None, (
                'Provider {} not found. '.format(doi_provider_name) +
                'Currently supporting following providers: (Zenodo, )'
            )

        return provider, None

    @staticmethod
    def from_id(provider_id):
        """Get provider type based on identifier."""
        provider_cls = ProviderFactory.PROVIDERS[provider_id]
        return provider_cls()
| 34.357143 | 77 | 0.660083 | 997 | 0.516848 | 0 | 0 | 876 | 0.454121 | 0 | 0 | 1,051 | 0.544842 |
1a612743d582b02908a4b3a8f29574ce5358d4cb | 865 | py | Python | POPGEN/flashpca_to_smartpca.py | Hammarn/Scripts | eb9fb51b614d29aea425168aa16c58410d975f46 | [
"MIT"
] | null | null | null | POPGEN/flashpca_to_smartpca.py | Hammarn/Scripts | eb9fb51b614d29aea425168aa16c58410d975f46 | [
"MIT"
] | null | null | null | POPGEN/flashpca_to_smartpca.py | Hammarn/Scripts | eb9fb51b614d29aea425168aa16c58410d975f46 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import argparse
import pandas as pd
def main(input_file, output):
    """Convert a FlashPCA eigenvector table to SmartPCA ``.evec`` format.

    Reads the tab-separated ``input_file``, rewrites the ``FID`` column as a
    combined ``FID:IID`` sample identifier (SmartPCA convention), appends the
    original ``FID`` as a trailing ``last`` column, and writes the result to
    ``output`` (tab-separated, no index).
    """
    pd_data = pd.read_csv(input_file, sep="\t")
    # Preserve the original family ID in a trailing column before rewriting.
    pd_data['last'] = pd_data['FID']
    # Vectorized replacement of the per-row ``.loc`` loop; SmartPCA expects a
    # single "FID:IID" identifier per sample.
    pd_data['FID'] = pd_data['FID'].astype(str) + ':' + pd_data['IID'].astype(str)
    pd_data.to_csv(output, sep="\t", index=False)
    # Python 3 print function (the original used a Python 2 print statement).
    print("Output written to {}".format(output))
if __name__ == "__main__":
    # Command line arguments.
    # The description was previously passed positionally, which argparse
    # interprets as ``prog`` (the program name shown in usage lines), not as
    # the description; pass it by keyword instead.
    parser = argparse.ArgumentParser(
        description="Converts FlashPCA output into SmartPCA output")
    parser.add_argument("-i", "--input", default='king.kin',
                        help="Input file from FlashPCA to convert to SmartPCA output format.")
    parser.add_argument("-o", "--output", default='pca.evec',
                        help="Name of Outputfile.")
    args = parser.parse_args()
    main(args.input, args.output)
| 30.892857 | 88 | 0.649711 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 306 | 0.353757 |
1a62f4164187518caa99367786abcd22a21e6480 | 1,288 | py | Python | registration_app/tests/frontend/test_view.py | radekska/django-network-controller | 6bcb847cbe1efa7dee118974de5e49b4f411e5da | [
"MIT"
] | null | null | null | registration_app/tests/frontend/test_view.py | radekska/django-network-controller | 6bcb847cbe1efa7dee118974de5e49b4f411e5da | [
"MIT"
] | null | null | null | registration_app/tests/frontend/test_view.py | radekska/django-network-controller | 6bcb847cbe1efa7dee118974de5e49b4f411e5da | [
"MIT"
] | null | null | null | import pytest
import requests
import unittest
from django.test import TestCase
from django.urls import reverse
from django.contrib.auth.models import User
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from webdriver_manager.chrome import ChromeDriverManager
# giving pytest access to database.
# pytestmark = pytest.mark.django_db
# class TestRegistrationSignUp(unittest.TestCase):
# def test_post_request(self):
# options = Options()
# options.headless = True
# self.driver = webdriver.Chrome(ChromeDriverManager().install(), chrome_options=options)
#
# self.driver.get("http://localhost:8000/registration/")
# self.driver.find_element_by_id('username').send_keys("test_username")
# self.driver.find_element_by_id('password1').send_keys("Test_password123")
# self.driver.find_element_by_id('password2').send_keys("Test_password123")
#
# self.driver.find_element_by_id('submit').click()
# self.assertIn("http://localhost:8000/", self.driver.current_url)
#
# created_user = User.objects.get(username='test_username')
# self.assertEqual(created_user.username, 'test_username')
# self.assertEqual(isinstance(created_user.password, str), True)
| 33.894737 | 97 | 0.735248 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 963 | 0.747671 |
1a6518f5fb8f864732a99e710e1d698d8dac1476 | 18,923 | py | Python | export_util/normalize.py | amoghmatt/export-lib | ec0a5d3b81116e72cc76f1d16023041ae5df33c7 | [
"Apache-2.0"
] | 3 | 2019-05-23T10:56:36.000Z | 2020-07-15T14:24:21.000Z | export_util/normalize.py | amoghmatt/export-lib | ec0a5d3b81116e72cc76f1d16023041ae5df33c7 | [
"Apache-2.0"
] | 1 | 2019-09-24T08:26:08.000Z | 2019-09-24T08:26:08.000Z | export_util/normalize.py | amoghmatt/export-lib | ec0a5d3b81116e72cc76f1d16023041ae5df33c7 | [
"Apache-2.0"
] | 2 | 2020-03-26T10:57:50.000Z | 2020-09-25T09:56:56.000Z | import collections
import schematics
from export_util import (
template as tpl,
value as val
)
class Normalizer:
    """Format source objects into ordered table rows using a template.

    The template is an ``export_util.template.Object`` describing which
    fields of each object are rendered, in which columns, and how nested
    object lists are laid out (see ``export_util.template`` for the full set
    of layout options: ``col``, ``titles``, ``translate``, ``fold_nested``,
    ``inline``, ``offset_top``, ``offset_item``, ...).

    Each ``template.Field`` may carry a ``preformat`` callable which receives
    the raw field value and the whole object (as a ``DataGetter``), so a
    rendered column can be computed from several source values — e.g. a
    duration column derived from start and end timestamps, a verbose
    "YES"/"NO" from a boolean, or a comma-joined string from a list.
    """

    def __init__(self, template: tpl.Object, *args, **kwargs):
        """Store the root object template used for rendering."""
        self.template = template

    def build_table(self, obj):
        """Yield the rows representing ``obj`` according to the template."""
        yield from self.template.render(obj)
class SchematicsNormalizer(Normalizer):
    """
    Creates object template from the schematics model.

    The template is derived from the model's fields (or, when the model's
    options declare an explicit ``fields`` list, from that list) and from the
    optional ``preformat`` mapping in the model options.
    """
    # Default layout options for the root template object.
    root_object_options = dict(
        col=1,
        titles=True,
        fold_nested=True,
    )
    # Default layout options for templates built from nested ModelType fields.
    nested_object_options = dict(
        titles=True,
        inline=True,
        fold_nested=True,
    )

    def __init__(self, model: schematics.Model, *args, **kwargs):
        """
        Create objects template from schematics model.
        """
        template = self._build_template(model, **kwargs)
        super(SchematicsNormalizer, self).__init__(template, *args, **kwargs)

    def _build_template(self, model: schematics.Model, **kwargs) -> tpl.Object:
        """
        Creates object template from model.
        """
        # Copy the class-level defaults before applying overrides; the
        # previous in-place ``update`` mutated the shared class attribute,
        # leaking options between builds and instances.
        template_options = dict(self.root_object_options)
        template_options.update(kwargs)

        template = tpl.Object(**template_options)

        for field, preformat in self._get_model_renderable_fields(model):
            options = {}
            if preformat is not None:
                options['preformat'] = preformat

            template.add_field(
                field=self._create_field_template(
                    field=field,
                    parent=kwargs.get('parent'),
                    previous=template.fields[-1] if template.fields else None,
                    **options
                )
            )

        return template

    def _create_field_template(self, field: schematics.types.BaseType, parent=None, previous=None, **kwargs):
        """Dispatch to the proper template builder for the given field type."""
        if isinstance(field, schematics.types.ListType) and not kwargs.get('preformat'):
            return self._type_list_related_field(field, parent, previous, **kwargs)

        if isinstance(field, schematics.types.ModelType) and not kwargs.get('preformat'):
            return self._type_related_field(field, parent, previous, **kwargs)

        return self._type_base_field(field, parent, previous, **kwargs)

    def _type_base_field(self, field: schematics.types.BaseType, parent=None, previous=None, **kwargs):
        """Build a plain ``tpl.Field`` for a scalar field."""
        if 'col' in kwargs:
            column = kwargs.pop('col')
        else:
            column = self._get_next_column_number(previous)

        preformat = None
        if 'preformat' in kwargs:
            preformat = kwargs.pop('preformat')
        if preformat is None:
            preformat = val.any_to_string

        return tpl.Field(
            col=column,
            path=self._get_field_path(parent, field.name),
            preformat=preformat,
            verbose_name=self._get_field_verbose_name(field),
            **kwargs
        )

    def _type_list_related_field(self, field: schematics.types.BaseType, parent=None, previous=None, **kwargs):
        """Build a template for a ``ListType`` field (model list or scalar list)."""
        if hasattr(field, 'model_class'):
            return self._type_related_field(field, parent, previous, **kwargs)

        if 'col' in kwargs:
            column = kwargs.pop('col')
        else:
            column = self._get_next_column_number(previous)

        preformat = None
        if 'preformat' in kwargs:
            preformat = kwargs.pop('preformat')
        if preformat is None:
            preformat = val.any_to_string

        return self._type_base_field(
            col=column,
            field=field,
            parent=parent,
            previous=previous,
            preformat=preformat,
            **kwargs
        )

    def _type_related_field(self, field: schematics.types.BaseType, parent=None, previous=None, **kwargs):
        """Build a nested ``tpl.Object`` template for a ``ModelType`` field."""
        options = kwargs or self._get_model_template_options(field.model_class)

        if 'col' in options:
            column = options.pop('col')
        else:
            column = self._get_next_column_number(previous)

        return self._build_template(
            col=column,
            path=self._get_field_path(parent, field.name),
            model=field.model_class,
            parent=parent,
            verbose_name=self._get_field_verbose_name(field),
            **options
        )

    def _get_model_template_options(self, model) -> dict:
        """Collect template options declared on the nested model's options."""
        o = self.nested_object_options.copy()
        o.update({
            k: v for k, v in model._options
            if k in tpl.Object.supported_options and v is not None
        })
        return dict(filter(lambda x: x[1] is not None, o.items()))

    def _get_model_renderable_fields(self, model: schematics.models.Model):
        """Yield ``(field, preformatters)`` pairs to be rendered for model."""
        if 'fields' in dict(model._options) and model._options.fields is not None:
            for field_name in model._options.fields:
                # Get model field
                if field_name in model.fields:
                    yield model.fields[field_name], self._get_model_preformat_field(model, field_name)
                    continue

                # Get custom report field
                getter = f'get_{field_name}'
                if not hasattr(model, getter):
                    raise NotImplementedError(f'{model.__name__}.{getter} is not implemented')

                getter = getattr(model, getter)
                getter.name = getter.serialized_name = field_name

                # Define preformatters and prepend getter
                preformatters = self._get_model_preformat_field(model, field_name)
                if not preformatters:
                    preformatters = [getter]
                # ``collections.Iterable`` was removed in Python 3.10; the
                # ABC lives in ``collections.abc``.
                elif isinstance(preformatters, collections.abc.Iterable):
                    preformatters = [getter] + list(preformatters)
                elif callable(preformatters):
                    preformatters = [getter, preformatters]

                yield getter, preformatters
            return

        yield from (
            (v, self._get_model_preformat_field(model, k))
            for k, v in model.fields.items()
        )

    def _get_model_preformat_field(self, model: schematics.models.Model, field_name):
        """Resolve the preformatters declared for ``field_name``, if any."""
        if 'preformat' in dict(model._options) and model._options.preformat is not None:
            source_formatters = model._options.preformat.get(field_name)
            if callable(source_formatters) or not source_formatters:
                return source_formatters

            callable_formatters = []
            if isinstance(source_formatters, collections.abc.Iterable):
                for formatter in source_formatters:
                    if isinstance(formatter, str):
                        callable_formatters.append(getattr(model, formatter))
                    elif callable(formatter):
                        callable_formatters.append(formatter)
                    else:
                        raise TypeError(f'{field_name} formatter must be callable or iterable of callable')
                return callable_formatters

        return None

    def _get_next_column_number(self, previous_field=None):
        """Column immediately after ``previous_field`` (1 when it is None)."""
        if previous_field is None:
            return 1

        return previous_field.column + previous_field.length

    def _get_field_verbose_name(self, field):
        """Human-readable field title; serialized name wins over raw name."""
        return field.serialized_name or field.name

    def _get_field_path(self, parent, field_name):
        """Dotted lookup path of the field relative to its parent."""
        return '.'.join([parent or '', field_name]).strip('.')
# Public API of this module (controls ``from ... import *``).
__all__ = [
    'Normalizer',
    'SchematicsNormalizer',
]
| 42.051111 | 119 | 0.531205 | 18,750 | 0.990858 | 1,621 | 0.085663 | 0 | 0 | 0 | 0 | 12,064 | 0.637531 |
1a65dba3fb6b320ee85ba73a4571435a2d581c12 | 324 | py | Python | gwent/vendor/pygwinc_clone/gwinc/ifo/aLIGO/__init__.py | ark0015/GWDetectorDesignToolkit | 6ee2f7a633c973ea10b450257b1ad4dbd0323738 | [
"MIT"
] | 14 | 2019-10-16T13:27:19.000Z | 2022-03-15T02:14:49.000Z | gwent/vendor/pygwinc_clone/gwinc/ifo/aLIGO/__init__.py | ark0015/GWDetectorDesignToolkit | 6ee2f7a633c973ea10b450257b1ad4dbd0323738 | [
"MIT"
] | 1 | 2019-09-29T21:21:40.000Z | 2019-09-29T21:21:40.000Z | gwent/vendor/pygwinc_clone/gwinc/ifo/aLIGO/__init__.py | ark0015/gwent | 6ee2f7a633c973ea10b450257b1ad4dbd0323738 | [
"MIT"
] | 6 | 2019-11-27T09:45:31.000Z | 2022-03-15T02:14:31.000Z | from gwinc.ifo.noises import *
class aLIGO(nb.Budget):
    """Advanced LIGO noise budget.

    ``noises`` lists the individual noise contributions that the ``Budget``
    machinery evaluates and combines into the detector's total noise
    estimate.
    """
    name = "Advanced LIGO"
    noises = [
        QuantumVacuum,
        Seismic,
        Newtonian,
        SuspensionThermal,
        CoatingBrownian,
        CoatingThermoOptic,
        SubstrateBrownian,
        SubstrateThermoElastic,
        ExcessGas,
    ]
1a67853ba17830fabf13e15748fa32cd51e99dee | 8,397 | py | Python | BioSTEAM 2.x.x/biorefineries/TAL/analyze_across_adsorption_design_space.py | yoelcortes/Bioindustrial-Complex | d39edfec88e443ef7a62218ca0215e3b105f4b96 | [
"MIT"
] | 2 | 2020-01-03T21:04:41.000Z | 2020-01-09T01:15:48.000Z | BioSTEAM 2.x.x/biorefineries/TAL/analyze_across_adsorption_design_space.py | yoelcortes/Bioindustrial-Complex | d39edfec88e443ef7a62218ca0215e3b105f4b96 | [
"MIT"
] | 6 | 2020-01-03T21:31:27.000Z | 2020-02-28T13:53:56.000Z | BioSTEAM 2.x.x/biorefineries/TAL/analyze_across_adsorption_design_space.py | yoelcortes/Bioindustrial-Complex | d39edfec88e443ef7a62218ca0215e3b105f4b96 | [
"MIT"
] | 2 | 2020-01-07T14:04:06.000Z | 2020-01-08T23:05:25.000Z | # -*- coding: utf-8 -*-
"""
Created on Thu Mar 10 13:52:52 2022
@author: sarangbhagwat
"""
from biorefineries.TAL.system_TAL_adsorption_glucose import *
from matplotlib import pyplot as plt
import numpy as np
# Shorthand alias for the adsorption column unit being optimized.
column = AC401
#%% Across regeneration fluid velocity and cycle time
def MPSP_at_adsorption_design(v, t):
    """Apply a candidate adsorption design and evaluate it.

    Sets the column's regeneration velocity ``v`` and cycle time ``t``, then
    returns ``(MPSP [$/kg], column installed cost [10^6 USD])``.
    """
    column.regeneration_velocity = v
    column.cycle_time = t
    return get_SA_MPSP(), AC401.installed_cost/1e6
regen_vels = np.linspace(3., 20., 40)
cycle_times = np.linspace(1., 4., 40)
MPSPs_ads_ds = []
column_costs_ads_r_t = []
#%%
# Sweep the (regeneration velocity, cycle time) design space, recording MPSP
# and column installed cost for each point; failed evaluations are logged
# and stored as NaN so the sweep continues.
for i in regen_vels:
    MPSPs_ads_ds.append([])
    column_costs_ads_r_t.append([])
    for j in cycle_times:
        MPSP, cost = None, None
        try:
            MPSP, cost = MPSP_at_adsorption_design(i, j)
        except Exception:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt and
            # SystemExit still propagate; log the failing design point.
            print(i, j)
            MPSP, cost = np.nan, np.nan
        MPSPs_ads_ds[-1].append(MPSP)
        column_costs_ads_r_t[-1].append(cost)
#%% Set parameters to optimal
# ``np.nanmin`` ignores failed (NaN) points; ``np.min`` would return NaN if
# any evaluation failed and the ``np.where`` lookup below would then crash.
min_MPSP = np.nanmin(MPSPs_ads_ds)
opt_indices = np.where(MPSPs_ads_ds==min_MPSP)
column.regeneration_velocity = regen_vels[opt_indices[0][0]]
column.cycle_time = cycle_times[opt_indices[1][0]]
print(min_MPSP, get_SA_MPSP())
#%% Plot MPSP
# Filled contour of MPSP over the (cycle time, regeneration velocity) grid,
# with every other level overlaid as a black contour line.
fig1, ax2 = plt.subplots(constrained_layout=True)
CS = ax2.contourf(cycle_times, regen_vels, MPSPs_ads_ds, levels=[4., 4.5, 5, 5.5, 6., 6.5, 7.])
CS2 = ax2.contour(CS, levels=CS.levels[::2], colors='black', origin='lower')
# ax2.set_title('Nonsense (3 masked regions)')
ax2.set_ylabel('Regeneration solvent velocity [m/s]')
ax2.set_xlabel('Cycle time [h]')
# Make a colorbar for the ContourSet returned by the contourf call.
cbar = fig1.colorbar(CS)
cbar.ax.set_ylabel('MPSP [$/kg]')
# Add the contour line levels to the colorbar
cbar.add_lines(CS2)
#%% Plot column cost
# Same design-space grid, colored by column installed cost instead of MPSP.
fig1, ax2 = plt.subplots(constrained_layout=True)
CS = ax2.contourf(cycle_times, regen_vels, column_costs_ads_r_t,
                  levels=[0, 0.25, 0.5, 0.75, 1., 1.25, 1.5, 1.75, 2., 2.25, 2.5],
                  )
CS2 = ax2.contour(CS, levels=CS.levels[::2], colors='black', origin='lower')
# ax2.set_title('Nonsense (3 masked regions)')
ax2.set_ylabel('Regeneration solvent velocity [m/s]')
ax2.set_xlabel('Cycle time [h]')
# Make a colorbar for the ContourSet returned by the contourf call.
cbar = fig1.colorbar(CS)
cbar.ax.set_ylabel('Column installed cost [10^6 USD]')
# Add the contour line levels to the colorbar
cbar.add_lines(CS2)
#%%
# Sweep superficial feed velocity and cycle time at a fixed regeneration
# velocity (14.4 m/s) with no recovery target.
AC401.regeneration_velocity = 14.4
AC401.target_recovery=None
superficial_velocities = np.linspace(4., 15., 9)
cycle_times = np.linspace(1., 4., 10)
MPSPs = []
column_costs = []
for m in superficial_velocities:
    AC401.superficial_velocity = m
    MPSPs.append([])
    column_costs.append([])
    for t in cycle_times:
        AC401.cycle_time = t
        MPSPs[-1].append(get_SA_MPSP())
        column_costs[-1].append(AC401.installed_cost/1e6)
#%% Plot column cost
# plt.contourf(superficial_velocities, cycle_times, MPSPs)
fig1, ax2 = plt.subplots(constrained_layout=True)
CS = ax2.contourf(cycle_times, superficial_velocities, column_costs)
CS2 = ax2.contour(CS, levels=CS.levels[::2], colors='black', origin='lower')
# ax2.set_title('Nonsense (3 masked regions)')
ax2.set_ylabel('Superficial feed velocity [m/s]')
ax2.set_xlabel('Cycle time [h]')
# Make a colorbar for the ContourSet returned by the contourf call.
cbar = fig1.colorbar(CS)
cbar.ax.set_ylabel('Column installed cost [10^6 USD]')
# Add the contour line levels to the colorbar
cbar.add_lines(CS2)
#%% Plot MPSP
# plt.contourf(superficial_velocities, cycle_times, MPSPs)
fig1, ax2 = plt.subplots(constrained_layout=True)
CS = ax2.contourf(cycle_times, superficial_velocities, MPSPs)
CS2 = ax2.contour(CS, levels=CS.levels[::2], colors='black', origin='lower')
# ax2.set_title('Nonsense (3 masked regions)')
ax2.set_ylabel('Superficial feed velocity [m/s]')
ax2.set_xlabel('Cycle time [h]')
# Make a colorbar for the ContourSet returned by the contourf call.
cbar = fig1.colorbar(CS)
cbar.ax.set_ylabel('MPSP [$/kg]')
# Add the contour line levels to the colorbar
cbar.add_lines(CS2)
#%% Across titer
# Sweep fermentation titer at a fixed adsorption design, recording MPSP and
# column installed cost at each titer.
AC401.regeneration_velocity = 14.4
AC401.target_recovery = 0.99
AC401.cycle_time = 2.
titers = np.linspace(2., 25., 10)
MPSPs_titer_only = []
costs_titer_only = []
for t in titers:
    spec.load_specifications(spec_1=spec.baseline_yield, spec_2=t, spec_3=spec.baseline_productivity)
    # Fixed: results were previously appended to the unrelated ``MPSPs``
    # list, leaving ``MPSPs_titer_only`` empty and breaking the plot below.
    MPSPs_titer_only.append(get_SA_MPSP())
    costs_titer_only.append(AC401.installed_cost)
# Restore the baseline fermentation specification after the sweep.
spec.load_specifications(spec_1=spec.baseline_yield, spec_2=spec.baseline_titer, spec_3=spec.baseline_productivity)
#%% Plot MPSP
plt.plot(titers, MPSPs_titer_only)
#%% Plot column cost
plt.plot(titers, costs_titer_only)
#%% Across titer and target recovery
# AC401.regeneration_velocity = 14.4
# AC401.target_recovery = 0.99
# # def MPSP_at_titer(t):
# # spec.load_specifications(spec_1=spec.spec_1, spec_2=t, spec_3=spec.spec_3)
# # column.regeneration_velocity = 3. + (17./25.)*t
# # return get_SA_MPSP()
# titers = np.linspace(2., 25., 10)
# recoveries = np.linspace(0.5, 0.99, 10)
# # MPSPs_titer = []
# #%%
# MPSPs_titer = []
# costs_titer = []
# for t in titers:
# MPSPs_titer.append([])
# costs_titer.append([])
# for r in recoveries:
# spec.load_specifications(spec_1=spec.spec_1, spec_2=t, spec_3=spec.spec_3)
# AC401.target_recovery = r
# MPSPs_titer[-1].append(get_SA_MPSP())
# costs_titer[-1].append(AC401.installed_cost)
# spec.load_specifications(spec.baseline_yield, spec.baseline_titer, spec.baseline_productivity)
# #%% Plot MPSP
# fig1, ax2 = plt.subplots(constrained_layout=True)
# CS = ax2.contourf(recoveries, titers, MPSPs_titer,
# # levels=[0., 2.5, 5., 7.5, 10, 12.5, 15, 17.5, 20],
# )
# CS2 = ax2.contour(CS, levels=CS.levels[::2], colors='black', origin='lower')
# # ax2.set_title('Nonsense (3 masked regions)')
# ax2.set_ylabel('Fermentation titer [g/L]')
# ax2.set_xlabel('Target adsorbate recovery [% of influent]')
# # Make a colorbar for the ContourSet returned by the contourf call.
# cbar = fig1.colorbar(CS)
# cbar.ax.set_ylabel('MPSP [$/kg]')
# # Add the contour line levels to the colorbar
# cbar.add_lines(CS2)
# #%% Plot column cost
# fig1, ax2 = plt.subplots(constrained_layout=True)
# CS = ax2.contourf(recoveries, titers, costs_titer,
# # levels=[0, 2, 4, 6, 8, 10, 12, 14, 16, 18 ,20],
# )
# CS2 = ax2.contour(CS, levels=CS.levels[::2], colors='black', origin='lower')
# # ax2.set_title('Nonsense (3 masked regions)')
# ax2.set_ylabel('Regeneration solvent velocity [m/s]')
# ax2.set_xlabel('Cycle time [h]')
# # Make a colorbar for the ContourSet returned by the contourf call.
# cbar = fig1.colorbar(CS)
# cbar.ax.set_ylabel('Column installed cost [10^6 USD]')
# # Add the contour line levels to the colorbar
# cbar.add_lines(CS2)
#%% Across titer with rigorous adsorption design optimization
AC401.regeneration_velocity = 14.4
AC401.target_recovery = 0.99
AC401.cycle_time = 2.
# Candidate regeneration velocities searched at each titer.
regen_vels = np.linspace(1., 14.4, 20)
# cycle_times = np.linspace(0.5, 4., 20)
# Optimal design found at each titer (appended by MPSP_at_titer).
opt_regen_vels = []
opt_cycle_times = []
def MPSP_and_cost_at_regen_vel(v):
    """Set the regeneration velocity ``v`` on the column and return
    ``(MPSP [$/kg], column installed cost [10^6 USD])``."""
    column.regeneration_velocity = v
    return get_SA_MPSP(), AC401.installed_cost/1e6
def MPSP_at_titer(t):
    """Return the minimum MPSP at titer ``t``, optimizing the column's
    regeneration velocity over ``regen_vels``.

    Side effects: loads the fermentation specification for ``t``, appends the
    optimal velocity to the module-level ``opt_regen_vels``, leaves the
    column set to that optimum, and prints a progress summary.
    """
    spec.load_specifications(spec_1=spec.spec_1, spec_2=t, spec_3=spec.spec_3)
    MPSPs_ads_ds = []
    costs_ads_ds = []
    for i in regen_vels:
        m, c = MPSP_and_cost_at_regen_vel(i)
        MPSPs_ads_ds.append(m)
        costs_ads_ds.append(c)
    min_MPSP = np.min(MPSPs_ads_ds)
    opt_indices = np.where(MPSPs_ads_ds==min_MPSP)
    opt_regen_vels.append(regen_vels[opt_indices[0][0]])
    # opt_cycle_times.append(cycle_times[opt_indices[1][0]])
    column.regeneration_velocity = opt_regen_vels[-1]
    # column.cycle_time = opt_cycle_times[-1]
    print('titer =', t)
    print(min_MPSP, column.ins[1].F_mass, column.regeneration_velocity, column.cycle_time)
    print('\n')
    return min_MPSP
titers = np.linspace(3., 30, 20)
#%%
MPSPs_titer = []
for i in titers:
    MPSPs_titer.append(MPSP_at_titer(i))
# Restore the baseline fermentation specification after the sweep.
spec.load_specifications(spec.baseline_yield, spec.baseline_titer, spec.baseline_productivity)
#%% Plot MPSP
plt.plot(titers, MPSPs_titer)
#%% Plot optimum regeneration velocity
plt.plot(titers, opt_regen_vels)
#%% Plot
1a6787210bec51c96bf435788743b6ad461d235d | 1,700 | py | Python | backtracking/python/rat_in_a_maze.py | CHuante/CSCognisanse | df0def906b41bd9651c72c6a76bb2d437eb257c1 | [
"MIT"
] | 2 | 2020-08-24T18:39:12.000Z | 2020-08-30T06:45:26.000Z | backtracking/python/rat_in_a_maze.py | CHuante/CSCognisanse | df0def906b41bd9651c72c6a76bb2d437eb257c1 | [
"MIT"
] | 15 | 2020-08-25T04:29:15.000Z | 2021-06-28T18:26:44.000Z | backtracking/python/rat_in_a_maze.py | CHuante/CSCognisanse | df0def906b41bd9651c72c6a76bb2d437eb257c1 | [
"MIT"
] | 4 | 2021-06-15T03:56:56.000Z | 2021-06-29T07:34:27.000Z | #To solve Rat in a maze problem using backtracking
# initializing the size of the maze and solution matrix
# N is the maze dimension (mazes are N x N).
N = 4
# Shared solution matrix filled in by solve_maze(); 1 marks cells on the path.
solution_maze = [ [ 0 for j in range(N) ] for i in range(N) ]
def is_safe(maze, x, y):
    '''A utility function to check if x, y is a valid move.

    Returns True when (x, y) lies inside the maze and the cell is open
    (== 1), False otherwise.  The bounds are taken from the maze itself, so
    the check also works for mazes of sizes other than the module-level
    N x N default (backward compatible for N x N input).
    '''
    rows = len(maze)
    cols = len(maze[0]) if rows else 0
    return 0 <= x < rows and 0 <= y < cols and maze[x][y] == 1
def check_if_solution_exists(maze):
    '''Run the solver on ``maze`` and report the outcome.

    Returns True when solve_maze() finds a path (solve_maze also prints the
    solution matrix), False otherwise.  The original fell through and
    returned None on success despite the function's name.
    '''
    if not solve_maze(maze):
        print("Solution doesn't exist")
        return False
    return True
# recursive function to solve rat in a maze problem
def solve_maze(maze, x=0,y=0):
    '''
    Recursively search for a path from (x, y) to the goal cell (N-1, N-1),
    moving only right (+x) or down (+y).

    Marks the discovered path with 1s in the module-level ``solution_maze``
    and prints it when the goal is reached.  Returns True if a path exists
    from (x, y), False otherwise.  (The original implicitly returned None
    when (x, y) was not a safe cell; it now returns False explicitly.)
    '''
    # if (x, y) is the goal, mark it and report the solution
    if x == N - 1 and y == N - 1:
        solution_maze[x][y] = 1
        print("solution:", solution_maze)
        return True
    # check if the move is valid
    if is_safe(maze, x, y):
        # tentatively mark x, y as part of the solution path
        solution_maze[x][y] = 1
        # Move forward in x direction (recursive call)
        if solve_maze(maze, x + 1, y):
            return True
        # Move down in y direction if moving in x direction is not fruitful
        # (recursive call)
        if solve_maze(maze, x, y + 1):
            return True
        # no option for rat to move, un-mark the cell and backtrack
        solution_maze[x][y] = 0
        return False
    # (x, y) is blocked or out of bounds
    return False
# Demo entry point: solve a fixed 4x4 maze (1 = open cell, 0 = wall).
if __name__ == "__main__":
    demo_maze = [
        [1, 0, 0, 0],
        [1, 1, 0, 1],
        [1, 0, 0, 0],
        [1, 1, 1, 1],
    ]
    check_if_solution_exists(demo_maze)
| 25.757576 | 69 | 0.647059 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 871 | 0.512353 |
1a67b90817fbafe39b6a3a57235954cc4307f058 | 1,014 | py | Python | locate_cell.py | NCBI-Hackathons/Cells2Image | 96fa7e4b3df63b6fecc415412693040bb59ba8d1 | [
"MIT"
] | null | null | null | locate_cell.py | NCBI-Hackathons/Cells2Image | 96fa7e4b3df63b6fecc415412693040bb59ba8d1 | [
"MIT"
] | 1 | 2018-03-21T15:16:40.000Z | 2018-03-21T17:59:01.000Z | locate_cell.py | NCBI-Hackathons/Cells2Image | 96fa7e4b3df63b6fecc415412693040bb59ba8d1 | [
"MIT"
] | 3 | 2018-03-19T16:22:42.000Z | 2018-03-20T16:45:27.000Z | import matplotlib.pyplot as plt
import numpy as np
from scipy import ndimage
import time
import skimage.draw
import image_data
import image_processing as ip
# Script entry point (Python 2: note the `print ef` statement below).
# For every frame of every movie: locate the food-vacuole centroid in
# channel 1, then display a polar-unwrapped patch of channel 0 around it.
if __name__ == "__main__":
    movgen = image_data.all_movies()
    for em,movie in enumerate(movgen):
        framegen = image_data.all_frames(movie)
        for ef,frame in enumerate(framegen):
            #imgplot = plt.imshow(frame[1,:,:])
            #imgplot.set_cmap('gray')
            # Blank mask matching one channel of the frame.
            # NOTE(review): assumes frame is (channel, y, x) — confirm with image_data.
            com_out = np.zeros([frame.shape[1],frame.shape[2]])
            com, labels, numlabels = ip.find_food_vacuole_centroid(frame[1,:,:])
            #com_out[labels==1] = 1
            # Draw a radius-10 disc at the centroid into the mask.
            rr,cc = skimage.draw.circle(com[0],com[1],10)
            com_out[rr,cc] = 1
            # Polar transform of the 100x100 window of channel 0 centred on the centroid.
            patch,stuff = ip.topolar(frame[0,int(com[0])-50:int(com[0])+50,int(com[1])-50:int(com[1])+50])
            #plt.imshow(com_out, alpha=0.25, cmap=plt.get_cmap('Oranges'))
            print ef
            plt.imshow(patch)
            plt.draw()
            plt.pause(0.5)
| 28.166667 | 106 | 0.589744 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 167 | 0.164694 |
1a68efd44ff26d3fb4f69d27f40c9050e1b02446 | 875 | py | Python | 03/main.py | ajouellette/advent-of-code21 | 910d790ca6ac8c844e277c71d458726ff92a187d | [
"MIT"
] | null | null | null | 03/main.py | ajouellette/advent-of-code21 | 910d790ca6ac8c844e277c71d458726ff92a187d | [
"MIT"
] | null | null | null | 03/main.py | ajouellette/advent-of-code21 | 910d790ca6ac8c844e277c71d458726ff92a187d | [
"MIT"
] | null | null | null | import numpy as np
def bin_to_dec(bin_str):
    """Convert a string of bits (e.g. "1011") to its decimal value.

    Uses the built-in ``int(..., 2)`` instead of the hand-rolled
    powers-of-two loop.  The original returned 0 for an empty string,
    which ``int("", 2)`` would reject, so that case is kept explicitly.
    """
    if not bin_str:
        return 0
    return int(bin_str, 2)
if __name__ == "__main__":
    # Parse the puzzle input: one row of single-digit bits per line.
    with open("input", 'r') as file:
        data = np.asarray([[int(char) for char in line.strip()] for line in file])
    # For every bit position, append the majority bit to eps and the
    # minority bit to gam.
    eps_bits = ''
    gam_bits = ''
    for col in range(len(data[0])):
        ones = np.sum(data[:, col] == 1)
        if ones > len(data) / 2:
            eps_bits += '1'
            gam_bits += '0'
        else:
            eps_bits += '0'
            gam_bits += '1'
    print(eps_bits, gam_bits)
    eps_rate = bin_to_dec(eps_bits)
    gam_rate = bin_to_dec(gam_bits)
    print(eps_rate, gam_rate)
    print(eps_rate * gam_rate)
| 23.026316 | 48 | 0.521143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 78 | 0.089143 |
1a6a468047e8c5ffc11c31806e4527c666198d73 | 5,535 | py | Python | firmware/m5mw.micropython.py | RAWSEQ/M5MouseWheel | 08e89d5e5e1b60eb40aba81a16d015bc48077a89 | [
"MIT"
] | null | null | null | firmware/m5mw.micropython.py | RAWSEQ/M5MouseWheel | 08e89d5e5e1b60eb40aba81a16d015bc48077a89 | [
"MIT"
] | null | null | null | firmware/m5mw.micropython.py | RAWSEQ/M5MouseWheel | 08e89d5e5e1b60eb40aba81a16d015bc48077a89 | [
"MIT"
] | 2 | 2021-05-29T16:19:26.000Z | 2021-09-05T13:24:02.000Z | from m5stack import *
from m5stack_ui import *
from uiflow import *
from ble import ble_uart
import face
# --- Screen and global state -------------------------------------------------
screen = M5Screen()
screen.clean_screen()
screen.set_screen_bg_color(0x000000)
# Button toggle states (None until first press) and wheel bookkeeping.
mb_click = None
rb_click = None
lb_click = None
snd_val = None
st_mode = None
stval = None
prval = None
# Rotary encoder unit on the Faces base.
faces_encode = face.get(face.ENCODE)
# Status line at the bottom of the screen.
direction = M5Label('M5MouseWheel - Please dont touch for processing...', x=0, y=228, color=0xc7c7c7, font=FONT_MONT_12, parent=None)
# Mouse buttons: left / right / middle.
LBtn = M5Btn(text='L', x=170, y=6, w=65, h=100, bg_c=0x000000, text_c=0xbcbcbc, font=FONT_UNICODE_24, parent=None)
RBtn = M5Btn(text='R', x=240, y=6, w=70, h=48, bg_c=0x000000, text_c=0xbebebe, font=FONT_UNICODE_24, parent=None)
d_w_x = M5Btn(text='WX', x=0, y=162, w=48, h=48, bg_c=0x000000, text_c=0xd4d4d4, font=FONT_UNICODE_24, parent=None)
MBtn = M5Btn(text='M', x=240, y=58, w=70, h=48, bg_c=0x000000, text_c=0xbebebe, font=FONT_UNICODE_24, parent=None)
d_w_y = M5Btn(text='WY', x=52, y=162, w=48, h=48, bg_c=0x000000, text_c=0xd4d4d4, font=FONT_UNICODE_24, parent=None)
# Mode selector buttons: STEP multiplier, Y move, scroll, X move.
b_step = M5Btn(text='STEP', x=0, y=6, w=100, h=100, bg_c=0x000000, text_c=0xd4d4d4, font=FONT_UNICODE_24, parent=None)
d_y = M5Btn(text='Y', x=220, y=110, w=100, h=100, bg_c=0x000000, text_c=0xd4d4d4, font=FONT_MONT_48, parent=None)
d_scr = M5Btn(text='SCR', x=0, y=110, w=100, h=48, bg_c=0x000000, text_c=0xd4d4d4, font=FONT_UNICODE_24, parent=None)
d_x = M5Btn(text='X', x=110, y=110, w=100, h=100, bg_c=0x000000, text_c=0xd4d4d4, font=FONT_MONT_48, parent=None)
# Current step multiplier display.
v_step = M5Label('1', x=121, y=38, color=0xc7c7c7, font=FONT_MONT_24, parent=None)
# Announce the newly selected mode over BLE and on the status line.
def changeMode():
    """Zero the wheel accumulator and send/display the '<mode>0' message."""
    global mb_click, lb_click, rb_click, snd_val, st_mode, stval, prval
    snd_val = 0
    message = str(st_mode) + str(snd_val)
    uart_ble.write(message)
    direction.set_text(message)
# Clear the current mode and un-highlight every mode button.
def resetMode():
    """Reset st_mode and restore all mode buttons to a black background."""
    global mb_click, lb_click, rb_click, snd_val, st_mode, stval, prval
    st_mode = ''
    for mode_button in (b_step, d_y, d_scr, d_w_x, d_w_y, d_x):
        mode_button.set_bg_color(0x000000)
def MBtn_pressed():
    """Toggle the middle mouse button state and report it over BLE."""
    global mb_click, lb_click, rb_click, snd_val, st_mode, stval, prval
    # First press (state None) counts as "not pressed", so it toggles to 1.
    mb_click = 1 if mb_click != 1 else 0
    message = 'M' + str(mb_click)
    uart_ble.write(message)
    MBtn.set_bg_color(0x666666 if mb_click == 1 else 0x000000)
    direction.set_text(message)
MBtn.pressed(MBtn_pressed)
def LBtn_pressed():
    """Toggle the left mouse button state and report it over BLE."""
    global mb_click, lb_click, rb_click, snd_val, st_mode, stval, prval
    # First press (state None) counts as "not pressed", so it toggles to 1.
    lb_click = 1 if lb_click != 1 else 0
    message = 'L' + str(lb_click)
    uart_ble.write(message)
    LBtn.set_bg_color(0x666666 if lb_click == 1 else 0x000000)
    direction.set_text(message)
LBtn.pressed(LBtn_pressed)
def RBtn_pressed():
    """Toggle the right mouse button state and report it over BLE."""
    global mb_click, lb_click, rb_click, snd_val, st_mode, stval, prval
    # First press (state None) counts as "not pressed", so it toggles to 1.
    rb_click = 1 if rb_click != 1 else 0
    message = 'R' + str(rb_click)
    uart_ble.write(message)
    RBtn.set_bg_color(0x666666 if rb_click == 1 else 0x000000)
    direction.set_text(message)
RBtn.pressed(RBtn_pressed)
def b_step_pressed():
    """Switch the encoder into STEP mode (adjusts the step multiplier)."""
    global mb_click, lb_click, rb_click, snd_val, st_mode, stval, prval
    if st_mode == 'T':
        return
    resetMode()
    st_mode = 'T'
    b_step.set_bg_color(0x666666)
    faces_encode.setLed(0, 0xffffff)  # white LED marks STEP mode
    changeMode()
b_step.pressed(b_step_pressed)
def d_scr_pressed():
    """Switch the encoder into scroll ('S') mode."""
    global mb_click, lb_click, rb_click, snd_val, st_mode, stval, prval
    if st_mode == 'S':
        return
    resetMode()
    st_mode = 'S'
    d_scr.set_bg_color(0x666666)
    faces_encode.setLed(0, 0xff9900)  # orange LED marks scroll mode
    changeMode()
d_scr.pressed(d_scr_pressed)
def d_x_pressed():
    """Switch the encoder into pointer X-movement mode."""
    global mb_click, lb_click, rb_click, snd_val, st_mode, stval, prval
    if st_mode == 'X':
        return
    resetMode()
    st_mode = 'X'
    d_x.set_bg_color(0x666666)
    faces_encode.setLed(0, 0xff0000)  # red LED marks X mode
    changeMode()
d_x.pressed(d_x_pressed)
def d_y_pressed():
    """Switch the encoder into pointer Y-movement mode."""
    global mb_click, lb_click, rb_click, snd_val, st_mode, stval, prval
    if st_mode == 'Y':
        return
    resetMode()
    st_mode = 'Y'
    d_y.set_bg_color(0x666666)
    faces_encode.setLed(0, 0x3333ff)  # blue LED marks Y mode
    changeMode()
d_y.pressed(d_y_pressed)
def d_w_x_pressed():
    """Switch the encoder into horizontal wheel ('U') mode."""
    global mb_click, lb_click, rb_click, snd_val, st_mode, stval, prval
    if st_mode == 'U':
        return
    resetMode()
    st_mode = 'U'
    d_w_x.set_bg_color(0x666666)
    faces_encode.setLed(0, 0x33ff33)  # green LED marks wheel-X mode
    changeMode()
d_w_x.pressed(d_w_x_pressed)
def d_w_y_pressed():
    """Switch the encoder into vertical wheel ('V') mode."""
    global mb_click, lb_click, rb_click, snd_val, st_mode, stval, prval
    if st_mode == 'V':
        return
    resetMode()
    st_mode = 'V'
    d_w_y.set_bg_color(0x666666)
    faces_encode.setLed(0, 0x00cccc)  # cyan LED marks wheel-Y mode
    changeMode()
d_w_y.pressed(d_w_y_pressed)
# --- Startup: default to scroll ('S') mode and open the BLE UART -------------
resetMode()
uart_ble = ble_uart.init('m5mw_01')
stval = 1
st_mode = 'S'
prval = faces_encode.getValue()
snd_val = 0
d_scr.set_bg_color(0x666666)
faces_encode.setLed(0, 0xff9900)
uart_ble.write((str(st_mode) + str(str(snd_val))))
direction.set_text(str((str(st_mode) + str(str(snd_val)))))
# --- Polling loop: translate encoder rotation into mode-specific deltas ------
# NOTE(review): faces_encode.getValue() is read several times per iteration;
# a fast-turning encoder could change between reads and drop ticks — consider
# caching one read per iteration.
while True:
    if (faces_encode.getValue()) != prval:
        if st_mode == 'T':
            # STEP mode: rotation adjusts the step multiplier itself.
            stval = stval + ((faces_encode.getValue()) - prval)
            v_step.set_text(str(stval))
        else:
            # Other modes: accumulate (delta * step) and send '<mode><value>'.
            snd_val = snd_val + ((faces_encode.getValue()) - prval) * stval
            uart_ble.write((str(st_mode) + str(str(snd_val))))
            direction.set_text(str((str(st_mode) + str(str(snd_val)))))
    prval = faces_encode.getValue()
    wait_ms(2)
| 31.271186 | 133 | 0.70551 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 185 | 0.033424 |
1a6b813a27738acc87f6c6871852f93cc42f2b00 | 6,250 | py | Python | python/cvi_toolkit/test/test_model.py | sophgo/tpu_compiler | 6299ea0a3adae1e5c206bcb9bedf225d16e636db | [
"Apache-2.0"
] | 3 | 2022-03-14T11:47:20.000Z | 2022-03-16T01:45:37.000Z | python/cvi_toolkit/test/test_model.py | sophgo/tpu_compiler | 6299ea0a3adae1e5c206bcb9bedf225d16e636db | [
"Apache-2.0"
] | null | null | null | python/cvi_toolkit/test/test_model.py | sophgo/tpu_compiler | 6299ea0a3adae1e5c206bcb9bedf225d16e636db | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
# -*- coding: utf-8 -*-
import os
import shutil
import argparse
import subprocess
import numpy as np
import contextlib
import onnx
from cvi_toolkit.utils.mlir_shell import *
from cvi_toolkit.utils.intermediate_file import IntermediateFile
@contextlib.contextmanager
def pushd(new_dir):
    """Context manager: chdir into *new_dir*, restoring the previous
    working directory on exit (even when the body raises)."""
    saved_dir = os.getcwd()
    os.chdir(new_dir)
    try:
        yield
    finally:
        os.chdir(saved_dir)
class ModelTest(object):
    """Drives the model_transform.py / model_deploy.py CLI pipeline for a
    single ONNX or MLIR model, with random inputs and a synthetic
    calibration table for int8."""
    # This is done by parsing the Unicode list of confusable characters.
    def __init__(self, chip, model_path, batch_size):
        # chip: target chip name passed to model_deploy.py (e.g. cv182x).
        self.chip = chip
        # Path to the .onnx or .mlir model definition.
        self.model_path = model_path
        # Batch size substituted for dynamic (<= 0) leading dims.
        self.batch_size = batch_size
        # Derived artifact names share the model's base name.
        self.model_name = os.path.split(model_path)[-1].split(".")[0]
        self.fp32_mlir = self.model_name + ".mlir"
        self.cvimodel = self.model_name + ".cvimodel"
        self.input_path = "./input.npz"
    def __make_test_calibration_table__(self, table_name):
        # NOTE(review): dunder-style name is unconventional; a single
        # leading underscore would be the usual "private" marker.
        """Write a crude calibration table: one clamped max-abs threshold
        per tensor produced by an fp32 interpreter run."""
        blobs_interp_npz = IntermediateFile(self.model_name, 'full_precision_interp.npz', False)
        ret = mlir_inference(self.fp32_mlir, self.input_path, None, str(blobs_interp_npz))
        if ret != 0:
            raise RuntimeError("{} mlir inference failed".format(self.model_path))
        tensors = np.load(str(blobs_interp_npz))
        with open(table_name, "w") as f:
            for name in tensors:
                # Clamp the threshold into a sane (0, 127] range; NaN
                # (e.g. all-NaN tensor) falls back to 10.0.
                threshold = np.abs(np.max(tensors[name]))
                if np.isnan(threshold):
                    threshold = 10.0
                elif threshold >= 127.0:
                    threshold = 127.0
                elif threshold <= 0.001:
                    threshold = 1.0
                else:
                    pass
                f.write("{} {}\n".format(name, threshold))
    def run(self, quant_mode, input=None):
        """Transform the model to fp32 MLIR, then deploy it with the given
        quantization mode ('bf16', 'mix_bf16' or 'int8').

        NOTE(review): the `input` parameter is unused and shadows the
        builtin of the same name.
        """
        if self.model_path.endswith(".onnx"):
            onnx_model = onnx.load(self.model_path)
            input_nodes = onnx_model.graph.input
            self.__gen_onnx_input__(input_nodes)
            transform_cmd = [
                'model_transform.py', '--model_type', 'onnx', '--model_name', self.model_name, '--model_def', self.model_path,
                '--image', self.input_path, '--net_input_dims', '1,100', '--tolerance', '0.99,0.99,0.99', '--mlir',
                self.fp32_mlir
            ]
            subprocess.run(transform_cmd)
        elif self.model_path.endswith(".mlir"):
            tmp_mlir_file = IntermediateFile(self.model_name, 'fp32.mlir.tmp', False)
            op_info_csv = IntermediateFile(self.model_name, 'op_info.csv', True)
            # NOTE(review): the return code of mlir_pseudo_weight is
            # immediately overwritten and never checked.
            ret = mlir_pseudo_weight(self.model_path, str(tmp_mlir_file))
            ret = mlir_opt(str(tmp_mlir_file), self.fp32_mlir, str(op_info_csv))
            if ret != 0:
                raise RuntimeError("{} opt failed".format(self.model_path))
        if quant_mode in ['bf16', 'mix_bf16']:
            deploy_cmd = [
                'model_deploy.py', '--model_name', self.model_name, '--mlir', self.fp32_mlir, '--quantize',
                quant_mode.upper(), '--chip', self.chip, '--image', self.input_path, '--inputs_type', 'SAME',
                '--outputs_type', 'SAME', '--tolerance', '0.99,0.99,0.87', '--correctness', '0.99,0.99,0.95', '--debug',
                '--cvimodel', self.cvimodel
            ]
        elif "int8" == quant_mode:
            # simple cali and convert to cvimodel
            table_file = IntermediateFile(self.model_name, 'calibration_table', True)
            self.__make_test_calibration_table__(str(table_file))
            deploy_cmd = [
                'model_deploy.py', '--model_name', self.model_name, '--mlir', self.fp32_mlir, '--calibration_table',
                str(table_file), '--chip', self.chip, '--image', self.input_path, '--inputs_type', 'SAME',
                '--outputs_type', 'SAME', '--tolerance', '0.10,0.10,0.1', '--correctness', '0.99,0.99,0.93', '--debug',
                '--cvimodel', self.cvimodel
            ]
        else:
            raise ValueError("Now just support bf16/int8")
        subprocess.run(deploy_cmd)
    def __gen_onnx_input__(self, input_nodes):
        """Generate random inputs matching the ONNX graph's input specs and
        save them to input.npz (consumed via self.input_path)."""
        self.input_data = {}
        for input in input_nodes:
            input_shape = []
            for i, dim in enumerate(input.type.tensor_type.shape.dim):
                # A non-positive leading dim means a dynamic batch: use the
                # requested batch_size instead (unless batch_size == 0).
                if i == 0 and dim.dim_value <= 0 and self.batch_size != 0:
                    input_shape.append(self.batch_size)
                else:
                    input_shape.append(dim.dim_value)
            if 1 == input.type.tensor_type.elem_type: # 1 for np.float32
                self.input_data[input.name] = np.random.randn(*input_shape).astype(np.float32)
                # self.input_data[input.name] = np.random.uniform(1, 6, input_shape).astype(np.float32)
            elif 7 == input.type.tensor_type.elem_type: # 7 for np.int64 / torch.long
                self.input_data[input.name] = np.random.randint(0, 3, input_shape).astype(np.int64)
            elif 9 == input.type.tensor_type.elem_type: # 9 for boolean
                self.input_data[input.name] = np.random.randint(0, 2, input_shape).astype(np.float32)
            else:
                raise ValueError("Not support now, add here")
        np.savez("input.npz", **self.input_data)
if __name__ == '__main__':
    # Command-line driver: copy the model into a fresh working directory
    # and run the transform/deploy pipeline there.
    parser = argparse.ArgumentParser()
    parser.add_argument("--model", help="model definition file, mlir or onnx")
    parser.add_argument("--quantize", choices=['bf16', 'int8', 'mix_bf16'], default="bf16", help="quant mode")
    parser.add_argument("--batch_size", type=int, default=1, help="batch size")
    parser.add_argument("--chip", type=str, default="cv182x", help="chip type")
    parser.add_argument("--out_dir", type=str, default="tmp", help="out folder")
    # parser.add_argument("--excepts", default='-', help="excepts")
    # parser.add_argument("--graph", action='store_true', help="generate graph to pb file")
    args = parser.parse_args()
    # Always start from an empty output directory.
    if os.path.exists(args.out_dir):
        shutil.rmtree(args.out_dir)
    os.makedirs(args.out_dir)
    model_filename = os.path.basename(args.model)
    shutil.copy(args.model, os.path.join(args.out_dir, model_filename))
    with pushd(args.out_dir):
        tester = ModelTest(args.chip, model_filename, args.batch_size)
        tester.run(args.quantize)
| 45.955882 | 126 | 0.60368 | 4,770 | 0.7632 | 139 | 0.02224 | 166 | 0.02656 | 0 | 0 | 1,359 | 0.21744 |
1a6d2d07b82c65f1c0ee7a25477d2386875f5077 | 12,181 | py | Python | main.py | FSlowkey/_csmentor_ | 19db2e43a9418df9cd999cdeaa5845b4e9b721c8 | [
"MIT"
] | null | null | null | main.py | FSlowkey/_csmentor_ | 19db2e43a9418df9cd999cdeaa5845b4e9b721c8 | [
"MIT"
] | null | null | null | main.py | FSlowkey/_csmentor_ | 19db2e43a9418df9cd999cdeaa5845b4e9b721c8 | [
"MIT"
] | null | null | null | import os
import webapp2
import data
import datetime
from google.appengine.ext.webapp import template
from google.appengine.api import users
from google.appengine.api import images
from google.appengine.api import blobstore
from google.appengine.ext.webapp import blobstore_handlers
from google.appengine.ext import ndb
# email stuff
from google.appengine.api import app_identity
from google.appengine.api import mail
import datetime
# FUNCTION
def render_template(handler, file_name, template_values):
    """Render templates/<file_name> with template_values into the handler's response."""
    template_path = os.path.join(os.path.dirname(__file__), 'templates/', file_name)
    handler.response.out.write(template.render(template_path, template_values))
def get_user_email():
    """Return the signed-in user's email address, or None when nobody is
    logged in.  (A leftover debug print of the user object was removed.)"""
    user = users.get_current_user()
    if user:
        return user.email()
    return None
def get_template_parameters():
    """Build the common template context shared by every page.

    Logged-in users get their role flags, logout URL and email; anonymous
    visitors get a login URL.  Both receive a blobstore upload URL.
    """
    email = get_user_email()
    params = {'upload_url': blobstore.create_upload_url('/profile-save')}
    if email:
        params['learner'] = data.is_learner(email)
        params['expert'] = data.is_expert(email)
        params['logout_url'] = users.create_logout_url('/')
        params['user'] = email
    else:
        params['login_url'] = users.create_login_url('/welcome')
    return params
class MainHandler(webapp2.RequestHandler):
    """GET /: render the landing page."""
    def get(self):
        # (An unused `email = get_user_email()` lookup was removed;
        # get_template_parameters() already resolves the current user.)
        values = get_template_parameters()
        render_template(self, 'mainpage.html', values)
#PROFILE SETTING CODE STARS HERE
class DefineHandler(webapp2.RequestHandler):
    """GET /set-profile: ask whether the user is a learner or an expert."""
    def get(self):
        render_template(self, 'areyouor.html', get_template_parameters())
class SaveDefineHandler(webapp2.RequestHandler):
    """POST /definition: record whether the user is a learner or an expert."""
    def post(self):
        # (Removed a leftover debug print and a stray response write that
        # preceded the redirect.)
        email = get_user_email()
        data.save_email(email)
        defineStat = self.request.get('defineStat')
        # Default both flags to False so an unexpected form value no
        # longer raises NameError at the define_stat() call below.
        learnerStat = False
        expertStat = False
        if defineStat == "isLearner":
            learnerStat = True
        elif defineStat == "isExpert":
            expertStat = True
        data.define_stat(email, learnerStat, expertStat)
        self.redirect('/edit-profile-student')
#PROFILE SAVING CODE STARTS HERE
class EditProfileHandler(webapp2.RequestHandler):
    """GET /edit-profile-student: show the profile editing form."""
    def get(self):
        render_template(self, 'edit-profile-student.html', get_template_parameters())
#IMAGE SAVING CODE STARTS HERE
class SaveProfileHandler(blobstore_handlers.BlobstoreUploadHandler):
    # POST /profile-save (blobstore upload callback): store the profile
    # fields plus the uploaded picture, then redirect to the feed.
    # NOTE(review): when the user is logged out or the upload is not one of
    # the accepted image types, the handler silently returns with no
    # response and no redirect — the orphaned blob is also never deleted.
    def post(self):
        # NOTE(review): `values` and `defineStat` are computed but unused;
        # `type` shadows the builtin; `name` is fetched twice.
        values = get_template_parameters()
        if get_user_email():
            upload_files = self.get_uploads()
            blob_info = upload_files[0]
            type = blob_info.content_type
            defineStat = self.request.get('defineStat')
            email = get_user_email()
            name = self.request.get('name')
            biography = self.request.get('biography')
            location =self.request.get('cityhidden')
            if type in ['image/jpeg', 'image/png', 'image/gif', 'image/webp']:
                name= self.request.get('name')
                data.save_profile(email, name, biography, location, blob_info.key())
                self.redirect('/my-feed')
class ImageHandler(webapp2.RequestHandler):
    # GET /image?id=<urlsafe-key>: render the feed page with a 150px
    # cropped serving URL for the entity's stored image.
    def get(self):
        values = get_template_parameters()
        image_id=self.request.get('id')
        # NOTE(review): no validation — a missing/garbage id raises here.
        my_image = ndb.Key(urlsafe=image_id).get()
        values['image_id'] = image_id
        values['image_url'] = images.get_serving_url(
            my_image.image, size=150, crop=True
        )
        values['image_name'] = my_image.name
        values['biography'] = self.request.get('biography')
        render_template(self, 'profilefeed.html', values)
class ViewPhotoHandler(blobstore_handlers.BlobstoreDownloadHandler):
    """GET /profilepic?id=<urlsafe-key>: stream a user's profile picture
    out of the blobstore."""
    def get(self):
        profile_key = self.request.get('id')
        user_profile = ndb.Key(urlsafe=profile_key).get()
        self.send_blob(user_profile.profile_pic)
class ImageManipulationHandler(webapp2.RequestHandler):
    # GET /img?id=<key>&height=&width=&fit=&opt=&flip=&mirror=&rotate=:
    # apply the requested GAE Images transforms and return the result.
    def get(self):
        image_id = self.request.get("id")
        my_image = ndb.Key(urlsafe=image_id).get()
        blob_key = my_image.image
        img = images.Image(blob_key=blob_key)
        # NOTE(review): leftover debug print.
        print(img)
        modified = False
        h = self.request.get('height')
        w = self.request.get('width')
        fit = False
        if self.request.get('fit'):
            fit = True
        # Resize only when both dimensions were supplied.
        if h and w:
            img.resize(width=int(w), height=int(h), crop_to_fit=fit)
            modified = True
        optimize = self.request.get('opt')
        if optimize:
            img.im_feeling_lucky()
            modified = True
        flip = self.request.get('flip')
        if flip:
            img.vertical_flip()
            modified = True
        mirror = self.request.get('mirror')
        if mirror:
            img.horizontal_flip()
            modified = True
        rotate = self.request.get('rotate')
        if rotate:
            img.rotate(int(rotate))
            modified = True
        # NOTE(review): `result` is computed but never used, and the code
        # below applies im_feeling_lucky unconditionally and calls
        # execute_transforms a second time — the response likely differs
        # from the transforms the query parameters requested.  Also the
        # Content-Type says image/png while the encoding is JPEG.
        result = img
        if modified:
            result = img.execute_transforms(output_encoding=images.JPEG)
        print("about to render image")
        img.im_feeling_lucky()
        self.response.headers['Content-Type'] = 'image/png'
        self.response.out.write(img.execute_transforms(output_encoding=images.JPEG))
#IMAGE MANIPULATION CODE ENDS HERE
#FEED CONTROLLER STARTS HERE
def InterestsMatch(userExpert):
    """Return True when the current user and *userExpert* share at least
    one enabled interest, False otherwise.

    (An unused counter variable was removed.)
    """
    current_user_interests = data.get_user_interests(get_user_email())
    expert_user_interests = data.get_user_interests(userExpert.email)
    for interest in current_user_interests:
        if current_user_interests[interest] and expert_user_interests[interest]:
            return True
    return False
class FeedHandler(webapp2.RequestHandler):
    # GET /my-feed: show the user's profile, their events, and the experts
    # in the same location who share at least one interest.
    def get(self):
        p = get_user_email()
        if p:
            values = get_template_parameters()
            profile = data.get_user_profile(p)
            neededlocation = profile.location
            values['image_url'] = '/profilepic?id=' + profile.key.urlsafe()
            # Only experts in the same location with a shared interest.
            expert_profiles = data.get_expert_profiles(neededlocation)
            expert_list = []
            for expert_profile in expert_profiles:
                if InterestsMatch(expert_profile):
                    expert_profile.keyUrl = expert_profile.key.urlsafe()
                    expert_list.append(expert_profile)
            values['available_experts'] = expert_list
            # NOTE(review): this loop overwrites values['expimg'] each
            # iteration, so only the LAST expert's image URL survives —
            # probably intended to be per-expert (see keyUrl above).
            for expert in values['available_experts']:
                values['expimg']='/profilepic?id=' + expert.key.urlsafe()
            values['events'] = []
            # NOTE(review): re-fetches the profile already loaded above.
            events_key_list = data.get_user_profile(get_user_email()).events_list
            for events_key in events_key_list:
                event = events_key.get()
                values['events'].append(event)
            values['name'] = profile.name
            values['location'] = profile.location
            values['biography'] = profile.biography
            values['interests']= profile.interests
            render_template(self, 'profilefeed.html', values)
        else:
            self.redirect('/')
#FEED CONTROLLER ENDS HERE
#PROFILE SAVING CODE ENDS HERE
#INTERESTS CODE STARTS HERE
class SaveInterestsHandler(webapp2.RequestHandler):
    # POST /interests-save: for every known interest key, mark it enabled
    # when the form posted a field of the same name, then persist.
    def post(self):
        # NOTE(review): `interests` is fetched but never used, and the two
        # print() calls look like leftover debugging.
        interests = self.request.get('interests')
        values = get_template_parameters()
        values['interests'] = data.get_user_interests(get_user_email())
        for key in values['interests']:
            enabled = self.request.get(key)
            print(enabled)
            # Checkbox convention: the posted value equals the field name.
            if enabled == key:
                values['interests'][key]=True
            else:
                values['interests'][key]=False
        new_interests = values['interests']
        data.save_interests(get_user_email(), new_interests)
        print(new_interests)
        self.redirect('/my-feed')
class EditInterestsHandler(webapp2.RequestHandler):
    # GET /interests: render the interest-selection page, listing the
    # user's stored interests when any exist.
    def get(self):
        values = get_template_parameters()
        if get_user_email():
            if data.get_user_interests(get_user_email()):
                values['interests'] = data.get_user_interests(get_user_email())
                print(values['interests'])
                values['interests']= values['interests'].items()
                render_template(self, 'interest.html', values)
            else:
                # NOTE(review): this default dict is built but never passed
                # to the template (`values` is rendered unchanged), so the
                # fallback list of interests is dead code.
                interests={
                    "Java":False,
                    "Python":False,
                    "JavaScript":False,
                    "HTML":False,
                    "CSS":False,
                    "C#":False,
                    "Industry Insight":False,
                    "Internships and Experience":False,
                    "AI":False,
                    "Machine Learning":False,
                }
                render_template(self, 'interest.html', values)
#INTERESTS CODE ENDS HERE
#VIEWING EXPERT PROFILE CODE STARTS HERE
class ExpertProfileViewHandler(webapp2.RequestHandler):
    # GET /p/<name>: show an expert's public profile page (Python 2 —
    # note the print statements below).
    def get(self, name):
        values = get_template_parameters()
        profile = data.get_user_profile(data.get_user_email_by_name(name))
        # NOTE(review): leftover debug prints.
        print ">>>>Profile:"
        print profile
        # NOTE(review): nothing is rendered when the profile is missing —
        # the client gets an empty 200 response.
        if profile:
            values['image_url'] = '/profilepic?id=' + profile.key.urlsafe()
            values['profileid'] = profile.key.urlsafe()
            values['name'] = profile.name
            values['biography'] = profile.biography
            values['location'] = profile.location
            values['profile_pic'] = profile.profile_pic
            # Interests shown are the VIEWER's, paired for the template.
            values['interests'] = data.get_user_interests(get_user_email())
            values['interests'] = values['interests'].items()
            values['email'] = get_user_email()
            values['events'] = []
            events_key_list = profile.events_list
            for events_key in events_key_list:
                event = events_key.get()
                values['events'].append(event)
            render_template(self, 'expert-from-student.html', values)
class SendMailHandler(webapp2.RequestHandler):
    # POST /send-mail: forward a message from the current user to the
    # selected expert via the App Engine mail API.
    def post(self):
        values = get_template_parameters()
        subject = "Hi! you have a new message from Hyperlink: " + self.request.get('subject')
        body = get_user_email() + " sent you: " + self.request.get('body')
        profile_id = self.request.get('profileid')
        profile = data.get_profile_by_id(profile_id)
        # App Engine requires an appspotmail.com (or app admin) sender.
        sender_address = 'NoReply@cssi-chat-2.appspotmail.com'
        mail.send_mail(sender_address, profile.email, subject, body)
        render_template(self, 'profilefeed.html', values)
class SaveEventHandler(webapp2.RequestHandler):
    """POST /create_event: store a new event for the current user.

    (A leftover ``print("hello")`` debug statement was removed.)
    """
    def post(self):
        email = get_user_email()
        name = self.request.get('name')
        description = self.request.get('description')
        cap = self.request.get('cap')
        # The form sends an ISO date (yyyy-mm-dd).
        date = datetime.datetime.strptime(self.request.get('date'), "%Y-%m-%d")
        data.save_event(email, name, date, description, cap)
        self.redirect('/my-feed')
class SetUserHandler(webapp2.RequestHandler):
    """GET /welcome: route a freshly signed-in user to their feed, or to
    profile setup when no role has been recorded yet."""
    def get(self):
        # (A get_template_parameters() call whose result was discarded was
        # removed — it only made needless RPCs.)
        email = get_user_email()
        if data.is_learner(email) or data.is_expert(email):
            print('EMAIL REC.')
            self.redirect('/my-feed')
        else:
            print('EMAIL UNREC.')
            self.redirect('/set-profile')
# URL routing table.  Order matters: the catch-all '/.*' route must stay
# last so that specific routes are matched first.
app = webapp2.WSGIApplication([
    ('/welcome', SetUserHandler),
    ('/set-profile', DefineHandler),
    ('/definition', SaveDefineHandler),
    ('/edit-profile-student', EditProfileHandler),
    ('/profile-save', SaveProfileHandler),
    ('/image', ImageHandler),
    ('/my-feed', FeedHandler),
    ('/interests', EditInterestsHandler),
    ('/interests-save', SaveInterestsHandler),
    ('/p/(.*)', ExpertProfileViewHandler),
    ('/send-mail', SendMailHandler),
    ('/img', ImageManipulationHandler),
    ('/create_event', SaveEventHandler),
    ('/profilepic', ViewPhotoHandler),
    ('/.*', MainHandler)
])
| 33.372603 | 93 | 0.628848 | 9,411 | 0.772597 | 0 | 0 | 0 | 0 | 0 | 0 | 1,983 | 0.162795 |
1a6e2ebd0bdf9701fe0279dfebacee08eeaaec3c | 500 | py | Python | tests/class/alias02.py | ktok07b6/polyphony | 657c5c7440520db6b4985970bd50547407693ac4 | [
"MIT"
] | 83 | 2015-11-30T09:59:13.000Z | 2021-08-03T09:12:28.000Z | tests/class/alias02.py | jesseclin/polyphony | 657c5c7440520db6b4985970bd50547407693ac4 | [
"MIT"
] | 4 | 2017-02-10T01:43:11.000Z | 2020-07-14T03:52:25.000Z | tests/class/alias02.py | jesseclin/polyphony | 657c5c7440520db6b4985970bd50547407693ac4 | [
"MIT"
] | 11 | 2016-11-18T14:39:15.000Z | 2021-02-23T10:05:20.000Z | from polyphony import testbench
class C:
    # Simple value holder; alias02() mutates .x to observe aliasing.
    def __init__(self, x):
        self.x = x
class D:
    # Holds a reference to a C instance; alias02() re-binds .c to check
    # that reads through d.c follow the current referent.
    def __init__(self, c):
        self.c = c
def alias02(x):
    # Polyphony aliasing test: verifies that attribute reads through d.c
    # track both re-binding of d.c and in-place mutation of the aliased
    # object.  Kept structurally as-is because the polyphony compiler
    # accepts only a restricted Python subset.
    c0 = C(x)
    c1 = C(x*x)
    d = D(c0)
    result0 = d.c.x == x
    d.c = c1
    result1 = d.c.x == x*x
    # Mutating c1 must be visible through the alias d.c.
    c1.x = 0
    result2 = d.c.x == 0
    d.c = c0
    result3 = d.c.x == x
    return result0 and result1 and result2 and result3
# Hardware testbench entry point: every assertion must hold for the
# sampled inputs both in simulation and after HLS synthesis.
@testbench
def test():
    assert alias02(1)
    assert alias02(2)
    assert alias02(3)
test()
| 15.625 | 54 | 0.54 | 108 | 0.216 | 0 | 0 | 88 | 0.176 | 0 | 0 | 0 | 0 |
1a6f01cf5ba5e8856303cd40d4810d936d1cb8f1 | 7,775 | py | Python | text_scrambler/text_scrambler.py | GuillaumeLNB/text-scrambler | 3c46b0cb5c516bdb7c93d0ac3e94595870a432b3 | [
"MIT"
] | null | null | null | text_scrambler/text_scrambler.py | GuillaumeLNB/text-scrambler | 3c46b0cb5c516bdb7c93d0ac3e94595870a432b3 | [
"MIT"
] | null | null | null | text_scrambler/text_scrambler.py | GuillaumeLNB/text-scrambler | 3c46b0cb5c516bdb7c93d0ac3e94595870a432b3 | [
"MIT"
] | null | null | null | import inspect
import os
import sys
from random import choice
from typing import List
__author__ = "GLNB"
__copyright__ = "GLNB"
__license__ = "mit"
# Support running both as a package module and as a plain script.
try:
    from .dictionaries import invisible_chars, dict_latin
except ImportError:
    from dictionaries import invisible_chars, dict_latin
# Directory containing this module; used to locate the bundled data files.
__location__ = os.path.join(
    os.getcwd(), os.path.dirname(inspect.getfile(inspect.currentframe()))
)
class Scrambler:
# This is done by parsing the Unicode list of confusable characters.
"""
.. code:: python
>>> from text_scrambler import Scrambler
>>> scr = Scrambler()
>>> text = "This is an example"
>>> text_1 = scr.scramble(text, level=1)
>>> #############
>>> # adding only zwj/zwnj characters
>>> print(text, text_1, sep="\\n")
This is an example
This is an example
>>> assert text != text_1
>>> print(len(text), len(text_1))
18 35
>>> # though the texts look similar, the second one has more characters
>>> #############
>>> text_2 = scr.scramble(text, level=2)
>>> # replacing some latin letters by their cyrillic/greek equivalent
>>> print(text_2)
Тhiѕ iѕ an ехаmple
>>> for char, char_2 in zip(text, text_2):
... if char != char_2:
... print(char, char_2)
...
T Т
s ѕ
s ѕ
e е
x х
a а
>>> #############
>>> text_3 = scr.scramble(text, level=3)
>>> # adding zwj/zwnj characters and replacing latin letters
>>> print(text_3)
Thіs iѕ аn eхаmple
>>> print(text, text_3, sep="\\n")
This is an example
Thіs iѕ аn eхаmple
>>> assert text_3 != text
>>> #############
>>> text_4 = scr.scramble(text, level=4)
>>> # replacing all characters by any unicode looking like character
>>> print(text_4)
⊤𝒽𝐢𝘴 𝘪𝙨 𝞪ռ 𝙚⨯𝚊mρ𝟙ҽ
>>> #
>>> # generating several versions
>>> versions = scr.generate(text, 10, level=4)
>>> for txt in versions:
... print(txt)
...
𝕋𝗵𝕚𝔰 𝙞ѕ ɑ𝗇 ꬲ𝗑𝒂m𝛠Ⲓ𝚎
𝔗һ𑣃ƽ ˛ꜱ 𝛼𝐧 𝐞𝖝𝛼m𝜌𝟏ℯ
Th𝓲𝔰 ⅈ𝔰 αn ꬲ⤬αm⍴𞸀e
𝗧𝗵i𝑠 i𝖘 ⍺𝘯 𝗲𝔁аm𝘱𝙸𝔢
⊤𝚑𝑖s ɪ𝚜 𝜶𝑛 𝖾𝘅𝒶m𝛒𝑙𝓮
𝘛h𝙞ꮪ ⅈ𝗌 𝗮𝐧 ꬲᕽ𝓪m𝜌⏽𝓮
𝙏𝕙і𝓈 ıꜱ 𝔞𝕟 𝗲𝕩𝛂mр𐌉𝚎
𝕿Ꮒℹ𝐬 𝗶𝗌 𝛼𝔫 𝗲𝐱𝓪m𝞎𝙡𝖊
⟙h𝜾ꮪ i𝘴 𝝰𝒏 𝙚ᕽ𝗮m𝗽𝗜𝗲
𝖳հ𝒊s 𝕚𝙨 𝖆𝑛 𝘦𝔁аm𝜌𝐈𝗲
>>> versions = scr.generate(text, 1000, level=1)
>>> assert len(versions) == len(set(versions))
>>> # all unique
"""
def __init__(
self,
confusables_file=os.path.join(
__location__, "txt_files", "confusablesSummary.txt"
),
):
# The confusables can be found at:
# https://www.unicode.org/Public/security/13.0.0/confusables.txt
self.confusables_file = confusables_file
self.invisible_chars = invisible_chars
self.dict_latin = dict_latin
self._parse_unicode_file()
def __str__(self):
return self.scramble("<__main__.Scrambler object>", level=4)
__repr__ = __str__
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
if exc_type:
print(f"exc_type: {exc_type}", file=sys.stderr)
print(f"exc_value: {exc_value}", file=sys.stderr)
print(f"exc_traceback: {exc_traceback}", file=sys.stderr)
def _parse_unicode_file(self) -> dict:
"""return a dict of the unicode confusable given
the self.confusables_file"""
self.unicode_dict = {}
file = open(self.confusables_file, encoding="utf-8")
ls_lines_confusable = []
for _ in range(32):
file.readline()
for line in file:
if line.startswith("#"):
ls_lines_confusable.append(line[:-1]) # not taking the \n
file.close()
ls_lines_confusable = ls_lines_confusable[
:-1
] # not taking the last line (total)
for line in ls_lines_confusable:
_, char, *ls_chars = line.split("\t")
if len(char) > 1:
continue
self.unicode_dict[char] = ls_chars
def scramble(self, text: str, level: int = 1) -> str:
"""return the text scrambled
:param text: the text to scramble
:type text: str
:param level: default to 1
:type level: int, optional
**level**:
1: insert non printable characters within the text
2: replace some latin letters to their Greek or Cyrillic equivalent
3: insert non printable characters and change the some latin letters to their Greek or Cyrillic equivalent
4: insert non printable chraracters change all possible letter to a randomly picked unicode letter equivalent
:return: the scrambled string
:rtype: str
"""
if level not in range(1, 5):
raise ValueError(f"level {level} not implemented")
new_text = ""
if level == 1:
for char in text:
new_text += char + choice(self.invisible_chars)
elif level == 2:
for char in text:
new_text += choice(self.dict_latin.get(char, []) + [char])
new_text += " "
elif level == 3:
for char in text:
new_text += choice(self.dict_latin.get(char, []) + [char]) + choice(
self.invisible_chars
)
elif level == 4:
for char in text:
new_text += choice(self.unicode_dict.get(char, []) + [char]) + choice(
self.invisible_chars
)
else:
raise ValueError(f"level '{level}' not implemented")
return new_text[:-1]
def generate(self, text: str, n: int = 1000, level: int = 3) -> List[str]:
    """Return a list containing *n* distinct scrambled versions of *text*.

    :param text: the text to be scrambled
    :type text: str
    :param n: the number of distinct scrambled versions to produce,
        defaults to 1000
    :type n: int, optional
    :param level: the level of the scrambling (see :meth:`scramble`),
        defaults to 3
    :type level: int, optional
    :return: a list of scrambled texts, all different from each other
    :rtype: List[str]

    .. note::
        The loop keeps scrambling until *n* distinct results have been
        collected, so it may run for a long time if *text* admits few
        distinct scrambles at the chosen level.  ``n <= 0`` now returns
        an empty list (the previous implementation looped forever).
    """
    results = []
    # A set gives O(1) duplicate detection; the previous linear scan of
    # the result list made this loop quadratic in n.
    seen = set()
    while len(results) < n:
        candidate = self.scramble(text, level=level)
        if candidate not in seen:
            seen.add(candidate)
            results.append(candidate)
    return results
| 33.951965 | 265 | 0.532862 | 8,512 | 0.95501 | 0 | 0 | 0 | 0 | 0 | 0 | 5,911 | 0.663189 |
1a7483bf107ea0fb77bb68f2d2dcf10700bcb562 | 443 | py | Python | 12_find the output/03_In Python/01_GeeksForGeeks/02_Set two/problem_2.py | Magdyedwar1996/python-level-one-codes | 066086672f43488bc8b32c620b5e2f94cedfe3da | [
"MIT"
] | 1 | 2021-11-16T14:14:38.000Z | 2021-11-16T14:14:38.000Z | 12_find the output/03_In Python/01_GeeksForGeeks/02_Set two/problem_2.py | Magdyedwar1996/python-level-one-codes | 066086672f43488bc8b32c620b5e2f94cedfe3da | [
"MIT"
] | null | null | null | 12_find the output/03_In Python/01_GeeksForGeeks/02_Set two/problem_2.py | Magdyedwar1996/python-level-one-codes | 066086672f43488bc8b32c620b5e2f94cedfe3da | [
"MIT"
] | null | null | null | for i in range(2):
print(i) # print 0 then 1
for i in range(4,6):
print (i) # print 4 then 5
"""
Explanation:
If only single argument is passed to the range method,
Python considers this argument as the end of the range and the default start value of range is 0.
So, it will print all the numbers starting from 0 and before the supplied argument.
For the second for loop the starting value is explicitly supplied as 4 and ending is 5.
""" | 36.916667 | 98 | 0.742664 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 379 | 0.85553 |
1a74c0efdf419694e46cd14eaaab1955587849bb | 1,709 | py | Python | bin/terminology.py | cedzz/python-patterns | 7bd887d83f1081f5f00f199988736e452c2fea74 | [
"MIT"
] | 631 | 2018-01-31T14:55:59.000Z | 2022-03-29T20:19:36.000Z | bin/terminology.py | cedzz/python-patterns | 7bd887d83f1081f5f00f199988736e452c2fea74 | [
"MIT"
] | 11 | 2018-02-06T10:46:57.000Z | 2022-03-18T11:31:31.000Z | bin/terminology.py | cedzz/python-patterns | 7bd887d83f1081f5f00f199988736e452c2fea74 | [
"MIT"
] | 74 | 2018-02-04T08:55:37.000Z | 2022-03-30T20:30:50.000Z | #!/usr/bin/env python3
"""Count the frequency of various phrases, given the path to the Python PEPs.
In Python PEPs, the opposite of “subclass” is almost always “base class” — just remember that the builtin is named super(), not base()! Stats:
216 base class
0 child class
10 derived class
12 parent class
372 subclass
10 super class
44 superclass
"""
import argparse
import os
import re
import sys
# Phrases tallied across the PEP sources; multi-word phrases are matched
# against a space-normalized word stream (see main()).
TERMS = (
    'superclass',
    'super class',
    'subclass',
    'base class',
    'derived class',
    'parent class',
    'child class',
)


def main(argv):
    """Count occurrences of each phrase in TERMS across the PEP sources.

    :param argv: command line arguments (without the program name); the
        single positional argument is the path to a checkout of the
        python/peps repository.

    Prints one ``<count> <term>`` line per term, sorted by term.
    """
    parser = argparse.ArgumentParser(description='PEP terminology counts')
    parser.add_argument('pepsdir', help='path to PEPs repo')
    try:
        args = parser.parse_args(argv)
    except SystemExit:
        print('\nTo checkout the PEPs from version control, git clone:'
              '\nhttps://github.com/python/peps.git', file=sys.stderr)
        raise
    # Collect every reStructuredText / plain-text PEP source in the tree.
    peps = []
    for dirpath, _dirnames, filenames in os.walk(args.pepsdir):
        for filename in filenames:
            if filename.endswith(('.rst', '.txt')):
                peps.append(os.path.join(dirpath, filename))
    counts = {term: 0 for term in TERMS}
    for pep in peps:
        with open(pep, encoding='utf-8') as f:
            content = f.read()
        # Normalize to one space-separated stream of lowercase words so
        # multi-word terms can be matched with plain substring counts.
        text = ' '.join(re.findall(r'\w+', content.lower()))
        for term in TERMS:
            # Count both the singular and the plural ('...es') form.
            singular = text.count(' ' + term + ' ')
            plural = text.count(' ' + term + 'es ')
            counts[term] += singular + plural
    for term in sorted(TERMS):
        print('{:5} {}'.format(counts[term], term))


if __name__ == '__main__':
    main(sys.argv[1:])
| 26.703125 | 142 | 0.59684 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 726 | 0.422339 |
1a7607520d4b5dd399a1eb2353fdc3d3eed8595e | 2,296 | py | Python | sources/tests/test_regionsweep.py | tipech/OverlapGraph | 0aa132802f2e174608ce33c6bfc24ff14551bf4a | [
"MIT"
] | null | null | null | sources/tests/test_regionsweep.py | tipech/OverlapGraph | 0aa132802f2e174608ce33c6bfc24ff14551bf4a | [
"MIT"
] | 1 | 2018-10-07T08:06:01.000Z | 2018-10-07T08:06:01.000Z | sources/tests/test_regionsweep.py | tipech/OverlapGraph | 0aa132802f2e174608ce33c6bfc24ff14551bf4a | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
Unit tests for Generalized One-Pass Sweep-line Algorithm
- test_regionsweep_simple
- test_regionsweep_random
"""
from typing import List
from unittest import TestCase
from sources.algorithms import \
RegionSweep, RegionSweepDebug, RegionSweepOverlaps
from sources.core import \
Region, RegionPair, RegionSet
class TestRegionSweep(TestCase):
  """Unit tests for the generalized one-pass sweep-line algorithm."""

  def _evaluate_regionsweep(self, regions: RegionSet, i: int) -> List[RegionPair]:
    """Run the sweep-line over *regions* along dimension *i* and return
    the reported overlapping pairs."""
    subscribers = []  # add RegionSweepDebug() here for verbose tracing
    evaluator = RegionSweepOverlaps.prepare(regions, *subscribers)
    return evaluator(i)

  def test_regionsweep_simple(self):
    """Sweep results must match the brute-force overlaps on a small fixture."""
    regionset = RegionSet(dimension=2)
    for lower, upper in ([0, 0], [3, 5]), ([3, 1], [5, 5]), ([2, 4], [6, 6]):
      regionset.add(Region(lower, upper))
    for dimension in range(regionset.dimension):
      expected = regionset.overlaps(dimension)
      computed = self._evaluate_regionsweep(regionset, dimension)
      for expected_pair in expected:
        self.assertIn(expected_pair, computed)
      self.assertEqual(len(expected), len(computed))

  def test_regionsweep_random(self):
    """Sweep results must match brute-force overlaps on random 3-D regions
    and agree (up to pair ordering) across all swept dimensions."""
    regionset = RegionSet.from_random(30, Region([0]*3, [100]*3), sizepc=Region([0]*3, [0.5]*3), precision=0)
    per_dimension = []
    for dimension in range(regionset.dimension):
      expected = regionset.overlaps(dimension)
      computed = self._evaluate_regionsweep(regionset, dimension)
      for expected_pair in expected:
        self.assertIn(expected_pair, computed)
      self.assertEqual(len(expected), len(computed))
      per_dimension.append(computed)
    self.assertTrue(all(len(result) for result in per_dimension))
    for pair in per_dimension[0]:
      for d in range(1, regionset.dimension):
        self.assertTrue(pair in per_dimension[d] or (pair[1], pair[0]) in per_dimension[d])
| 36.444444 | 109 | 0.655488 | 1,945 | 0.847125 | 0 | 0 | 0 | 0 | 0 | 0 | 690 | 0.300523 |
1a770b7b2184cec2d0be10a14b82d524840dca04 | 1,566 | py | Python | rotkehlchen/externalapis/bisq_market.py | rotkehlchenio/rotkehlchen | 98f49cd3ed26c641fec03b78eff9fe1872385fbf | [
"BSD-3-Clause"
] | 137 | 2018-03-05T11:53:29.000Z | 2019-11-03T16:38:42.000Z | rotkehlchen/externalapis/bisq_market.py | rotkehlchenio/rotkehlchen | 98f49cd3ed26c641fec03b78eff9fe1872385fbf | [
"BSD-3-Clause"
] | 385 | 2018-03-08T12:43:41.000Z | 2019-11-10T09:15:36.000Z | rotkehlchen/externalapis/bisq_market.py | rotkehlchenio/rotkehlchen | 98f49cd3ed26c641fec03b78eff9fe1872385fbf | [
"BSD-3-Clause"
] | 59 | 2018-03-08T10:08:27.000Z | 2019-10-26T11:30:44.000Z | import json
import requests
from rotkehlchen.assets.asset import Asset
from rotkehlchen.constants.timing import DEFAULT_TIMEOUT_TUPLE
from rotkehlchen.errors.misc import RemoteError
from rotkehlchen.errors.serialization import DeserializationError
from rotkehlchen.history.deserialization import deserialize_price
from rotkehlchen.types import Price
# Ticker endpoint; {symbol} is interpolated with the asset's symbol and the
# quote currency is always BTC.
PRICE_API_URL = 'https://bisq.markets/api/ticker?market={symbol}_BTC'


def get_bisq_market_price(asset: Asset) -> Price:
    """
    Get price for pair at bisq marketplace. Price is returned against BTC.
    Can raise:
    - RemoteError: If the market doesn't exist or the request fails
    - DeserializationError: If the data returned is not a valid price
    """
    url = PRICE_API_URL.format(symbol=asset.symbol)
    # Network failure (DNS, timeout, connection reset, ...) -> RemoteError
    try:
        response = requests.get(url, timeout=DEFAULT_TIMEOUT_TUPLE)
    except requests.exceptions.RequestException as e:
        raise RemoteError(f'bisq.markets request {url} failed due to {str(e)}') from e
    # Non-JSON body -> RemoteError
    try:
        data = response.json()
    except json.decoder.JSONDecodeError as e:
        raise RemoteError(
            f'Failed to read json response from bisq.markets. {response.text}. {str(e)}',
        ) from e
    # The API reports application-level failures via an 'error' key.
    if 'error' in data:
        raise RemoteError(f'Request data from bisq.markets {url} is not valid {data["error"]}')
    # 'last' holds the last traded price for the market.
    try:
        price = data['last']
    except KeyError as e:
        raise DeserializationError(
            f'Response from bisq.markets didnt contain expected key "last". {data}',
        ) from e
    return deserialize_price(price)
| 35.590909 | 95 | 0.713921 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 569 | 0.363346 |
1a7735124d5e69d466b80a312a23be896f940f79 | 505 | py | Python | game.py | pricob/Strategy-Game | df9011b87b6521d1bb156e512eeb120e0b09962e | [
"MIT"
] | null | null | null | game.py | pricob/Strategy-Game | df9011b87b6521d1bb156e512eeb120e0b09962e | [
"MIT"
] | null | null | null | game.py | pricob/Strategy-Game | df9011b87b6521d1bb156e512eeb120e0b09962e | [
"MIT"
] | null | null | null | def game_main():
### IMPORTS ###
import colorama
from colorama import Fore
from engine import engineScript
from engine import clearScript
from os import environ
environ['PYGAME_HIDE_SUPPORT_PROMPT'] = '1'
import pygame
### ENGINE INITIALIZATION ###
settings = ["width", "height"]
engineScript.InitEngine(Fore, settings)
pygame.init()
### PROGRAM TERMINATED ###
clearScript.run()
if __name__ == "__main__":
game_main() | 24.047619 | 48 | 0.625743 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 129 | 0.255446 |
1a77e8d22e2de7b69e6a81de466c1158f6d73dc8 | 1,888 | py | Python | catalyst/contrib/scripts/tests/test_tag2label.py | ferrine/catalyst | b5bc4fb5f692e1fde2d95ef4a534296dccd0f717 | [
"MIT"
] | null | null | null | catalyst/contrib/scripts/tests/test_tag2label.py | ferrine/catalyst | b5bc4fb5f692e1fde2d95ef4a534296dccd0f717 | [
"MIT"
] | null | null | null | catalyst/contrib/scripts/tests/test_tag2label.py | ferrine/catalyst | b5bc4fb5f692e1fde2d95ef4a534296dccd0f717 | [
"MIT"
] | null | null | null | import shutil
from pathlib import Path
from ..tag2label import prepare_df_from_dirs
def prepare_dataset():
    """Create a dummy on-disk dataset layout under ``datasets/``.

    Layout produced (two roots with two label folders each)::

        datasets/root1/act1/{a,b}.txt
        datasets/root1/act2/c.txt
        datasets/root2/act1/d.txt
        datasets/root2/act2/{e,f}.txt

    Any pre-existing ``datasets`` tree is removed first.
    """
    shutil.rmtree('datasets', ignore_errors=True)
    # Data-driven layout replaces the original copy-pasted mkdir/touch calls;
    # the resulting tree is identical.
    layout = {
        'root1': {'act1': ['a.txt', 'b.txt'], 'act2': ['c.txt']},
        'root2': {'act1': ['d.txt'], 'act2': ['e.txt', 'f.txt']},
    }
    for root, labels in layout.items():
        for label, filenames in labels.items():
            folder = Path('datasets') / root / label
            folder.mkdir(parents=True, exist_ok=True)
            for filename in filenames:
                (folder / filename).touch()
def test_prepare_df_from_dirs_one():
    """prepare_df_from_dirs on a single root yields label-relative paths."""
    def check_filepath(f):
        return f.startswith('act1') or f.startswith('act2')

    prepare_dataset()
    df = prepare_df_from_dirs('datasets/root1', 'label')
    assert df.shape[0] == 3
    # BUGFIX: the old `.sum().all()` reduced to a scalar count first, so it
    # passed whenever *any* row matched; `.all()` requires every row to match.
    assert df.filepath.apply(check_filepath).all()
    assert df.label.isin(['act1', 'act2']).all()
    shutil.rmtree('datasets', ignore_errors=True)
def test_prepare_df_from_dirs_multi():
    """prepare_df_from_dirs on several roots yields root-prefixed paths."""
    def check_filepath(f):
        # startswith accepts a tuple of prefixes: one call instead of an
        # `or`-chain.
        return f.startswith(('root1/act1', 'root1/act2',
                             'root2/act1', 'root2/act2'))

    prepare_dataset()
    df = prepare_df_from_dirs(
        'datasets/root1,datasets/root2',
        'label')
    assert df.shape[0] == 6
    # BUGFIX: the old `.sum().all()` reduced to a scalar count first, so it
    # passed whenever *any* row matched; `.all()` requires every row to match.
    assert df.filepath.apply(check_filepath).all()
    assert df.label.isin(['act1', 'act2']).all()
    shutil.rmtree('datasets', ignore_errors=True)
| 26.591549 | 59 | 0.635593 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 420 | 0.222458 |
1a78717f3ade0f1b49b87652920497f50424fe03 | 387 | py | Python | src/elementary_flask/components/general/favicon.py | xaled/flaskly | 2ed66d89e42afba830d6c73c9f70f00d1dcac573 | [
"MIT"
] | null | null | null | src/elementary_flask/components/general/favicon.py | xaled/flaskly | 2ed66d89e42afba830d6c73c9f70f00d1dcac573 | [
"MIT"
] | null | null | null | src/elementary_flask/components/general/favicon.py | xaled/flaskly | 2ed66d89e42afba830d6c73c9f70f00d1dcac573 | [
"MIT"
] | null | null | null | __all__ = ['FavIcon']
from dataclasses import dataclass, field
from html import escape as html_escape
@dataclass
class FavIcon:
    """A favicon ``<link>`` element.

    Attributes:
        href: URL of the icon resource.
        rel: link relation, defaults to ``"icon"``.
        mimetype: MIME type of the icon, defaults to ``"image/x-icon"``.
        rendered: the rendered HTML ``<link>`` tag (computed after init,
            excluded from ``repr``).
    """
    href: str
    rel: str = "icon"
    mimetype: str = "image/x-icon"
    rendered: str = field(init=False, repr=False)

    def __post_init__(self):
        # Escape every interpolated attribute (previously only href was
        # escaped) so quotes/ampersands in any field cannot break out of
        # the attribute markup.  Default values render unchanged.
        rel = html_escape(self.rel)
        mimetype = html_escape(self.mimetype)
        href = html_escape(self.href)
        self.rendered = f'<link rel="{rel}" type="{mimetype}" href="{href}">'
| 25.8 | 105 | 0.674419 | 271 | 0.700258 | 0 | 0 | 282 | 0.728682 | 0 | 0 | 110 | 0.284238 |
1a79df4bedf9addcf7ba1bf876755a8c99a23c61 | 1,102 | py | Python | assets/urls.py | ChanTerelLy/broker-account-analist | a723c83fe9a924905eb0754b4acb1231b31f9c87 | [
"MIT"
] | null | null | null | assets/urls.py | ChanTerelLy/broker-account-analist | a723c83fe9a924905eb0754b4acb1231b31f9c87 | [
"MIT"
] | 11 | 2021-02-21T19:39:41.000Z | 2021-06-13T16:29:47.000Z | assets/urls.py | ChanTerelLy/broker-account-analist | a723c83fe9a924905eb0754b4acb1231b31f9c87 | [
"MIT"
] | 2 | 2021-11-16T16:31:37.000Z | 2022-02-11T02:55:37.000Z | from django.conf.urls import url
from django.contrib.auth.decorators import login_required
from django.urls import path
from django.views.generic import RedirectView
from .views import *
# URL routing table; every view except the callbacks/debug endpoints is
# wrapped in login_required, so an authenticated session is mandatory.
urlpatterns = [
    path('', login_required(assets), name='home'),
    path('assets/', login_required(assets), name='assets'),
    # Data Model views
    path('moex-portfolio/', login_required(MoexPortfolioView.as_view()), name='moex-portfolio'),
    path('transfers/', login_required(TransfersView.as_view()), name='transfers'),
    path('deals/', login_required(DealsView.as_view()), name='deals'),
    path('portfolio/', login_required(ReportPortfolioView.as_view()), name='portfolio'),
    path('coupons-dividends/', login_required(CouponsDividendsView.as_view()), name='coupons-dividends'),
    # Moex operations
    path('update-bounds/', login_required(update_bounds), name='update-bounds'),
    path('corp-bounds/', login_required(CorpBounView.as_view()), name='corp-bounds'),
    # other
    path('google-callback/', google_callback, name='google-callback'),
    # NOTE(review): unauthenticated and unnamed; presumably a Sentry
    # error-reporting smoke test -- confirm it is intentional in production.
    path('sentry-debug/', trigger_error),
]
1a7a75323af52a5c4b5c6794853199c02e971141 | 616 | py | Python | chapter4/chapter4_pydantic_types_01.py | GoodMonsters/Building-Data-Science-Applications-with-FastAPI | d2218d225c5b93723ecf46c19619ed5d3f2473e6 | [
"MIT"
] | 107 | 2021-03-26T20:18:51.000Z | 2022-03-26T03:38:08.000Z | chapter4/chapter4_pydantic_types_01.py | GoodMonsters/Building-Data-Science-Applications-with-FastAPI | d2218d225c5b93723ecf46c19619ed5d3f2473e6 | [
"MIT"
] | 4 | 2021-06-09T08:48:21.000Z | 2021-12-27T09:04:43.000Z | chapter4/chapter4_pydantic_types_01.py | GoodMonsters/Building-Data-Science-Applications-with-FastAPI | d2218d225c5b93723ecf46c19619ed5d3f2473e6 | [
"MIT"
] | 58 | 2021-03-12T20:51:19.000Z | 2022-03-27T15:49:49.000Z | from pydantic import BaseModel, EmailStr, HttpUrl, ValidationError
class User(BaseModel):
    """Pydantic model with validated email and URL fields."""
    email: EmailStr
    website: HttpUrl

# Invalid email: 'jdoe' is not a valid address, so construction raises.
try:
    User(email="jdoe", website="https://www.example.com")
except ValidationError as e:
    print(str(e))
# Invalid URL: 'jdoe' has no scheme/host, so construction raises.
try:
    User(email="jdoe@example.com", website="jdoe")
except ValidationError as e:
    print(str(e))
# Valid: both fields pass validation and the model is created.
user = User(email="jdoe@example.com", website="https://www.example.com")
# email='jdoe@example.com' website=HttpUrl('https://www.example.com', scheme='https', host='www.example.com', tld='com', host_type='domain')
print(user)
| 22.814815 | 140 | 0.704545 | 63 | 0.102273 | 0 | 0 | 0 | 0 | 0 | 0 | 273 | 0.443182 |
1a7c20d2c7403ba77a9361f7048f0edc3fee0e49 | 226 | py | Python | webware/Tests/TestSessions/Transaction.py | PeaceWorksTechnologySolutions/w4py3 | 7f9e7088034e3e3ac53158edfa4f377b5b2f45fe | [
"MIT"
] | 11 | 2020-10-18T07:33:56.000Z | 2021-09-27T21:03:38.000Z | webware/Tests/TestSessions/Transaction.py | PeaceWorksTechnologySolutions/w4py3 | 7f9e7088034e3e3ac53158edfa4f377b5b2f45fe | [
"MIT"
] | 9 | 2020-01-03T18:58:25.000Z | 2020-01-09T18:36:23.000Z | webware/Tests/TestSessions/Transaction.py | PeaceWorksTechnologySolutions/w4py3 | 7f9e7088034e3e3ac53158edfa4f377b5b2f45fe | [
"MIT"
] | 4 | 2020-06-30T09:41:56.000Z | 2021-02-20T13:48:08.000Z | """"Mock Webware Transaction class."""
from .Application import Application
class Transaction:
    """Mock of the Webware Transaction object used by the session tests."""

    def __init__(self):
        # Each transaction owns a single mock Application instance.
        self._application = Application()

    def application(self):
        """Return the Application associated with this transaction."""
        return self._application
| 17.384615 | 41 | 0.69469 | 146 | 0.646018 | 0 | 0 | 0 | 0 | 0 | 0 | 38 | 0.168142 |
1a7dedfc90a55341231451d1bcca08ff107c5213 | 2,517 | py | Python | src/KENN2/layers/Kenn.py | DanieleAlessandro/KENN2 | 949620ca1734e85239115954b88370393d97a42e | [
"BSD-3-Clause"
] | 6 | 2020-12-23T11:56:14.000Z | 2022-02-10T10:28:09.000Z | src/KENN2/layers/Kenn.py | DanieleAlessandro/KENN2 | 949620ca1734e85239115954b88370393d97a42e | [
"BSD-3-Clause"
] | null | null | null | src/KENN2/layers/Kenn.py | DanieleAlessandro/KENN2 | 949620ca1734e85239115954b88370393d97a42e | [
"BSD-3-Clause"
] | 1 | 2021-03-20T16:18:34.000Z | 2021-03-20T16:18:34.000Z | import tensorflow as tf
from KENN2.layers.residual.KnowledgeEnhancer import KnowledgeEnhancer
class Kenn(tf.keras.layers.Layer):
    """Knowledge Enhanced Neural Network layer.

    Wraps a ``KnowledgeEnhancer`` that computes residual corrections
    ("deltas") to the predicates' pre-activations so the given logical
    clauses are better satisfied, then applies ``activation``.
    """

    def __init__(self, predicates, clauses, activation=lambda x: x, initial_clause_weight=0.5, save_training_data=False, **kwargs):
        """Initialize the knowledge base.
        :param predicates: a list of predicate names
        :param clauses: a list of constraints. Each constraint is a string on the form:
        clause_weight:clause
        The clause_weight should be either a real number (in such a case this value is fixed) or an underscore
        (in this case the weight will be a tensorflow variable and learned during training).
        The clause must be represented as a list of literals separated by commas (that represent disjunctions).
        Negation must be specified by adding the letter 'n' before the predicate name.
        An example:
        _:nDog,Animal
        """
        super(Kenn, self).__init__(**kwargs)
        self.predicates = predicates
        self.clauses = clauses
        self.activation = activation
        self.initial_clause_weight = initial_clause_weight
        self.save_training_data = save_training_data
        # Created lazily in build() once the input shape is known.
        self.knowledge_enhancer = None

    def build(self, input_shape):
        """Build the layer
        :param input_shape: the input shape
        """
        self.knowledge_enhancer = KnowledgeEnhancer(
            self.predicates, self.clauses, self.initial_clause_weight, self.save_training_data)
        super(Kenn, self).build(input_shape)

    def call(self, inputs, **kwargs):
        """Improve the satisfaction level of a set of clauses.
        :param inputs: the tensor containing predicates' pre-activation values for many entities
        :return: final preactivations; when ``save_training_data`` is set,
            a tuple ``(preactivations, deltas_list)`` instead
        """
        if self.save_training_data:
            deltas, deltas_list = self.knowledge_enhancer(inputs)
            return self.activation(inputs + deltas), deltas_list
        else:
            deltas = self.knowledge_enhancer(inputs)
            return self.activation(inputs + deltas)

    def get_config(self):
        """Return the layer configuration for Keras serialization.

        NOTE(review): 'activation' is stored as the raw callable; it is
        only serializable if Keras can handle that callable -- confirm.
        """
        config = super(Kenn, self).get_config()
        config.update({'predicates': self.predicates})
        config.update({'clauses': self.clauses})
        config.update({'activation': self.activation})
        config.update({'initial_clause_weight': self.initial_clause_weight})
        # config['output_size'] = # say self. _output_size if you store the argument in __init__
        return config
| 38.136364 | 131 | 0.680572 | 2,420 | 0.961462 | 0 | 0 | 0 | 0 | 0 | 0 | 1,080 | 0.429082 |
1a7fc96b729905953b1c7215ffb1a13a615d4713 | 518 | py | Python | babysteps/6.combine_strings.py | mvoltz/realpython | 622d700721d8475b1e81964d14c781e7936d120f | [
"BSD-2-Clause"
] | null | null | null | babysteps/6.combine_strings.py | mvoltz/realpython | 622d700721d8475b1e81964d14c781e7936d120f | [
"BSD-2-Clause"
] | null | null | null | babysteps/6.combine_strings.py | mvoltz/realpython | 622d700721d8475b1e81964d14c781e7936d120f | [
"BSD-2-Clause"
] | null | null | null | # called concatenation sometimes..
# String concatenation demo.  Note str1 appears twice on purpose, matching
# the song lyric: "abra, abra, cadabra...".
str1 = 'abra, '
str2 = 'cadabra. '
str3 = 'i wanna reach out and grab ya.'
combo = ''.join((str1, str1, str2, str3))
print(combo)
# print() also accepts several arguments and joins them with a single
# space (its default sep) -- which is why the '\n' arguments below end up
# surrounded by spaces on lines 2, 3 and 4 of the output.
print('I heat up', '\n', "I can't cool down", '\n', 'my life is spinning', '\n', 'round and round')
print('not sure why the space for lines 2,3,4 above.', '\n', "i guess there's more to learn... :)")
| 25.9 | 99 | 0.660232 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 403 | 0.777992 |
1a806f8b240d5ea2934fa21a668326f3e4f866cb | 14,756 | py | Python | lake/modules/strg_RAM.py | StanfordAHA/Lake | 34df001db107e1a0824b7fdb05b9f2145bf49a3e | [
"BSD-3-Clause"
] | null | null | null | lake/modules/strg_RAM.py | StanfordAHA/Lake | 34df001db107e1a0824b7fdb05b9f2145bf49a3e | [
"BSD-3-Clause"
] | null | null | null | lake/modules/strg_RAM.py | StanfordAHA/Lake | 34df001db107e1a0824b7fdb05b9f2145bf49a3e | [
"BSD-3-Clause"
] | null | null | null | from lake.top.memory_interface import MemoryPort, MemoryPortType
from lake.top.memory_controller import MemoryController
from kratos import *
from lake.attributes.config_reg_attr import ConfigRegAttr
from lake.passes.passes import lift_config_reg
from lake.modules.reg_fifo import RegFIFO
import kratos as kts
class StrgRAM(MemoryController):
'''
Storage RAM
Does ROM/RAM from storage
'''
def __init__(self,
data_width=16,
banks=1,
memory_width=64,
memory_depth=512,
num_tiles=1,
rw_same_cycle=False, # Same as separate addresses
read_delay=1,
addr_width=16,
prioritize_write=True):
super().__init__("strg_ram", debug=True)
# Generation parameters
self.banks = banks
self.data_width = data_width
self.memory_width = memory_width
self.memory_depth = memory_depth
self.num_tiles = num_tiles
self.rw_same_cycle = rw_same_cycle
self.read_delay = read_delay
self.addr_width = addr_width
self.fw_int = int(self.memory_width / self.data_width)
self.prioritize_write = prioritize_write
self.bank_width = clog2(self.banks)
self.word_width = max(1, clog2(self.fw_int))
self.mem_addr_width = clog2(self.num_tiles * self.memory_depth)
self.b_a_off = clog2(self.fw_int) + clog2(self.num_tiles * self.memory_depth)
# assert banks > 1 or rw_same_cycle is True or self.fw_int > 1, \
# "Can't sustain throughput with this setup. Need potential bandwidth for " + \
# "1 write and 1 read in a cycle - try using more banks or a macro that supports 1R1W"
# Clock and Reset
self._clk = self.clock("clk")
self._rst_n = self.reset("rst_n")
# Inputs + Outputs
self._wen = self.input("wen", 1)
self._ren = self.input("ren", 1)
self._data_in = self.input("data_in", self.data_width)
self._wr_addr_in = self.input("wr_addr_in", self.addr_width)
self._rd_addr_in = self.input("rd_addr_in", self.addr_width)
self._wr_addr = self.var("wr_addr", self.addr_width)
self._rd_addr = self.var("rd_addr", self.addr_width)
# Separate addressing...
if self.rw_same_cycle:
self.wire(self._wr_addr, self._wr_addr_in)
self.wire(self._rd_addr, self._rd_addr_in)
# Use the wr addr for both in this case...
else:
self.wire(self._wr_addr, self._wr_addr_in)
self.wire(self._rd_addr, self._wr_addr_in)
self._data_out = self.output("data_out", self.data_width)
self._valid_out = self.output("valid_out", 1)
# get relevant signals from the storage banks
self._data_from_strg = self.input("data_from_strg", self.data_width,
size=(self.banks,
self.fw_int),
explicit_array=True,
packed=True)
self._wen_addr = self.var("wen_addr", self.addr_width,
size=self.banks,
explicit_array=True,
packed=True)
self._ren_addr = self.var("ren_addr", self.addr_width,
size=self.banks,
explicit_array=True,
packed=True)
self._data_to_strg = self.output("data_to_strg", self.data_width,
size=(self.banks,
self.fw_int),
explicit_array=True,
packed=True)
self._wen_to_strg = self.output("wen_to_strg", self.banks)
self._ren_to_strg = self.output("ren_to_strg", self.banks)
self._addr_out = self.output("addr_out", self.mem_addr_width,
size=self.banks,
packed=True,
explicit_array=True)
self._rd_bank = self.var("rd_bank", max(1, clog2(self.banks)))
self.set_read_bank()
self._rd_valid = self.var("rd_valid", 1)
self.set_read_valid()
if self.fw_int == 1:
self.wire(self._valid_out, self._rd_valid)
# Fetch width of 1 is simpler...
if self.fw_int == 1:
# Set data to storage
if self.banks == 1:
self.wire(self._wen_to_strg, self._wen)
self.wire(self._ren_to_strg, self._ren)
self.wire(self._data_to_strg[0], self._data_in)
self.wire(self._addr_out[0],
kts.ternary(self._wen_to_strg[0],
self._wr_addr[self.mem_addr_width - 1, 0],
self._rd_addr[self.mem_addr_width - 1, 0]))
else:
for i in range(self.banks):
self.wire(self._data_to_strg[i], self._data_in)
self.add_code(self.decode_wen, idx=i)
self.add_code(self.decode_ren, idx=i)
self.wire(self._addr_out[i],
kts.ternary(self._wen_to_strg[i],
self._wr_addr[self.mem_addr_width - 1, 0],
self._rd_addr[self.mem_addr_width - 1, 0]))
self.wire(self._data_out, self._data_from_strg[self._rd_bank])
elif self.read_delay == 1:
self._data_to_write = self.var("data_to_write", self.data_width)
self._addr_to_write = self.var("addr_to_write", self.addr_width)
self.add_code(self.set_dat_to_write)
self.add_code(self.set_addr_to_write)
self._write_gate = self.var("write_gate", 1)
self._read_gate = self.var("read_gate", 1)
self._data_combined = self.var("data_combined", self.data_width,
size=self.fw_int,
explicit_array=True,
packed=True)
for i in range(self.banks):
self.wire(self._data_to_strg[i], self._data_combined)
# read-modify-write implies we need to stall upstream
self._ready = self.output("ready", 1)
# Otherwise implement the state machine for read-modify-write
self.rmw_fsm = self.add_fsm("r_w_seq", reset_high=False)
IDLE = self.rmw_fsm.add_state("IDLE")
READ = self.rmw_fsm.add_state("READ")
MODIFY = self.rmw_fsm.add_state("MODIFY")
DEFAULT = self.rmw_fsm.add_state("_DEFAULT")
self.rmw_fsm.output(self._ready)
self.rmw_fsm.output(self._valid_out)
self.rmw_fsm.output(self._data_out)
self.rmw_fsm.output(self._write_gate)
self.rmw_fsm.output(self._read_gate)
# In IDLE we go to a read state if reading, and modify state
# if writing....
IDLE.next(IDLE, ~(self._wen) & ~(self._ren))
IDLE.next(READ, self._ren & ~self._wen)
IDLE.next(MODIFY, self._wen)
# OUT
IDLE.output(self._ready, 1)
IDLE.output(self._valid_out, 0)
IDLE.output(self._data_out, 0)
IDLE.output(self._write_gate, 0)
IDLE.output(self._read_gate, 1)
# In READ, we effectively use the same transitions as IDLE
READ.next(IDLE, ~self._wen & ~self._ren)
READ.next(READ, self._ren & ~self._wen)
READ.next(MODIFY, self._wen)
# OUT
READ.output(self._ready, 1)
READ.output(self._valid_out, 1)
READ.output(self._data_out,
self._data_from_strg[self._rd_bank][self._addr_to_write[self.word_width - 1, 0]])
READ.output(self._write_gate, 0)
READ.output(self._read_gate, 1)
# In MODIFY we always go back to idle
MODIFY.next(IDLE, const(1, 1))
MODIFY.output(self._ready, 0)
MODIFY.output(self._valid_out, 0)
MODIFY.output(self._data_out, 0)
MODIFY.output(self._write_gate, 1)
MODIFY.output(self._read_gate, 0)
# In DEFAULT we always stick in DEFAULT because it's over...
DEFAULT.next(DEFAULT, const(1, 1))
DEFAULT.output(self._ready, 0)
DEFAULT.output(self._valid_out, 0)
DEFAULT.output(self._data_out, 0)
DEFAULT.output(self._write_gate, 0)
DEFAULT.output(self._read_gate, 0)
self.rmw_fsm.set_start_state(IDLE)
if self.banks == 1:
self.wire(self._ren_to_strg, (self._wen | self._ren) & self._read_gate)
self.wire(self._wen_to_strg, self._write_gate)
else:
for i in range(self.banks):
self.add_code(self.set_wen_rmw, idx=i)
self.add_code(self.set_ren_rmw, idx=i)
for i in range(self.banks):
self.add_code(self.set_addr_rmw, idx=i)
for i in range(self.fw_int):
self.add_code(self.set_data_combined, idx=i)
# If read delay is 0, we can rmw in the same cycle (TIMING?)
else:
assert self.read_delay == 0
raise NotImplementedError
self.base_ports = [[None]]
rw_port = MemoryPort(MemoryPortType.READWRITE)
rw_port_intf = rw_port.get_port_interface()
rw_port_intf['data_in'] = self._data_to_strg
rw_port_intf['data_out'] = self._data_from_strg
rw_port_intf['write_addr'] = self._addr_out
rw_port_intf['write_enable'] = self._wen_to_strg
rw_port_intf['read_addr'] = self._addr_out
rw_port_intf['read_enable'] = self._ren_to_strg
rw_port.annotate_port_signals()
self.base_ports[0][0] = rw_port
def set_read_bank(self):
if self.banks == 1:
self.wire(self._rd_bank, kts.const(0, 1))
else:
# The read bank is comb if no delay, otherwise delayed
if self.read_delay == 1:
@always_ff((posedge, "clk"), (negedge, "rst_n"))
def read_bank_ff(self):
if ~self._rst_n:
self._rd_bank = 0
else:
self._rd_bank = \
self._rd_addr[self.b_a_off + self.bank_width - 1, self.b_a_off]
self.add_code(read_bank_ff)
else:
@always_comb
def read_bank_comb(self):
self._rd_bank = \
self._rd_addr[self.b_a_off + self.bank_width - 1, self.b_a_off]
self.add_code(read_bank_comb)
def set_read_valid(self):
# The read bank is comb if no delay, otherwise delayed
if self.read_delay == 1:
if self.rw_same_cycle:
@always_ff((posedge, "clk"), (negedge, "rst_n"))
def read_valid_ff(self):
if ~self._rst_n:
self._rd_valid = 0
else:
# Don't need write priority if both go at once
self._rd_valid = self._ren
self.add_code(read_valid_ff)
else:
@always_ff((posedge, "clk"), (negedge, "rst_n"))
def read_valid_ff(self):
if ~self._rst_n:
self._rd_valid = 0
else:
# Assumes write priority
self._rd_valid = self._ren & ~self._wen
self.add_code(read_valid_ff)
else:
if self.rw_same_cycle:
self.wire(self._rd_valid, self._ren)
else:
self.wire(self._rd_valid, self._ren & ~self._wen)
@always_ff((posedge, "clk"), (negedge, "rst_n"))
def set_dat_to_write(self):
if ~self._rst_n:
self._data_to_write = 0
else:
self._data_to_write = self._data_in
@always_ff((posedge, "clk"), (negedge, "rst_n"))
def set_addr_to_write(self):
if ~self._rst_n:
self._addr_to_write = 0
else:
self._addr_to_write = self._wr_addr
@always_comb
def decode_wen(self, idx):
self._wen_to_strg[idx] = \
self._wen & (self._wr_addr[self.b_a_off + self.bank_width - 1, self.b_a_off] == idx)
@always_comb
def decode_ren(self, idx):
self._ren_to_strg[idx] = \
self._ren & (self._rd_addr[self.b_a_off + self.bank_width - 1, self.b_a_off] == idx)
@always_comb
def set_ren_rmw(self, idx):
self._ren_to_strg[idx] = \
((self._wen | self._ren) & self._read_gate &
(self._rd_addr[self.b_a_off + self.bank_width - 1, self.b_a_off] == idx))
@always_comb
def set_wen_rmw(self, idx):
self._wen_to_strg[idx] = \
self._write_gate & (self._addr_to_write[self.b_a_off + self.bank_width - 1, self.b_a_off] == idx)
@always_comb
def set_addr_rmw(self, idx):
self._addr_out[idx] = self._rd_addr[self.mem_addr_width + self.word_width - 1, self.word_width]
# If we are performing the write
if self._wen & ~self._write_gate:
self._addr_out[idx] = self._wr_addr[self.mem_addr_width + self.word_width - 1, self.word_width]
elif self._write_gate:
self._addr_out[idx] = \
self._addr_to_write[self.mem_addr_width + self.word_width - 1, self.word_width]
@always_comb
def set_data_combined(self, idx):
# If the word matches the index, use the data to write
# (replace the word)
if self._addr_to_write[self.word_width - 1, 0] == idx:
self._data_combined[idx] = self._data_to_write
# Otherwise keep the data
else:
self._data_combined[idx] = self._data_from_strg[self._rd_bank][idx]
    def get_memory_ports(self):
        # Expose the base port list so the parent generator can wire up the
        # physical memories.
        return self.base_ports
def get_bitstream(self, config_json):
config = []
return config
# raise NotImplementedError
def get_config_mode_str(self):
return "ROM"
# Script entry point: elaborate a default StrgRAM instance and emit
# SystemVerilog, lifting config registers to the top level.
if __name__ == "__main__":
    stg_dut = StrgRAM()
    verilog(stg_dut, filename="strg_ram.sv",
            additional_passes={"lift config regs": lift_config_reg})
| 41.683616 | 109 | 0.553131 | 14,276 | 0.967471 | 0 | 0 | 3,196 | 0.21659 | 0 | 0 | 1,798 | 0.121849 |
1a82bd2a3228a557a4e93765d69c4bc3cf1313d3 | 3,781 | py | Python | mdot_rest/migrations/0002_auto_20150722_2054.py | uw-it-aca/mdot-rest | 3f5aa88ae2ac9693f283b8843ac8998b10dc7bb8 | [
"Apache-2.0"
] | null | null | null | mdot_rest/migrations/0002_auto_20150722_2054.py | uw-it-aca/mdot-rest | 3f5aa88ae2ac9693f283b8843ac8998b10dc7bb8 | [
"Apache-2.0"
] | 67 | 2015-07-23T23:22:14.000Z | 2022-02-04T21:39:43.000Z | mdot_rest/migrations/0002_auto_20150722_2054.py | uw-it-aca/mdot-rest | 3f5aa88ae2ac9693f283b8843ac8998b10dc7bb8 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated schema migration for mdot_rest.

    Introduces the IntendedAudience and Resource models, strips the old
    flat fields off ResourceLink, and re-models ResourceLink as a typed
    (Android/iOS/Web/Windows Phone) link related to Resource.
    Operation order matters: fields are removed before the replacement
    fields and relations are added.
    """
    dependencies = [
        ('mdot_rest', '0001_initial'),
    ]
    operations = [
        # New models introduced by this migration.
        migrations.CreateModel(
            name='IntendedAudience',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=30)),
                ('slug', models.SlugField(max_length=30)),
            ],
        ),
        migrations.CreateModel(
            name='Resource',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=60)),
                ('slug', models.SlugField(max_length=60)),
                ('feature_desc', models.CharField(max_length=120)),
                ('featured', models.BooleanField(default=False)),
                ('accessible', models.BooleanField(default=False)),
                ('responsive_web', models.BooleanField(default=False)),
                ('created_date', models.DateTimeField(auto_now_add=True)),
                ('last_modified', models.DateTimeField(auto_now=True)),
            ],
        ),
        # Drop the old flat ResourceLink fields (now modeled on Resource).
        migrations.RemoveField(
            model_name='resourcelink',
            name='Google_Play_url',
        ),
        migrations.RemoveField(
            model_name='resourcelink',
            name='Windows_Store_url',
        ),
        migrations.RemoveField(
            model_name='resourcelink',
            name='created_date',
        ),
        migrations.RemoveField(
            model_name='resourcelink',
            name='feature_desc',
        ),
        migrations.RemoveField(
            model_name='resourcelink',
            name='iTunes_url',
        ),
        migrations.RemoveField(
            model_name='resourcelink',
            name='last_modified',
        ),
        migrations.RemoveField(
            model_name='resourcelink',
            name='name',
        ),
        migrations.RemoveField(
            model_name='resourcelink',
            name='short_desc',
        ),
        migrations.RemoveField(
            model_name='resourcelink',
            name='support_url',
        ),
        migrations.RemoveField(
            model_name='resourcelink',
            name='web_url',
        ),
        # Re-model ResourceLink as one typed link (type + slug + title + url).
        migrations.AddField(
            model_name='resourcelink',
            name='link_type',
            field=models.CharField(default='WEB', max_length=3, choices=[(b'AND', b'Android'), (b'IOS', b'iOS'), (b'WEB', b'Web'), (b'WIP', b'Windows Phone')]),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='resourcelink',
            name='slug',
            field=models.SlugField(default='default_slug', max_length=60),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='resourcelink',
            name='title',
            field=models.CharField(default='default_title', max_length=60),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='resourcelink',
            name='url',
            field=models.URLField(default='default_url'),
            preserve_default=False,
        ),
        # Many-to-many relations back to Resource.
        migrations.AddField(
            model_name='intendedaudience',
            name='resource',
            field=models.ManyToManyField(to='mdot_rest.Resource'),
        ),
        migrations.AddField(
            model_name='resourcelink',
            name='resource',
            field=models.ManyToManyField(to='mdot_rest.Resource'),
        ),
    ]
| 34.063063 | 160 | 0.545887 | 3,672 | 0.971172 | 0 | 0 | 0 | 0 | 0 | 0 | 754 | 0.199418 |
1a854c8245d49247df7634f7b623ae8c8ca9bc39 | 1,145 | py | Python | TEMP_PYTHON/lruCache.py | Tianyi6679/mincemeatpy | d0003ffb419417ba49413289ff3ec7bce17423d6 | [
"MIT"
] | 3 | 2019-11-05T00:18:33.000Z | 2019-11-05T09:13:38.000Z | map-reduce with caching on HDFS/TEMP_PYTHON/lruCache.py | Tianyi6679/lightweight-MapReduce | d0003ffb419417ba49413289ff3ec7bce17423d6 | [
"MIT"
] | null | null | null | map-reduce with caching on HDFS/TEMP_PYTHON/lruCache.py | Tianyi6679/lightweight-MapReduce | d0003ffb419417ba49413289ff3ec7bce17423d6 | [
"MIT"
] | 1 | 2020-02-16T04:58:46.000Z | 2020-02-16T04:58:46.000Z | from functools4 import lru_cache
import pickle
import copy
class CacheData:
    """Plain container for a snapshot of an lru_cache's internal state.

    Bundles the cache dictionary, the circular-list root node and the
    hit/full bookkeeping values so they can be pickled as one object.
    """

    def __init__(self, cache=None, root=None, hit=None, full=None):
        # Every field is stored verbatim; all default to None.
        self.cache, self.root = cache, root
        self.hit, self.full = hit, full
# @lru_cache(maxsize=16)
# def fib(n):
# return n
#
# for x in range(16):
# fib(x)
# print(fib.cache_info())
#
# for x in range(16):
# fib(x)
# print(fib.cache_info())
#
# result=fib.get_cache_dictionary()
# result_1=fib.get_cache_1()
# result_2=fib.get_cache_2()
# result_3=fib.get_cache_3()
#
# cacheData= CacheData(result,result_1,result_2,result_3)
#
#
# file = open('important', 'wb')
#
# #dump information to that file
# pickle.dump(cacheData, file)
#
# # close the file
# file.close()
#
# var1=copy.deepcopy(result)
# var2=copy.deepcopy(result_1)
# var3=copy.deepcopy(result_2)
# var4=copy.deepcopy(result_3)
#
#
#
# print(var1)
# print(var2)
# print(result_2)
# print(result_3)
# fib.set_cache_dictionary(var1,var2,result_2,result_3)
# #print(var1 )
#
#
# result10=fib.get_cache_dictionary()
#
# #print(fib.cache_info())
# print(result10)
#for x in range(16):
# fib(x)
#print(fib.cache_info())
| 16.357143 | 63 | 0.684716 | 164 | 0.143231 | 0 | 0 | 0 | 0 | 0 | 0 | 861 | 0.751965 |
1a856cc906b1b3e3c1fa8cb307bd1067018e1f8e | 4,761 | py | Python | sandbox/addcoord.py | cmrajan/pygslib | acdf96d9ec17658f18fe9f078104c6259b479f52 | [
"MIT"
] | 94 | 2015-10-23T20:35:26.000Z | 2022-03-23T08:24:49.000Z | sandbox/addcoord.py | kaufmanno/pygslib | 7fb0c201eba6304b1914cf88a437aa9dc42e7021 | [
"MIT"
] | 55 | 2016-09-19T17:20:46.000Z | 2022-03-20T03:44:01.000Z | sandbox/addcoord.py | kaufmanno/pygslib | 7fb0c201eba6304b1914cf88a437aa9dc42e7021 | [
"MIT"
] | 47 | 2016-03-31T08:17:47.000Z | 2022-03-18T02:35:33.000Z | # -*- coding: utf-8 -*-
#!/usr/bin/env python
# using naming on http://www.gslib.com/gslib_help/programs.html
import subprocess
import copy
import pandas as pd
import pygslib
import numpy as np
import os
__addcoord_par = \
""" Parameters for ADDCOORD
***********************
START OF PARAMETERS:
{datafl} -file with data
{outfl} -file for output
{ireal} -realization number
{nx} {xmn} {xsiz} -nx,xmn,xsiz
{ny} {ymn} {ysiz} -ny,ymn,ysiz
{nz} {zmn} {zsiz} -nz,zmn,zsiz
"""
def addcoord(parameters, gslib_path = None, silent = False):
    """addcoord(parameters, gslib_path = None, silent = False)

    Function to add coordinates to a pygslib grid using the external
    GSLIB ``addcoord`` program.

    Parameters
    ----------
    parameters : dict or pygslib.blockmodel.Blockmodel
        dictionary with parameters or Blockmodel object with full model
        and IJK sorted
    gslib_path : string (default None)
        absolute or relative path to the gslib executable; defaults to
        ``~/gslib/addcoord`` on POSIX and ``c:\\gslib\\addcoord.exe``
        otherwise
    silent: boolean (default False)
        if False the external GSLIB stdout text is printed

    Returns
    -------
    pandas.DataFrame with ADDCOORD output

    Raises
    ------
    NameError
        if the external addcoord program exits with a non-zero status
        (kept as NameError for backward compatibility with callers)

    Notes
    -----
    The dictionary with parameters may be as follows::

            parameters = {
                    'datafl': str or None, # path to the input grid file or None, to use default '_xxx_.in'
                    'outfl' : str or None, # path to the output grid file or None, to use default '_xxx_.out'
                    'ireal' : int, # realization number
                    'nx' : int, # number of rows, cols and levels
                    'ny' : int,
                    'nz' : int,
                    'xmn' : float, # coordinates of the centroid of first/corner block
                    'ymn' : float,
                    'zmn' : float,
                    'xsiz' : float, # grid node separation
                    'ysiz' : float,
                    'zsiz' : float}

    see http://www.gslib.com/gslib_help/addcoord.html for more information
    """
    # Default executable location depends on the operating system.
    if gslib_path is None:
        if os.name == "posix":
            gslib_path = '~/gslib/addcoord'
        else:
            gslib_path = 'c:\\gslib\\addcoord.exe'

    # Check if we use internal files or external and generate files.
    if isinstance(parameters, pygslib.blockmodel.Blockmodel):
        # Build the parameter dictionary from the block model definition.
        # The centroid of the first block is origin + half block size.
        mypar = {}
        mypar['nx'] = parameters.nx
        mypar['ny'] = parameters.ny
        mypar['nz'] = parameters.nz
        mypar['xsiz'] = parameters.dx
        mypar['ysiz'] = parameters.dy
        mypar['zsiz'] = parameters.dz
        mypar['xmn'] = parameters.xorg + parameters.dx/2.
        mypar['ymn'] = parameters.yorg + parameters.dy/2.
        mypar['zmn'] = parameters.zorg + parameters.dz/2.
        mypar['ireal'] = 1
        # The model must be full and IJK-sorted for positional output to match.
        assert ('IJK' in parameters.bmtable.columns)
        assert(all(parameters.bmtable['IJK'].values == np.arange(mypar['nx']*mypar['ny']*mypar['nz'])))
        # Use the internal scratch files for input/output.
        mypar['datafl']='_xxx_.in'
        mypar['outfl'] = None
        # Write a minimal GSLIB grid file containing only the IJK column.
        with open('_xxx_.in',"w") as f:
            f.write('temp grid file nx={}, ny={}, nz={}'.format(mypar['nx'],
                                                                mypar['ny'],
                                                                mypar['nz']) +'\n')
            f.write('1\n')
            f.write('IJK\n')
            np.savetxt(f,parameters.bmtable['IJK'].values,fmt='%d')
    else:
        # Check we have all required fields before touching the dictionary.
        assert (set(['datafl','outfl','ireal','nx','ny','nz','xmn','ymn','zmn','xsiz','ysiz','zsiz']).issubset(parameters))
        # Work on a copy so the caller's dictionary is never mutated.
        mypar = copy.deepcopy(parameters)

    if mypar['outfl'] is None:
        mypar['outfl'] = '_xxx_.out'

    # Prepare the parameter file and save it.
    par = __addcoord_par.format(**mypar)
    print (par)
    fpar ='_xxx_.par'
    with open(fpar,"w") as f:
        f.write(par)

    # Call the external gslib program; this construction can be used in a
    # loop for parallel execution.
    p=subprocess.Popen([gslib_path, fpar],
                       stdout=subprocess.PIPE,
                       stderr=subprocess.PIPE)
    # communicate() waits for the process to terminate, so returncode is set
    # afterwards (the previous extra p.wait() was redundant).
    stdout, stderr = p.communicate()

    if p.returncode!=0:
        raise NameError('gslib addcoord NameError' + str(stderr.decode('utf-8')))

    # Bug fix: the original used `if ~silent:`, and `~` on a bool is bitwise
    # NOT (~True == -2, ~False == -1, both truthy), so output was always
    # printed regardless of the flag. `not silent` honors the parameter.
    if not silent:
        try:
            print (stdout.decode('utf-8'))
        except (UnicodeDecodeError, AttributeError):
            # Best effort: fall back to the raw bytes if decoding fails.
            print (stdout)

    # Return results as a pandas array.
    return pygslib.gslib.read_gslib_file(mypar['outfl'])
| 32.834483 | 123 | 0.543583 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,712 | 0.569628 |
1a87c649fc76316f54ba7bd2a44723ae18e4d1cd | 1,254 | py | Python | disarm_gears/validators/array_validators.py | disarm-platform/disarm-gears | d9f747687e632102a4ef2821b33936daacb01b6c | [
"MIT"
] | null | null | null | disarm_gears/validators/array_validators.py | disarm-platform/disarm-gears | d9f747687e632102a4ef2821b33936daacb01b6c | [
"MIT"
] | 11 | 2019-02-28T00:18:47.000Z | 2020-02-22T20:36:00.000Z | disarm_gears/validators/array_validators.py | disarm-platform/disarm-gears | d9f747687e632102a4ef2821b33936daacb01b6c | [
"MIT"
] | null | null | null | import numpy as np
def validate_1d_array(x, size=None):
    """Assert that *x* is a one-dimensional numpy array, optionally of exact size."""
    assert isinstance(x, np.ndarray), 'Expecting a numpy array.'
    assert x.ndim == 1, 'Expecting a one-dimensional array.'
    # No size requested: nothing more to check.
    if size is None:
        return
    assert x.size == size, 'Array size is different from expected.'
def validate_2d_array(x, n_cols=None, n_rows=None):
    """Assert that *x* is a two-dimensional numpy array with an optional expected shape."""
    assert isinstance(x, np.ndarray), 'Expecting a numpy array.'
    assert x.ndim == 2, 'Expecting a two-dimensional array.'
    # ndim == 2 is guaranteed above, so the shape always unpacks into two values.
    rows, cols = x.shape
    if n_rows is not None:
        assert rows == n_rows, 'Array size is different from expected.'
    if n_cols is not None:
        assert cols == n_cols, 'Number of columns is different from expected.'
def validate_integer_array(x):
    """Assert that every element of *x* is integer-valued (no fractional part)."""
    assert np.all(np.round(x) == x), 'Expecting an array of integers.'
def validate_positive_array(x):
    """Assert that every element of *x* is strictly greater than zero."""
    assert np.all(x > 0), 'Expecting array of positive elements.'
def validate_non_negative_array(x):
    """Assert that no element of *x* is below zero (zero itself is allowed)."""
    assert np.all(x >= 0), 'Expecting array of non-negative elements.'
| 31.35 | 84 | 0.678628 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 599 | 0.477671 |
1a88fb50886f9268470f8318d9b9f7b3031433b6 | 6,629 | py | Python | tests/functional/gcs/test_collections.py | sirosen/globus-sdk-python | 0d4e420f52329ab8f993bfe6f86729fb1ef07570 | [
"ECL-2.0",
"Apache-2.0"
] | 47 | 2016-04-13T21:28:19.000Z | 2022-02-28T18:28:18.000Z | tests/functional/gcs/test_collections.py | sirosen/globus-sdk-python | 0d4e420f52329ab8f993bfe6f86729fb1ef07570 | [
"ECL-2.0",
"Apache-2.0"
] | 314 | 2016-04-12T15:07:32.000Z | 2022-03-14T21:00:50.000Z | tests/functional/gcs/test_collections.py | sirosen/globus-sdk-python | 0d4e420f52329ab8f993bfe6f86729fb1ef07570 | [
"ECL-2.0",
"Apache-2.0"
] | 36 | 2016-06-14T14:05:13.000Z | 2022-02-18T17:20:51.000Z | import pytest
from globus_sdk import GCSAPIError, GuestCollectionDocument, MappedCollectionDocument
from tests.common import get_last_request, register_api_route_fixture_file
def test_get_collection_list(client):
    """Listing collections yields two collection documents with ids and names."""
    register_api_route_fixture_file("gcs", "/collections", "collection_list.json")
    res = client.get_collection_list()
    assert len(list(res)) == 2
    # sanity check some fields
    assert res["DATA_TYPE"] == "result#1.0.0"
    for item in res:
        assert item["DATA_TYPE"] == "collection#1.0.0"
        assert "id" in item
        assert item["id"] in ("{collection_id_1}", "{collection_id_2}")
        assert "display_name" in item
def test_get_collection_list_include_param(client):
    """`include` is omitted by default and serialized as a comma-joined string
    whether passed as a string or an iterable."""
    register_api_route_fixture_file("gcs", "/collections", "collection_list.json")
    client.get_collection_list()
    req = get_last_request()
    assert "include" not in req.params
    client.get_collection_list(include="foo")
    req = get_last_request()
    assert "include" in req.params
    assert req.params["include"] == "foo"
    client.get_collection_list(include="foo,bar")
    req = get_last_request()
    assert "include" in req.params
    assert req.params["include"] == "foo,bar"
    client.get_collection_list(include=("foo", "bar"))
    req = get_last_request()
    assert "include" in req.params
    assert req.params["include"] == "foo,bar"
def test_error_parsing_forbidden(client):
    """A 403 response is raised as GCSAPIError with code/message populated
    and no detail payload."""
    register_api_route_fixture_file(
        "gcs", "/collections", "forbidden_error_data.json", status=403
    )
    with pytest.raises(GCSAPIError) as excinfo:
        client.get_collection_list()
    err = excinfo.value
    assert err.detail is None
    assert err.detail_data_type is None
    assert err.message.startswith("Could not list collections")
    assert err.code == "permission_denied"
def test_get_collection(client):
    """A result-wrapped collection is unwrapped: `data` exposes the inner
    collection document while `full_data` keeps the result envelope."""
    register_api_route_fixture_file(
        "gcs", "/collections/COLLECTION_ID", "get_collection/normal.json"
    )
    res = client.get_collection("COLLECTION_ID")
    assert res["DATA_TYPE"] == "collection#1.0.0"
    assert res.full_data["DATA_TYPE"] == "result#1.0.0"
    assert "detail" in res.full_data
    assert "data" in res.full_data
    assert res.full_data["detail"] == "success"
    assert "detail" not in res.data
    assert res["display_name"] == "Happy Fun Collection Name"
def test_get_collection_flat(client):
    """An unwrapped (flat) collection response is passed through as-is."""
    register_api_route_fixture_file(
        "gcs", "/collections/COLLECTION_ID", "get_collection/unexpectedly_flat.json"
    )
    res = client.get_collection("COLLECTION_ID")
    assert res["DATA_TYPE"] == "collection#1.0.0"
    assert res.full_data["DATA_TYPE"] == "collection#1.0.0"
    assert "detail" not in res.full_data
    assert "data" not in res.full_data
    assert res["display_name"] == "Happy Fun Collection Name"
def test_get_collection_bad_version(client):
    """When the inner documents have an unrecognized version, the result
    envelope is NOT unwrapped and `data` keeps the envelope fields."""
    register_api_route_fixture_file(
        "gcs", "/collections/COLLECTION_ID", "get_collection/bad_version.json"
    )
    res = client.get_collection("COLLECTION_ID")
    assert res["DATA_TYPE"] == "result#1.0.0"
    assert res.full_data["DATA_TYPE"] == "result#1.0.0"
    assert "detail" in res.full_data
    assert "data" in res.full_data
    assert res.full_data["detail"] == "success"
    assert "detail" in res.data
    assert "foo" not in res.data
    for x in res.full_data["data"]:
        assert "foo" in x
def test_get_collection_includes_sideloaded_data(client):
    """Side-loaded sibling documents in the envelope do not prevent the
    collection document itself from being unwrapped."""
    register_api_route_fixture_file(
        "gcs", "/collections/COLLECTION_ID", "get_collection/includes_other.json"
    )
    res = client.get_collection("COLLECTION_ID")
    assert res["DATA_TYPE"] == "collection#1.0.0"
    assert res.full_data["DATA_TYPE"] == "result#1.0.0"
    assert "detail" in res.full_data
    assert "data" in res.full_data
    assert res.full_data["detail"] == "success"
    assert "detail" not in res.data
    assert res["display_name"] == "Happy Fun Collection Name"
def test_get_collection_invalid_datatype_type(client):
    """Malformed DATA_TYPE values in the inner documents leave the result
    envelope intact (no unwrapping)."""
    register_api_route_fixture_file(
        "gcs", "/collections/COLLECTION_ID", "get_collection/invalid_datatype_type.json"
    )
    res = client.get_collection("COLLECTION_ID")
    assert res["DATA_TYPE"] == "result#1.0.0"
    assert res.full_data["DATA_TYPE"] == "result#1.0.0"
    assert "detail" in res.full_data
    assert "detail" in res.data
    assert "data" in res.full_data
    assert res.full_data["detail"] == "success"
def test_delete_collection(client):
    """Deleting a collection returns the bare success result document."""
    register_api_route_fixture_file(
        "gcs", "/collections/COLLECTION_ID", "empty_success.json", method="DELETE"
    )
    res = client.delete_collection("COLLECTION_ID")
    assert res["DATA_TYPE"] == "result#1.0.0"
    assert "detail" in res.data
    assert res.data["detail"] == "success"
def test_create_mapped_collection(client):
    """A fully-populated MappedCollectionDocument POSTs successfully and the
    created collection document is returned."""
    register_api_route_fixture_file(
        "gcs", "/collections", "create_collection.json", method="POST"
    )
    collection = MappedCollectionDocument(
        domain_name="i-f3c83.123.globus.org",
        display_name="Project Foo Research Data",
        identity_id="c8b7ab5c-595c-43c9-8e43-9e8a3debfe4c",
        storage_gateway_id="fc1f3ba0-1fa4-42b2-8bb3-53983774fa5f",
        collection_base_path="/",
        default_directory="/projects",
        public=True,
        force_encryption=False,
        disable_verify=False,
        organization="University of Example",
        department="Data Science",
        keywords=["Project Foo", "Data Intensive Science"],
        description='Information related to the "Foo" project.',
        contact_email="project-foo@example.edu",
        contact_info="+1 (555) 555-1234",
        info_link="https://project-foo.example.edu/info",
        policies={"DATA_TYPE": "blackpearl_collection_policies#1.0.0"},
        allow_guest_collections=True,
        sharing_restrict_paths={
            "DATA_TYPE": "path_restrictions#1.0.0",
            "read": ["/public"],
            "read_write": ["/home", "/projects"],
            "none": ["/private"],
        },
    )
    res = client.create_collection(collection)
    assert res["DATA_TYPE"] == "collection#1.0.0"
    assert res["display_name"] == "Project Foo Research Data"
def test_update_guest_collection(client):
    """PATCHing a GuestCollectionDocument returns the updated collection."""
    register_api_route_fixture_file(
        "gcs", "/collections/COLLECTION_ID", "update_collection.json", method="PATCH"
    )
    collection = GuestCollectionDocument(display_name="Project Foo Research Data")
    res = client.update_collection("COLLECTION_ID", collection)
    assert res["DATA_TYPE"] == "collection#1.0.0"
    assert res["display_name"] == "Project Foo Research Data"
| 36.827778 | 88 | 0.689848 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,326 | 0.350882 |
1a89c7c6d905fb7f8ad81df02cfe9dcabd63ca97 | 4,168 | py | Python | extractocr.py | umd-lib/newspaper-batchload | 0897d8526a6ee797540b7ea50c2e114165a8084e | [
"Apache-2.0"
] | 1 | 2017-12-18T09:12:42.000Z | 2017-12-18T09:12:42.000Z | extractocr.py | umd-lib/newspaper-batchload | 0897d8526a6ee797540b7ea50c2e114165a8084e | [
"Apache-2.0"
] | 33 | 2016-09-19T04:40:49.000Z | 2018-04-13T19:37:42.000Z | extractocr.py | umd-lib/newspaper-batchload | 0897d8526a6ee797540b7ea50c2e114165a8084e | [
"Apache-2.0"
] | 1 | 2016-12-06T21:57:07.000Z | 2016-12-06T21:57:07.000Z | #!/usr/bin/env python3
import argparse
import sys
import yaml
import logging
import logging.config
from datetime import datetime
from classes import pcdm,ocr,util
from handler import ndnp
import rdflib
from rdflib import RDF
from lxml import etree as ET
from classes.exceptions import RESTAPIException, DataReadException
logger = logging.getLogger(__name__)
def extract(fcrepo, uri):
    """Create OCR text-block annotations for a single page resource.

    Opens a repository transaction, loads the page at `uri`, creates and
    updates one annotation object per text block, then commits.

    Returns True on success and False after a rolled-back failure (the
    original implicitly returned None on failure; False is equivalent for
    the truthiness check in main()). A RESTAPIException raised by the
    rollback itself propagates to the caller.
    """
    fcrepo.open_transaction()
    try:
        logger.info("Getting {0} from repository".format(uri))
        page = ndnp.Page.from_repository(fcrepo, uri)
        logger.info("Creating annotations for page {0}".format(page.title))
        for annotation in page.textblocks():
            annotation.create_object(fcrepo)
            annotation.update_object(fcrepo)
        fcrepo.commit_transaction()
        return True
    except (RESTAPIException, DataReadException) as e:
        # if anything fails during item creation or commiting the transaction
        # attempt to rollback the current transaction
        # failures here will be caught by the main loop's exception handler
        # and should trigger a system exit
        logger.error("OCR extraction failed: {0}".format(e))
        fcrepo.rollback_transaction()
        # logger.warn is a deprecated alias; use logger.warning instead.
        logger.warning('Transaction rolled back. Continuing load.')
        return False
def main():
    '''Parse args and handle options.

    Reads page URIs from stdin (one per line), creates annotations for each
    via extract(), and records the outcome in completed/skipped CSV logs.
    '''
    parser = argparse.ArgumentParser(
        description='Extract OCR text and create annotations.'
    )
    # Path to the repo config (endpoint, relpath, credentials, and WebAC paths)
    parser.add_argument('-r', '--repo',
                        help='Path to repository configuration file.',
                        action='store',
                        required=True
                        )
    parser.add_argument('--ignore', '-i',
                        help='file listing items to ignore',
                        action='store'
                        )
    args = parser.parse_args()
    # Timestamp used to name this run's log and skip files.
    now = datetime.utcnow().strftime('%Y%m%d%H%M%S')
    # configure logging
    with open('config/logging.yml', 'r') as configfile:
        logging_config = yaml.safe_load(configfile)
        logfile = 'logs/extractocr.py.{0}.log'.format(now)
        logging_config['handlers']['file']['filename'] = logfile
        logging.config.dictConfig(logging_config)
    # Load required repository config file and create repository object
    with open(args.repo, 'r') as repoconfig:
        fcrepo = pcdm.Repository(yaml.safe_load(repoconfig))
        logger.info('Loaded repo configuration from {0}'.format(args.repo))
    fieldnames = ['uri', 'timestamp']
    # read the log of completed items
    try:
        completed = util.ItemLog('logs/annotated.csv', fieldnames, 'uri')
    except Exception as e:
        logger.error('Non-standard map file specified: {0}'.format(e))
        sys.exit(1)
    logger.info('Found {0} completed items'.format(len(completed)))
    if args.ignore is not None:
        try:
            ignored = util.ItemLog(args.ignore, fieldnames, 'uri')
        except Exception as e:
            logger.error('Non-standard ignore file specified: {0}'.format(e))
            sys.exit(1)
    else:
        ignored = []
    skipfile = 'logs/skipped.extractocr.{0}.csv'.format(now)
    skipped = util.ItemLog(skipfile, fieldnames, 'uri')
    # Process URIs from stdin, skipping already-completed and ignored items.
    with fcrepo.at_path('/annotations'):
        for line in sys.stdin:
            uri = line.rstrip('\n')
            if uri in completed:
                continue
            elif uri in ignored:
                logger.debug('Ignoring {0}'.format(uri))
                continue
            is_extracted = False
            try:
                is_extracted = extract(fcrepo, uri)
            except RESTAPIException as e:
                # Rollback failed inside extract(); the repository state is
                # unknown, so abort the whole run.
                logger.error(
                    "Unable to commit or rollback transaction, aborting"
                    )
                sys.exit(1)
            row = {
                'uri': uri,
                'timestamp': str(datetime.utcnow())
                }
            if is_extracted:
                completed.writerow(row)
            else:
                skipped.writerow(row)
# Script entry point: run only when executed directly, not on import.
if __name__ == "__main__":
    main()
| 32.5625 | 79 | 0.600768 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,184 | 0.284069 |
1a8a11ef27c7e2aabe226bd0f4e93280790db032 | 192 | py | Python | 1101-1200/1121-Divide Array Into Increasing Sequences/1121-Divide Array Into Increasing Sequences.py | jiadaizhao/LeetCode | 4ddea0a532fe7c5d053ffbd6870174ec99fc2d60 | [
"MIT"
] | 49 | 2018-05-05T02:53:10.000Z | 2022-03-30T12:08:09.000Z | 1101-1200/1121-Divide Array Into Increasing Sequences/1121-Divide Array Into Increasing Sequences.py | jolly-fellow/LeetCode | ab20b3ec137ed05fad1edda1c30db04ab355486f | [
"MIT"
] | 11 | 2017-12-15T22:31:44.000Z | 2020-10-02T12:42:49.000Z | 1101-1200/1121-Divide Array Into Increasing Sequences/1121-Divide Array Into Increasing Sequences.py | jolly-fellow/LeetCode | ab20b3ec137ed05fad1edda1c30db04ab355486f | [
"MIT"
] | 28 | 2017-12-05T10:56:51.000Z | 2022-01-26T18:18:27.000Z | import collections
class Solution:
    # 'List' is quoted so the annotation needs no typing import at runtime;
    # the file only imports collections, so a bare List[int] would raise
    # NameError when the class is defined.
    def canDivideIntoSubsequences(self, nums: 'List[int]', K: int) -> bool:
        """Return True if the non-decreasing list `nums` can be split into
        subsequences that are each strictly increasing with length >= K.

        A value occurring m times must land in m distinct subsequences, so at
        least max-frequency subsequences are needed; each needs >= K elements,
        hence feasibility is exactly len(nums) >= K * max_frequency.
        Assumes nums is non-empty (LeetCode 1121 constraint).
        """
        # max over the Counter's values directly; no generator wrapper needed.
        return len(nums) >= K * max(collections.Counter(nums).values())
| 38.4 | 82 | 0.692708 | 172 | 0.895833 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
1a8a175fa2387d52d8058ad6b36f147bdb90e616 | 1,490 | py | Python | KnowledgeMapping/SpiderExp/1-Code/dianhua.cn/save_prefix_mysql.py | nickliqian/ralph_doc_to_chinese | be120ce2bb94a8e8395630218985f5e51ae087d9 | [
"MIT"
] | 8 | 2018-05-22T01:11:33.000Z | 2020-03-19T01:44:55.000Z | KnowledgeMapping/SpiderExp/1-Code/dianhua.cn/save_prefix_mysql.py | yangliangguang/keep_learning | 47ab39c726cb28713ad22bf4cf39d6b146715910 | [
"MIT"
] | null | null | null | KnowledgeMapping/SpiderExp/1-Code/dianhua.cn/save_prefix_mysql.py | yangliangguang/keep_learning | 47ab39c726cb28713ad22bf4cf39d6b146715910 | [
"MIT"
] | 3 | 2018-07-25T09:31:53.000Z | 2019-09-14T14:05:31.000Z | """
所有号码段存入192.168.70.40的mysql
"""
import os
import pymysql
# 获取指定文件夹文件列表
def get_files_name(dir):
    """Return the names of all entries in the given directory."""
    return os.listdir(dir)
# 从文件中提取出号码前缀
def split_number(filename, parents_dir):
    """Parse one phone-prefix file and insert its rows into MySQL.

    Each line is expected to be ``prefix,company,province,city,sub_company``;
    malformed lines are printed and skipped. Relies on the module-level
    ``mysql_cursor`` / ``mysql_conn`` created in the ``__main__`` block.
    """
    print("File name is <{}>".format(filename))
    # Read the whole file.
    with open(os.path.join(parents_dir, filename), "r") as f:
        results = f.readlines()
    # Process the file line by line.
    for result in results:
        data = result.strip().split(",")
        if len(data) == 5:
            print(data)
            # Parameterized query: lets the driver quote/escape values instead
            # of building SQL with str.format (which was injection-prone and
            # broke on values containing quotes).
            sql = ("insert into number_prefix"
                   "(prefix,company,province,city,sub_company) "
                   "value (%s,%s,%s,%s,%s)")
            mysql_cursor.execute(sql, data)
        else:
            # Malformed line: report it together with the source file.
            print("{} {}".format(data, filename))
    mysql_conn.commit()
if __name__ == '__main__':
    # Connect to MySQL.
    print("Connect to mysql...")
    mysql_conn = pymysql.connect(host='192.168.70.40', port=3306, user='root', passwd='mysql', db='phone_number_info', charset='utf8')
    mysql_cursor = mysql_conn.cursor()
    main_dir = "/home/nick/Desktop/H3-移动号码归属/"
    # main_dir = "/home/watson/dc45/telephone_number/bd_dhb/H3-移动号码归属"
    # Collect the names of all files in the source directory.
    files = get_files_name(main_dir)
    print("文件夹<{}>下共<{}>个文件".format(main_dir, len(files)))
    # Feed each file to split_number to extract and store phone-number prefixes.
    for file in files:
        split_number(file, main_dir)
    # Close the MySQL connection.
    mysql_cursor.close()
    mysql_conn.close()
    print("Close MySQL Connection...")
    print("end")
| 28.113208 | 138 | 0.616107 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 699 | 0.4141 |
1a8b18dd5d6a57b2f69ce650aad58222ba730afb | 1,788 | py | Python | scripts/control/tray_balance/tray_renderer.py | adamheins/planar-playground | 24b2eb2e9f50d0ac803539f8860b10dd1a501314 | [
"MIT"
] | null | null | null | scripts/control/tray_balance/tray_renderer.py | adamheins/planar-playground | 24b2eb2e9f50d0ac803539f8860b10dd1a501314 | [
"MIT"
] | null | null | null | scripts/control/tray_balance/tray_renderer.py | adamheins/planar-playground | 24b2eb2e9f50d0ac803539f8860b10dd1a501314 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import numpy as np
from mm2d import util
class TrayRenderer(object):
    """Matplotlib renderer for a planar tray balanced on an end effector.

    Frames (suffix convention: p_ab_c = position of a relative to b,
    expressed in frame c): t = tray, e = end effector, w = world.
    P_ew_w is the planar pose [x, y, theta] of the end effector in the world.
    """

    def __init__(self, radius, p_te_e, p_c1e_e, p_c2e_e, P_ew_w):
        # Left/right tray edge points, expressed in the tray frame.
        self.p_lt_t = np.array([-radius, 0])
        self.p_rt_t = np.array([radius, 0])
        self.p_te_e = p_te_e
        self.p_c1e_e = p_c1e_e
        self.p_c2e_e = p_c2e_e
        self.P_ew_w = P_ew_w

    def set_state(self, P_ew_w):
        self.P_ew_w = P_ew_w

    def _world_points(self):
        """Compute (tray origin, left edge, right edge, contact 1, contact 2)
        in the world frame for the current pose.

        Shared by render() and update_render(), which previously duplicated
        this transform chain line for line.
        """
        p_ew_w, theta_ew = self.P_ew_w[:2], self.P_ew_w[2]
        R_we = util.rotation_matrix(theta_ew)
        p_tw_w = p_ew_w + R_we @ self.p_te_e
        # sides
        p_lw_w = p_tw_w + R_we @ self.p_lt_t
        p_rw_w = p_tw_w + R_we @ self.p_rt_t
        # contact points
        p_c1w_w = p_ew_w + R_we @ self.p_c1e_e
        p_c2w_w = p_ew_w + R_we @ self.p_c2e_e
        return p_tw_w, p_lw_w, p_rw_w, p_c1w_w, p_c2w_w

    def render(self, ax):
        """Create the tray/CoM/contact artists on the given axes."""
        p_tw_w, p_lw_w, p_rw_w, p_c1w_w, p_c2w_w = self._world_points()
        self.tray, = ax.plot([p_lw_w[0], p_rw_w[0]], [p_lw_w[1], p_rw_w[1]], color='k')
        self.com, = ax.plot(p_tw_w[0], p_tw_w[1], 'o', color='k')
        self.contacts, = ax.plot([p_c1w_w[0], p_c2w_w[0]], [p_c1w_w[1], p_c2w_w[1]], 'o', color='r')

    def update_render(self):
        """Move the existing artists to match the current pose (render() must
        have been called first)."""
        p_tw_w, p_lw_w, p_rw_w, p_c1w_w, p_c2w_w = self._world_points()
        self.tray.set_xdata([p_lw_w[0], p_rw_w[0]])
        self.tray.set_ydata([p_lw_w[1], p_rw_w[1]])
        self.com.set_xdata([p_tw_w[0]])
        self.com.set_ydata([p_tw_w[1]])
        self.contacts.set_xdata([p_c1w_w[0], p_c2w_w[0]])
        self.contacts.set_ydata([p_c1w_w[1], p_c2w_w[1]])
| 30.305085 | 100 | 0.587248 | 1,725 | 0.962612 | 0 | 0 | 0 | 0 | 0 | 0 | 82 | 0.045759 |
1a8bd1f13f351b7336c171be459cf320b1683b22 | 4,435 | py | Python | src/web/users/forms.py | werelaxe/drapo | 5f78da735819200f0e7efa6a5e6b3b45ba6e0d4b | [
"MIT"
] | 10 | 2017-04-15T05:00:17.000Z | 2019-08-27T21:08:48.000Z | src/web/users/forms.py | werelaxe/drapo | 5f78da735819200f0e7efa6a5e6b3b45ba6e0d4b | [
"MIT"
] | 2 | 2017-10-06T12:35:59.000Z | 2018-12-03T07:17:12.000Z | src/web/users/forms.py | werelaxe/drapo | 5f78da735819200f0e7efa6a5e6b3b45ba6e0d4b | [
"MIT"
] | 4 | 2017-03-08T21:17:21.000Z | 2019-05-10T16:22:58.000Z | from django import forms
from django.utils.translation import ugettext_lazy as _
class LoginForm(forms.Form):
    """Sign-in form with email and password fields.

    NOTE(review): email is a plain CharField, so address format is not
    validated here — presumably checked against the user store in the view.
    """
    email = forms.CharField(
        required=True,
        label=_('Email'),
        max_length=100,
        widget=forms.TextInput(attrs={
            'placeholder': _('Your email'),
            'autofocus': 'autofocus',
            'class': 'form-control-short',
        })
    )
    password = forms.CharField(
        required=True,
        label=_('Password'),
        max_length=128,
        widget=forms.PasswordInput(attrs={
            'placeholder': _('Enter password'),
            'class': 'form-control-short',
        })
    )
class FormWithRepeatedPassword(forms.Form):
    """Base form providing password + confirmation fields and their
    cross-field validation; subclassed by RegisterForm/ChangePasswordForm.
    """
    password = forms.CharField(
        required=True,
        label=_('Password'),
        max_length=128,
        widget=forms.PasswordInput(attrs={
            'placeholder': _('Enter password'),
            'class': 'form-control-short',
        })
    )
    password_repeat = forms.CharField(
        required=True,
        label=_('Password again'),
        max_length=128,
        widget=forms.PasswordInput(attrs={
            'placeholder': _('Repeat password'),
            'class': 'form-control-short',
        })
    )
    # Per-field clean hook: reject mismatched password/confirmation pairs.
    # NOTE(review): writes self._errors directly instead of using
    # self.add_error()/raising ValidationError — works, but add_error() would
    # also drop the value from cleaned_data; confirm intent before changing.
    def clean_password_repeat(self):
        password = self.cleaned_data.get('password')
        password_repeat = self.cleaned_data.get('password_repeat')
        if password and password_repeat and password != password_repeat:
            self._errors['password_repeat'] = self.error_class(['Password are not equal'])
class RegisterForm(FormWithRepeatedPassword):
    """Registration form: username/email/name plus the inherited
    password + confirmation pair, with an explicit field display order.
    """
    username = forms.CharField(
        required=True,
        label=_('Username'),
        max_length=100,
        widget=forms.TextInput(attrs={
            'placeholder': _('Enter username'),
            'autofocus': 'autofocus',
            'class': 'form-control-short',
        })
    )
    email = forms.EmailField(
        required=True,
        label=_('Email'),
        max_length=100,
        widget=forms.TextInput(attrs={
            'placeholder': _('Enter email'),
            'class': 'form-control-short',
        })
    )
    first_name = forms.CharField(
        label=_('First name'),
        max_length=100,
        widget=forms.TextInput(attrs={
            'placeholder': _('Your first name'),
            'class': 'form-control-short',
        })
    )
    last_name = forms.CharField(
        label=_('Last name'),
        max_length=100,
        widget=forms.TextInput(attrs={
            'placeholder': _('Your last name'),
            'class': 'form-control-short',
        })
    )
    # Force the field order regardless of what the caller passes.
    # NOTE(review): 'password_validation' does not match any declared field
    # (the confirmation field is 'password_repeat'), so it is silently ignored
    # by Django's order_fields — looks like a typo; confirm before fixing.
    def __init__(self, *args, **kwargs):
        if 'field_order' in kwargs:
            del kwargs['field_order']
        super().__init__(field_order=['username', 'email', 'first_name', 'last_name', 'password', 'password_validation'],
                         *args, **kwargs)
class EditUserForm(forms.Form):
    """Profile-editing form pre-populated from an existing user object."""
    username = forms.CharField(
        required=True,
        label=_('Username'),
        max_length=100,
        widget=forms.TextInput(attrs={
            'placeholder': _('Your username'),
            'autofocus': 'autofocus',
            'class': 'form-control-short',
        })
    )
    first_name = forms.CharField(
        label=_('First name'),
        max_length=100,
        widget=forms.TextInput(attrs={
            'placeholder': _('Your first name'),
            'class': 'form-control-short',
        })
    )
    last_name = forms.CharField(
        label=_('Last name'),
        max_length=100,
        widget=forms.TextInput(attrs={
            'placeholder': _('Your last name'),
            'class': 'form-control-short',
        })
    )
    # Takes the user as an extra positional argument and seeds the form's
    # initial values from it.
    def __init__(self, user, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.initial = {
            'username': user.username,
            'first_name': user.first_name,
            'last_name': user.last_name
        }
class ChangePasswordForm(FormWithRepeatedPassword):
    """Password-change form: old password plus the inherited new
    password + confirmation pair, displayed in that order.
    """
    old_password = forms.CharField(
        required=True,
        label=_('Old password'),
        max_length=128,
        widget=forms.PasswordInput(attrs={
            'class': 'form-control-short'
        })
    )
    # Force the field order regardless of what the caller passes.
    def __init__(self, *args, **kwargs):
        if 'field_order' in kwargs:
            del kwargs['field_order']
        super().__init__(field_order=['old_password', 'password', 'password_repeat'], *args, **kwargs)
| 28.429487 | 121 | 0.559414 | 4,339 | 0.978354 | 0 | 0 | 0 | 0 | 0 | 0 | 1,099 | 0.247802 |
1a8e71aea7b32873518907cb5c906e9645f4d8e0 | 2,780 | py | Python | sample/src/rdt_header.py | PANG-hans/CS305Proj | 418b6f205ebb6101ec7445e359ad5df45cfc9d44 | [
"MIT"
] | null | null | null | sample/src/rdt_header.py | PANG-hans/CS305Proj | 418b6f205ebb6101ec7445e359ad5df45cfc9d44 | [
"MIT"
] | null | null | null | sample/src/rdt_header.py | PANG-hans/CS305Proj | 418b6f205ebb6101ec7445e359ad5df45cfc9d44 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# coding=utf-8
"""
@Github: https://github.com/Certseeds
@Organization: SUSTech
@Author: nanoseeds
@Date: 2020-07-12 17:46:45
@LastEditors : nanoseeds
"""
""" CS305_2019F_Remake
Copyright (C) 2020 nanoseeds
CS305_2019F_Remake is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
CS305_2019F_Remake is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
from config import *
class rdt_header(object):
    """Header portion of an RDT packet: flag bits, sequence/ack numbers and
    the length of the payload that follows."""

    def __init__(self, bits: int, seq_num: int, ack_num: int, data_str: [str, bytes] = ""):
        """Build a header; `length` is derived from the payload's length."""
        self.bits: int = bits
        self.seq_num: int = seq_num
        self.ack_num: int = ack_num
        self.length: int = len(data_str)

    @classmethod
    def unpack(cls, data_bytes: bytes) -> 'rdt_header':
        """Parse a header from raw bytes; short input yields a (0, -1, -1) sentinel."""
        if len(data_bytes) < header_length:
            return cls(0, -1, -1)
        assert len(data_bytes) >= header_length
        # header_format unpacks five fields; the trailing one is unused here.
        fields = struct.unpack(header_format, data_bytes[0:header_length])
        header = cls(fields[0], fields[1], fields[2])
        header.length = fields[3]
        return header

    def to_bytes(self) -> bytes:
        """Serialize the header (with an empty payload) via the shared builder."""
        return produce_packets(header_format, self.bits, self.seq_num, self.ack_num, "")

    def equal(self, **args) -> bool:
        """Return True when every supplied keyword matches the same-named field.

        Unknown keywords are ignored, mirroring the original field-by-field check.
        """
        own = {'bits': self.bits, 'seq_num': self.seq_num,
               'ack_num': self.ack_num, 'length': self.length}
        return all(expected == own[key] for key, expected in args.items() if key in own)

    def __str__(self):
        values = (self.bits, self.seq_num, self.ack_num, self.length)
        return "bits:{} seq:{} ack:{} length:{}".format(*[str(v) for v in values])
if __name__ == '__main__':
    # Smoke tests for rdt_header run when the module is executed directly.
    head = rdt_header(7, 0, 0, '12345')
    assert head.equal(bits=7, seq_num=0, ack_num=0, length=5)
    # NOTE(review): the expected fields below assume header_format packs five
    # big-endian 32-bit integers (bits, seq, ack, length, extra) -- TODO confirm
    # against the definitions in config.
    head2 = rdt_header.unpack(b'\x00\x00\x00\x01\x00\x00\x00\x02\x00\x00\x00\x03\x00\x00\x00\x04\x00\x00\x00\x05')
    assert head2.equal(bits=1, seq_num=2, ack_num=3, length=4)
1a8f11a248ca1351bd360d4ea1e5e3435ee8c318 | 188 | py | Python | functions/analytics_worker/test/conftest.py | epiphone/lambda-terraform-analytics | b569ee6a7bcb56c8c3c41b875edd4859350010ee | [
"MIT"
] | null | null | null | functions/analytics_worker/test/conftest.py | epiphone/lambda-terraform-analytics | b569ee6a7bcb56c8c3c41b875edd4859350010ee | [
"MIT"
] | null | null | null | functions/analytics_worker/test/conftest.py | epiphone/lambda-terraform-analytics | b569ee6a7bcb56c8c3c41b875edd4859350010ee | [
"MIT"
] | null | null | null | import os
import sys
# Prepend the shared test_utils directory (three levels up from this file)
# so the fixtures module below becomes importable.
here = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, os.path.join(here, '..', '..', '..', 'test_utils'))
# Re-exported here so pytest picks up the shared fixtures for this test package.
from fixtures import event, lambda_context
| 23.5 | 70 | 0.696809 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 24 | 0.12766 |
1a9012d3bbaeed3c5502f33b98692347736e7973 | 213 | wsgi | Python | darkweb.wsgi | robopt/Capstone | d0e092ed7f4a2a2fc2ec79bd2fdf06663de19c11 | [
"Apache-2.0"
] | 5 | 2016-04-11T20:24:40.000Z | 2019-03-11T23:19:13.000Z | darkweb.wsgi | robopt/Capstone | d0e092ed7f4a2a2fc2ec79bd2fdf06663de19c11 | [
"Apache-2.0"
] | null | null | null | darkweb.wsgi | robopt/Capstone | d0e092ed7f4a2a2fc2ec79bd2fdf06663de19c11 | [
"Apache-2.0"
] | 5 | 2016-04-18T19:08:59.000Z | 2019-01-08T20:11:19.000Z | #!/usr/bin/python
import sys
import logging
logging.basicConfig(stream=sys.stderr)
sys.path.insert(0,"/var/www/capstone/")
#from FlaskApp import app as application
#application.secret_key = 'Add your secret key'
| 23.666667 | 47 | 0.779343 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 124 | 0.58216 |
1a90e4152f8eb4914f77ba3c276a65abf4a75f61 | 13,600 | py | Python | tests/dialog/test_router.py | uezo/minette-python | dd8cd7d244b6e6e4133c8e73d637ded8a8c6846f | [
"Apache-2.0"
] | 31 | 2017-12-18T15:35:42.000Z | 2021-12-16T07:27:33.000Z | tests/dialog/test_router.py | uezo/minette-python | dd8cd7d244b6e6e4133c8e73d637ded8a8c6846f | [
"Apache-2.0"
] | 17 | 2017-07-13T22:25:08.000Z | 2020-11-02T14:19:32.000Z | tests/dialog/test_router.py | uezo/minette-python | dd8cd7d244b6e6e4133c8e73d637ded8a8c6846f | [
"Apache-2.0"
] | 2 | 2017-09-14T09:28:35.000Z | 2021-01-17T12:31:54.000Z | import sys
import os
sys.path.append(os.pardir)
import pytest
from pytz import timezone
from minette import DialogRouter, DialogService, EchoDialogService, ErrorDialogService
from minette import (
Message,
Context,
PerformanceInfo,
Priority
)
class PizzaDialogService(DialogService):
    """Stub dialog service used as the routing target for "PizzaIntent" in tests."""
    pass
class SobaDialogService(DialogService):
    """Stub dialog service used as the routing target for "SobaIntent" in tests."""
    pass
class AdhocDialogService(DialogService):
    """Stub dialog service used as the routing target for "AdhocIntent" in tests."""
    pass
class MyDialogRouter(DialogRouter):
    """Test router mapping keyword-triggered intents to the stub dialog services."""

    def register_intents(self):
        """Register the intent -> dialog service resolution table used in tests."""
        self.intent_resolver = {
            "PizzaIntent": PizzaDialogService,
            "SobaIntent": SobaDialogService,
            "AdhocIntent": AdhocDialogService,
            "NotRegisteredIntent": None,
        }

    def extract_intent(self, request, context, connection):
        """Derive an intent (optionally with entities and a priority) from keywords.

        Check order matters: e.g. "lower" must be examined before "soba" so the
        two soba variants can carry different priorities.
        """
        text = request.text
        if "pizza" in text:
            return "PizzaIntent"
        if "lower" in text:
            return "SobaIntent", {"soba_name": "tanuki soba", "is_hot": True}, Priority.Low
        if "soba" in text:
            return "SobaIntent", {"soba_name": "tanuki soba", "is_hot": True}, Priority.High
        if "highest p" in text:
            return "PizzaIntent", {}, Priority.Highest
        if "highest s" in text:
            return "SobaIntent", {}, Priority.Highest
        if "adhoc" in text:
            request.is_adhoc = True
            return "AdhocIntent", {}, Priority.Highest
        if "not_registered" in text:
            return "NotRegisteredIntent"
        if "unknown" in text:
            return "UnknownIntent"
        if "error" in text:
            1 / 0  # deliberately raises ZeroDivisionError to exercise error handling
def test_init_base():
    """The base router keeps the given timezone and defaults to DialogService."""
    router = DialogRouter(timezone=timezone("Asia/Tokyo"))
    assert router.timezone == timezone("Asia/Tokyo")
    assert router.default_dialog_service is DialogService
def test_extract_intent():
    """The default extract_intent passes through the request's intent and entities."""
    router = DialogRouter(timezone=timezone("Asia/Tokyo"))
    ctx = Context("TEST", "test_user")
    msg = Message(channel="TEST", channel_user_id="test_user", text="Hello")
    msg.intent = "PizzaIntent"
    msg.entities = {"key1": "value1"}
    extracted_intent, extracted_entities = router.extract_intent(msg, ctx, None)
    assert extracted_intent == "PizzaIntent"
    assert extracted_entities == {"key1": "value1"}
def test_init():
    """A subclassed router keeps its timezone and configured default service."""
    router = MyDialogRouter(timezone=timezone("Asia/Tokyo"), default_dialog_service=EchoDialogService)
    assert router.timezone == timezone("Asia/Tokyo")
    assert router.default_dialog_service is EchoDialogService
def test_route():
    """Exercise topic routing: updates, adhoc interruptions, continuation,
    and priority-based overriding (Normal < High < Highest).

    The sequence of requests is stateful -- `context` carries the active
    topic between calls, so the order of the blocks below matters.
    """
    # update topic
    dr = MyDialogRouter(timezone=timezone("Asia/Tokyo"), default_dialog_service=EchoDialogService)
    context = Context("TEST", "test_user")
    request = Message(channel="TEST", channel_user_id="test_user", text="Hello")
    request.intent = "PizzaIntent"
    ds = dr.route(request, context, None)
    assert ds is PizzaDialogService
    # adhoc topic
    context = Context("TEST", "test_user")
    request = Message(channel="TEST", channel_user_id="test_user", text="Hello")
    request.intent = "AdhocIntent"
    request.is_adhoc = True
    ds = dr.route(request, context, None)
    assert ds is AdhocDialogService
    assert context.topic.name == ""
    # adhoc topic (keep previous topic)
    context = Context("TEST", "test_user")
    context.topic.name = "pizza"
    request = Message(channel="TEST", channel_user_id="test_user", text="Hello")
    request.intent = "AdhocIntent"
    request.is_adhoc = True
    request.intent_priority = Priority.High
    ds = dr.route(request, context, None)
    assert ds is AdhocDialogService
    assert context.topic.name == "pizza"
    assert context.topic.keep_on is True
    # continue topic
    context = Context("TEST", "test_user")
    context.topic.name = "pizza"
    request = Message(channel="TEST", channel_user_id="test_user", text="Hello")
    ds = dr.route(request, context, None)
    assert ds is PizzaDialogService
    assert context.topic.priority == Priority.Normal
    # not updated by same priority
    request = Message(channel="TEST", channel_user_id="test_user", text="Hello")
    request.intent = "SobaIntent"
    ds = dr.route(request, context, None)
    assert ds is PizzaDialogService
    assert context.topic.priority == Priority.Normal
    # highest topic updated by highest intent
    context = Context("TEST", "test_user")
    request = Message(channel="TEST", channel_user_id="test_user", text="Hello")
    request.intent = "PizzaIntent"
    request.intent_priority = Priority.Highest
    ds = dr.route(request, context, None)
    assert ds is PizzaDialogService
    assert context.topic.priority == Priority.Highest - 1
    # next message (not updated by lower than highest)
    request = Message(channel="TEST", channel_user_id="test_user", text="Hello")
    request.intent = "SobaIntent"
    request.intent_priority = Priority.Highest - 1
    ds = dr.route(request, context, None)
    assert ds is PizzaDialogService
    assert context.topic.priority == Priority.Highest - 1
    # last message (updated by highest)
    request = Message(channel="TEST", channel_user_id="test_user", text="Hello")
    request.intent = "SobaIntent"
    request.intent_priority = Priority.Highest
    ds = dr.route(request, context, None)
    assert ds is SobaDialogService
    assert context.topic.priority == Priority.Highest - 1
    # no intent
    context = Context("TEST", "test_user")
    request = Message(channel="TEST", channel_user_id="test_user", text="Hello")
    ds = dr.route(request, context, None)
    assert ds is dr.default_dialog_service
    # unknown intent
    context = Context("TEST", "test_user")
    request = Message(channel="TEST", channel_user_id="test_user", text="Hello")
    request.intent = "UnknownIntent"
    ds = dr.route(request, context, None)
    assert ds is dr.default_dialog_service
    # dialog for intent not registered
    context = Context("TEST", "test_user")
    request = Message(channel="TEST", channel_user_id="test_user", text="Hello")
    request.intent = "NotRegisteredIntent"
    ds = dr.route(request, context, None)
    assert ds is DialogService
    # update topic by higher priority intent
    context = Context("TEST", "test_user")
    request = Message(channel="TEST", channel_user_id="test_user", text="Hello")
    request.intent = "PizzaIntent"
    ds = dr.route(request, context, None)
    assert ds is PizzaDialogService
    context.topic.keep_on = True
    # intent continue without intent
    request = Message(channel="TEST", channel_user_id="test_user", text="Hello")
    ds = dr.route(request, context, None)
    assert ds is PizzaDialogService
    context.topic.keep_on = True
    # soba intent with normal priority (not updated)
    request = Message(channel="TEST", channel_user_id="test_user", text="Hello")
    request.intent = "SobaIntent"
    ds = dr.route(request, context, None)
    assert ds is PizzaDialogService
    context.topic.keep_on = True
    # soba intent with higher priority (updated)
    request = Message(channel="TEST", channel_user_id="test_user", text="Hello")
    request.intent = "SobaIntent"
    request.intent_priority = Priority.High
    ds = dr.route(request, context, None)
    assert ds is SobaDialogService
    # update topic by normal priority intent
    context = Context("TEST", "test_user")
    request = Message(channel="TEST", channel_user_id="test_user", text="Hello")
    request.intent = "PizzaIntent"
    request.intent_priority = Priority.Low
    ds = dr.route(request, context, None)
    assert ds is PizzaDialogService
    context.topic.keep_on = True
    # intent continue without intent
    request = Message(channel="TEST", channel_user_id="test_user", text="Hello")
    ds = dr.route(request, context, None)
    assert ds is PizzaDialogService
    context.topic.keep_on = True
    # soba intent with normal priority (updated)
    request = Message(channel="TEST", channel_user_id="test_user", text="Hello")
    request.intent = "SobaIntent"
    ds = dr.route(request, context, None)
    assert ds is SobaDialogService
def test_handle_exception():
    """handle_exception yields an ErrorDialogService and records the error text."""
    router = MyDialogRouter(timezone=timezone("Asia/Tokyo"), default_dialog_service=EchoDialogService)
    ctx = Context("TEST", "test_user")
    msg = Message(channel="TEST", channel_user_id="test_user", text="Hello")
    dialog = router.handle_exception(msg, ctx, ValueError("test error"), None)
    assert isinstance(dialog, ErrorDialogService)
    assert ctx.error["exception"] == "test error"
def test_execute():
    """End-to-end routing through execute(): keyword-driven intents drive topic
    switching, continuation, adhoc interruptions, unknown/unregistered intents
    and the error path. State in `context` flows between consecutive calls.
    """
    dr = MyDialogRouter(timezone=timezone("Asia/Tokyo"), default_dialog_service=EchoDialogService)
    performance = PerformanceInfo()
    # default
    context = Context("TEST", "test_user")
    context.topic.is_new = True
    request = Message(channel="TEST", channel_user_id="test_user", text="Hello")
    ds = dr.execute(request, context, None, performance)
    assert isinstance(ds, dr.default_dialog_service)
    # pizza
    context = Context("TEST", "test_user")
    context.topic.is_new = True
    request = Message(channel="TEST", channel_user_id="test_user", text="give me pizza")
    ds = dr.execute(request, context, None, performance)
    assert isinstance(ds, PizzaDialogService)
    # continue pizza
    request = Message(channel="TEST", channel_user_id="test_user", text="seafood")
    ds = dr.execute(request, context, None, performance)
    assert isinstance(ds, PizzaDialogService)
    # soba lower priority (continume pizza)
    request = Message(channel="TEST", channel_user_id="test_user", text="lower")
    ds = dr.execute(request, context, None, performance)
    assert isinstance(ds, PizzaDialogService)
    # soba higher priority (update to soba)
    request = Message(channel="TEST", channel_user_id="test_user", text="give me soba")
    ds = dr.execute(request, context, None, performance)
    assert isinstance(ds, SobaDialogService)
    # pizza highest (update pizza)
    request = Message(channel="TEST", channel_user_id="test_user", text="highest p")
    ds = dr.execute(request, context, None, performance)
    assert isinstance(ds, PizzaDialogService)
    assert context.topic.priority == Priority.Highest - 1
    # soba with high priority (continue pizza)
    request = Message(channel="TEST", channel_user_id="test_user", text="give me soba")
    ds = dr.execute(request, context, None, performance)
    assert isinstance(ds, PizzaDialogService)
    assert context.topic.priority == Priority.Highest - 1
    # soba with highest priority (update soba)
    request = Message(channel="TEST", channel_user_id="test_user", text="highest s")
    ds = dr.execute(request, context, None, performance)
    assert isinstance(ds, SobaDialogService)
    # adhoc
    context = Context("TEST", "test_user")
    context.topic.is_new = True
    request = Message(channel="TEST", channel_user_id="test_user", text="pizza")
    # start pizza
    ds = dr.execute(request, context, None, performance)
    assert isinstance(ds, PizzaDialogService)
    request = Message(channel="TEST", channel_user_id="test_user", text="adhoc")
    # adhoc
    ds = dr.execute(request, context, None, performance)
    assert isinstance(ds, AdhocDialogService)
    request = Message(channel="TEST", channel_user_id="test_user", text="seafood")
    # continue pizza
    ds = dr.execute(request, context, None, performance)
    assert isinstance(ds, PizzaDialogService)
    # no intent
    context = Context("TEST", "test_user")
    context.topic.is_new = True
    request = Message(channel="TEST", channel_user_id="test_user", text="_")
    ds = dr.execute(request, context, None, performance)
    assert isinstance(ds, dr.default_dialog_service)
    # unknown
    context = Context("TEST", "test_user")
    context.topic.is_new = True
    request = Message(channel="TEST", channel_user_id="test_user", text="unknown")
    ds = dr.execute(request, context, None, performance)
    assert isinstance(ds, dr.default_dialog_service)
    # dialog for intent not registered
    context = Context("TEST", "test_user")
    context.topic.is_new = True
    request = Message(channel="TEST", channel_user_id="test_user", text="not_registered")
    ds = dr.execute(request, context, None, performance)
    assert isinstance(ds, DialogService)
    # error
    context = Context("TEST", "test_user")
    context.topic.is_new = True
    request = Message(channel="TEST", channel_user_id="test_user", text="error")
    ds = dr.execute(request, context, None, performance)
    assert isinstance(ds, ErrorDialogService)
def test_intent_resolver_as_arg():
    """An intent_resolver passed to the constructor overrides register_intents();
    unresolved intents fall back to the default dialog service.
    """
    # init
    dr = MyDialogRouter(
        timezone=timezone("Asia/Tokyo"),
        default_dialog_service=EchoDialogService,
        intent_resolver={
            "PizzaIntent": PizzaDialogService,
            "SobaIntent": SobaDialogService,
        })
    assert dr.timezone == timezone("Asia/Tokyo")
    assert dr.default_dialog_service is EchoDialogService
    # route
    context = Context("TEST", "test_user")
    request = Message(
        channel="TEST",
        channel_user_id="test_user",
        text="Hello",
        intent="PizzaIntent")
    assert dr.route(request, context, None) is PizzaDialogService
    context = Context("TEST", "test_user")
    request = Message(
        channel="TEST",
        channel_user_id="test_user",
        text="Hello",
        intent="SobaIntent")
    assert dr.route(request, context, None) is SobaDialogService
    context = Context("TEST", "test_user")
    request = Message(
        channel="TEST",
        channel_user_id="test_user",
        text="Hello")
    assert dr.route(request, context, None) is EchoDialogService
| 38.857143 | 98 | 0.693529 | 1,367 | 0.100515 | 0 | 0 | 0 | 0 | 0 | 0 | 3,026 | 0.2225 |
1a931fb1a28cc19934bbc31bca302f6be041992e | 4,630 | py | Python | manim2/for_tb_videos/chat.py | tigerking/manim2 | 93e8957e433b8e59acb5a5213a4074ee0125b823 | [
"MIT"
] | null | null | null | manim2/for_tb_videos/chat.py | tigerking/manim2 | 93e8957e433b8e59acb5a5213a4074ee0125b823 | [
"MIT"
] | null | null | null | manim2/for_tb_videos/chat.py | tigerking/manim2 | 93e8957e433b8e59acb5a5213a4074ee0125b823 | [
"MIT"
] | null | null | null | from manim2.imports import *
'''
Codigo por Miroslav Olsak
https://github.com/mkoconnor/manim
https://www.youtube.com/user/procdalsinazev/feed
'''
class ChatBubble(VMobject):
    """A chat speech bubble (SVG outline + text) styled like a messenger app.

    `answer_bubble=True` renders a mirrored bubble anchored bottom-right;
    otherwise the bubble sits bottom-left. The bubble outline is resized by
    moving groups of its SVG path points directly.
    """
    CONFIG = {
        "answer_color":GREEN_B,
        "response_color":BLUE_B,
        "background_chat_opacity":0.95,
        "background_chat_color":ORANGE,
        "match_background_color":True,
        "text_color":WHITE,
        "answer_message_t":False,
        "response_message_t":False,
        "type_text":"Tex",
        "font_text":"Arial",
    }
    def __init__(self, text, answer_bubble = False, border = 0.3,**kwargs):
        """Create the bubble for `text`; `border` is the padding around the text."""
        VMobject.__init__(self, **kwargs)
        self.answer_bubble = answer_bubble
        # First sub-mobject of the SVG is the bubble outline itself.
        self.bubble = SVGMobject(file_name = "mix/Speach.svg",
            initial_scale_factor = 0.02,
            unpack_groups= True
            )[0]
        #self.bubble.set_fill(BLACK, opacity = background_chat_opacity)
        if answer_bubble: self.bubble.set_stroke(self.answer_color)
        else: self.bubble.set_stroke(self.response_color)
        if self.match_background_color:
            if answer_bubble: self.bubble.set_fill(self.answer_color,opacity = self.background_chat_opacity)
            else: self.bubble.set_fill(self.response_color,opacity = self.background_chat_opacity)
        else:
            self.bubble.set_fill(self.background_chat_color, opacity = self.background_chat_opacity)
        if self.type_text == "Tex":
            self.text = Text(text,alignment="\\flushleft",color=self.text_color)
        else:
            self.text = FontText(text,font=self.font_text,color=self.text_color)
        self.text.set_height(0.3621)
        #print(self.text.get_tex_string())
        # NOTE(review): point indices 16/20 presumably span the bubble's tail
        # (tip) height in this particular SVG -- specific to mix/Speach.svg.
        self.tip_h = self.bubble.points[16,1] - self.bubble.points[20,1]
        self.text.move_to(self.bubble.get_corner(LEFT+DOWN)+np.array([border,self.tip_h+border,0]), aligned_edge = LEFT+DOWN)
        # How much the bubble outline must grow to wrap the text plus padding.
        size_shift = self.text.get_corner(UP+RIGHT) - self.bubble.get_corner(UP+RIGHT) + border
        shift_w = size_shift[0]
        shift_h = size_shift[1]
        # Clamp shrinking so the bubble never collapses below its minimum size.
        if shift_w < -3.9: shift_w = -3.9
        if shift_h < -0.5: shift_h = -0.5
        # Move specific runs of SVG path points to stretch the right edge and top.
        for p in self.bubble.points[26:]: p[0] += shift_w
        for p in self.bubble.points[35:]: p[1] += shift_h
        for p in self.bubble.points[:5]: p[1] += shift_h
        #for p in self.bubble.points[-36:13]: p[1] += shift_h
        self.add(self.bubble, self.text)
        if answer_bubble:
            # Mirror horizontally and park in the bottom-right corner.
            self.center()
            self.bubble.scale([-1,1,1])
            self.to_corner(RIGHT+DOWN)
        else:
            self.to_corner(LEFT+DOWN)
def stretch_rate_func(f):
    """Normalize rate function `f` so the returned function maps 0 -> 0 and 1 -> 1.

    Intermediate values are rescaled linearly between f(0) and f(1).
    """
    start = f(0)
    span = f(1) - start
    def normalized(t):
        return (f(t) - start) / span
    return normalized
class Conversation:
    """Animates a back-and-forth chat in a manim scene, alternating between
    'answer' (right) and 'response' (left) bubbles, with message sounds.
    """
    def __init__(self, scene, start_answer = True,
                 answer_color=GREEN_B,response_color=BLUE_B,
                 background_chat_opacity=0.95,
                 match_background_color=True,
                 background_chat_color=BLACK,
                 answer_message=False,
                 response_message=False,
                 text_color=BLACK,**kwargs):
        """Store styling options; `start_answer` picks which side speaks first."""
        # Abbreviated attribute names mirror the ChatBubble kwargs they feed.
        self.a_c=answer_color
        self.r_c=response_color
        self.b_c_o=background_chat_opacity
        self.m_b_c=match_background_color
        self.t_c=text_color
        self.b_c=background_chat_color
        self.scene = scene
        self.dialog = VGroup()
        self.next_answer = start_answer
        self.ad=answer_message
        self.rd=response_message
    def add_bubble(self, text, answer_bubble = True,**kwargs):
        """Append a new bubble for `text`, scrolling previous bubbles upward.

        The speaking side alternates automatically; the `answer_bubble`
        parameter is currently unused.
        """
        #ChatBubble.__init__(self, text,**kwargs)
        bubble = ChatBubble(text, self.next_answer,
                            answer_color=self.a_c,
                            response_color=self.r_c,
                            background_chat_opacity=self.b_c_o,
                            match_background_color=self.m_b_c,
                            text_color=self.t_c,
                            background_chat_color=self.b_c,
                            answer_message_t=self.ad,
                            response_message_t=self.rd,
                            **kwargs)
        self.next_answer = not self.next_answer
        height = bubble.get_height()
        # Scroll distance: full bubble height minus the tail, plus a gap.
        shift = height - bubble.tip_h + 0.2
        dialog_target = self.dialog.copy()
        dialog_target.shift([0, shift, 0])
        bubble[0].set_stroke(None,0)
        #bubble.set_fill(opacity=0.7)
        #bubble.set_fill(bubble[0].get_color())
        bubble_target = bubble.copy()
        # Start the new bubble flattened at the bottom so it "grows" in.
        bubble.scale([1, 0, 1], about_point = np.array([0, -4.0, 0]))
        # NOTE(review): only referenced from the commented-out alternative
        # animation in the dead string below.
        def dialog_rate_func(t):
            bubble_rate = rush_from(t)
            bubble_rel_pos = (bubble_rate - 1) * height / shift + 1
            return np.exp(bubble_rel_pos-1)
        if bubble.answer_bubble:
            self.scene.add_sound("efectos_sonido/send_message")
        else:
            self.scene.add_sound("efectos_sonido/notification1",gain=-20)
        self.scene.play(Transform(self.dialog, dialog_target, rate_func = bezier([0, 0, 0.5, 1]),run_time=0.3),
                        Transform(bubble, bubble_target, rate_func = bezier([0, 0, 0.5, 1]),run_time=0.3),
                        )
        '''
        else:
            self.scene.play(Transform(self.dialog, dialog_target, rate_func = stretch_rate_func(dialog_rate_func)),
                            Transform(bubble, bubble_target, rate_func = rush_from))
        '''
        self.dialog.add(bubble)
        #self.scene.add(bubble)
1a937cd4db745f3c78096f89dfe2d4c82f2acf4d | 1,065 | py | Python | scripts/tests.py | hugoren/schedule_client | e5746e9c3e452819ca8e6e38686302bb8d8b6ead | [
"MIT"
] | null | null | null | scripts/tests.py | hugoren/schedule_client | e5746e9c3e452819ca8e6e38686302bb8d8b6ead | [
"MIT"
] | null | null | null | scripts/tests.py | hugoren/schedule_client | e5746e9c3e452819ca8e6e38686302bb8d8b6ead | [
"MIT"
] | null | null | null | import requests
import json
def file_sync(target, file_name):
    """Ask the local schedule service to sync `file_name` on `target`, then
    print the service's JSON reply."""
    token = 'Schedule0350c8c75ddcd9fafdaA9738df4c9346bec48dc9c4915'
    url = 'http://127.0.0.1:10011/api/v1/schedule/file_sync/'
    payload = json.dumps({"target": target, "file_name": file_name})
    headers = {'Content-Type': 'application/json', 'token': token}
    response = requests.get(url, data=payload, headers=headers)
    print(response.json())
def remote_command():
    """Manually trigger a remote command run on the schedule service and print
    the JSON reply. Uses hard-coded demo values; meant to be called from the
    __main__ block.
    """
    target = '127.0.0.1'
    command = 'command'
    script_name = 'tests'
    args = ('a', 'b')
    # Dropped the unused `kwargs = {"a": "a"}` local: it was never sent.
    token = 'Schedule0350c8c75ddcd9fafdaA9738df4c9346bec48dc9c4915'
    url = 'http://127.0.0.1:10011/api/v1/schedule/command/'
    data = {"fun_name": 'func', "command": command, "target": target, "script_name": script_name,
            "args": args
            }
    # NOTE(review): a GET with a JSON body is unusual -- presumably matches the
    # server's contract; confirm before changing to POST.
    r = requests.get(url, data=json.dumps(data),
                     headers={'Content-Type': 'application/json', 'token': token}).json()
    print(r)
if __name__ == '__main__':
    # Manual smoke test against a schedule service running on localhost:10011.
    # remote_command()
    file_sync(target="127.0.0.1", file_name="tests.py")
| 29.583333 | 97 | 0.622535 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 447 | 0.419718 |
1a97bc0b24e4089561565f310ab2f569c2a887da | 985 | py | Python | projectroles/management/commands/syncgroups.py | holtgrewe/sodar_core | 116c5c8abc1dea483a640ba68af6d5cf4d27c8d7 | [
"MIT"
] | null | null | null | projectroles/management/commands/syncgroups.py | holtgrewe/sodar_core | 116c5c8abc1dea483a640ba68af6d5cf4d27c8d7 | [
"MIT"
] | null | null | null | projectroles/management/commands/syncgroups.py | holtgrewe/sodar_core | 116c5c8abc1dea483a640ba68af6d5cf4d27c8d7 | [
"MIT"
] | null | null | null | import logging
from django.contrib import auth
from django.core.management.base import BaseCommand
from django.db import transaction
from projectroles.utils import set_user_group
User = auth.get_user_model()
logger = logging.getLogger(__name__)
class Command(BaseCommand):
    """Management command that rebuilds every user's group membership."""

    help = 'Synchronizes user groups based on user name'

    def add_arguments(self, parser):
        # No command-line options for this command.
        pass

    def handle(self, *args, **options):
        """Clear each user's groups and reassign them via set_user_group(),
        inside a single transaction so a failure rolls everything back."""
        logger.info('Synchronizing user groups..')
        with transaction.atomic():
            for user in User.objects.all():
                user.groups.clear()
                user.save()
                group_name = set_user_group(user)
                if group_name:
                    logger.debug(
                        'Group set: {} -> {}'.format(user.username, group_name)
                    )
        logger.info(
            'Synchronized groups for {} users'.format(
                # .count() runs COUNT(*) directly; the redundant .all() was dropped.
                User.objects.count()
            )
        )
1a994d2673e207331e66eaa0919c98e095dffbbc | 169 | py | Python | config/settings/local.py | MattE-J/django-drf | 0471673a544dcf089dac6752689ac2ea7869e633 | [
"MIT"
] | null | null | null | config/settings/local.py | MattE-J/django-drf | 0471673a544dcf089dac6752689ac2ea7869e633 | [
"MIT"
] | null | null | null | config/settings/local.py | MattE-J/django-drf | 0471673a544dcf089dac6752689ac2ea7869e633 | [
"MIT"
] | null | null | null | from .base import *
# Fallback secret key is for local development only; always override it in
# real deployments via the DJANGO_SECRET_KEY environment variable.
SECRET_KEY = env('DJANGO_SECRET_KEY' , default='f8$t@ah-8n9fh%1@(&^or#8((8x#+h0^6!#spyf6egk1gs!si7')
# Debug defaults to on for this local settings profile.
DEBUG = env.bool('DJANGO_DEBUG', default=True)
1a99874718e166ff8a90d8a9ae7cc2d62d45ee11 | 6,667 | py | Python | morphablegraphs/constraints/spatial_constraints/splines/segment_list.py | dfki-asr/morphablegraphs | 02c77aab72aa4b58f4067c720f5d124f0be3ea80 | [
"MIT"
] | 5 | 2020-03-03T21:07:01.000Z | 2021-05-12T16:59:28.000Z | morphablegraphs/constraints/spatial_constraints/splines/segment_list.py | dfki-asr/morphablegraphs | 02c77aab72aa4b58f4067c720f5d124f0be3ea80 | [
"MIT"
] | null | null | null | morphablegraphs/constraints/spatial_constraints/splines/segment_list.py | dfki-asr/morphablegraphs | 02c77aab72aa4b58f4067c720f5d124f0be3ea80 | [
"MIT"
] | 1 | 2020-07-20T06:57:08.000Z | 2020-07-20T06:57:08.000Z | #!/usr/bin/env python
#
# Copyright 2019 DFKI GmbH.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the
# following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
# USE OR OTHER DEALINGS IN THE SOFTWARE.
import heapq
import numpy as np
from math import sqrt
from .spline_segment import SplineSegment
class SegmentList(object):
    """Piecewise-linear approximation of a spline supporting closest-point
    queries via recursive segment subdivision."""
    def __init__(self, closest_point_search_accuracy=0.001, closest_point_search_max_iterations=5000, segments=None):
        # segments: optional list of SplineSegment-like objects (start/center/end).
        self.segments = segments
        # Termination thresholds for the iterative subdivision search.
        self.closest_point_search_accuracy = closest_point_search_accuracy
        self.closest_point_search_max_iterations = closest_point_search_max_iterations
    def construct_from_spline(self, spline, min_arc_length=0, max_arc_length=-1, granularity=1000):
        """ Constructs line segments out of the evaluated points
        with the given granularity.

        Samples the spline at `granularity` parameter steps, keeps points whose
        arc length lies in [min_arc_length, max_arc_length] (max_arc_length <= 0
        means the full arc length), and joins consecutive points into
        SplineSegments stored on self.segments.

        Returns
        -------
        True if at least one segment was built, False otherwise
        """
        points = []
        step_size = 1.0 / granularity
        if max_arc_length <= 0:
            max_arc_length = spline.full_arc_length
        if abs(min_arc_length-max_arc_length) > step_size:
            u = 0
            # NOTE(review): floating-point accumulation of u may skip u == 1.0
            # exactly; acceptable at this granularity.
            while u <= 1.0:
                arc_length = spline.get_absolute_arc_length(u)
                # TODO make more efficient by looking up min_u
                if arc_length >= min_arc_length and arc_length <= max_arc_length:
                    point = spline.query_point_by_parameter(u)
                    points.append(point)
                u += step_size
            self.segments = []
            index = 0
            while index < len(points) - 1:
                start = np.array(points[index])
                end = np.array(points[index + 1])
                center = 0.5 * (end - start) + start
                segment = SplineSegment(start, center, end)
                self.segments.append(segment)
                index += 1
            return index > 0
        else:
            return False
    def find_closest_point(self, point):
        """Return (closest_point, distance) on the polyline, or (None, -1)
        when no segments are available."""
        if self.segments is None or len(self.segments) == 0:
            return None, -1
        candidates = self.find_two_closest_segments(point)
        if len(candidates) >= 2:
            # Refine both candidate segments and keep the nearer result.
            closest_point_1, distance_1 = self._find_closest_point_on_segment(candidates[0][1], point)
            closest_point_2, distance_2 = self._find_closest_point_on_segment(candidates[1][1], point)
            if distance_1 < distance_2:
                return closest_point_1, distance_1
            else:
                return closest_point_2, distance_2
        elif len(candidates) == 1:
            closest_point, distance = self._find_closest_point_on_segment(candidates[0][1], point)
            return closest_point, distance
    def find_closest_segment(self, point):
        """
        Returns
        -------
        * closest_segment : SplineSegment
            the segment whose center is nearest to `point`
        * min_distance : float
            distance to this segment's center
        """
        closest_segment = None
        min_distance = np.inf
        for s in self.segments:
            distance = np.linalg.norm(s.center-point)
            if distance < min_distance:
                closest_segment = s
                min_distance = distance
        return closest_segment, min_distance
    def find_two_closest_segments(self, point):
        """ Uses a heap queue to find the two segments whose centers are
        closest to `point`.

        Returns
        -------
        * closest_segments : List of Tuples
            each entry is (distance to the segment center, segment)
        """
        heap = [] # heap queue
        idx = 0
        while idx < len(self.segments):
            distance = np.linalg.norm(self.segments[idx].center-point)
            # print point,distance,segments[index]
            # #Push the value item onto the heap, maintaining the heap invariant.
            heapq.heappush(heap, (distance, idx))
            idx += 1
        closest_segments = []
        count = 0
        # Pop at most two entries (fewer when the list has fewer segments).
        while idx-count > 0 and count < 2:
            distance, index = heapq.heappop(heap)
            segment = (distance, self.segments[index])
            closest_segments.append(segment)
            count += 1
        return closest_segments
    def _find_closest_point_on_segment(self, segment, point):
        """ Find closest point by dividing the segment until the
        difference in the distance gets smaller than the accuracy.

        Returns
        -------
        * closest_point : np.ndarray
            point on the spline
        * distance : float
            distance to input point
        """
        segment_length = np.inf
        distance = np.inf
        segment_list = SegmentList(self.closest_point_search_accuracy, self.closest_point_search_max_iterations, segment.divide())
        iteration = 0
        # The loop body runs at least once for a positive max_iterations, so
        # closest_segment is always bound before use below.
        while segment_length > self.closest_point_search_accuracy and distance > self.closest_point_search_accuracy and iteration < self.closest_point_search_max_iterations:
            closest_segment, distance = segment_list.find_closest_segment(point)
            segment_length = np.linalg.norm(closest_segment.end-closest_segment.start)
            segment_list = SegmentList(self.closest_point_search_accuracy, self.closest_point_search_max_iterations, closest_segment.divide())
            iteration += 1
        closest_point = closest_segment.center # extract center of closest segment
        return closest_point, distance
| 43.292208 | 177 | 0.637318 | 5,457 | 0.818509 | 0 | 0 | 0 | 0 | 0 | 0 | 2,472 | 0.370781 |
1a9a080b7593d8c6b46cbb28b3da39887e1daca7 | 5,973 | py | Python | edera/exceptions.py | thoughteer/edera | c4ddb5d8a25906c3bd773c91afb3260fc0b704f2 | [
"MIT"
] | 3 | 2018-11-27T15:45:19.000Z | 2018-12-21T20:32:10.000Z | edera/exceptions.py | thoughteer/edera | c4ddb5d8a25906c3bd773c91afb3260fc0b704f2 | [
"MIT"
] | 18 | 2018-12-02T18:38:59.000Z | 2020-02-05T22:09:37.000Z | edera/exceptions.py | thoughteer/edera | c4ddb5d8a25906c3bd773c91afb3260fc0b704f2 | [
"MIT"
] | null | null | null | """
This module declares all custom exception classes.
"""
import edera.helpers
class Error(Exception):
    """
    The base class for all exceptions within Edera.

    Catching this type catches every error the library raises deliberately.
    """
class ExcusableError(Error):
    """
    The base class for all "excusable" errors.

    They barely deserve a warning: handlers may treat them as non-fatal.
    """
class UndefinedParameterError(Error):
    """
    A required parameter was never supplied.
    """

    def __init__(self, name):
        """
        Args:
            name (String) - the name of the missing parameter
        """
        message = "parameter `%s` is undefined" % name
        super(UndefinedParameterError, self).__init__(message)
class UnknownParameterError(Error):
    """
    A parameter with an unrecognized name was supplied.
    """

    def __init__(self, name):
        """
        Args:
            name (String) - the name of the unexpected parameter
        """
        super(UnknownParameterError, self).__init__(
            "passed unknown parameter `%s`" % name)
class ValueQualificationError(Error):
    """
    A qualifier rejected the value it was given.
    """

    def __init__(self, value, explanation):
        """
        Args:
            value (Any) - the rejected value
            explanation (String) - why qualification failed
        """
        details = (value, explanation)
        super(ValueQualificationError, self).__init__(
            "value %r was not qualified: %s" % details)
class LockAcquisitionError(ExcusableError):
    """
    Acquiring a lock failed because somebody already holds it.
    """

    def __init__(self, key):
        """
        Args:
            key (String) - the contended key
        """
        super(LockAcquisitionError, self).__init__(
            "lock for key `%s` has been already acquired" % key)
class LockRetentionError(ExcusableError):
    """
    A previously held lock could not be retained.
    """

    def __init__(self, key):
        """
        Args:
            key (String) - the key of the lost lock
        """
        super(LockRetentionError, self).__init__(
            "lock for key `%s` was lost" % key)
class RequisiteConformationError(Error):
    """
    An object could not be used as a requisite for a task.
    """

    def __init__(self, requisite):
        """
        Args:
            requisite (Any) - the offending object
        """
        super(RequisiteConformationError, self).__init__(
            "cannot conform %r" % requisite)
class CircularDependencyError(Error):
    """
    A dependency graph turned out to contain a cycle.

    Attributes:
        cycle (List[Any]) - the detected cycle
    """

    def __init__(self, cycle):
        """
        Args:
            cycle (List[Any]) - the detected cycle
        """
        rendered = edera.helpers.render(cycle)
        super(CircularDependencyError, self).__init__(
            "circular dependency detected: %s ..." % rendered)
        self.cycle = cycle
class TargetVerificationError(Error):
    """
    A task finished executing, yet its target still does not hold.

    Attributes:
        task (Task) - the broken task
    """

    def __init__(self, task):
        """
        Args:
            task (Task) - the broken task
        """
        super(TargetVerificationError, self).__init__(
            "target %r of task %r is false after execution" % (task.target, task))
        self.task = task
class InvocationError(Error):
    """
    The base class for all invocation errors within invokers.

    Raised when worker invocation fails for an inexcusable reason.
    """
class ExcusableInvocationError(ExcusableError):
    """
    The base class for all excusable invocation errors within invokers.

    Raised when worker invocation stops for a tolerable reason.
    """
class MasterSlaveInvocationError(InvocationError):
    """
    One or more slave workers failed.

    Attributes:
        failed_slaves (List[Worker]) - failed slave workers
    """

    def __init__(self, failed_slaves):
        """
        Args:
            failed_slaves (List[Worker]) - failed slave workers
        """
        rendered = edera.helpers.render(failed_slaves)
        super(MasterSlaveInvocationError, self).__init__(
            "some of the slaves failed: %s" % rendered)
        self.failed_slaves = failed_slaves
class ExcusableMasterSlaveInvocationError(ExcusableInvocationError):
    """
    One or more slave workers stopped for an acceptable reason.

    Attributes:
        stopped_slaves (List[Worker]) - stopped slave workers
    """

    def __init__(self, stopped_slaves):
        """
        Args:
            stopped_slaves (List[Worker]) - stopped slave workers
        """
        rendered = edera.helpers.render(stopped_slaves)
        super(ExcusableMasterSlaveInvocationError, self).__init__(
            "some of the slaves stopped: %s" % rendered)
        self.stopped_slaves = stopped_slaves
class WorkflowExecutionError(Error):
    """
    Inexcusable errors occurred during workflow execution.

    Attributes:
        failed_tasks (List[Task]) - failed tasks
    """

    def __init__(self, failed_tasks):
        """
        Args:
            failed_tasks (List[Task]) - failed tasks
        """
        rendered = edera.helpers.render(failed_tasks)
        super(WorkflowExecutionError, self).__init__(
            "some of the tasks failed: %s" % rendered)
        self.failed_tasks = failed_tasks
class ExcusableWorkflowExecutionError(ExcusableError):
    """
    Only excusable errors occurred during workflow execution.

    Attributes:
        stopped_tasks (List[Task]) - stopped tasks
    """

    def __init__(self, stopped_tasks):
        """
        Args:
            stopped_tasks (List[Task]) - stopped tasks
        """
        rendered = edera.helpers.render(stopped_tasks)
        super(ExcusableWorkflowExecutionError, self).__init__(
            "some of the tasks stopped: %s" % rendered)
        self.stopped_tasks = stopped_tasks
class WorkflowNormalizationError(Error):
    """
    The described workflow cannot be normalized.
    """
class WorkflowTestificationError(Error):
    """
    An invalid test/stub definition was provided for the workflow.
    """
class StorageOperationError(Error):
    """
    An operation on the underlying storage failed.
    """
class MonitorInconsistencyError(ExcusableError):
    """
    A checkpoint is no longer valid.

    This might happen when you run multiple instances of $MonitorWatcher at the same time.
    But it's totally fine, should be ignored.
    """
class ConsumptionError(Error):
    """
    The consumer could not accept an element.
    """
| 23.702381 | 90 | 0.612925 | 5,829 | 0.975892 | 0 | 0 | 0 | 0 | 0 | 0 | 3,340 | 0.559183 |
1a9b95beef4372766d5b6cf6a163695415727640 | 1,153 | py | Python | src/lib/todo_classes.py | louisroyer/todopy | 8aef035bc82b13a8053394e8942c34de72fae3bf | [
"CC0-1.0"
] | null | null | null | src/lib/todo_classes.py | louisroyer/todopy | 8aef035bc82b13a8053394e8942c34de72fae3bf | [
"CC0-1.0"
] | 2 | 2020-09-01T12:32:25.000Z | 2020-09-01T12:33:11.000Z | src/lib/todo_classes.py | louisroyer/todopy | 8aef035bc82b13a8053394e8942c34de72fae3bf | [
"CC0-1.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''Classes for todo files.'''
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
if __debug__:
if __package__:
from . import todo_parser as _todo_parser
else:
import todo_parser as _todo_parser
if __name__ != '__main__':
__author__ = 'Louis Royer'
__credits__ = '🄯 2018, Louis Royer - CC0-1.0'
__date__ = '2018-09-15'
__version__ = '0.0.1'
class Task:
    """A single todo task read from a file.

    Internal state:
        _title: task title text.
        _filename: file the task was read from.
        _status: current status; must be in _todo_parser.TASK_STATUS.
        _updated_status: True once the status was changed via the setter.
    """

    def __init__(self, title: str, filename: str, status):
        assert status in _todo_parser.TASK_STATUS, 'Invalid status'
        self._title = title
        self._filename = filename
        self._status = status
        self._updated_status = False

    @property
    def title(self):
        '''Task title.'''
        return self._title  # BUG FIX: was the bare (undefined) name `_title`

    @property
    def filename(self):
        '''Filename where task was written.'''
        return self._filename  # BUG FIX: was the bare name `_filename`

    @property
    def status(self):
        '''Task status.'''
        return self._status  # BUG FIX: was the bare name `_status`

    @status.setter
    def status(self, value):
        # BUG FIX: the assertion referenced the undefined names `status` and
        # `STATUS`; validate the incoming value against the parser's set.
        assert value in _todo_parser.TASK_STATUS, 'Invalid status'
        self._updated_status = True
        self._status = value
| 24.020833 | 67 | 0.61405 | 717 | 0.620242 | 0 | 0 | 422 | 0.365052 | 0 | 0 | 308 | 0.266436 |
1a9bab58f096f85858252ac66383ad075450f3a8 | 82 | py | Python | qmmm_neuralnets/files/__init__.py | adamduster/qmmm_neuralnets | 70f35ec0659e8a424cb66ad874d22232c22fcba5 | [
"MIT"
] | null | null | null | qmmm_neuralnets/files/__init__.py | adamduster/qmmm_neuralnets | 70f35ec0659e8a424cb66ad874d22232c22fcba5 | [
"MIT"
] | 1 | 2021-09-17T18:19:48.000Z | 2021-09-17T18:19:48.000Z | qmmm_neuralnets/files/__init__.py | lin-compchem/qmmm_neuralnets | 70f35ec0659e8a424cb66ad874d22232c22fcba5 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
"""
from .bpsf_keys import *
from .h5_file_ops import * | 16.4 | 26 | 0.670732 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 29 | 0.353659 |
1a9c06b409cec023288fe5de8610286e3d8638d4 | 4,347 | py | Python | attendees/persons/admin.py | xjlin0/attendees | 3c337ee68c00f17cbbbe26f2e33131e57850e4ed | [
"MIT"
] | 1 | 2020-03-26T00:42:04.000Z | 2020-03-26T00:42:04.000Z | attendees/persons/admin.py | xjlin0/attendees | 3c337ee68c00f17cbbbe26f2e33131e57850e4ed | [
"MIT"
] | null | null | null | attendees/persons/admin.py | xjlin0/attendees | 3c337ee68c00f17cbbbe26f2e33131e57850e4ed | [
"MIT"
] | null | null | null | from django_summernote.admin import SummernoteModelAdmin
from django.contrib.postgres import fields
from django_json_widget.widgets import JSONEditorWidget
from django.contrib import admin
from attendees.occasions.models import *
from attendees.whereabouts.models import *
from .models import *
# Register your models here.
class AttendeeAddressInline(admin.StackedInline):
    # Stacked inline editor for an attendee's addresses on the parent page.
    model = AttendeeAddress
    extra = 0  # no blank extra forms
class AttendingMeetInline(admin.StackedInline):
    # Stacked inline editor for AttendingMeet rows on the parent page.
    model = AttendingMeet
    extra = 0  # no blank extra forms
class RelationshipInline(admin.TabularInline):
    # Tabular inline for Relationship; fk_name disambiguates because the
    # model has two foreign keys to Attendee (from/to).
    model = Relationship
    fk_name = 'from_attendee'
    extra = 0
class FamilyAttendeeInline(admin.TabularInline):
    # Tabular inline linking attendees to a family.
    model = FamilyAttendee
    extra = 0
class CategoryAdmin(admin.ModelAdmin):
    # Admin for Category; the slug is auto-filled from the display name.
    readonly_fields = ['id', 'created', 'modified']
    prepopulated_fields = {"slug": ("display_name",)}
    list_display = ('id', 'display_name', 'slug', 'display_order', 'description', 'modified')
class FamilyAdmin(admin.ModelAdmin):
    # Admin for Family with its members editable inline.
    readonly_fields = ['id', 'created', 'modified']
    inlines = (FamilyAttendeeInline,)
    list_display_links = ('display_name',)
    list_display = ('id', 'display_name', 'display_order', 'modified')
    # Single unnamed fieldset: editable fields on one row, audit fields on the next.
    fieldsets = (
        (None, {"fields": (tuple(['display_name', 'display_order']),
                           tuple(['id', 'created', 'modified']),
                           ), }),
    )
class FamilyAttendeeAdmin(admin.ModelAdmin):
    # Admin for the family-to-attendee join model.
    readonly_fields = ['id', 'created', 'modified']
    list_display = ('id', 'family', 'attendee', 'role', 'modified')
class RelationAdmin(admin.ModelAdmin):
    # Admin for the Relation lookup table (relationship kinds).
    readonly_fields = ['id', 'created', 'modified']
    list_display_links = ('title',)
    list_display = ('id', 'title', 'reciprocal_ids', 'emergency_contact', 'scheduler', 'relative', 'display_order')
class AttendeeAdmin(admin.ModelAdmin):
    # Admin for Attendee; JSON columns get an interactive editor widget.
    formfield_overrides = {
        fields.JSONField: {'widget': JSONEditorWidget},
    }
    search_fields = ('first_name', 'last_name', 'last_name2', 'first_name2')
    readonly_fields = ['id', 'created', 'modified']
    inlines = (AttendeeAddressInline, RelationshipInline)
    list_display_links = ('last_name',)
    list_display = ('id', 'first_name', 'last_name', 'last_name2', 'first_name2', 'progressions', 'infos')
class RegistrationAdmin(admin.ModelAdmin):
    # Admin for Registration; JSON columns get an interactive editor widget.
    formfield_overrides = {
        fields.JSONField: {'widget': JSONEditorWidget},
    }
    list_display_links = ('main_attendee',)
    list_display = ('id', 'main_attendee', 'assembly', 'infos', 'modified')
class AttendanceInline(admin.StackedInline):
    # Stacked inline for Attendance rows (currently unused; see AttendingAdmin).
    model = Attendance
    extra = 0
class AttendingAdmin(admin.ModelAdmin):
    # Admin for Attending; JSON columns get an interactive editor widget.
    formfield_overrides = {
        fields.JSONField: {'widget': JSONEditorWidget},
    }
    search_fields = ('attendee__first_name', 'attendee__last_name', 'attendee__first_name2', 'attendee__last_name2')
    list_display_links = ('attendee',)
    readonly_fields = ['id', 'created', 'modified']
    inlines = (AttendingMeetInline,)  # NOTE: AttendanceInline is omitted -- adding it makes creating a new Attending fail on meet_names
    list_display = ('id', 'registration', 'attendee', 'meet_names', 'finish', 'infos')
class NoteAdmin(SummernoteModelAdmin):
    # Admin for Note; the body field uses the Summernote rich-text editor.
    summernote_fields = ('body',)
    readonly_fields = ['id', 'created', 'modified']
    list_display = ('body', 'content_type', 'object_id', 'content_object', 'display_order', 'modified')
class RelationshipAdmin(admin.ModelAdmin):
    # Admin for attendee-to-attendee relationships.
    list_display_links = ('relation',)
    readonly_fields = ['id', 'created', 'modified']
    list_display = ('id', 'from_attendee', 'relation', 'to_attendee', 'emergency_contact', 'scheduler', 'in_family', 'finish')
class AttendingMeetAdmin(admin.ModelAdmin):
    # Admin for the attending-to-meet join model.
    list_display_links = ('attending',)
    readonly_fields = ['id', 'created', 'modified']
    list_display = ('id', 'attending', 'meet', 'character', 'category', 'modified')
# Register each model with its customized ModelAdmin on the default admin site.
admin.site.register(Category, CategoryAdmin)
admin.site.register(Note, NoteAdmin)
admin.site.register(Family, FamilyAdmin)
admin.site.register(Attendee, AttendeeAdmin)
admin.site.register(FamilyAttendee, FamilyAttendeeAdmin)
admin.site.register(Registration, RegistrationAdmin)
admin.site.register(Attending, AttendingAdmin)
admin.site.register(Relation, RelationAdmin)
admin.site.register(Relationship, RelationshipAdmin)
admin.site.register(AttendingMeet, AttendingMeetAdmin)
| 34.776 | 126 | 0.707844 | 3,497 | 0.804463 | 0 | 0 | 0 | 0 | 0 | 0 | 1,263 | 0.290545 |
1a9cdff5db190924381f2d1339b1a86c96487d13 | 27,929 | py | Python | AnalysisDAFM/program/DAFM/dafm.py | rcmurray/WorkflowComponents | 368f8132ce5bef3ac3fb9d8e1a13a08f91764cfa | [
"BSD-4-Clause-UC"
] | 26 | 2016-09-14T18:15:29.000Z | 2021-12-01T20:01:29.000Z | AnalysisDAFM/program/DAFM/dafm.py | rcmurray/WorkflowComponents | 368f8132ce5bef3ac3fb9d8e1a13a08f91764cfa | [
"BSD-4-Clause-UC"
] | 122 | 2017-09-12T15:22:03.000Z | 2021-09-27T17:25:08.000Z | AnalysisDAFM/program/DAFM/dafm.py | rcmurray/WorkflowComponents | 368f8132ce5bef3ac3fb9d8e1a13a08f91764cfa | [
"BSD-4-Clause-UC"
] | 17 | 2017-02-16T16:04:41.000Z | 2021-02-06T05:36:44.000Z | from sklearn.metrics import mean_squared_error, log_loss
from keras.models import Model
from keras.models import load_model
from keras.layers import Input, Dense
from keras.layers.recurrent import SimpleRNN
from keras.layers.merge import multiply, concatenate, add
from keras import backend as K
from keras import initializers
from keras.callbacks import EarlyStopping
from keras.layers.wrappers import TimeDistributed
from keras.callbacks import Callback
from keras import optimizers
import pandas as pd
import numpy as np
from keras.constraints import max_norm, non_neg, unit_norm
np.random.seed(42)
from math import sqrt
import os
import sys
from collections import defaultdict
class DeepAFM:
    def __init__(self):
        # Stateless constructor; all configuration happens later in build().
        pass
    def custom_bce(self, y_true, y_pred):
        """Binary cross-entropy loss that down-weights padded responses.

        Positions where y_true == -1 are padding; `b` is a 0/1 mask of real
        responses.  Each sample's mean BCE is scaled by the mean of its mask
        before summing.  NOTE(review): this approximates a masked mean rather
        than computing it exactly (padded positions still enter both means).
        """
        b = K.not_equal(y_true, -K.ones_like(y_true))
        b = K.cast(b, dtype='float32')
        ans = K.mean(K.binary_crossentropy(y_true, y_pred), axis=-1) * K.mean(b, axis=-1)
        ans = K.cast(ans, dtype='float32')
        return K.sum(ans)
def custom_activation(self, x):
if self.activation.split('-')[0] == "custom":
a = float(self.activation.split('-')[1])
return 1.0 / ( 1 + K.exp(-a*x) )
elif self.activation.split('-')[0] == "rounded":
K.minimum(K.maximum(K.round(K.sigmoid(x)), 0), 1)
    def custom_init(self, shape, dtype=None):
        # Keras initializer returning the preset Q-matrix (self.Q_jk_initialize).
        # NOTE(review): the requested `shape` is ignored; the caller must ask
        # for a layer whose kernel matches the Q-matrix dimensions.
        return K.cast_to_floatx(self.Q_jk_initialize)
    def custom_random(self, shape, dtype=None):
        """Random Keras initializer; the distribution is picked by self.random_init."""
        if self.random_init == "normal":
            # N(mean=0.5, stddev=0.05) with a fixed seed for reproducibility.
            return K.random_normal(shape, 0.5, 0.05, dtype=dtype, seed=22)
        else:
            # Uniform on [0, 1) with a fixed seed.
            return K.random_uniform(shape, 0, 1, dtype=dtype, seed=22)
    def f(self, x):
        """Return a Keras initializer that yields `x` reshaped to the requested shape."""
        def custom_init(shape, dtype=None):
            return K.cast_to_floatx(np.reshape(x, shape))
        return custom_init
    def build(self, dafm_type="dafm-afm", optimizer="rmsprop", learning_rate=0.01, activation="linear", Q_jk_initialize=0, section="", section_count=0, model1="", stateful=False, theta_student="False", student_count=0, binary="False"):
        """Assemble and compile the Keras DAFM model.

        Args:
            dafm_type: model-variant selector.  Extra options are encoded in
                the string itself: "_<factor>_different" rescales the skill
                count, "round-fine-tuned_<thr>" carries a rounding threshold,
                and "<type>^<factor>" widens the skill dimension.
            optimizer: one of "rmsprop", "adam", "adagrad".
            learning_rate: learning rate for the chosen optimizer.
            activation: activation for the Q_jk layer; values containing '-'
                ("custom-<a>", "rounded-...") select self.custom_activation.
            Q_jk_initialize: (steps x skills) Q-matrix used to size/initialize
                the network.
            section: "onehot" adds a per-section input and weight.
            section_count: width of the one-hot section input.
            model1: previously trained model whose weights seed the
                fine-tuning variants.
            stateful: accepted but never read here -- NOTE(review).
            theta_student: anything but "False" adds a per-student theta input.
            student_count: width of the one-hot student input.
            binary: anything but "False" uses BinaryDense layers.
                NOTE(review): BinaryDense is referenced but not among this
                file's visible imports -- confirm it is provided elsewhere.

        Returns:
            a compiled keras.models.Model; for "round-fine-tuned" the given
            model1 is returned with its Q_jk weights rounded in place.
        """
        skills = np.shape(Q_jk_initialize)[1]
        steps = np.shape(Q_jk_initialize)[0]
        self.activation = activation
        if '-' in self.activation:
            activation = self.custom_activation
        # Decode options that are packed into the dafm_type string.
        if dafm_type.split("_")[-1] == "different":
            skills = int( float(dafm_type.split("_")[-2])*skills )
            dafm_type = dafm_type.split('_')[0]
        if dafm_type.split("_")[0] == "round-fine-tuned":
            try:
                self.round_threshold = float(dafm_type.split("_")[-1])
                dafm_type = dafm_type.split("_")[0]
            except:
                pass
        q_jk_size = skills
        if '^' in dafm_type:
            q_jk_size = skills
            skills = int (float(dafm_type.split('^')[-1]) * skills)
            dafm_type = dafm_type.split('^')[0]
        self.dafm_type = dafm_type
        # Map the variant onto its (qtrainable, finetuning, randomize) flags.
        if dafm_type == "random-uniform" or dafm_type == "random-normal":
            qtrainable, finetuning, randomize = True, False, True
            self.random_init = dafm_type.split('-')[-1]
        elif dafm_type == "dafm-afm":
            qtrainable, finetuning, randomize = False, False, False
        elif dafm_type == "fine-tuned":
            qtrainable, finetuning, randomize = True, True, False
        elif dafm_type == "kcinitialize":
            qtrainable, finetuning, randomize = True, False, False
        elif dafm_type== "round-fine-tuned":
            # Round model1's learned Q-matrix to hard 0/1 entries in place and
            # return it (a threshold-based alternative was removed as dead code).
            Q_jk1 = model1.get_layer("Q_jk").get_weights()[0]
            Q_jk1 = np.minimum(np.ones(np.shape(Q_jk1)), np.maximum(np.round(Q_jk1), np.zeros(np.shape(Q_jk1))))
            model1.get_layer("Q_jk").set_weights([Q_jk1])
            return model1
        elif dafm_type == "qjk-dense":
            qtrainable, finetuning, randomize = False, False, False
            activation_dense = activation
        elif dafm_type == "random-qjk-dense-normal" or dafm_type == "random-qjk-dense-uniform":
            qtrainable, finetuning, randomize = False, False, True
            self.random_init = dafm_type.split('-')[-1]
            activation_dense = activation
        else:
            print ("No Valid Model Found")
            sys.exit()
        # --- Model inputs -------------------------------------------------
        if section == "onehot":
            section_input = Input(batch_shape=(None, None, section_count), name='section_input')
        if not theta_student=="False":
            student_input = Input(batch_shape=(None, None, student_count), name='student_input')
        virtual_input1 = Input(batch_shape=(None, None, 1), name='virtual_input1')
        # Per-skill easiness (B_k), learning rate (T_k) and a global bias,
        # optionally seeded from model1's weights when fine-tuning.
        if finetuning:
            B_k = TimeDistributed(Dense(skills, activation='linear', kernel_initializer=self.f(model1.get_layer("B_k").get_weights()[0]), use_bias=False), name="B_k")(virtual_input1)
            T_k = TimeDistributed(Dense(skills, activation='linear', kernel_initializer=self.f(model1.get_layer("T_k").get_weights()[0]), use_bias=False), name="T_k")(virtual_input1)
            bias_layer = TimeDistributed(Dense(1, activation='linear', use_bias=False, kernel_initializer=self.f(model1.get_layer("bias").get_weights()[0]), trainable=True), name="bias")(virtual_input1)
        else:
            B_k = TimeDistributed(Dense(skills, activation='linear', use_bias=False, trainable=True), name="B_k")(virtual_input1)
            T_k = TimeDistributed(Dense(skills, activation='linear', use_bias=False, trainable=True), name="T_k")(virtual_input1)
            bias_layer = TimeDistributed(Dense(1, activation='linear', use_bias=False, kernel_initializer=initializers.Zeros(), trainable=True), name="bias")(virtual_input1)
        step_input = Input(batch_shape=(None, None, steps), name='step_input')
        # The Q_jk layer maps each step onto skills (initialized randomly or
        # from the supplied Q-matrix; binary variants use BinaryDense).
        if randomize:
            if binary=="False":
                Q_jk = TimeDistributed(Dense(q_jk_size, use_bias=False, activation=activation, kernel_initializer=self.custom_random), trainable=qtrainable ,name="Q_jk")(step_input)
            else:
                Q_jk = TimeDistributed(BinaryDense(q_jk_size, use_bias=False, activation=activation, kernel_initializer=self.custom_random),trainable=qtrainable, name="Q_jk")(step_input)
        else:
            if binary=="False":
                Q_jk = TimeDistributed(Dense(skills, activation=activation, kernel_initializer=self.f(Q_jk_initialize), use_bias=False,trainable=qtrainable), trainable=qtrainable, name="Q_jk")(step_input)
            else:
                Q_jk = TimeDistributed(BinaryDense(skills, activation=activation, kernel_initializer=self.f(Q_jk_initialize),trainable=qtrainable,
                use_bias=False), name="Q_jk", trainable=qtrainable)(step_input)
        # Optional second dense layer on top of Q_jk for the *-dense variants.
        if dafm_type == "random-qjk-dense-normal" or dafm_type == "random-qjk-dense-uniform":
            if binary =="False":
                Q_jk = TimeDistributed(Dense(skills, activation=activation_dense, use_bias=False, kernel_initializer=self.custom_random, trainable=True), name="Q_jk_dense")(Q_jk)
            else:
                Q_jk = TimeDistributed(BinaryDense(skills, activation=activation_dense, use_bias=False, kernel_initializer=self.custom_random, trainable=True), name="Q_jk_dense")(Q_jk)
        elif dafm_type == "qjk-dense":
            if binary =='False':
                Q_jk = TimeDistributed(Dense(skills, activation=activation_dense, use_bias=False, kernel_initializer=initializers.Identity(), trainable=True), name="Q_jk_dense")(Q_jk)
            else:
                Q_jk = TimeDistributed(BinaryDense(skills, activation=activation_dense, use_bias=False, kernel_initializer=initializers.Identity(), trainable=True), name="Q_jk_dense")(Q_jk)
        else:
            pass
        # --- AFM core: bias + sum_k(Q_jk * B_k) + sum_k(Q_jk * P_k * T_k) --
        Qjk_mul_Bk = multiply([Q_jk, B_k])
        sum_Qjk_Bk = TimeDistributed(Dense(1, activation='linear', trainable=False, kernel_initializer=initializers.Ones(), use_bias=False), trainable=False,name="sum_Qjk_Bk")(Qjk_mul_Bk)
        # Frozen identity RNN accumulates per-skill practice counts over time.
        P_k = SimpleRNN(skills, kernel_initializer=initializers.Identity(), recurrent_initializer=initializers.Identity() , use_bias=False, trainable=False, activation='linear', return_sequences=True, name="P_k")(Q_jk)
        Qjk_mul_Pk_mul_Tk = multiply([Q_jk, P_k, T_k])
        sum_Qjk_Pk_Tk = TimeDistributed(Dense(1, activation='linear', trainable=False, kernel_initializer=initializers.Ones(), use_bias=False),trainable=False, name="sum_Qjk_Pk_Tk")(Qjk_mul_Pk_mul_Tk)
        Concatenate = concatenate([bias_layer, sum_Qjk_Bk, sum_Qjk_Pk_Tk])
        # Optional per-student ability term (theta).
        if not (theta_student=="False"):
            if finetuning:
                theta = TimeDistributed(Dense(1, activation="linear", use_bias=False, kernel_initializer=self.f(model1.get_layer("theta").get_weights()[0])), name='theta')(student_input)
            else:
                theta = TimeDistributed(Dense(1, activation="linear", use_bias=False), name='theta')(student_input)
            Concatenate = concatenate([Concatenate, theta])
        # Optional per-section term (S_k).
        if section == "onehot":
            if finetuning:
                S_k = TimeDistributed(Dense(1, activation="linear", use_bias=False, kernel_initializer=self.f(model1.get_layer("S_k").get_weights()[0])), name='S_k')(section_input)
            else:
                S_k = TimeDistributed(Dense(1, activation="linear", use_bias=False), name='S_k')(section_input)
            Concatenate = concatenate([Concatenate, S_k])
        # Frozen all-ones sigmoid output sums the concatenated logit terms.
        output = TimeDistributed(Dense(1, activation="sigmoid", trainable=False, kernel_initializer=initializers.Ones(), use_bias=False), trainable=False, name="output")(Concatenate)
        if section == "onehot" and not (theta_student=="False"):
            model = Model(inputs=[virtual_input1, step_input, section_input, student_input], outputs=output)
        elif section == "onehot" and theta_student=="False":
            model = Model(inputs=[virtual_input1, step_input, section_input], outputs=output)
        elif not (section == "onehot") and not (theta_student=="False"):
            model = Model(inputs=[virtual_input1, step_input, student_input], outputs=output)
        else:
            model = Model(inputs=[virtual_input1, step_input], outputs=output)
        d_optimizer = {"rmsprop":optimizers.RMSprop(lr=learning_rate), "adam":optimizers.Adam(lr=learning_rate), "adagrad":optimizers.Adagrad(lr=learning_rate) }
        model.compile( optimizer = d_optimizer[optimizer],
                        loss = self.custom_bce)
        return model
    def fit(self, x_train, y_train, x_train_section, x_train_student, x_test, y_test, x_test_section, x_test_student, model, epochs=5, batch_size=32, loaded=False, validation=True):
        """Train `model` on one in-memory dataset with manual early stopping.

        With validation=True a manual loop (patience 5) keeps the model state
        that achieved the best validation loss; with validation=False a single
        model.fit call runs with Keras EarlyStopping on the training loss.
        "round-fine-tuned" models and pre-loaded models skip training entirely.

        Returns:
            (best_model, AIC, BIC, epochs_run, loss_epoch) where loss_epoch is
            a dict of per-epoch "epoch"/"loss"/"val_loss"/"patience" lists.
        """
        loss_epoch = {"epoch":[], "loss":[], "val_loss":[], 'patience':[]}
        print ("Max Epochs", epochs)
        if self.dafm_type == "round-fine-tuned" or loaded:
            best_model = model
        patience, epoch = 0 , 1
        prev_best_val_loss = np.inf
        counter = 0
        # Constant all-ones tensors feeding the virtual_input1 model input.
        virtual_input1 = np.ones([np.shape(x_train)[0], np.shape(x_train)[1], 1])
        virtual_input1_test = np.ones([np.shape(x_test)[0], np.shape(x_test)[1], 1])
        if not validation:
            # Single fit with Keras EarlyStopping on the training loss.
            earlyStopping = EarlyStopping(monitor='loss', patience=2)
            if len(x_train_student) == 0:
                if len(x_train_section) == 0:
                    history_callback = model.fit([virtual_input1, x_train], y_train, batch_size=batch_size, epochs=epochs, callbacks=[earlyStopping], verbose=1, shuffle=True)
                else:
                    history_callback = model.fit([virtual_input1, x_train, x_train_section], y_train, batch_size=batch_size, epochs=epochs, callbacks=[earlyStopping], verbose=1, shuffle=True)
            else:
                if len(x_train_section) == 0:
                    history_callback = model.fit([virtual_input1, x_train, x_train_student], y_train, batch_size=batch_size, epochs=epochs , callbacks=[earlyStopping], verbose=1, shuffle=True)
                else:
                    history_callback = model.fit([virtual_input1, x_train, x_train_section, x_train_student], y_train, batch_size=batch_size, epochs=epochs, callbacks=[earlyStopping], verbose=1, shuffle=True)
            # No held-out data here, so "val_loss" mirrors the training loss.
            loss_epoch["loss"].extend(history_callback.history["loss"])
            loss_epoch["val_loss"].extend(history_callback.history["loss"])
            loss_epoch["epoch"].extend(list(range(epochs)))
            loss_epoch["patience"].extend(list(range(epochs)))
            best_model = model
            epoch = epochs
        else:
            # Manual early-stopping loop: one epoch at a time, patience 5.
            while (patience <=5 and epoch <= epochs and (not self.dafm_type == "round-fine-tuned") and (loaded == False)):
                # Reshuffle all training tensors with one shared permutation.
                permutation = np.random.permutation(x_train.shape[0])
                x_train = x_train[permutation]
                y_train = y_train[permutation]
                counter += 1
                if len(x_train_student) == 0:
                    if len(x_train_section) == 0:
                        history_callback = model.fit([virtual_input1, x_train], y_train, batch_size=batch_size, epochs=1, validation_data=([virtual_input1_test, x_test], y_test), verbose=0, shuffle=True)
                    else:
                        x_train_section = x_train_section[permutation]
                        history_callback = model.fit([virtual_input1, x_train, x_train_section], y_train, batch_size=batch_size, epochs=1, validation_data=([virtual_input1_test, x_test, x_test_section], y_test), verbose=0, shuffle=True)
                else:
                    x_train_student = x_train_student[permutation]
                    if len(x_train_section) == 0:
                        history_callback = model.fit([virtual_input1, x_train, x_train_student], y_train, batch_size=batch_size, epochs=1, validation_data=([virtual_input1_test, x_test, x_test_student], y_test), verbose=0, shuffle=True)
                    else:
                        x_train_section = x_train_section[permutation]
                        history_callback = model.fit([virtual_input1, x_train, x_train_section, x_train_student], y_train, batch_size=batch_size, epochs=1, validation_data=([virtual_input1_test, x_test, x_test_section, x_test_student], y_test), verbose=0, shuffle=True)
                current_val_loss = history_callback.history["val_loss"][0]
                print ("Epoch Number:", counter, "Patience:", patience, "val loss:", current_val_loss)
                loss_epoch["val_loss"].append(history_callback.history["val_loss"][0])
                loss_epoch["loss"].append(history_callback.history["loss"][0])
                loss_epoch["epoch"].append(counter)
                loss_epoch["patience"].append(patience)
                if (prev_best_val_loss - current_val_loss) > 0:
                    # Improved: remember this model state and reset patience.
                    best_model = model
                    epoch += patience + 1
                    patience = 0
                    prev_best_val_loss = current_val_loss
                else:
                    patience += 1
        # Per-response training log-losses of the selected model, used below
        # to derive the information criteria.
        if len(x_train_student)==0:
            if len(x_train_section)==0:
                x = self.bce_loss(y_train, best_model.predict([virtual_input1, x_train]), x_train)
            else:
                x = self.bce_loss(y_train, best_model.predict([virtual_input1, x_train, x_train_section]), x_train)
        else:
            if len(x_train_section)==0:
                x = self.bce_loss(y_train, best_model.predict([virtual_input1, x_train, x_train_student]), x_train)
            else:
                x = self.bce_loss(y_train, best_model.predict([virtual_input1, x_train, x_train_section, x_train_student]), x_train)
        L, N = -np.sum(x), len(x)
        model_param = best_model.count_params()
        print ("PARAM", model_param)
        AIC = 2 * model_param - 2 * L
        BIC = model_param * np.log(N) - 2 * L
        # NOTE(review): B_k/T_k are fetched but never used or returned.
        B_k = best_model.get_layer("B_k").get_weights()[0]
        T_k = best_model.get_layer("T_k").get_weights()[0]
        return best_model, AIC, BIC, epoch, loss_epoch
    def fit_batches(self, dafmdata_obj, model, max_epochs=30, earlyStop="val_loss", loaded=False):
        """Train `model` batch-by-batch from a data-generator object.

        Each outer iteration streams all training batches through model.fit
        (one epoch each) and then early-stops (patience 2) on either the test
        RMSE (earlyStop="rmse") or the mean test BCE (earlyStop="val_loss").
        "round-fine-tuned" models and pre-loaded models skip training.

        Returns:
            (best_model, AIC, BIC, epochs_run, loss_epoch) analogous to fit().
        """
        print ("Max Epochs", max_epochs)
        loss_epoch = {"epoch":[], "loss":[], earlyStop:[], 'patience':[]}
        patience, epoch = 0, 1
        prev_best_val_loss = np.inf
        counter = 0
        if self.dafm_type == "round-fine-tuned" or loaded:
            best_model = model
        while (patience <= 2 and epoch <= max_epochs and loaded==False and (not self.dafm_type == "round-fine-tuned")):
            counter += 1
            current_val_loss = 0
            total_loss, total_train_samples = 0, 0
            train = dafmdata_obj.data_generator1("train")
            test = dafmdata_obj.data_generator1("test")  # NOTE(review): unused
            bc = 0
            for x_train, y_train, x_train_section, x_train_student, batch_size in train:
                # Shuffle every tensor of this batch with a shared permutation.
                permutation = np.random.permutation(x_train.shape[0])
                x_train = x_train[permutation]
                y_train = y_train[permutation]
                virtual_input1 = np.ones([np.shape(x_train)[0], np.shape(x_train)[1], 1])
                print ("Batch Number:", bc, np.shape(x_train))
                if len(x_train_student)==0:
                    if len(x_train_section) == 0:
                        history_callback = model.fit([virtual_input1, x_train], y_train, batch_size=batch_size, epochs=1, verbose=0)
                    else:
                        x_train_section = x_train_section[permutation]
                        history_callback = model.fit([virtual_input1, x_train, x_train_section], y_train, batch_size=batch_size, epochs=1, verbose=1)
                else:
                    x_train_student = x_train_student[permutation]
                    if len(x_train_section) == 0:
                        history_callback = model.fit([virtual_input1, x_train, x_train_student], y_train, batch_size=batch_size, epochs=1, verbose=0)
                    else:
                        x_train_section = x_train_section[permutation]
                        history_callback = model.fit([virtual_input1, x_train, x_train_section, x_train_student], y_train, batch_size=batch_size, epochs=1, verbose=1)
                # Sample-weighted running mean of the training loss.
                total_loss += history_callback.history["loss"][0] * len(x_train)
                total_train_samples += len(x_train)
                bc += 1
            if earlyStop == "rmse":
                current_avg_rmse = self.predict_batches(dafmdata_obj, model)
                loss_epoch["rmse"].append(current_avg_rmse)
            else:
                current_avg_rmse = np.mean(self.bce_loss_batches(dafmdata_obj, model, utype="test"))
                loss_epoch["val_loss"].append(current_avg_rmse)
            loss_epoch["loss"].append(float(total_loss)/float(total_train_samples))
            loss_epoch["epoch"].append(counter)
            loss_epoch["patience"].append(patience)
            print ("Epoch Number:", counter, "Patience:", patience, earlyStop, current_avg_rmse, "Loss:", loss_epoch["loss"][-1])
            if (prev_best_val_loss - current_avg_rmse) > 0:
                # Improved: remember this model state and reset patience.
                best_model = model
                epoch += patience + 1
                patience = 0
                prev_best_val_loss = current_avg_rmse
            else:
                patience += 1
        # Information criteria from per-response training log-losses.
        x = self.bce_loss_batches(dafmdata_obj, best_model, utype="train")
        L, N = -np.sum(x), len(x)
        model_param = best_model.count_params()
        AIC = 2 * model_param - 2 * L
        BIC = model_param * np.log(N) - 2 * L
        return best_model, AIC, BIC, epoch, loss_epoch
def L(self, y_true, y_pred, x_train):
mask_matrix = np.sum(x_train, axis=2).flatten()
num_users, max_responses = np.shape(x_train)[0], np.shape(x_train)[1]
y_pred = y_pred.flatten()
y_true = y_true.flatten()
rmse = []
SSR = 0
response = 0
L = 0
N = 0
c = 0
for user in range(num_users):
for i in range(user * max_responses, (user + 1) * max_responses):
if mask_matrix[i] == 0 or y_true[i] == -1:
break
if y_pred[i] < 1 and y_pred[i] > 0:
L += ( y_true[i] * np.log(y_pred[i]) + (1 - y_true[i]) * np.log(1 - y_pred[i]) )
else:
c += 1
eps = 1e-4
if y_pred[i] == y_true[i]:
pass
else:
y_pred[i] = max(eps, min(1 - eps, y_pred[i]))
L += ( y_true[i] * np.log(y_pred[i]) + (1 - y_true[i]) * np.log(1 - y_pred[i]) )
response += 1
N += 1
return L, N
def L_batches(self, dafmdata_obj, model):
L = 0
N = 0
train_generator = dafmdata_obj.data_generator1("train")
for x_train, y_train, x_train_section, x_train_student, batch_size in train_generator:
virtual_input1 = np.ones([np.shape(x_train)[0], np.shape(x_train)[1], 1])
if len(x_train_student)==0:
if len(x_train_section) == 0:
l, x= self.L(y_train, model.predict([virtual_input1, x_train]), x_train)
L += l
else:
l, x= self.L(y_train, model.predict([virtual_input1, x_train, x_train_section]), x_train)
L += l
else:
if len(x_train_section) == 0:
l, x= self.L(y_train, model.predict([virtual_input1, x_train, x_train_student]), x_train)
L += l
else:
l, x= self.L(y_train, model.predict([virtual_input1, x_train, x_train_section, x_train_student]), x_train)
L += l
N += len(x_train)
return L, N
def predict(self, x_test, y_test, x_test_section, x_test_student, model, batch_size=32):
virtual_input_test = np.ones([np.shape(x_test)[0], np.shape(x_test)[1], 1])
if len(x_test_student)==0:
if len(x_test_section)==0:
y_pred = model.predict([virtual_input_test, x_test], batch_size=batch_size)
else:
y_pred = model.predict([virtual_input_test, x_test, x_test_section] , batch_size=batch_size)
else:
if len(x_test_section)==0:
y_pred = model.predict([virtual_input_test, x_test, x_test_student], batch_size=batch_size)
else:
y_pred = model.predict([virtual_input_test, x_test, x_test_section, x_test_student] , batch_size=batch_size)
rmse = self.rmse_masking(y_test, y_pred, x_test)
return rmse
def prediction(self, x_test, x_test_section, x_test_student, model, batch_size=32):
virtual_input_test = np.ones([np.shape(x_test)[0], np.shape(x_test)[1], 1])
if len(x_test_student)==0:
if len(x_test_section)==0:
y_pred = model.predict([virtual_input_test, x_test], batch_size=batch_size)
else:
y_pred = model.predict([virtual_input_test, x_test, x_test_section] , batch_size=batch_size)
else:
if len(x_test_section)==0:
y_pred = model.predict([virtual_input_test, x_test, x_test_student], batch_size=batch_size)
else:
y_pred = model.predict([virtual_input_test, x_test, x_test_section, x_test_student], batch_size=batch_size)
return y_pred
def predict_batches(self, dafmdata_obj, model):
test_generator = dafmdata_obj.data_generator1("test")
avg_rmse = 0
t_users = 0
for x_test, y_test, x_test_section, x_test_student, batch_size in test_generator:
avg_rmse = avg_rmse + len(x_test)*self.predict(x_test, y_test, x_test_section, x_test_student, model, batch_size)
t_users = t_users + len(x_test)
return avg_rmse/float(t_users)
def bce_loss_batches(self, dafmdata_obj, model, utype="train"):
    """Concatenate per-response binary cross-entropy losses over every batch.

    Iterates the requested split via dafmdata_obj.data_generator1(utype),
    runs the model on each batch (constant ones channel first, then x_test,
    then the optional section/student inputs) and extends one flat list
    with the losses returned by self.bce_loss.
    """
    losses = []
    for x_test, y_test, x_test_section, x_test_student, batch_size in dafmdata_obj.data_generator1(utype):
        ones_channel = np.ones([np.shape(x_test)[0], np.shape(x_test)[1], 1])
        model_inputs = [ones_channel, x_test]
        if len(x_test_section) != 0:
            model_inputs.append(x_test_section)
        if len(x_test_student) != 0:
            model_inputs.append(x_test_student)
        y_pred = model.predict(model_inputs, batch_size=batch_size)
        losses.extend(self.bce_loss(y_test, y_pred, x_test))
    return losses
def bce_loss(self, y_true, y_pred, x_test):
    """Per-response binary cross-entropy losses, honouring the padding mask.

    For each user the responses are scanned in order and the scan STOPS at
    the first padded timestep (all-zero skill row in x_test) or unknown
    label (-1); losses for the valid prefix are appended to one flat list.

    Arguments:
        y_true -- (users, steps, 1) array of 0/1 labels, -1 for unknown.
        y_pred -- (users, steps, 1) array of predicted probabilities.
        x_test -- (users, steps, skills) one-hot inputs; an all-zero row
                  marks padding.

    Returns:
        list -- per-response BCE values, users concatenated in order.
    """
    mask_matrix = np.sum(x_test, axis=2).flatten()
    num_users, max_responses = np.shape(x_test)[0], np.shape(x_test)[1]
    # .flatten() returns copies, so clipping below never touches caller data
    y_pred = y_pred.flatten()
    y_true = y_true.flatten()
    eps = 1e-7  # hoisted out of the loop (was rebound per iteration); guards log(0)
    ll = []
    for user in range(num_users):
        for i in range(user * max_responses, (user + 1) * max_responses):
            if mask_matrix[i] == 0 or y_true[i] == -1:
                break
            # clip into (0, 1) without mutating y_pred in place
            p = min(max(y_pred[i], eps), 1.0 - eps)
            ll.append(-(y_true[i] * np.log(p) + (1 - y_true[i]) * np.log(1 - p)))
    # NOTE: the dead `response` counter from the original was removed; it was
    # incremented but never read.
    return ll
def rmse_masking(self, y_true, y_pred, x_test):
    """Mean per-user RMSE over valid (unmasked, labelled) responses.

    Unlike bce_loss, masked entries are SKIPPED (not a stopping point), so
    evaluation is response-level across the whole sequence.

    Arguments:
        y_true -- (users, steps, 1) array of 0/1 labels, -1 for unknown.
        y_pred -- (users, steps, 1) array of predicted probabilities.
        x_test -- (users, steps, skills) one-hot inputs; an all-zero row
                  marks padding.

    Returns:
        float -- mean of the per-user RMSE values.
    """
    mask_matrix = np.sum(x_test, axis=2).flatten()
    num_users, max_responses = np.shape(x_test)[0], np.shape(x_test)[1]
    y_pred = y_pred.flatten()
    y_true = y_true.flatten()
    rmse = []
    for user in range(num_users):
        diff_sq, response = 0, 0
        for i in range(user * max_responses, (user + 1) * max_responses):
            if mask_matrix[i] == 0 or y_true[i] == -1:
                continue  # response-level evaluation: skip padded/unknown entries
            diff_sq += (y_true[i] - y_pred[i]) ** 2
            response += 1
        # BUG FIX: a user with zero valid responses previously caused a
        # ZeroDivisionError; such users are now excluded from the mean.
        if response == 0:
            continue
        rmse.append(sqrt(diff_sq / float(response)))
    return np.mean(rmse)
if __name__ == "__main__":
    # Smoke test: 4 students x 4 timesteps x 3 one-hot skill features.
    x_train = [[[0, 0, 1], [0, 0, 1], [1, 0, 0], [0, 0, 0]],
               [[1, 0, 0], [0, 1, 0], [1, 0, 0], [0, 0, 0]],
               [[0, 0, 1], [1, 0, 0], [0, 1, 0], [0, 0, 1]],
               [[1, 0, 0], [1, 0, 0], [0, 0, 1], [1, 0, 0]]]
    x_test = [[[1, 0, 0], [0, 1, 0], [0, 1, 0], [0, 0, 1]]]
    y_test = [[[-1], [-1], [-1], [-1]]]  # -1 marks masked/unknown labels
    y_train = [[[0], [0], [1], [-1]],
               [[1], [0], [1], [-1]],
               [[0], [0], [0], [0]],
               [[0], [1], [0], [0]]]
    # Fixed Q-matrix mapping 3 steps to 2 skills. (The previous
    # np.random.rand(3, 2) assignment was dead code -- it was immediately
    # overwritten by this literal -- and has been removed.)
    Q_jk_initialize = np.array([[1, 0], [0, 1], [1, 1]])
    obj = DAFM(np.array(x_train), np.array(y_train), np.array(x_test), np.array(y_test), Q_jk_initialize, skills=2, steps=3)
    model = obj.build(qtrainable=False, finetuning=False, loaded=False, dftype="")
    # BUG FIX: predict() takes (x_test, y_test, x_test_section,
    # x_test_student, model); the original call omitted the section and
    # student arguments and raised a TypeError. Empty lists mean "no
    # section/student features".
    obj.predict(np.array(x_test), np.array(y_test), [], [], model)
| 54.655577 | 269 | 0.607362 | 26,320 | 0.94239 | 0 | 0 | 0 | 0 | 0 | 0 | 1,868 | 0.066884 |
1a9cfbd4589de28828d34cf4701ed63a665c6003 | 1,423 | py | Python | python/misc/initialize.py | christopher-burke/warmups | 140c96ada87ec5e9faa4622504ddee18840dce4a | [
"MIT"
] | null | null | null | python/misc/initialize.py | christopher-burke/warmups | 140c96ada87ec5e9faa4622504ddee18840dce4a | [
"MIT"
] | 2 | 2022-03-10T03:49:14.000Z | 2022-03-14T00:49:54.000Z | python/misc/initialize.py | christopher-burke/warmups | 140c96ada87ec5e9faa4622504ddee18840dce4a | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""Initialize.
Turn full names into initials.
Source:
https://edabit.com/challenge/ANsubgd5zPGxov3u8
"""
def __initialize(name: str, period: bool=False) -> str:
    """Turn a full name into its initials.

    Private helper used by initialize.

    Arguments:
        name {[str]} -- Full name to be initialized.

    Keyword Arguments:
        period {bool} -- Include periods in initials (default: {False})

    Returns:
        [str] -- Initials string.
    """
    first_letters = [part[0] for part in name.split(' ')]
    if period:
        return '.'.join(first_letters) + '.'
    return ''.join(first_letters)
def initialize(names: list, **kwargs) -> list:
    """Turn a list of full names into a list of initials.

    Entries shorter than three characters or lacking a space are skipped.

    Arguments:
        names {list} -- Full names, each with a space between name parts.

    Raises:
        TypeError -- When names is not a list.

    Returns:
        list -- All names initialized.
    """
    if not isinstance(names, list):
        raise TypeError("Parameter 'names' is not a list.")
    initials = []
    for full_name in names:
        if len(full_name) > 2 and ' ' in full_name:
            initials.append(__initialize(full_name.strip(), **kwargs))
    return initials
def main():
    """Run sample initialize function."""
    heroes = ['Peter Parker', 'Steve Rogers', 'Tony Stark']
    print(initialize(heroes))
    print(initialize(['Bruce Wayne', 'Clark Kent', 'Diana Prince'], period=True))


if __name__ == "__main__":
    main()
| 23.716667 | 104 | 0.613493 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 925 | 0.650035 |
1a9e3905097491e316cd401df61464d8541b1d60 | 7,038 | py | Python | examples/shapes_from_glsl/cylinder_shape.py | szabolcsdombi/zengl | 2c9c26784285f2f049fb5d6fc9da0ad65d32d52f | [
"MIT"
] | 116 | 2021-10-31T17:24:18.000Z | 2022-02-01T05:47:18.000Z | examples/shapes_from_glsl/cylinder_shape.py | szabolcsdombi/zengl | 2c9c26784285f2f049fb5d6fc9da0ad65d32d52f | [
"MIT"
] | 9 | 2021-11-12T19:21:33.000Z | 2022-01-20T09:48:31.000Z | examples/shapes_from_glsl/cylinder_shape.py | szabolcsdombi/zengl | 2c9c26784285f2f049fb5d6fc9da0ad65d32d52f | [
"MIT"
] | 3 | 2021-11-12T18:55:05.000Z | 2022-01-19T13:58:26.000Z | import zengl
from defaults import defaults
from grid import grid_pipeline
from window import Window
# Create the output window and the zengl rendering context.
window = Window(1280, 720)
ctx = zengl.context()
# Multisampled (4x) color and depth render targets sized to the window.
image = ctx.image(window.size, 'rgba8unorm', samples=4)
depth = ctx.image(window.size, 'depth24plus', samples=4)
image.clear_value = (0.2, 0.2, 0.2, 1.0)  # dark-grey clear color
# Register the shared "defaults" GLSL snippet for #include "defaults".
ctx.includes['defaults'] = defaults
# Second pipeline rendering into the same targets (presumably a reference
# grid, judging by the module name -- confirm in grid.py).
grid = grid_pipeline(ctx, [image, depth])
pipeline = ctx.pipeline(
vertex_shader='''
#version 330
#include "defaults"
vec3 vertices[24] = vec3[](
vec3(0.000000, 1.000000, -0.500000),
vec3(0.000000, 1.000000, 0.500000),
vec3(0.500000, 0.866025, -0.500000),
vec3(0.500000, 0.866025, 0.500000),
vec3(0.866025, 0.500000, -0.500000),
vec3(0.866025, 0.500000, 0.500000),
vec3(1.000000, -0.000000, -0.500000),
vec3(1.000000, -0.000000, 0.500000),
vec3(0.866025, -0.500000, -0.500000),
vec3(0.866025, -0.500000, 0.500000),
vec3(0.500000, -0.866025, -0.500000),
vec3(0.500000, -0.866025, 0.500000),
vec3(-0.000000, -1.000000, -0.500000),
vec3(-0.000000, -1.000000, 0.500000),
vec3(-0.500000, -0.866025, -0.500000),
vec3(-0.500000, -0.866025, 0.500000),
vec3(-0.866025, -0.500000, -0.500000),
vec3(-0.866025, -0.500000, 0.500000),
vec3(-1.000000, 0.000000, -0.500000),
vec3(-1.000000, 0.000000, 0.500000),
vec3(-0.866025, 0.500000, -0.500000),
vec3(-0.866025, 0.500000, 0.500000),
vec3(-0.500000, 0.866025, -0.500000),
vec3(-0.500000, 0.866025, 0.500000)
);
vec3 normals[14] = vec3[](
vec3(-0.0000, 1.0000, -0.0000),
vec3(0.5000, 0.8660, -0.0000),
vec3(0.8660, 0.5000, -0.0000),
vec3(1.0000, -0.0000, -0.0000),
vec3(0.8660, -0.5000, -0.0000),
vec3(0.5000, -0.8660, -0.0000),
vec3(-0.0000, -1.0000, -0.0000),
vec3(-0.5000, -0.8660, -0.0000),
vec3(-0.8660, -0.5000, -0.0000),
vec3(-1.0000, -0.0000, -0.0000),
vec3(-0.8660, 0.5000, -0.0000),
vec3(-0.0000, -0.0000, 1.0000),
vec3(-0.5000, 0.8660, -0.0000),
vec3(-0.0000, -0.0000, -1.0000)
);
vec2 texcoords[50] = vec2[](
vec2(1.000000, 0.500000),
vec2(0.000000, 0.500000),
vec2(0.750000, 0.490000),
vec2(1.000000, 1.000000),
vec2(0.250000, 0.490000),
vec2(0.000000, 1.000000),
vec2(0.916667, 0.500000),
vec2(0.870000, 0.457846),
vec2(0.916667, 1.000000),
vec2(0.370000, 0.457846),
vec2(0.833333, 0.500000),
vec2(0.957846, 0.370000),
vec2(0.833333, 1.000000),
vec2(0.457846, 0.370000),
vec2(0.750000, 0.500000),
vec2(0.990000, 0.250000),
vec2(0.750000, 1.000000),
vec2(0.490000, 0.250000),
vec2(0.666667, 0.500000),
vec2(0.957846, 0.130000),
vec2(0.666667, 1.000000),
vec2(0.457846, 0.130000),
vec2(0.583333, 0.500000),
vec2(0.870000, 0.042154),
vec2(0.583333, 1.000000),
vec2(0.370000, 0.042154),
vec2(0.500000, 0.500000),
vec2(0.750000, 0.010000),
vec2(0.500000, 1.000000),
vec2(0.250000, 0.010000),
vec2(0.416667, 0.500000),
vec2(0.630000, 0.042154),
vec2(0.416667, 1.000000),
vec2(0.130000, 0.042154),
vec2(0.333333, 0.500000),
vec2(0.542154, 0.130000),
vec2(0.333333, 1.000000),
vec2(0.042154, 0.130000),
vec2(0.250000, 0.500000),
vec2(0.510000, 0.250000),
vec2(0.250000, 1.000000),
vec2(0.010000, 0.250000),
vec2(0.166667, 0.500000),
vec2(0.542154, 0.370000),
vec2(0.042154, 0.370000),
vec2(0.166667, 1.000000),
vec2(0.083333, 0.500000),
vec2(0.630000, 0.457846),
vec2(0.130000, 0.457846),
vec2(0.083333, 1.000000)
);
int vertex_indices[132] = int[](
1, 2, 0, 3, 4, 2, 5, 6, 4, 7, 8, 6, 9, 10, 8, 11, 12, 10, 13, 14, 12, 15, 16, 14, 17, 18, 16, 19, 20, 18,
21, 13, 5, 21, 22, 20, 23, 0, 22, 6, 14, 22, 1, 3, 2, 3, 5, 4, 5, 7, 6, 7, 9, 8, 9, 11, 10, 11, 13, 12, 13,
15, 14, 15, 17, 16, 17, 19, 18, 19, 21, 20, 5, 3, 1, 1, 23, 21, 21, 19, 17, 17, 15, 13, 13, 11, 9, 9, 7, 5,
5, 1, 21, 21, 17, 13, 13, 9, 5, 21, 23, 22, 23, 1, 0, 22, 0, 2, 2, 4, 6, 6, 8, 10, 10, 12, 14, 14, 16, 18,
18, 20, 22, 22, 2, 6, 6, 10, 14, 14, 18, 22
);
int normal_indices[132] = int[](
0, 1, 0, 1, 2, 1, 2, 3, 2, 3, 4, 3, 4, 5, 4, 5, 6, 5, 6, 7, 6, 7, 8, 7, 8, 9, 8, 9, 10, 9, 11, 11, 11, 10,
12, 10, 12, 0, 12, 13, 13, 13, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8,
9, 9, 9, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
11, 11, 11, 11, 11, 10, 12, 12, 12, 0, 0, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13
);
int texcoord_indices[132] = int[](
3, 6, 0, 8, 10, 6, 12, 14, 10, 16, 18, 14, 20, 22, 18, 24, 26, 22, 28, 30, 26, 32, 34, 30, 36, 38, 34, 40,
42, 38, 44, 29, 13, 45, 46, 42, 49, 1, 46, 15, 31, 47, 3, 8, 6, 8, 12, 10, 12, 16, 14, 16, 20, 18, 20, 24,
22, 24, 28, 26, 28, 32, 30, 32, 36, 34, 36, 40, 38, 40, 45, 42, 13, 9, 4, 4, 48, 44, 44, 41, 37, 37, 33,
29, 29, 25, 21, 21, 17, 13, 13, 4, 44, 44, 37, 29, 29, 21, 13, 45, 49, 46, 49, 5, 1, 47, 2, 7, 7, 11, 15,
15, 19, 23, 23, 27, 31, 31, 35, 39, 39, 43, 47, 47, 7, 15, 15, 23, 31, 31, 39, 47
);
out vec3 v_vertex;
out vec3 v_normal;
out vec2 v_texcoord;
void main() {
v_vertex = vertices[vertex_indices[gl_VertexID]];
v_normal = normals[normal_indices[gl_VertexID]];
v_texcoord = texcoords[texcoord_indices[gl_VertexID]];
gl_Position = mvp * vec4(v_vertex, 1.0);
}
''',
fragment_shader='''
#version 330
#include "defaults"
in vec3 v_normal;
layout (location = 0) out vec4 out_color;
void main() {
float lum = dot(normalize(light.xyz), normalize(v_normal)) * 0.7 + 0.3;
out_color = vec4(lum, lum, lum, 1.0);
}
''',
framebuffer=[image, depth],
topology='triangles',
cull_face='back',
vertex_count=132,
)
# Main loop: clear both targets, draw the grid pass then the cylinder pass,
# and present the result.
while window.update():
    image.clear()
    depth.clear()
    grid.render()
    pipeline.render()
    image.blit()  # resolve/copy the multisampled image to the screen
| 38.67033 | 119 | 0.483376 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6,415 | 0.911481 |
1a9fc66dc44361b73876b4bba42e389f64a4a3a0 | 1,305 | py | Python | arq.py | CesarOncala/Exercicios-em-Python | 722e20204c8c1e2cb145a955d7ef8f80f73557d7 | [
"MIT"
] | null | null | null | arq.py | CesarOncala/Exercicios-em-Python | 722e20204c8c1e2cb145a955d7ef8f80f73557d7 | [
"MIT"
] | null | null | null | arq.py | CesarOncala/Exercicios-em-Python | 722e20204c8c1e2cb145a955d7ef8f80f73557d7 | [
"MIT"
] | null | null | null | #Funções:__________________________________________________________________
def cadastro():
    """Repeatedly prompt for names and append greeting records to teste.txt.

    Each record is written as "Olá;<name>;" on its own line. Answering
    "n"/"N" at either prompt ends the loop.
    """
    # BUG FIX: the original used input(print("...")), which passed print's
    # None return value to input() and displayed "None" as the prompt.
    # Print the message, then read the reply separately.
    print("Deseja cadastrar alguém?")
    resp = input()
    # The redundant outer `if` was removed: the `while` performs the same check.
    while resp.upper() != "N":
        print("Digite seu nome")
        nome = input()
        # 'with' guarantees the file is closed even if a write fails
        with open("teste.txt", "a") as arq:
            arq.write("Olá" + ";" + nome + ";")
            arq.write("\n")
        print("Deseja continuar?")
        resp = input()
def pesquisa():
    """Search teste.txt for records whose name field matches user input.

    Records are semicolon-separated ("greeting;name;..."); matching lines
    are printed. The user may run repeated searches until answering no.
    """
    # Renamed the loop variable: the original local `pesquisa` shadowed the
    # function's own name.
    continuar = "s"
    while continuar not in ("n", "N", "Não", "não"):
        # BUG FIX: input(print("...")) displayed "None" as the prompt
        # (print returns None); print the message, then read the reply.
        print("Digite um nome")
        termo = input()
        # 'with' closes the file even on error; iterating the file object
        # replaces the manual readline()/sentinel loop. The dead
        # `resultado = list(range(3))` initialiser was removed.
        with open("teste.txt", "r") as ler:
            for linha in ler:
                campos = linha.split(';')
                if campos[1] == termo:
                    print(campos[0], ",", campos[1], campos[2])
        print("Deseja continuar? S/N")
        continuar = input()
# Main: -------------------------------------------------------------------
print("Hey")
o = "s"
while o.upper() != "N":
    cadastro()
    pesquisa()
    # BUG FIX: input(print("...")) displayed "None" as the prompt; print the
    # message, then read the reply. (A stale commented-out snippet that
    # followed this loop was removed.)
    print("Continuar com o sistema S/N?")
    o = input()
| 33.461538 | 84 | 0.547126 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 424 | 0.323417 |