Dataset column schema (⌀ marks a nullable column):

| column | dtype | range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 5 to 2.06M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 248 |
| max_stars_repo_name | string | length 5 to 125 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count ⌀ | int64 | 1 to 191k |
| max_stars_repo_stars_event_min_datetime ⌀ | string | length 24 |
| max_stars_repo_stars_event_max_datetime ⌀ | string | length 24 |
| max_issues_repo_path | string | length 3 to 248 |
| max_issues_repo_name | string | length 5 to 125 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count ⌀ | int64 | 1 to 67k |
| max_issues_repo_issues_event_min_datetime ⌀ | string | length 24 |
| max_issues_repo_issues_event_max_datetime ⌀ | string | length 24 |
| max_forks_repo_path | string | length 3 to 248 |
| max_forks_repo_name | string | length 5 to 125 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count ⌀ | int64 | 1 to 105k |
| max_forks_repo_forks_event_min_datetime ⌀ | string | length 24 |
| max_forks_repo_forks_event_max_datetime ⌀ | string | length 24 |
| content | string | length 5 to 2.06M |
| avg_line_length | float64 | 1 to 1.02M |
| max_line_length | int64 | 3 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |
| count_classes | int64 | 0 to 1.6M |
| score_classes | float64 | 0 to 1 |
| count_generators | int64 | 0 to 651k |
| score_generators | float64 | 0 to 1 |
| count_decorators | int64 | 0 to 990k |
| score_decorators | float64 | 0 to 1 |
| count_async_functions | int64 | 0 to 235k |
| score_async_functions | float64 | 0 to 1 |
| count_documentation | int64 | 0 to 1.04M |
| score_documentation | float64 | 0 to 1 |

Each record below shows its metadata, then the file content, then a stats line with the per-file counts and scores.
---
hexsha: b748129a257264ee78fbb33c2f52b2552698dcea | size: 2,418 | ext: py | lang: Python
path: CalibTracker/SiStripCommon/python/theBigNtuple_cfi.py
repo: ckamtsikis/cmssw @ ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | licenses: ["Apache-2.0"]
stars: 852 (2015-01-11T21:03:51.000Z to 2022-03-25T21:14:00.000Z)
issues: 30,371 (2015-01-02T00:14:40.000Z to 2022-03-31T23:26:05.000Z)
forks: 3,240 (2015-01-02T05:53:18.000Z to 2022-03-31T17:24:21.000Z)
content:
import FWCore.ParameterSet.Config as cms
from CalibTracker.SiStripCommon.ShallowEventDataProducer_cfi import *
from CalibTracker.SiStripCommon.ShallowDigisProducer_cfi import *
from CalibTracker.SiStripCommon.ShallowClustersProducer_cfi import *
from CalibTracker.SiStripCommon.ShallowTrackClustersProducer_cfi import *
from CalibTracker.SiStripCommon.ShallowRechitClustersProducer_cfi import *
from CalibTracker.SiStripCommon.ShallowTracksProducer_cfi import *
from RecoVertex.BeamSpotProducer.BeamSpot_cff import *
from RecoTracker.TrackProducer.TrackRefitters_cff import *
bigNtupleTrackCollectionTag = cms.InputTag("bigNtupleTracksRefit")
bigNtupleClusterCollectionTag = cms.InputTag("siStripClusters")
bigNtupleTracksRefit = RecoTracker.TrackProducer.TrackRefitter_cfi.TrackRefitter.clone(src = "generalTracks")
bigNtupleEventRun = shallowEventRun.clone()
bigNtupleDigis = shallowDigis.clone()
bigNtupleClusters = shallowClusters.clone(Clusters=bigNtupleClusterCollectionTag)
bigNtupleRecHits = shallowRechitClusters.clone(Clusters=bigNtupleClusterCollectionTag)
bigNtupleTrackClusters = shallowTrackClusters.clone(Tracks = bigNtupleTrackCollectionTag,Clusters=bigNtupleClusterCollectionTag)
bigNtupleTracks = shallowTracks.clone(Tracks = bigNtupleTrackCollectionTag)
bigShallowTree = cms.EDAnalyzer("ShallowTree",
outputCommands = cms.untracked.vstring(
'drop *',
'keep *_bigNtupleEventRun_*_*',
'keep *_bigNtupleDigis_*_*',
'keep *_bigNtupleClusters_*_*' ,
'keep *_bigNtupleRechits_*_*',
'keep *_bigNtupleTracks_*_*',
'keep *_bigNtupleTrackClusters_*_*'
)
)
from Configuration.StandardSequences.RawToDigi_Data_cff import *
from Configuration.StandardSequences.Reconstruction_cff import *
theBigNtuple = cms.Sequence( ( siPixelRecHits+siStripMatchedRecHits +
offlineBeamSpot +
bigNtupleTracksRefit)
* (bigNtupleEventRun +
bigNtupleClusters +
bigNtupleRecHits +
bigNtupleTracks +
bigNtupleTrackClusters
)
)
theBigNtupleDigi = cms.Sequence( siStripDigis + bigNtupleDigis )
stats: avg_line_length 43.178571 | max_line_length 130 | alphanum_fraction 0.700165 | classes 0 (score 0) | generators 0 (score 0) | decorators 0 (score 0) | async_functions 0 (score 0) | documentation 254 (score 0.105045)

---
hexsha: b748865dafd57226e01bad7504ce06ab355e363a | size: 75 | ext: py | lang: Python
path: anti_freeze/__main__.py
repo: Donluigimx/anti-freeze @ 03699e5c4f82ccd06f37b4e8b51da22cc5841b57 | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:
if __name__ == '__main__':
from .system import MyApp
MyApp().run()
stats: avg_line_length 18.75 | max_line_length 29 | alphanum_fraction 0.626667 | classes 0 (score 0) | generators 0 (score 0) | decorators 0 (score 0) | async_functions 0 (score 0) | documentation 10 (score 0.133333)

---
hexsha: b74908cfbdafb8fdf6ed4e638d485501633fe75d | size: 18,656 | ext: py | lang: Python
path: classic_NN/nn.py
repo: disooqi/learning-machine-learning @ 5fcef0a18f0c2e9aeab4abf45b968eb6ca5ba463 | licenses: ["MIT"]
stars: 1 (2020-09-30T18:09:51.000Z to 2020-09-30T18:09:51.000Z)
issues: null | forks: null
content:
import numpy as np
from scipy.special import expit, logit
import time
import logging
np.random.seed(4) # 4
logger = logging.getLogger(__name__)
fr = logging.Formatter('%(asctime)s:%(levelname)s:%(message)s')
sh = logging.StreamHandler()
# sh.setFormatter(fr)
logger.addHandler(sh)
logger.setLevel(logging.DEBUG)
logger2 = logging.getLogger('other')
file_handler = logging.FileHandler('run.log')
file_handler.setFormatter(fr)
file_handler.setLevel(logging.INFO)
logger2.addHandler(file_handler)
logger2.setLevel(logging.INFO)
class HiddenLayer:
def __init__(self):
pass
class ConvLayer(HiddenLayer):
def __init__(self):
super().__init__()
class FullyConnectedLayer(HiddenLayer):
def __init__(self, n_units, n_in, activation='sigmoid', output_layer=False, keep_prob=1):
super().__init__()
self.n_units = n_units
        # At every iteration, each neuron in the layer is shut down with probability 1 - keep_prob.
self.keep_prob = keep_prob
# todo (3): weight initialization should be in the Network class
if activation == 'sigmoid':
self.activation = self.sigmoid
self.dAdZ = self.sigmoid_prime
self._weights_initialization(n_in)
elif activation == 'relu':
self.activation = self.relu
self.dAdZ = self.relu_prime
self._He_initialization(n_in)
elif activation == 'tanh':
self.activation = self.tanh
self.dAdZ = self.tanh_prime
self._Xavier_initialization(n_in)
elif activation == 'leaky_relu':
self.activation = self.leaky_relu
self.dAdZ = self.leaky_relu_prime
self._He_initialization(n_in)
self.activation_type = activation
self.output_layer = output_layer
def _weights_initialization(self, n_in):
        # multiplying W by a small number makes the learning fast;
        # however, from a practical point of view, when multiplied by 0.01 with l > 2 the NN does not converge,
        # because it runs into the vanishing-gradients problem
self.W = np.random.randn(self.n_units, n_in) * 0.01
self.b = np.zeros((self.n_units, 1))
def _He_initialization(self, n_in):
self.W = np.random.randn(self.n_units, n_in) * np.sqrt(2 / n_in)
self.b = np.zeros((self.n_units, 1))
def _Xavier_initialization(self, n_in):
"""Initialize weight W using Xavier Initialization
        If the input features/activations have roughly mean 0 and variance 1, this keeps z on a similar
        scale. It does not solve, but definitely helps reduce, the vanishing/exploding gradients problem,
        because it sets each weight matrix W so that it is neither much bigger nor much smaller than 1,
        so activations neither explode nor vanish too quickly.
"""
self.W = np.random.randn(self.n_units, n_in) * np.sqrt(1 / n_in)
self.b = np.zeros((self.n_units, 1))
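    # A quick variance check (illustrative, not from the original source): if each of
    # the n_in inputs has variance 1 and W ~ N(0, 1/n_in), then
    # Var(z) = Var(sum_i W_i * x_i) = n_in * (1/n_in) * 1 = 1,
    # so activations stay on roughly the same scale from layer to layer.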
def _Benjio_initialization(self, n_in):
self.W = np.random.randn(self.n_units, n_in) * np.sqrt(2 / (n_in + self.n_units))
self.b = np.zeros((self.n_units, 1))
@staticmethod
def softmax(Z):
"""Compute softmax of Matrix Z
:param Z: is in the shape of (n * m), where n is the number of classes and m is the number of examples
:return:
"""
Z_exp = np.exp(Z)
return Z_exp/np.sum(Z_exp, axis=0)
@staticmethod
def stable_softmax(Z):
"""Compute the softmax of vector Z in a numerically stable way."""
shift_Z = Z - np.max(Z, axis=0)
Z_exp = np.exp(shift_Z)
return Z_exp / np.sum(Z_exp, axis=0)
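    # Why the shift matters, with illustrative numbers (not from the original source):
    # np.exp(1000.0) overflows to inf, so the naive softmax of [1000., 1000.] yields nan,
    # while shifting by max(Z) computes exp([0., 0.]) and returns [0.5, 0.5].
    # The result is unchanged because softmax(Z) == softmax(Z - c) for any constant c.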
@staticmethod
def softmax_prime(A):
"""N/A
https://eli.thegreenplace.net/2016/the-softmax-function-and-its-derivative/
# Kronecker delta function
:param A:
:return:
"""
pass
@staticmethod
def sigmoid(Z):
        # https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.expit.html
# return 1 / (1 + np.exp(-Z))
return expit(np.clip(Z, -709, 36.73))
@classmethod
def sigmoid_prime(cls, A):
""" calculate dAdZ
:param A:
:return: dAdZ
"""
return A * (1 - A)
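    # Derivation note (standard identity, added for clarity): with A = sigmoid(Z),
    # dA/dZ = sigmoid(Z) * (1 - sigmoid(Z)) = A * (1 - A), which is why this method
    # takes the cached activation A rather than Z.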
@staticmethod
def tanh(Z):
return (np.exp(Z) - np.exp(-Z)) / (np.exp(Z) + np.exp(-Z))
@classmethod
def tanh_prime(cls, A):
return 1 - A ** 2
@staticmethod
def relu(Z):
# a[a<0] = 0
# return np.clip(Z, 0, Z)
return np.maximum(Z, 0)
@staticmethod
def relu_prime(A):
        # A here is relu(Z), so A > 0 marks the active units; avoid mutating A in place
        return (A > 0).astype(A.dtype)
@staticmethod
def leaky_relu(Z, alpha=0.01):
'''
:param Z:
:param alpha: Slope of the activation function at x < 0.
:return:
'''
# return np.clip(Z, alpha * Z, Z)
return np.where(Z < 0, alpha * Z, Z)
@staticmethod
def leaky_relu_prime(A, alpha=0.01):
return np.where(A > 0, 1, alpha)
def __repr__(self):
return 'FullyConnectedLayer(n_units={}, activation={}, output_layer={}, keep_prob={})'.format(
self.n_units, self.activation_type, self.output_layer, self.keep_prob)
class NN:
def __init__(self, n_features, n_classes):
self.n = n_features
self.n_classes = n_classes
self.layers = list()
def add_layer(self, n_units, activation='sigmoid', dropout_keep_prob=1):
if self.layers:
n_units_previous_layer = self.layers[-1].n_units
else:
n_units_previous_layer = self.n
layer = FullyConnectedLayer(n_units, n_units_previous_layer, activation=activation, keep_prob=dropout_keep_prob)
self.layers.append(layer)
    def add_output_layer(self):
        if not self.layers or not self.layers[-1].output_layer:
            self.add_layer(self.n_classes, activation='sigmoid')
            self.layers[-1].output_layer = True
        else:
            # TODO: raise an error explaining that the existing output_layer must be deleted first
            pass
@staticmethod
def _calculate_single_layer_gradients(dLdA, layer_cache, compute_dLdA_1=True):
        '''
        :param dLdA: gradient of the loss w.r.t. this layer's activations
        :return: dLdA_1, dJdW, dJdb
        '''
# For the first iteration where loss is cross entropy and activation func of output layer
# is sigmoid, that could be shorten to,
# dZ[L] = A[L]-Y
# In general, you can compute dZ as follows
# dZ = dA * g'(Z) TODO: currently we pass A instead of Z, I guess it is much better to follow "A. Ng" and pass Z
        # During forward propagation, you divided A1 by keep_prob. In backpropagation you therefore
        # have to divide dA1 by keep_prob again (the calculus interpretation: if A[1] is scaled by
        # keep_prob, then its derivative dA[1] is scaled by the same keep_prob).
dLdA = np.multiply(dLdA, layer_cache.D) / layer_cache.keep_prob
dAdZ = layer_cache.dAdZ(layer_cache.A)
dLdZ = dLdA * dAdZ # Element-wise product
# dw = dz . a[l-1]
dZdW = layer_cache.A_l_1
dJdW = np.dot(dLdZ, dZdW.T) / dLdA.shape[1] # this is two steps in one line; getting dLdw and then dJdW
dJdb = np.sum(dLdZ, axis=1, keepdims=True) / dLdA.shape[1]
dLdA_1 = None
if compute_dLdA_1:
# da[l-1] = w[l].T . dz[l]
dZdA_1 = layer_cache.W
dLdA_1 = np.dot(dZdA_1.T, dLdZ) # computing dLd(A-1)
return dLdA_1, dJdW, dJdb
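    # Shape check with hypothetical sizes (not from the original source): for a layer
    # with n_units=4, n_in=3 and a batch of m=32 examples, dLdZ is (4, 32) and
    # A_l_1 is (3, 32), so dJdW = dLdZ . A_l_1.T / m is (4, 3), matching W,
    # and dJdb sums over the m examples to shape (4, 1), matching b.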
    def accuracy(self, X, y):
        # You only use dropout during training; don't randomly eliminate nodes at test time.
        A = X
        for layer in self.layers:
            Z = np.dot(layer.W, A) + layer.b
            A = layer.activation(Z)
        y = y.argmax(axis=0) + 1
        prediction = A.argmax(axis=0) + 1
        res = np.equal(prediction, y)
        return 100 * np.sum(res) / y.size
class Optimization:
def __init__(self, loss='cross_entropy', method='gradient-descent'):
self.method = method
self.VsnSs = list()
if loss == 'cross_entropy':
self.loss = self.cross_entropy_loss
self.activation_prime = self.cross_entropy_loss_prime
if method == 'gradient-descent':
self.optimizer = self.gradient_descent
elif method == 'gd-with-momentum':
self.optimizer = self.gradient_descent_with_momentum
elif method == 'rmsprop':
self.optimizer = self.RMSprop
elif method == 'adam':
self.optimizer = self.adam
@staticmethod
def weight_decay(m, alpha, lmbda):
# L2 Regularization
return 1 - ((alpha * lmbda) / m)
@staticmethod
def learning_rate_decay(decay_rate, epoch_num):
return 1/(1+decay_rate*epoch_num)
@staticmethod
def o_learning_rate_decay(k, epoch_num):
return k/np.sqrt(epoch_num)
@staticmethod
def exponential_learning_rate_decay(decay_rate, epoch_num):
        # e.g. decay_rate = 0.95 gives the common 0.95**epoch_num schedule
        return decay_rate ** epoch_num
def discrete_staircase_learning_rate_decay(self):
pass
@classmethod
def learning_rate(cls, alpha, decay_rate, epoch_num):
return cls.learning_rate_decay(decay_rate, epoch_num) * alpha
@classmethod
def gradient_descent(cls, dJdW, dJdb, W, b, m, **kwargs):
alpha0 = kwargs['alpha']
lmbda = kwargs['lmbda']
epoch_num = kwargs['epoch']
decay_rate = kwargs['decay_rate']
alpha = cls.learning_rate(alpha0, decay_rate, epoch_num)
W = cls.weight_decay(m, alpha, lmbda) * W - alpha * dJdW
b -= alpha * dJdb
return W, b
@classmethod
def gradient_descent_with_momentum(cls, dJdW, dJdb, W, b, m, **kwargs):
beta1 = kwargs['beta1']
Vs = kwargs['VS']
alpha0 = kwargs['alpha']
lmbda = kwargs['lmbda']
epoch_num = kwargs['epoch']
decay_rate = kwargs['decay_rate']
alpha = cls.learning_rate(alpha0, decay_rate, epoch_num)
Vs['Vdw'] = beta1*Vs['Vdw'] + (1-beta1)*dJdW
Vs['Vdb'] = beta1*Vs['Vdb'] + (1-beta1)*dJdb
W = cls.weight_decay(m, alpha, lmbda) * W - alpha * Vs['Vdw']
b = b - alpha * Vs['Vdb']
return W, b
@classmethod
def RMSprop(cls, dJdW, dJdb, W, b, m, **kwargs):
beta2 = kwargs['beta2']
Ss = kwargs['VS']
alpha0 = kwargs['alpha']
lmbda = kwargs['lmbda']
epoch_num = kwargs['epoch']
decay_rate = kwargs['decay_rate']
alpha = cls.learning_rate(alpha0, decay_rate, epoch_num)
epsilon = np.finfo(np.float32).eps
Ss['Sdw'] = beta2 * Ss['Sdw'] + (1 - beta2) * np.square(dJdW)
Ss['Sdb'] = beta2 * Ss['Sdb'] + (1 - beta2) * np.square(dJdb)
W = cls.weight_decay(m, alpha, lmbda)*W - alpha * (dJdW/(np.sqrt(Ss['Sdw'])+epsilon))
b = b - alpha * (dJdb/(np.sqrt(Ss['Sdb'])+epsilon))
return W, b
@classmethod
def adam(cls, dJdW, dJdb, W, b, m, **kwargs):
beta1 = kwargs['beta1']
beta2 = kwargs['beta2']
VsSs = kwargs['VS']
t = kwargs['t']
alpha0 = kwargs['alpha']
lmbda = kwargs['lmbda']
epoch_num = kwargs['epoch']
decay_rate = kwargs['decay_rate']
alpha = cls.learning_rate(alpha0, decay_rate, epoch_num)
epsilon = np.finfo(np.float32).eps
VsSs['Vdw'] = beta1 * VsSs['Vdw'] + (1 - beta1) * dJdW
VsSs['Vdb'] = beta1 * VsSs['Vdb'] + (1 - beta1) * dJdb
VsSs['Sdw'] = beta2 * VsSs['Sdw'] + (1 - beta2) * np.square(dJdW)
VsSs['Sdb'] = beta2 * VsSs['Sdb'] + (1 - beta2) * np.square(dJdb)
Vdw_corrected = VsSs['Vdw']/(1-beta1**t)
Vdb_corrected = VsSs['Vdb']/(1-beta1**t)
Sdw_corrected = VsSs['Sdw']/(1-beta2**t)
Sdb_corrected = VsSs['Sdb']/(1-beta2**t)
W = cls.weight_decay(m, alpha, lmbda) * W - alpha * (Vdw_corrected / (np.sqrt(Sdw_corrected) + epsilon))
b = b - alpha * (Vdb_corrected / (np.sqrt(Sdb_corrected) + epsilon))
return W, b
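    # Bias-correction sketch (standard Adam behaviour, illustrative numbers): at t=1
    # with beta1=0.9, Vdw = 0.1 * dJdW, and dividing by (1 - 0.9**1) = 0.1 recovers
    # dJdW, so early steps are not biased toward the zero-initialised moment estimates.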
@staticmethod
def cross_entropy_loss(y, a):
# http://christopher5106.github.io/deep/learning/2016/09/16/about-loss-functions-multinomial-logistic-logarithm-cross-entropy-square-errors-euclidian-absolute-frobenius-hinge.html
# https://stats.stackexchange.com/questions/260505/machine-learning-should-i-use-a-categorical-cross-entropy-or-binary-cross-entro
# here we penalize every class even the zero ones
# the classes here are independent i.e you can reduce the error of one without affecting the other
return -(y * np.log(a) + (1 - y) * np.log(1 - a))
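    # Worked check (illustrative numbers, not from the original source): for y=1,
    # a=0.9 the loss is -log(0.9) ~= 0.105, while a=0.1 gives -log(0.1) ~= 2.303,
    # so confident wrong predictions are penalised far harder than confident right ones.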
@staticmethod
def softmax_loss(y, a):
# here we penalize only the targeted class and this is intuitive because they are all dependent i.e. if targeted
# error is reduced the rest will give less probability because of the softmax relation
return - np.sum(y * np.log(a), axis=0, keepdims=True)
@staticmethod
def cross_entropy_loss_prime(y, a):
return -y / a + (1 - y) / (1 - a)
@staticmethod
def softmax_loss_prime(y, a):
return -np.sum(y/a)
@staticmethod
    def regularization_term(network, m, lmbda):
        # accumulate the squared weights of *all* layers
        agg = 0
        for layer in network.layers:
            agg += np.sum(np.square(layer.W))
        return (lmbda / (2 * m)) * agg
    def cost(self, network, X, y, lmbda=0):
        A = X
        for layer in network.layers:
            Z = np.dot(layer.W, A) + layer.b
            A = layer.activation(Z)
        loss_matrix = self.loss(y, A)
        sum_over_all_examples = np.sum(loss_matrix, axis=1) / loss_matrix.shape[1]
        return (np.sum(sum_over_all_examples) / sum_over_all_examples.size) + \
            self.regularization_term(network, X.shape[1], lmbda=lmbda)
def _update_weights(self, X, y, network, alpha, lmbda, t, beta1, beta2, decay_rate, epoch_num):
A = X
for layer in network.layers:
layer.A_l_1 = A # this is A-1 from last loop step
Z = np.dot(layer.W, A) + layer.b # (called "logits" in ML folklore)
A = layer.activation(Z)
            # NB! we do not apply dropout to the input layer or the output layer.
D = np.random.rand(*A.shape) <= layer.keep_prob # dropout
A = np.multiply(A, D) / layer.keep_prob # inverted dropout
layer.D = D
layer.A = A
with np.errstate(invalid='raise'):
try:
dLdA = self.activation_prime(y, A)
except FloatingPointError:
raise
# To avoid the confusion: reversed() doesn't modify the list. reversed() doesn't make a copy of the list
# (otherwise it would require O(N) additional memory). If you need to modify the list use alist.reverse(); if
# you need a copy of the list in reversed order use alist[::-1]
for l, layer, VsnSs in zip(range(len(network.layers), 0, -1), reversed(network.layers), reversed(self.VsnSs)):
dLdA, dJdW, dJdb = network._calculate_single_layer_gradients(dLdA, layer, compute_dLdA_1=(l > 1))
layer.W, layer.b = self.optimizer(dJdW, dJdb, layer.W, layer.b, X.shape[1], alpha=alpha, lmbda=lmbda,
VS=VsnSs, beta1=beta1, beta2=beta2, t=t, decay_rate=decay_rate, epoch=epoch_num)
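    # Minimal inverted-dropout sketch (hypothetical standalone example, mirroring the
    # masking used in the forward loop above):
    #   A = np.random.rand(3, 5)                      # some layer's activations
    #   keep_prob = 0.8
    #   D = np.random.rand(*A.shape) <= keep_prob     # boolean keep mask
    #   A = np.multiply(A, D) / keep_prob             # rescale so E[A] is unchanged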
def minimize(self, network, epochs=1, mini_batch_size=0, learning_rate=0.1, regularization_parameter=0,
momentum=0.9, beta2=0.999, learning_rate_decay=0, dataset=None):
bef = time.time()
for layer in network.layers:
self.VsnSs.append({"Vdw": np.zeros_like(layer.W), "Vdb": np.zeros_like(layer.b),
"Sdw": np.zeros_like(layer.W), "Sdb": np.zeros_like(layer.b)})
for epoch in range(1, epochs+1):
for t, mini_batch in enumerate(dataset.next_mini_batch(size=mini_batch_size), start=1):
self._update_weights(mini_batch.X, mini_batch.y, network, learning_rate,
regularization_parameter, t, beta1=momentum, beta2=beta2, decay_rate=learning_rate_decay, epoch_num=epoch)
else:
if epoch % 10 == 0:
cost = self.cost(network, dataset.X_train, dataset.y_train, lmbda=regularization_parameter)
logger.info('epoch {} (error: {:.5f})'.format(epoch, cost))
else:
aft = time.time()
cost = self.cost(network, dataset.X_train, dataset.y_train, lmbda=regularization_parameter)
logger.debug('-' * 80)
logger.debug('| Summary')
logger.debug('-' * 80)
logger.debug('training time: {:.2f} SECs'.format(aft - bef))
logger.debug('-' * 80)
logger.debug('Finish error: {:.5f}'.format(
self.cost(network, dataset.X_train, dataset.y_train, lmbda=regularization_parameter)))
ss = ''
for i, layer in enumerate(network.layers):
ss += '\n layer# ' + str(i + 1) + ' - ' + repr(layer)
logger2.info('train error: {:.2f}, '
'time: {:.2f}SECs, '
'#layers {}, '
'#epochs: {}, '
'learning rate: {},\n'
'regularization parameter: {}, '
'mini-batch size: {}, '
'optimizer: [{}], '
'dataset: [{}, dev_size:{}, shuffle:{}], {}'.format(cost, aft - bef, len(network.layers),
epochs, learning_rate,
regularization_parameter, mini_batch_size,
self.method, dataset.name, dataset.dev_size,
dataset.shuffle, ss))
if __name__ == '__main__':
pass
stats: avg_line_length 39.609342 | max_line_length 187 | alphanum_fraction 0.582547 | classes 18,074 (score 0.968804) | generators 0 (score 0) | decorators 7,938 (score 0.425493) | async_functions 0 (score 0) | documentation 4,768 (score 0.255575)

---
hexsha: b749f4714d0c5e5ad919fdd5ae7b07a02ccd8628 | size: 71 | ext: py | lang: Python
path: sensorAtlas/__init__.py
repo (stars/issues): iosefa/pyMatau @ 7b3f768db578771ba55a912bc4a9b8be58619070 | licenses: ["MIT"]
repo (forks): sensoratlas/sensoratlas @ 7b3f768db578771ba55a912bc4a9b8be58619070
stars: 2 (2021-05-28T10:26:17.000Z to 2021-07-03T03:11:22.000Z)
issues: 2 (2020-11-19T00:51:19.000Z to 2020-11-19T01:18:03.000Z)
forks: 1 (2019-10-10T14:03:42.000Z to 2019-10-10T14:03:42.000Z)
content:
# app config
default_app_config = 'sensorAtlas.apps.sensorAtlasConfig'
stats: avg_line_length 23.666667 | max_line_length 57 | alphanum_fraction 0.830986 | classes 0 (score 0) | generators 0 (score 0) | decorators 0 (score 0) | async_functions 0 (score 0) | documentation 48 (score 0.676056)

---
hexsha: b74a328698a70e0b159b7d2e8ddf8ec1e64183ed | size: 376 | ext: py | lang: Python
path: api/urls.py
repo: yasminfarza/country-state-address-api @ 39c8d349095dcca4f2411f7097497d6a8f39c1e1 | licenses: ["MIT"]
stars: 4 (2021-06-06T14:16:33.000Z to 2021-06-09T03:42:11.000Z)
issues: null | forks: null
content:
from django.urls import path, include
from rest_framework.routers import DefaultRouter
from api import views
router = DefaultRouter()
router.register('countries', views.CountryViewSet)
router.register('states/(?P<country>[^/.]+)', views.StateViewSet)
router.register('addresses', views.AddressViewSet)
app_name = 'api'
urlpatterns = [
path('', include(router.urls))
]
stats: avg_line_length 23.5 | max_line_length 65 | alphanum_fraction 0.755319 | classes 0 (score 0) | generators 0 (score 0) | decorators 0 (score 0) | async_functions 0 (score 0) | documentation 57 (score 0.151596)

---
hexsha: b74a946738ed6712ecf1be81551ad79c1bd928a1 | size: 1,401 | ext: py | lang: Python
path: tests/test_protocol.py
repo: gimbas/openinput @ 9cbb4b22aebe46dfc33ae9c56b164baa6c1fe693 | licenses: ["MIT"]
stars: 38 (2020-05-11T10:54:15.000Z to 2022-03-30T13:19:09.000Z)
issues: 45 (2020-04-21T23:52:22.000Z to 2022-02-19T20:29:27.000Z)
forks: 5 (2020-08-29T02:10:42.000Z to 2021-08-31T03:12:15.000Z)
content:
# SPDX-License-Identifier: MIT
# SPDX-FileCopyrightText: 2021 Filipe Laíns <lains@riseup.net>
def test_dispatch(basic_device):
basic_device.protocol_dispatch([0x03, 0x02, 0x01]) # unrelated
basic_device.protocol_dispatch([0x20]) # invalid length short
basic_device.protocol_dispatch([0x21]) # invalid length long
basic_device.hid_send.assert_not_called()
def test_fw_info_vendor(basic_device):
basic_device.protocol_dispatch([0x20, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00])
basic_device.hid_send.assert_called_with(
[0x21, 0x00, 0x01] + list(b'openinput-git') + [0x00] * 16
)
def test_fw_info_version(basic_device, fw_version):
basic_device.protocol_dispatch([0x20, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00])
basic_device.hid_send.assert_called_with(
[0x21, 0x00, 0x01] + list(fw_version.encode('ascii')) + [0x00] * (29 - len(fw_version))
)
def test_fw_info_device_name(basic_device):
basic_device.protocol_dispatch([0x20, 0x00, 0x01, 0x02, 0x00, 0x00, 0x00, 0x00])
basic_device.hid_send.assert_called_with(
[0x21, 0x00, 0x01] + list(b'basic test device') + [0x00] * 12
)
def test_fw_info_unsupported(basic_device):
basic_device.protocol_dispatch([0x20, 0x00, 0x01, 0xFF, 0x00, 0x00, 0x00, 0x00])
basic_device.hid_send.assert_called_with(
[0x21, 0xFF, 0x01, 0x00, 0x01] + [0x00] * 27
)
stats: avg_line_length 32.581395 | max_line_length 95 | alphanum_fraction 0.712348 | classes 0 (score 0) | generators 0 (score 0) | decorators 0 (score 0) | async_functions 0 (score 0) | documentation 190 (score 0.135521)

---
hexsha: b74acbae89490d10494c82735b42d81274199ebb | size: 4,314 | ext: py | lang: Python
path: zaqar-8.0.0/zaqar/storage/sqlalchemy/driver.py
repo: scottwedge/OpenStack-Stein @ 7077d1f602031dace92916f14e36b124f474de15 | licenses: ["Apache-2.0"]
stars: 97 (2015-01-02T09:35:23.000Z to 2022-03-25T00:38:45.000Z)
issues: 5 (2019-08-14T06:46:03.000Z to 2021-12-13T20:01:25.000Z)
forks: 44 (2015-01-28T03:01:28.000Z to 2021-05-13T18:55:19.000Z)
content:
# Copyright (c) 2013 Red Hat, Inc.
# Copyright 2014 Catalyst IT Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
from oslo_db.sqlalchemy import engines
from osprofiler import profiler
from osprofiler import sqlalchemy as sa_tracer
import sqlalchemy as sa
from zaqar.common import decorators
from zaqar.conf import drivers_management_store_sqlalchemy
from zaqar import storage
from zaqar.storage.sqlalchemy import controllers
class ControlDriver(storage.ControlDriverBase):
def __init__(self, conf, cache):
super(ControlDriver, self).__init__(conf, cache)
self.conf.register_opts(
drivers_management_store_sqlalchemy.ALL_OPTS,
group=drivers_management_store_sqlalchemy.GROUP_NAME)
self.sqlalchemy_conf = self.conf[
drivers_management_store_sqlalchemy.GROUP_NAME]
def _mysql_on_connect(self, conn, record):
# NOTE(flaper87): This is necessary in order
# to ensure that all date operations in mysql
# happen in UTC, `now()` for example.
conn.query('SET time_zone = "+0:00"')
@decorators.lazy_property(write=False)
def engine(self):
uri = self.sqlalchemy_conf.uri
engine = engines.create_engine(uri, sqlite_fk=True)
if (uri.startswith('mysql://') or
uri.startswith('mysql+pymysql://')):
# oslo_db.create_engine makes a test connection, throw that out
# first. mysql time_zone can be added to oslo_db as a
# startup option
engine.dispose()
sa.event.listen(engine, 'connect',
self._mysql_on_connect)
if (self.conf.profiler.enabled and
self.conf.profiler.trace_message_store):
sa_tracer.add_tracing(sa, engine, "db")
return engine
# TODO(cpp-cabrera): expose connect/close as a context manager
# that acquires the connection to the DB for the desired scope and
# closes it once the operations are completed
# TODO(wangxiyuan): we should migrate to oslo.db asap.
def run(self, *args, **kwargs):
return self.engine.execute(*args, **kwargs)
def close(self):
pass
@property
def pools_controller(self):
controller = controllers.PoolsController(self)
if (self.conf.profiler.enabled and
self.conf.profiler.trace_management_store):
return profiler.trace_cls("sqlalchemy_pools_"
"controller")(controller)
else:
return controller
@property
def queue_controller(self):
controller = controllers.QueueController(self)
if (self.conf.profiler.enabled and
(self.conf.profiler.trace_message_store or
self.conf.profiler.trace_management_store)):
return profiler.trace_cls("sqlalchemy_queue_"
"controller")(controller)
else:
return controller
@property
def catalogue_controller(self):
controller = controllers.CatalogueController(self)
if (self.conf.profiler.enabled and
self.conf.profiler.trace_management_store):
return profiler.trace_cls("sqlalchemy_catalogue_"
"controller")(controller)
else:
return controller
@property
def flavors_controller(self):
controller = controllers.FlavorsController(self)
if (self.conf.profiler.enabled and
self.conf.profiler.trace_management_store):
return profiler.trace_cls("sqlalchemy_flavors_"
"controller")(controller)
else:
return controller
@property
def topic_controller(self):
pass
stats: avg_line_length 36.871795 | max_line_length 79 | alphanum_fraction 0.653454 | classes 3,383 (score 0.784191) | generators 0 (score 0) | decorators 2,328 (score 0.539638) | async_functions 0 (score 0) | documentation 1,283 (score 0.297404)

---
hexsha: b74c264ab951da49d482e8b5b2b953e6b1285a3b | size: 792 | ext: py | lang: Python
path: tests/explainers/test_explainer.py
repo: zduey/shap @ 1bb8203f2d43f7552396a5f26167a258cbdc505c | licenses: ["MIT"]
stars: 1 (2021-03-03T11:00:32.000Z to 2021-03-03T11:00:32.000Z)
issues: null | forks: null
content:
""" Tests for Explainer class.
"""
import pytest
import shap
def test_wrapping_for_text_to_text_teacher_forcing_logits_model():
""" This tests using the Explainer class to auto choose a text to text setup.
"""
transformers = pytest.importorskip("transformers")
def f(x): # pylint: disable=unused-argument
pass
tokenizer = transformers.AutoTokenizer.from_pretrained("gpt2")
model = transformers.AutoModelForCausalLM.from_pretrained("gpt2")
wrapped_model = shap.models.TeacherForcingLogits(f, similarity_model=model, similarity_tokenizer=tokenizer)
masker = shap.maskers.Text(tokenizer, mask_token="...")
explainer = shap.Explainer(wrapped_model, masker)
assert shap.utils.safe_isinstance(explainer.masker, "shap.maskers.FixedComposite")
stats: avg_line_length 31.68 | max_line_length 111 | alphanum_fraction 0.753788 | classes 0 (score 0) | generators 0 (score 0) | decorators 0 (score 0) | async_functions 0 (score 0) | documentation 212 (score 0.267677)

---
hexsha: b74d8e9763f51be71d9332444a4477006848a8de | size: 1,301 | ext: py | lang: Python
path: main/urls.py
repo (stars): guinslym/django-Django-Code-Review-CodeEntrepreneurs @ 2ad9bd3d352f7eba46e16a7bf24e06b809049d62 | licenses: ["BSD-3-Clause"]
repo (issues/forks): guinslym/Django-Code-Review-CodeEntrepreneurs @ 2ad9bd3d352f7eba46e16a7bf24e06b809049d62
stars: 2 (2017-07-31T13:52:40.000Z to 2017-09-19T15:07:09.000Z)
issues: null | forks: null
content:
# -*- coding: utf-8 -*-
from django.conf import settings
from django.contrib import admin
from django.conf.urls.static import static
from django.conf.urls.i18n import i18n_patterns
from django.views.decorators.cache import cache_page
from django.conf.urls import url, include, handler404, handler500
admin.autodiscover()
from applications.elearning.views.general import robot_files
urlpatterns = [
#Robot and Humans.txt
url(
r'^(?P<filename>(robots.txt)|(humans.txt))$',
robot_files,
name='home-files'
),
#Main application
url(
r'^elearning/',
include(
'applications.elearning.urls',
namespace="elearning"
)
),
url(r'^', include('applications.elearning.urls')),
#admin
url(r'^admin/',
include('admin_honeypot.urls',
namespace='admin_honeypot')
),
url(r'^ilovemyself/', include(admin.site.urls)),
url(r'^accounts/', include('allauth.urls')),
]+ static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
#handler404 = 'applications.elearning.views.views_general.handler404'
#handler500 = 'applications.elearning.views.views_general.handler500'
if settings.DEBUG:
import debug_toolbar
urlpatterns += [
url(r'^__debug__/', include(debug_toolbar.urls)),
]
stats: avg_line_length 27.104167 | max_line_length 69 | alphanum_fraction 0.687164 | classes 0 (score 0) | generators 0 (score 0) | decorators 0 (score 0) | async_functions 0 (score 0) | documentation 452 (score 0.347425)

---
hexsha: b74eef5240ddb793f5798e460265805a101c2233 | size: 486 | ext: py | lang: Python
path: examples/simpleform/app/forms.py
repo: ezeev/Flask-AppBuilder @ d95f0ed934272629ee44ad3241646fa7ba09cdf8 | licenses: ["BSD-3-Clause"]
stars: 71 (2016-11-02T06:45:42.000Z to 2021-11-15T12:33:48.000Z)
issues: 3 (2021-06-08T23:39:54.000Z to 2022-03-12T00:50:13.000Z)
forks: 23 (2016-11-02T06:45:44.000Z to 2022-02-08T14:55:13.000Z)
content:
from wtforms import Form, StringField
from wtforms.validators import DataRequired
from flask_appbuilder.fieldwidgets import BS3TextFieldWidget
from flask_appbuilder.forms import DynamicForm
class MyForm(DynamicForm):
field1 = StringField(('Field1'),
description=('Your field number one!'),
validators = [DataRequired()], widget=BS3TextFieldWidget())
field2 = StringField(('Field2'),
description=('Your field number two!'), widget=BS3TextFieldWidget())
stats: avg_line_length 37.384615 | max_line_length 76 | alphanum_fraction 0.751029 | classes 293 (score 0.602881) | generators 0 (score 0) | decorators 0 (score 0) | async_functions 0 (score 0) | documentation 64 (score 0.131687)

---
hexsha: b74ef20d1f5294557f6193fe99adc3a01e0224ec | size: 403 | ext: py | lang: Python
path: comms.py
repo: kajusz/ufscreenadsclient @ 0151edec0117161c522a87643eef2f7be214210c | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:
import zmq
context = zmq.Context()
socket = context.socket(zmq.PAIR)
address = "tcp://127.0.0.1:5000"
def client(address):
socket.connect(address)
def server(address):
socket.bind(address)
def send(data):
socket.send_string(data)
def recv():
try:
return socket.recv(flags=zmq.NOBLOCK)
    except zmq.Again:
        # no message received yet
        return None
stats: avg_line_length 18.318182 | max_line_length 45 | alphanum_fraction 0.66005 | classes 0 (score 0) | generators 0 (score 0) | decorators 0 (score 0) | async_functions 0 (score 0) | documentation 63 (score 0.156328)

---
hexsha: b74f2a4a74090ecd5db981f0f8052fb5379e118a | size: 410 | ext: py | lang: Python
path: runtime/python/Lib/site-packages/numpy/typing/tests/data/fail/datasource.py
repo: hwaipy/InteractionFreeNode @ 88642b68430f57b028fd0f276a5709f89279e30d | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:
from pathlib import Path
import numpy as np
path: Path
d1: np.DataSource
d1.abspath(path) # E: incompatible type
d1.abspath(b"...") # E: incompatible type
d1.exists(path) # E: incompatible type
d1.exists(b"...") # E: incompatible type
d1.open(path, "r") # E: incompatible type
d1.open(b"...", encoding="utf8") # E: incompatible type
d1.open(None, newline="/n") # E: incompatible type
stats: avg_line_length 25.625 | max_line_length 57 | alphanum_fraction 0.656098 | classes 0 (score 0) | generators 0 (score 0) | decorators 0 (score 0) | async_functions 0 (score 0) | documentation 192 (score 0.468293)

---
hexsha: b7509767f47f312767bff162702df8fc8da90b4c | size: 2,821 | ext: py | lang: Python
path: applications/admin/controllers/gae.py
repo (stars): otaviocarvalho/forca-inf @ 93b61f1d6988d4fb00a1736633d85b4f99a2f259 | licenses: ["BSD-3-Clause"]
repo (issues/forks): murray3/augmi-a @ 9f8cff457fa3966d67d3752ccd86876b08bb19b1
stars: 1 (2017-03-28T21:31:51.000Z to 2017-03-28T21:31:51.000Z)
issues: null
forks: 1 (2022-03-10T19:53:44.000Z to 2022-03-10T19:53:44.000Z)
content:
### this works on linux only
try:
    import fcntl
    import re  # used by deploy() below
    import subprocess
    import signal
    import os
except:
session.flash='sorry, only on Unix systems'
redirect(URL(request.application,'default','site'))
forever=10**8
def kill():
p = cache.ram('gae_upload',lambda:None,forever)
if not p or p.poll()!=None:
return 'oops'
os.kill(p.pid, signal.SIGKILL)
cache.ram('gae_upload',lambda:None,-1)
def deploy():
if not os.path.exists(GAE_APPCFG):
redirect(URL(request.application,'default','site'))
regex = re.compile('^\w+$')
apps = sorted([(file.upper(), file) for file in \
os.listdir(apath(r=request)) if regex.match(file)])
options = [OPTION(item[1]) for item in apps]
form = FORM(TABLE(TR('Applications to deploy',
SELECT(_name='applications',_multiple='multiple',
_id='applications',*options)),
TR('GAE Email:',
INPUT(_name='email',requires=IS_EMAIL())),
TR('GAE Password:',
INPUT(_name='password',_type='password',
requires=IS_NOT_EMPTY())),
TR('',INPUT(_type='submit',value='deploy'))))
cmd = output = errors= ""
if form.accepts(request.vars,session):
try:
kill()
except:
pass
ignore_apps = [item[1] for item in apps \
if not item[1] in request.vars.applications]
regex = re.compile('\(applications/\(.*')
yaml = apath('../app.yaml', r=request)
data=open(yaml,'r').read()
data = regex.sub('(applications/(%s)/.*)|' % '|'.join(ignore_apps),data)
open(yaml,'w').write(data)
path = request.env.web2py_path
cmd = '%s --email=%s --passin update %s' % \
(GAE_APPCFG, form.vars.email, path)
p = cache.ram('gae_upload',
lambda s=subprocess,c=cmd:s.Popen(c, shell=True,
stdin=s.PIPE,
stdout=s.PIPE,
stderr=s.PIPE, close_fds=True),-1)
p.stdin.write(form.vars.password)
fcntl.fcntl(p.stdout.fileno(), fcntl.F_SETFL, os.O_NONBLOCK)
fcntl.fcntl(p.stderr.fileno(), fcntl.F_SETFL, os.O_NONBLOCK)
return dict(form=form,command=cmd)
def callback():
p = cache.ram('gae_upload',lambda:None,forever)
if not p or p.poll()!=None:
return '<done/>'
try:
output = p.stdout.read()
except:
output=''
try:
errors = p.stderr.read()
except:
errors=''
return (output+errors).replace('\n','<br/>')
stats: avg_line_length 36.636364 | max_line_length 90 | alphanum_fraction 0.515066 | classes 0 (score 0) | generators 0 (score 0) | decorators 0 (score 0) | async_functions 0 (score 0) | documentation 410 (score 0.145339)

---
hexsha: b751a3b9de29d209e3c48a06bc158c7966ca65b5 | size: 1,110 | ext: py | lang: Python
path: basicts/archs/AGCRN_arch/AGCRNCell.py
repo: zezhishao/GuanCang_BasicTS @ bbf82b9d08e82db78d4e9e9b11f43a676b54ad7c | licenses: ["Apache-2.0"]
stars: 3 (2022-02-22T12:50:08.000Z to 2022-03-13T03:38:46.000Z)
issues: null | forks: null
content:
import torch
import torch.nn as nn
from basicts.archs.AGCRN_arch.AGCN import AVWGCN
class AGCRNCell(nn.Module):
def __init__(self, node_num, dim_in, dim_out, cheb_k, embed_dim):
super(AGCRNCell, self).__init__()
self.node_num = node_num
self.hidden_dim = dim_out
self.gate = AVWGCN(dim_in+self.hidden_dim, 2*dim_out, cheb_k, embed_dim)
self.update = AVWGCN(dim_in+self.hidden_dim, dim_out, cheb_k, embed_dim)
def forward(self, x, state, node_embeddings):
#x: B, num_nodes, input_dim
#state: B, num_nodes, hidden_dim
state = state.to(x.device)
input_and_state = torch.cat((x, state), dim=-1)
z_r = torch.sigmoid(self.gate(input_and_state, node_embeddings))
z, r = torch.split(z_r, self.hidden_dim, dim=-1)
candidate = torch.cat((x, z*state), dim=-1)
hc = torch.tanh(self.update(candidate, node_embeddings))
h = r*state + (1-r)*hc
return h
def init_hidden_state(self, batch_size):
return torch.zeros(batch_size, self.node_num, self.hidden_dim)
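# Shape sketch (hypothetical sizes, inferred from the comments in forward): with
# B=4, node_num=207, dim_in=2, dim_out=64, x is (4, 207, 2), state is (4, 207, 64),
# z_r is (4, 207, 128) before the split, and the returned h is again (4, 207, 64),
# matching init_hidden_state(4).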
stats: avg_line_length 42.692308 | max_line_length 81 | alphanum_fraction 0.648649 | classes 1,020 (score 0.918919) | generators 0 (score 0) | decorators 0 (score 0) | async_functions 0 (score 0) | documentation 61 (score 0.054955)

---
hexsha: b7529f85e20a09a7d94f12902a504b82d6d2f333 | size: 1,763 | ext: py | lang: Python
path: lib/python2.7/site-packages/openopt/kernel/iterPrint.py
repo: wangyum/anaconda @ 6e5a0dbead3327661d73a61e85414cf92aa52be6 | licenses: ["Apache-2.0", "BSD-3-Clause"]
stars: null | issues: null | forks: null
content:
from numpy import log10, isnan
def signOfFeasible(p):
r = '-'
if p.isFeas(p.xk): r = '+'
return r
textOutputDict = {\
'objFunVal': lambda p: p.iterObjFunTextFormat % (-p.Fk if p.invertObjFunc else p.Fk),
'log10(maxResidual)': lambda p: '%0.2f' % log10(p.rk+1e-100),
'log10(MaxResidual/ConTol)':lambda p: '%0.2f' % log10(max((p.rk/p.contol, 1e-100))),
'residual':lambda p: '%0.1e' % p._Residual,
'isFeasible': signOfFeasible,
'nSolutions': lambda p: '%d' % p._nObtainedSolutions,
'front length':lambda p: '%d' % p._frontLength,
'outcome': lambda p: ('%+d' % -p._nOutcome if p._nOutcome != 0 else ''),
'income': lambda p: ('%+d' % p._nIncome if p._nIncome != 0 else ''),
'f*_distance_estim': lambda p: ('%0.1g' % p.f_bound_distance if not isnan(p.f_bound_distance) else 'N/A'),
'f*_bound_estim': lambda p: (p.iterObjFunTextFormat % \
p.f_bound_estimation) if not isnan(p.f_bound_estimation) else 'N/A',
}
delimiter = ' '
class ooTextOutput:
def __init__(self):
pass
def iterPrint(self):
if self.lastPrintedIter == self.iter: return
if self.iter == 0 and self.iprint >= 0: # 0th iter (start)
s = ' iter' + delimiter
for fn in self.data4TextOutput:
s += fn + delimiter
self.disp(s)
elif self.iprint<0 or \
(((self.iprint>0 and self.iter % self.iprint != 0) or self.iprint==0) and not(self.isFinished or self.iter == 0)):
return
s = str(self.iter).rjust(5) + ' '
for columnName in self.data4TextOutput:
val = textOutputDict[columnName](self)
#nWhole = length(columnName)
s += val.rjust(len(columnName)) + ' '
self.disp(s)
self.lastPrintedIter = self.iter
stats: avg_line_length 37.510638 | max_line_length 123 | alphanum_fraction 0.604651 | classes 817 (score 0.463415) | generators 0 (score 0) | decorators 0 (score 0) | async_functions 0 (score 0) | documentation 289 (score 0.163925)

---
hexsha: b752f435d4eed268979210bf9a7cb3d5c6b5fde1 | size: 1,833 | ext: py | lang: Python
path: src/cli.py
repo: blu3r4y/ccc-linz-mar2019 @ a012a8e8d0cbf01c495385c62f2571bfb1b01962 | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:
import os
from main import main
from pprint import pprint
def parse(lines):
# world bounds
    wx = int(lines[0].split()[0])
    wy = int(lines[0].split()[1])
# initial position
x = int(lines[1].split()[0])
y = int(lines[1].split()[1])
cmds = []
# command / step pair
it = iter(lines[2].split())
for e in it:
cmds.append((e, int(next(it))))
# health and speed
health = float(lines[3].split()[0])
speed = float(lines[3].split()[1])
# spawn times
nspawn = int(lines[4])
spawns = []
for i in range(nspawn):
spawns.append(int(lines[4 + i + 1]))
# damage and range
damage = float(lines[4 + nspawn + 1].split()[0])
towerrange = int(lines[4 + nspawn + 1].split()[1])
# queries
t = int(lines[4 + nspawn + 2])
towers = []
for i in range(t):
towertxt = lines[4 + nspawn + 3 + i]
towerx = int(towertxt.split()[0])
towery = int(towertxt.split()[1])
towers.append((towerx, towery))
return {
"wx": wx, "wy": wy,
"x": x, "y": y,
"cmds": cmds,
"speed": speed,
"health": health,
"damage": damage,
"range": towerrange,
"spawns": spawns,
"towers": towers
}
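# Illustrative input sketch (format inferred from the parser above; the literal
# values are made up):
#   20 20        world bounds
#   5 5          initial position
#   F 3 R 1 F 2  command/step pairs
#   100.0 1.5    health and speed
#   2            number of spawn times
#   0
#   10
#   25.0 4       tower damage and range
#   1            number of towers
#   7 7          tower position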
if __name__ == "__main__":
level, quests = 4, 5
for i in range(1, quests + 1):
input_file = r'..\data\level{0}\level{0}_{1}.in'.format(level, i)
output_file = os.path.splitext(input_file)[0] + ".out"
with open(input_file, 'r') as fi:
data = parse(fi.readlines())
# pprint(data)
print("=== Output {}".format(i))
print("======================")
result = main(data)
pprint(result)
with open(output_file, 'w+') as fo:
fo.write(result)
stats: avg_line_length 24.118421 | max_line_length 73 | alphanum_fraction 0.505728 | classes 0 (score 0) | generators 0 (score 0) | decorators 0 (score 0) | async_functions 0 (score 0) | documentation 288 (score 0.157119)

---
hexsha: b75565cf56b991351466f79c8a9946c1474351a6 | size: 5,749 | ext: py | lang: Python
path: card_utils/games/gin/ricky/utils.py
repo: cdrappi/card_utils @ dd12d3be22774cf35d7a6ce6b5f05ff6ee527929 | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:
import itertools
from typing import List, Tuple
from card_utils import deck
from card_utils.deck.utils import (
rank_partition,
suit_partition,
ranks_to_sorted_values
)
from card_utils.games.gin.deal import new_game
def deal_new_game():
""" shuffle up and deal each player 7 cards,
put one card in the discard list,
and put remaining cards in deck
:return: (dict)
{
'p1_hand': [str],
'p2_hand': [str],
'discard': [str],
'deck': [str]
}
"""
return new_game(n_cards=7)
def sorted_hand_points(hand):
"""
:param hand: ([str]) list of cards
:return: ([str], int)
"""
runs_3, runs_4 = get_runs(hand)
sets_3, sets_4 = get_sets(hand)
melds_3 = runs_3 + sets_3
melds_4 = runs_4 + sets_4
sorted_hand = sort_cards_by_rank(hand)
hand_points_ = sum_points_by_ranks(hand)
if len(hand) == 8:
hand_points_ -= max(deck.rank_to_value[r] for r, _ in hand)
if len(melds_3 + melds_4) == 0:
return sorted_hand, hand_points_
for meld_3, meld_4 in itertools.product(melds_3, melds_4):
cards_in_meld = {*meld_3, *meld_4}
if len(cards_in_meld) == 7:
# if there is a non-intersecting 3-meld and 4-meld,
# then you have 0 points and win
remaining_cards = list(set(hand) - set(cards_in_meld))
return meld_4 + meld_3 + remaining_cards, 0
for meld in melds_3 + melds_4:
hand_without_meld = [card for card in hand if card not in meld]
# print(hand, hand_without_meld, meld)
meld_points = sum_points_by_ranks(hand_without_meld)
if len(hand) == 8:
meld_points -= max(deck.rank_to_value[r] for r, _ in hand_without_meld)
if meld_points < hand_points_:
sorted_hand = meld + sort_cards_by_rank(hand_without_meld)
hand_points_ = min(hand_points_, meld_points)
return sorted_hand, hand_points_
def rank_straights(ranks, straight_length, aces_high=True, aces_low=True, suit=''):
"""
:param ranks: ([str])
e.g. ['A', '2', '7', 'T', 'J', 'Q', 'K']
:param straight_length: (int) e.g. 5
:param aces_high: (bool)
:param aces_low: (bool)
:param suit: (str) optional: inject a suit in the final returned value
:return: ([[str]]) list of list of straights,
each with length straight_length
e.g. [['T','J','Q','K','A']]
or [['Th', 'Jh', 'Qh', 'Kh', 'Ah']]
"""
if len(ranks) < straight_length:
# don't waste our time if its impossible to make a straight
return []
if suit not in {'', *deck.suits}:
raise ValueError(
f'rank_straights: suit parameter must either be '
f'the empty string "" or one of {deck.suits}'
)
values = ranks_to_sorted_values(ranks, aces_high=aces_high, aces_low=aces_low)
values_in_a_row = 0
num_values = len(values)
last_value = values[0]
straights = []
for ii, value in enumerate(values[1:]):
if last_value + 1 == value:
values_in_a_row += 1
else:
values_in_a_row = 0
if values_in_a_row >= straight_length - 1:
straights.append([
f'{deck.value_to_rank[v]}{suit}'
for v in range(value - straight_length + 1, value + 1)
])
if num_values + values_in_a_row < straight_length + ii:
# exit early if there aren't enough cards left
# to complete a straight
return straights
last_value = value
return straights
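# Illustrative call (taken from the docstring above, with a suit injected):
#   rank_straights(['A', '2', '7', 'T', 'J', 'Q', 'K'], 5, suit='h')
#   --> [['Th', 'Jh', 'Qh', 'Kh', 'Ah']]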
def get_runs(hand):
""" cleaner but slower (!?) method to get runs
:param hand: ([str])
:return: ([[str]], [[str]])
"""
suit_to_ranks = suit_partition(hand)
runs_3, runs_4 = [], []
for suit, ranks in suit_to_ranks.items():
runs_3.extend(rank_straights(ranks, 3, True, True, suit=suit))
runs_4.extend(rank_straights(ranks, 4, True, True, suit=suit))
return runs_3, runs_4
def get_sets(hand):
"""
:param hand: ([str])
:return: ([[str]], [[str]])
"""
rank_to_suits = rank_partition(hand)
sets_3, sets_4 = [], []
for rank, suits in rank_to_suits.items():
if len(suits) == 4:
sets_4.append([f'{rank}{s}' for s in suits])
sets_3.extend([
[f'{rank}{s}' for s in suit_combo]
for suit_combo in itertools.combinations(suits, 3)
])
elif len(suits) == 3:
sets_3.append([f'{rank}{s}' for s in suits])
return sets_3, sets_4
def get_melds(hand) -> Tuple:
"""
:param hand: ([str])
:return: ([[str], [str]])
"""
runs_3, runs_4 = get_runs(hand)
sets_3, sets_4 = get_sets(hand)
return runs_3 + sets_3, runs_4 + sets_4
def are_two_distinct_3_melds(melds_3: List[List]):
"""
:param melds_3: ([[str]])
:return: (bool)
"""
if len(melds_3) < 2:
return False
for m1, m2 in itertools.combinations(melds_3, 2):
if len({*m1, *m2}) == 6:
return True
return False
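# Example (hypothetical hand): ['2s', '3s', '4s'] and ['7h', '7d', '7c'] share no
# card, so their union has 6 distinct cards and the function returns True.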
def sum_points_by_ranks(hand):
"""
:param hand: ([str])
:return: (int)
"""
return sum(deck.rank_to_value[r] for r, _ in hand)
def sort_cards_by_rank(cards):
"""
:param cards: ([str])
:return: ([str])
"""
return sorted(cards, key=lambda c: deck.rank_to_value[c[0]])
def sort_hand(hand):
"""
:param hand: ([str])
:return: ([str])
"""
sorted_hand, _ = sorted_hand_points(hand)
return sorted_hand
def hand_points(hand):
"""
:param hand: ([str])
:return: (int)
"""
_, points = sorted_hand_points(hand)
return points
stats: avg_line_length 27.117925 | max_line_length 83 | alphanum_fraction 0.584623 | classes 0 (score 0) | generators 0 (score 0) | decorators 0 (score 0) | async_functions 0 (score 0) | documentation 1,750 (score 0.304401)

---
hexsha: b7569ffd8bee128efc51f5bcf493cd00aa1b2d94 | size: 899 | ext: py | lang: Python
path: evennia/contrib/rpg/dice/tests.py
repo: davidrideout/evennia @ 879eea55acdf4fe5cdc96ba8fd0ab5ccca4ae84b | licenses: ["BSD-3-Clause"]
stars: null | issues: null | forks: null
content:
"""
Testing of TestDice.
"""
from evennia.commands.default.tests import BaseEvenniaCommandTest
from mock import patch
from . import dice
@patch("evennia.contrib.rpg.dice.dice.randint", return_value=5)
class TestDice(BaseEvenniaCommandTest):
def test_roll_dice(self, mocked_randint):
self.assertEqual(dice.roll_dice(6, 6, modifier=("+", 4)), mocked_randint() * 6 + 4)
self.assertEqual(dice.roll_dice(6, 6, conditional=("<", 35)), True)
self.assertEqual(dice.roll_dice(6, 6, conditional=(">", 33)), False)
def test_cmddice(self, mocked_randint):
self.call(
dice.CmdDice(), "3d6 + 4", "You roll 3d6 + 4.| Roll(s): 5, 5 and 5. Total result is 19."
)
self.call(dice.CmdDice(), "100000d1000", "The maximum roll allowed is 10000d10000.")
self.call(dice.CmdDice(), "/secret 3d6 + 4", "You roll 3d6 + 4 (secret, not echoed).")
stats: avg_line_length 37.458333 | max_line_length 100 | alphanum_fraction 0.657397 | classes 693 (score 0.770857) | generators 0 (score 0) | decorators 757 (score 0.842047) | async_functions 0 (score 0) | documentation 259 (score 0.288098)

---
hexsha: b75755658b51065a953a59f32b666762d1790a50 | size: 9,247 | ext: py | lang: Python
path: ardour_tally_relay.py
repo: Jajcus/ardour_tally_relay @ aa69035a86bd282238f70ef17c427068249efd59 | licenses: ["BSD-2-Clause"]
stars: null | issues: null | forks: null
content:
#!/usr/bin/python3
import argparse
import logging
import signal
import time
from logging import debug, error, info, warning
import pythonosc.osc_server
import pythonosc.udp_client
from pythonosc.dispatcher import Dispatcher
import hid
LOG_FORMAT = '%(message)s'
POLL_INTERVAL = 1
# Supported USB relay vendor-id and product-id
USB_VID = 0x16c0
USB_PID = 0x05df
ON_COMMAND = [0x00,0xff,0x01,0x00,0x00,0x00,0x00,0x00,0x00]
OFF_COMMAND = [0x00,0xfd,0x01,0x00,0x00,0x00,0x00,0x00,0x00]
class SignalReceived(Exception):
pass
class OSCClientServer(pythonosc.udp_client.SimpleUDPClient,
pythonosc.osc_server.BlockingOSCUDPServer):
def __init__(self, local_address, remote_address, dispatcher, service_cb):
self._service_cb = service_cb
self._remote_addr = remote_address
pythonosc.osc_server.BlockingOSCUDPServer.__init__(self,
local_address,
dispatcher)
def service_actions(self):
pythonosc.osc_server.BlockingOSCUDPServer.service_actions(self)
self._service_cb()
def send(self, content):
self.socket.sendto(content.dgram, self._remote_addr)
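    # Design note: sending through the server's own socket means Ardour's replies
    # come back to the port this process is already listening on, so a single UDP
    # socket carries traffic in both directions.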
class OSCRelay:
def __init__(self):
self._last_ping = 0
self._last_hb = 0
self.args = None
self.ardour_addr = None
self.server = None
self.rec_enable = False
self.record_tally = False
self.relay_device = None
def _open_relay_device(self, just_print=False):
device = None
try:
for devinfo in hid.enumerate():
if device is not None:
try:
device.close()
except OSError as err:
pass
if devinfo["vendor_id"] != USB_VID:
continue
if devinfo["product_id"] != USB_PID:
continue
device = hid.device()
try:
device.open_path(devinfo["path"])
except OSError as err:
warning("Cannot open device %r: %s", devinfo["path"], err)
continue
report = device.get_feature_report(0,9)
device_serial = bytes(report[1:6]).rstrip(b"\x00")
device_serial = device_serial.decode("us-ascii", "replace")
if just_print:
info("Device %r found, serial number: %r",
devinfo["path"], device_serial)
elif self.args.serial:
if self.args.serial == device_serial:
break
else:
debug("Ignoring USB Relay device %r", device_serial)
continue
else:
debug("Using the first device found: %r (serial: %r)",
devinfo["path"], device_serial)
break
else:
if just_print:
return
raise FileNotFoundError("No matching USB Relay device found")
self.relay_device, device = device, None
finally:
if device is not None:
try:
device.close()
except OSError as err:
debug("device.close(): %s", err)
device = None
    def _close_relay_device(self):
        if self.relay_device is not None:
            try:
                self.relay_device.close()
            except OSError as err:
                debug("device.close(): %s", err)
            self.relay_device = None
def toggle_light(self):
if self.args.mode == "master":
on = self.rec_enable
elif self.args.mode == "track":
on = self.record_tally
else:
on = self.rec_enable and self.record_tally
info("Turning the tally light %s", "ON" if on else "OFF")
for i in 1, 2:
if not self.relay_device:
try:
self._open_relay_device()
except OSError as err:
warning("Could not open the relay device: %s", err)
break
command = ON_COMMAND if on else OFF_COMMAND
try:
self.relay_device.write(command)
break
except OSError as err:
warning("Could not write to the relay device: %s", err)
self._close_relay_device()
def handle_rec_enable_toggle(self, address, on):
on = bool(on)
debug("message received{!r}".format((address, on)))
if on != self.rec_enable:
info("Master Record %s", "ON" if on else "OFF")
self.rec_enable = on
self.toggle_light()
def handle_record_tally(self, address, on):
on = bool(on)
debug("message received{!r}".format((address, on)))
if on != self.record_tally:
info("Track Record %s", "ON" if on else "OFF")
self.record_tally = on
self.toggle_light()
def handle_heartbeat(self, address, value):
debug("message received{!r}".format((address, value)))
self._last_hb = time.time()
def handle_any(self, address, value):
debug("message received{!r}".format((address, value)))
def _start_server(self):
dispatcher = Dispatcher()
self.server = OSCClientServer(("0.0.0.0", self.args.port),
self.ardour_addr,
dispatcher,
self._service_action)
dispatcher.map("/rec_enable_toggle", self.handle_rec_enable_toggle)
dispatcher.map("/record_tally", self.handle_record_tally)
dispatcher.map("/heartbeat", self.handle_heartbeat)
dispatcher.set_default_handler(self.handle_any)
def _ping_ardour(self):
debug("Asking Ardour for feedback")
self.server.send_message("/set_surface/feedback", 24)
self._last_ping = time.time()
def _service_action(self):
now = time.time()
waited = now - max(self._last_ping, self._last_hb)
if waited > self.args.interval:
debug("no message received in %.3fs", waited)
self._ping_ardour()
if self._last_hb:
waited = now - self._last_hb
if waited > self.args.interval * 3:
info("No heartbeat heard from Ardour in %.3fs", waited)
self.rec_enable = False
self.record_tally = False
self._last_hb = 0
self.toggle_light()
def _signal_handler(self, signum, frame):
info("Signal %i received. Exiting.", signum)
# server.shutdown() is unusable here :-(
raise SignalReceived()
def main(self):
parser = argparse.ArgumentParser(
description="Toggle USB relay in response to Ardour OSC messages.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--port", "-p", default=8000, type=int,
help="Local port to listen on.")
parser.add_argument("--ardour", "-a", default="localhost:3819",
help="Ardour host to connect to, with optional port number.")
parser.add_argument("--mode", choices=["master", "track", "both"],
default="both",
help="Turn the light on when master record is enabled, track record or both.")
parser.add_argument("--serial", "-s",
help="USB relay serial number.")
parser.add_argument("--interval", "-i", default=5.0, type=float,
help="Ardour 'ping' interval.")
parser.add_argument("--debug", "-d", action="store_true",
help="Enable debug output.")
parser.add_argument("--detect", action="store_true",
help="Detect connected USB Relay devices.")
self.args = parser.parse_args()
if self.args.debug:
logging.basicConfig(level=logging.DEBUG, format=LOG_FORMAT)
else:
logging.basicConfig(level=logging.INFO, format=LOG_FORMAT)
if self.args.detect:
self._open_relay_device(just_print=True)
return
if ":" in self.args.ardour:
host, port = self.args.ardour.split(":", 1)
port = int(port)
self.ardour_addr = (host, port)
else:
self.ardour_addr = (self.args.ardour, 3819)
signal.signal(signal.SIGTERM, self._signal_handler)
self.toggle_light()
info("Talking to Ardour at %s:%i", *self.ardour_addr)
self._start_server()
self._ping_ardour()
try:
self.server.serve_forever(POLL_INTERVAL)
except (KeyboardInterrupt, SignalReceived):
self.rec_enable = False
self.record_tally = False
self.toggle_light()
if __name__ == "__main__":
osc_relay = OSCRelay()
osc_relay.main()
stats: avg_line_length 37.589431 | max_line_length 106 | alphanum_fraction 0.54861 | classes 8,677 (score 0.938358) | generators 0 (score 0) | decorators 0 (score 0) | async_functions 0 (score 0) | documentation 1,400 (score 0.1514)

---
hexsha: b757a3fb8db3b96f5cc0d1f1dd19f7847059351f | size: 1,408 | ext: py | lang: Python
path: python second semester working scripts/electrode_fcn.py
repo: pm2111/Heart-Defibrillation-Project @ 48ea3570c360aac7c3ff46354891998f4f364fab | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:
import numpy as np
import matplotlib.pyplot as plt
import os
path = "/Users/petermarinov/msci project/electrode data/test data/data/"
filenames = []
for f in os.listdir(path):
if not f.startswith('.'):
filenames.append(f)
i = -12  # pick the 12th-from-last data file in the folder
data = np.genfromtxt(path + filenames[i])
V = np.zeros((200,200))
for i in range(0, 200):
    for j in range(0, 200):
        if data[j+200*i][0] == 0:
            V[i,j] = -90.0
        if data[j+200*i][0] > 1:
            V[i,j] = 20. - (110./data[j+200*i][1])*(data[j+200*i][0]-1)
        if data[j+200*i][0] == 1:
            V[i,j] = 20.
i1 = 50    # electrode x-position on the grid
k = 3      # constant offset in the distance term (electrode height)
total = []
x = 0      # running sum for the current electrode sample
# elec = np.zeros((200,200,200))  # unused ~64 MB allocation; nothing below writes to it
for j1 in range(0, 200):
    for i in range(1, 200):
        for j in range(1, 200):
            # dipole-style contribution of grid point (i, j) to the electrode at (i1, j1)
            x += float((i-i1)*(V[i,j]-V[i-1,j]) + (j-j1)*(V[i,j]-V[i,j-1])) / ((i-i1)**2 + (j-j1)**2 + k**2)**1.5
    total.append(x)
    x = 0
plt.plot(total)
plt.xlabel("time [dimentionless]", fontsize = 18)
plt.ylabel("Voltage [mV]" , fontsize = 18)
plt.title("Electrode measurement for a healthy pacing heart")
plt.grid()
plt.show()
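Editor's note: the triple loop above performs roughly 200^3 scalar operations; a vectorized NumPy sketch of the same dipole-style sum (reusing V, i1 and k defined above; ii, jj, dVx, dVy are helper arrays introduced here, not part of the original script):

ii, jj = np.meshgrid(np.arange(1, 200), np.arange(1, 200), indexing='ij')
dVx = V[1:, 1:] - V[:-1, 1:]   # V[i,j] - V[i-1,j]
dVy = V[1:, 1:] - V[1:, :-1]   # V[i,j] - V[i,j-1]

def electrode_sample(j1):
    r2 = (ii - i1)**2 + (jj - j1)**2 + k**2
    return np.sum(((ii - i1)*dVx + (jj - j1)*dVy) / r2**1.5)

total_vec = [electrode_sample(j1) for j1 in range(200)]   # should match `total` above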
==== resources/lib/IMDbPY/bin/get_first_movie.py | bopopescu/ServerStatus (also GetSomeBlocks/Score_Soccer) | MIT | 1,355 bytes | b757a454248faaffeb488872e86cf07d801bf71c ====
#!/usr/bin/env python
"""
get_first_movie.py
Usage: get_first_movie "movie title"
Search for the given title and print the best matching result.
"""
import sys
# Import the IMDbPY package.
try:
import imdb
except ImportError:
print 'You bad boy! You need to install the IMDbPY package!'
sys.exit(1)
if len(sys.argv) != 2:
print 'Only one argument is required:'
print ' %s "movie title"' % sys.argv[0]
sys.exit(2)
title = sys.argv[1]
i = imdb.IMDb()
in_encoding = sys.stdin.encoding or sys.getdefaultencoding()
out_encoding = sys.stdout.encoding or sys.getdefaultencoding()
title = unicode(title, in_encoding, 'replace')
try:
# Do the search, and get the results (a list of Movie objects).
results = i.search_movie(title)
except imdb.IMDbError, e:
print "Probably you're not connected to Internet. Complete error report:"
print e
sys.exit(3)
if not results:
print 'No matches for "%s", sorry.' % title.encode(out_encoding, 'replace')
sys.exit(0)
# Print only the first result.
print ' Best match for "%s"' % title.encode(out_encoding, 'replace')
# This is a Movie instance.
movie = results[0]
# So far the Movie object only contains basic information like the
# title and the year; retrieve main information:
i.update(movie)
print movie.summary().encode(out_encoding, 'replace')
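Editor's note: the script above is Python 2. A minimal Python 3 sketch of the same flow (search, take the first result, fetch main info, print the summary), assuming a current IMDbPY where strings are native unicode:

import sys
import imdb

ia = imdb.IMDb()
results = ia.search_movie(sys.argv[1])
if results:
    movie = results[0]
    ia.update(movie)   # the search result only has title/year until updated
    print(movie.summary())
else:
    print('No matches, sorry.')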
==== ID3.py | idiomatic/id3.py | MIT | 14,079 bytes | b7592e3ec4b70120c5e12cf12590570b289d59a3 ====
#!/usr/bin/env python
# -*- mode: python -*-
import re
import struct
import types
def items_in_order(dict, order=[]):
"""return all items of dict, but starting in the specified order."""
done = { }
items = [ ]
for key in order + dict.keys():
if not done.has_key(key) and dict.has_key(key):
done[key] = None
items.append((key, dict[key]))
return items
class UnsupportedID3:
pass
class InvalidMP3:
pass
genres = [
'Blues', 'Classic Rock', 'Country', 'Dance', 'Disco', 'Funk',
'Grunge', 'Hip-Hop', 'Jazz', 'Metal', 'New Age', 'Oldies', 'Other',
'Pop', 'R&B', 'Rap', 'Reggae', 'Rock', 'Techno', 'Industrial',
'Alternative', 'Ska', 'Death Metal', 'Pranks', 'Soundtrack',
'Euro-Techno', 'Ambient', 'Trip-Hop', 'Vocal', 'Jazz+Funk', 'Fusion',
'Trance', 'Classical', 'Instrumental', 'Acid', 'House', 'Game',
'Sound Clip', 'Gospel', 'Noise', 'Alt. Rock', 'Bass', 'Soul',
'Punk', 'Space', 'Meditative', 'Instrum. Pop', 'Instrum. Rock',
'Ethnic', 'Gothic', 'Darkwave', 'Techno-Indust.', 'Electronic',
'Pop-Folk', 'Eurodance', 'Dream', 'Southern Rock', 'Comedy',
'Cult', 'Gangsta', 'Top 40', 'Christian Rap', 'Pop/Funk', 'Jungle',
'Native American', 'Cabaret', 'New Wave', 'Psychadelic', 'Rave',
'Showtunes', 'Trailer', 'Lo-Fi', 'Tribal', 'Acid Punk', 'Acid Jazz',
'Polka', 'Retro', 'Musical', 'Rock & Roll', 'Hard Rock', 'Folk',
'Folk/Rock', 'National Folk', 'Swing', 'Fusion', 'Bebob', 'Latin',
'Revival', 'Celtic', 'Bluegrass', 'Avantgarde', 'Gothic Rock',
'Progress. Rock', 'Psychadel. Rock', 'Symphonic Rock', 'Slow Rock',
'Big Band', 'Chorus', 'Easy Listening', 'Acoustic', 'Humour',
'Speech', 'Chanson', 'Opera', 'Chamber Music', 'Sonata', 'Symphony',
'Booty Bass', 'Primus', 'Porn Groove', 'Satire', 'Slow Jam',
'Club', 'Tango', 'Samba', 'Folklore', 'Ballad', 'Power Ballad',
'Rhythmic Soul', 'Freestyle', 'Duet', 'Punk Rock', 'Drum Solo',
'A Capella', 'Euro-House', 'Dance Hall', 'Goa', 'Drum & Bass',
'Club-House', 'Hardcore', 'Terror', 'Indie', 'BritPop', 'Negerpunk',
'Polsk Punk', 'Beat', 'Christian Gangsta Rap', 'Heavy Metal',
'Black Metal', 'Crossover', 'Contemporary Christian', 'Christian Rock',
'Merengue', 'Salsa', 'Thrash Metal', 'Anime', 'Jpop', 'Synthpop',
]
frame_id_names = {
'BUF' : 'Recommended buffer size',
'CNT' : 'Play counter',
'COM' : 'Comments',
'CRA' : 'Audio encryption',
'CRM' : 'Encrypted meta frame',
'ETC' : 'Event timing codes',
'EQU' : 'Equalization',
'GEO' : 'General encapsulated object',
'IPL' : 'Involved people list',
'LNK' : 'Linked information',
'MCI' : 'Music CD Identifier',
'MLL' : 'MPEG location lookup table',
'PIC' : 'Attached picture',
'POP' : 'Popularimeter',
'REV' : 'Reverb',
'RVA' : 'Relative volume adjustment',
'SLT' : 'Synchronized lyric/text',
'STC' : 'Synced tempo codes',
'TAL' : 'Title',
'TBP' : 'Beats per minute',
'TCM' : 'Composer',
'TCO' : 'Content type',
'TCR' : 'Copyright message',
'TDA' : 'Date',
'TDY' : 'Playlist delay',
'TEN' : 'Encoded by',
'TFT' : 'File type',
'TIM' : 'Time',
'TKE' : 'Initial key',
'TLA' : 'Language(s)',
'TLE' : 'Length',
'TMT' : 'Media type',
'TOA' : 'Original artist(s)/performer(s)',
'TOF' : 'Original filename',
'TOL' : 'Original Lyricist(s)/text writer(s)',
'TOR' : 'Original release year',
'TOT' : 'Original album/Movie/Show title',
'TP1' : 'Lead artist(s)/Lead performer(s)/Soloist(s)/Performing group',
'TP2' : 'Band/Orchestra/Accompaniment',
'TP3' : 'Conductor/Performer refinement',
'TP4' : 'Interpreted, remixed, or otherwise modified by',
'TPA' : 'Part of a set',
'TPB' : 'Publisher',
'TRC' : 'ISRC (International Standard Recording Code)',
'TRD' : 'Recording dates',
'TRK' : 'Track number/Position in set',
'TSI' : 'Size',
'TSS' : 'Software/hardware and settings used for encoding',
'TT1' : 'Content group description',
'TT2' : 'Title/Songname/Content description',
'TT3' : 'Subtitle/Description refinement',
'TXT' : 'Lyricist/text writer',
'TXX' : 'User defined text information frame',
'TYE' : 'Year',
'UFI' : 'Unique file identifier',
'ULT' : 'Unsychronized lyric/text transcription',
'WAF' : 'Official audio file webpage',
'WAR' : 'Official artist/performer webpage',
'WAS' : 'Official audio source webpage',
'WCM' : 'Commercial information',
'WCP' : 'Copyright/Legal information',
'WPB' : 'Publishers official webpage',
'WXX' : 'User defined URL link frame',
}
text_frame_ids = ( 'TT1', 'TT2', 'TT3', 'TP1', 'TP2', 'TP3', 'TP4',
'TCM', 'TXT', 'TLA', 'TCO', 'TAL', 'TPA', 'TRK',
'TRC', 'TYE', 'TDA', 'TIM', 'TRD', 'TMT', 'TFT',
'TBP', 'TCR', 'TPB', 'TEN', 'TSS', 'TOF', 'TLE',
'TSI', 'TDY', 'TKE', 'TOT', 'TOA', 'TOL', 'TOR',
'IPL' )
_genre_number_re = re.compile(r"^\((\d+)\)$")
_track_re = re.compile(r"^(\d+)/(\d+)$")
def _nts(s):
    """Return the text before the first NUL of a null-terminated string."""
    null = s.find('\0')
    if null != -1:
        return s[:null]
    return s
def _unpack_non_negative_octet_28_bit_int(n):
return (((n & 0x7f000000) >> 3)
+ ((n & 0x7f0000) >> 2)
+ ((n & 0x7f00) >> 1)
+ (n & 0x7f))
def _pack_non_negative_octet_28_bit_int(n):
return (((n & 0xfe00000) << 3)
+ ((n & 0x1fc000) << 2)
+ ((n & 0x3f80) << 1)
+ (n & 0x7f))
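# Editor's note: the two helpers above implement ID3v2 "synchsafe" integers --
# 28 bits stored as four 7-bit groups so that no byte has its high bit set.
# A quick round-trip sanity check (a sketch, not part of the original module):
#   packed = _pack_non_negative_octet_28_bit_int(1000)
#   assert packed & 0x80808080 == 0   # high bit clear in every byte
#   assert _unpack_non_negative_octet_28_bit_int(packed) == 1000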
def _unpack_genre(s):
m = _genre_number_re.match(s)
if m:
return genres[int(m.group(1))]
return s
def _pack_genre(s):
s = s.lower()
for i in range(len(genres)):
if s == genres[i].lower():
return "(%d)" % (i,)
return s
def _unpack_track(s):
    m = _track_re.match(s)
    if m:
        return (int(m.group(1)), int(m.group(2)))
    return (int(s), 0)
def _pack_track(track, tracks):
return "%d/%d" % (track, tracks)
def _unpack_str(s):
if s[0] == "\0":
return s[1:]
raise UnsupportedID3
def _pack_str(s):
if type(s) is types.StringType:
return "\0" + s
raise UnsupportedID3
class id3_file:
def __init__(self, f):
self._f = f
self._order = [ ]
self._version = None
self._read_length = 0
self._write_length = 0
def _read(self, length):
self._read_length += length
return self._f.read(length)
def _read_unpack(self, format, exception=InvalidMP3):
data = self._read(struct.calcsize(format))
if not data:
raise exception
data = struct.unpack(format, data)
if len(data) == 1:
return data[0]
return data
def _read_id3_v1(self):
id3 = self._read_unpack('30s30s30s4s30sB')
title, artist, album, year, comment, genre = id3
track = ord(comment[-1])
if track and ord(comment[-2]) == 0:
comment = comment[:-2]
else:
track = None
self.title = _nts(title)
self.artist = _nts(artist)
self.album = _nts(album)
self.year = _nts(year)
self.genre = genres[genre]
self.comment = _nts(comment)
if track is None:
self.version = self._version = "1.0"
else:
self.version = self._version = "1.1"
self.track = track
def _read_id3_v2(self):
major_version, minor_version, flags = self._read_unpack('>BBB')
self._version = "2.%d.%d" % (major_version, minor_version)
self.version = self._version
if flags & 0x20:
# extended header
raise UnsupportedID3
if major_version == 2:
return self._read_id3_v2_2()
elif major_version == 3:
return self._read_id3_v2_3()
else:
raise UnsupportedID3
def _read_id3_v2_3(self):
self.raw = raw = { }
self._order = order = [ ]
size = self._read_unpack('>L')
size = _unpack_non_negative_octet_28_bit_int(size)
#print "size", size
while size > 0:
id, frame_size, flags = self._read_unpack('>4sLH')
size -= 10
if flags:
raise UnsupportedID3
if frame_size:
size -= frame_size
data = self._read(frame_size)
if id == "\0\0\0\0":
continue
if raw.has_key(id):
# duplicates?
raise UnsupportedID3
raw[id] = data
order.append(id)
self.title = _unpack_str(raw.get('TIT2', "\0"))
self.artist = _unpack_str(raw.get('TPE1', "\0"))
self.album = _unpack_str(raw.get('TALB', "\0"))
self.year = _unpack_str(raw.get('TYER', "\0"))
self.comment = _unpack_str(raw.get('COMM', "\0"))
self.genre = _unpack_genre(_unpack_str(raw.get('TCON', "\0")))
track = _unpack_str(raw.get('TRCK', "\0000"))
self.track, self.tracks = _unpack_track(track)
def _read_id3_v2_2(self):
self.raw = raw = { }
size = self._read_unpack('>L')
size = _unpack_non_negative_octet_28_bit_int(size)
# frames
while size > 0:
id, frame_size = self._read_unpack('>3s3s')
frame_size = (struct.unpack('>L', '\0' + frame_size))[0]
size -= 6
if frame_size:
size -= frame_size
data = self._read(frame_size)
if raw.has_key(id):
# duplicates?
raise UnsupportedID3
raw[id] = data
if raw.has_key('UFI'):
raw['UFI'] = (raw['UFI'][0], raw['UFI'][1:])
for id in text_frame_ids:
if raw.has_key(id):
raw[id] = _unpack_str(raw[id])
if raw.has_key('TXX'):
raw['TXX'] = (_unpack_str(raw['TXX']).split('\0', 1))
if raw.has_key('COM'):
raw['COM'] = (_unpack_str(raw['COM']).split('\0'))
self.title = raw.get('TT2', '')
self.artist = raw.get('TP1', '')
self.album = raw.get('TAL', '')
self.year = raw.get('TYE', '')
self.comment = raw.get('COM', '')
self.genre = _unpack_genre(raw.get('TCO', ''))
track = raw.get('TRK', '0')
self.track, self.tracks = _unpack_track(track)
def read(self):
magic = self._read_unpack('>3s')
if magic == 'ID3':
self._read_id3_v2()
return self
        self._f.seek(-128, 2)  # ID3v1 tag lives in the last 128 bytes
tag = self._read_unpack('3s')
if tag == 'TAG':
self._read_id3_v1()
return self
raise InvalidMP3
def _write(self, data):
self._write_length += len(data)
self._f.write(data)
def _pad(self):
self._write("\0" * (self._read_length - self._write_length))
self._write_length = 0
    def _write_pack(self, format, *values):
        data = struct.pack(format, *values)
        return self._write(data)
def _write_id3_v2_3(self):
self._f.seek(0)
raw = self.raw.copy()
if self.title:
raw['TIT2'] = _pack_str(self.title)
if self.artist:
raw['TPE1'] = _pack_str(self.artist)
if self.album:
raw['TALB'] = _pack_str(self.album)
if self.year:
raw['TYER'] = _pack_str(self.year)
if self.comment:
raw['COMM'] = _pack_str(self.comment)
if self.genre:
raw['TCON'] = _pack_str(_pack_genre(self.genre))
if self.track:
raw['TRCK'] = _pack_str(_pack_track(self.track, self.tracks))
# order of:
# 1) original key order
# 2) title, artist, album, year, comment, genre, tracks
# 3) anything else added since read()
front_keys = [ 'TIT2', 'TPE1', 'TALB', 'TYER', 'COMM', 'TCON', 'TRCK' ]
data = [ ]
for k,v in items_in_order(raw, self._order + front_keys):
data.append(struct.pack('>4sLH', k, len(v), 0x0) + v)
data = "".join(data)
if len(data) > self._read_length:
raise InvalidMP3
length = max(len(data), self._read_length - 10)
self._write('ID3')
self._write_pack('>BBB', 3, 0, 0x0)
        self._write_pack('>L', _pack_non_negative_octet_28_bit_int(length))
self._write(data)
self._pad()
def _write_id3_v2_2(self):
raise UnsupportedID3
self._f.seek(0)
pass
def _write_id3_v1(self):
raise UnsupportedID3
        self._f.seek(-128, 2)
pass
def write(self):
version = (getattr(self, '_version', None)
or getattr(self, 'version', None))
if version == "2.3.0":
self._write_id3_v2_3()
elif version == "2.2.0":
self._write_id3_v2_2()
elif version == "1.1":
self._write_id3_v1()
elif version == "1.0":
self._write_id3_v1()
#def info(filename):
# f = open(filename, 'rb')
# try:
# return id3().read(f).attributes()
# finally:
# f.close()
# composer
# disc
# part_of_a_compilation
# volume_adjustment
# equalizer_preset
# my_rating
# start_time
# stop_time
def test(filename="2_3.mp3"):
import StringIO
    f = open(filename, 'rb')
i = id3_file(f)
i.read()
i._f = StringIO.StringIO()
i.write()
v = i._f.getvalue()
f.seek(0)
v2 = f.read(len(v))
f.close()
return v == v2
def scan():
import os
def walkfn(arg, dir, files):
for filename in files:
if filename[-4:] == '.mp3':
filename = os.path.join(dir, filename)
if not test(filename):
print filename
os.path.walk('.', walkfn, 0)
if __name__ == '__main__':
scan()
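Editor's note: a minimal usage sketch for the id3_file class above (Python 2 to match the module; "song.mp3" is a placeholder path):

f = open("song.mp3", "rb")
tags = id3_file(f).read()
print tags.version, tags.title, tags.artist, tags.album
f.close()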
==== workflow/pnmlpy/pmnl_model.py | SODALITE-EU/verification | Apache-2.0 | 1,180 bytes | b75a00768c2cceed8ca46774029ad378bc7cc2e6 ====
from xml.dom import minidom
from xml.etree.cElementTree import Element, SubElement, ElementTree, tostring
class PNMLModelGenerator:
def generate(self, tasks):
top = Element('pnml')
child = SubElement(top, 'net',
attrib={'id': "net1", 'type': "http://www.pnml.org/version-2009/grammar/pnmlcoremodel"})
page = SubElement(child, 'page',
attrib={'id': "n0"})
index = 0
for task in tasks:
place = SubElement(page, 'place',
attrib={'id': "p" + str(index)})
name = SubElement(place, 'name')
text = SubElement(name, 'text')
text.text = task.name
index += 1
        finalmarkings = SubElement(child, 'finalmarkings')
        markings = SubElement(finalmarkings, 'markings')  # left empty here
        return prettify(top)
def prettify(elem):
"""Return a pretty-printed XML string for the Element.
"""
rough_string = tostring(elem, 'utf-8')
reparsed = minidom.parseString(rough_string)
return reparsed.toprettyxml(indent=" ")
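Editor's note: a minimal usage sketch for PNMLModelGenerator, using a namedtuple as a hypothetical stand-in for the real task objects (all the generator needs is a .name attribute):

from collections import namedtuple

Task = namedtuple('Task', 'name')
tasks = [Task('deploy'), Task('configure')]
print(PNMLModelGenerator().generate(tasks))   # pretty-printed PNML document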
==== examples/truss/truss_01.py | ofgod2/Analisis-matricial-nusa-python | MIT | 969 bytes | b75be5ebe9cb0ad6772b99405564c425be4f2dda ====
# -*- coding: utf-8 -*-
# ***********************************
# Author: Pedro Jorge De Los Santos
# E-mail: delossantosmfq@gmail.com
# Blog: numython.github.io
# License: MIT License
# ***********************************
from nusa import *
"""
Logan, D. (2007). A first course in the finite element analysis.
Example 3.1, pp. 70.
"""
# Input data
E = 30e6 # psi
A = 2.0 # in^2
P = 10e3 # lbf
# Model
m = TrussModel("Truss Model")
# Nodes
n1 = Node((0,0))
n2 = Node((0,120))
n3 = Node((120,120))
n4 = Node((120,0))
# Elements
kdg = np.pi/180.0  # degrees-to-radians factor (defined but not used below)
e1 = Truss((n1,n2),E,A)
e2 = Truss((n1,n3),E,A)
e3 = Truss((n1,n4),E,A)
# Add elements
for nd in (n1,n2,n3,n4):
m.add_node(nd)
for el in (e1,e2,e3):
m.add_element(el)
m.add_force(n1,(0,-P))
m.add_constraint(n2,ux=0,uy=0) # fixed
m.add_constraint(n3,ux=0,uy=0) # fixed
m.add_constraint(n4,ux=0,uy=0) # fixed
m.plot_model()
m.solve() # Solve model
m.plot_deformed_shape() # plot deformed shape
m.show()
==== Profiles/Mahmoud Higazy/logistic_regression.py | AhmedHani/FCIS-Machine-Learning-2017 | Apache-2.0 | 2,919 bytes | b75dd73022d3840c6328953902299b38ebc5ba18 ====
from data_reader.reader import CsvReader
from util import *
import numpy as np
import matplotlib.pyplot as plt
class LogisticRegression(object):
    def __init__(self, learning_rate=0.01, epochs=50):
        self.__epochs = epochs
        self.__learning_rate = learning_rate
def fit(self, X, y):
self.w_ = np.zeros(1 + X.shape[1])
self.cost_ = []
for i in range(self.__epochs):
# 1- Calculate the net input W^T * x
z = self.__net_input(X)
# 2- Get the activation using Sigmoid function
h = self.__activation(z)
            # 3- Calculate the gradient
            errors = y - h
            temp = X.T.dot(errors)
            # 4- Update the weights and bias using the gradient and learning rate
            self.w_[1:] += self.__learning_rate * temp
            self.w_[0] += self.__learning_rate * errors.sum()  # bias gradient is the summed error, not the summed weight gradient
            # 5- Collect the cost for the learning-curve plot
            self.cost_.append(self.__logit_cost(y, self.__activation(z)))
def __logit_cost(self, y, y_val):
logit = -y.dot(np.log(y_val)) - ((1 - y).dot(np.log(1 - y_val)))
return logit
def __sigmoid(self, z):
return 1.0 / (1.0 + np.exp(-z))
def __net_input(self, X):
return np.dot(X, self.w_[1:]) + self.w_[0]
def __activation(self, X):
return self.__sigmoid(X)
    def predict(self, X):
        # 1- Calculate the net input W^T * x
        z = self.__net_input(X)
        # 2- Threshold the activated values into 0/1 classes
        h = self.__activation(z)
        return np.where(h >= 0.5, 1, 0)
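# Editor's note (hedged): __logit_cost above is the standard negative
# log-likelihood (log-loss) of logistic regression,
#     J(w) = -sum_i [ y_i*log(h_i) + (1 - y_i)*log(1 - h_i) ],
# with h_i = sigmoid(w.x_i + b). Its gradient is dJ/dw = -X^T(y - h) and
# dJ/db = -sum_i (y_i - h_i), so gradient *descent* steps along X^T(y - h)
# and sum(y - h) -- which is exactly what the "+=" updates in fit() do.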
reader = CsvReader("./data/Iris.csv")
iris_features, iris_labels = reader.get_iris_data()
ignore_virginica = [i for i, v in enumerate(iris_labels) if v == 'Iris-virginica']
iris_features = [v for i, v in enumerate(iris_features) if i not in ignore_virginica]
iris_labels = [v for i, v in enumerate(iris_labels) if i not in ignore_virginica]
print(len(iris_features))
print(len(iris_labels))
iris_features, iris_labels = shuffle(iris_features, iris_labels)
iris_labels = to_onehot(iris_labels)
iris_labels = list(map(lambda v: v.index(max(v)), iris_labels))
train_x, train_y, test_x, test_y = iris_features[0:89], iris_labels[0:89], iris_features[89:], iris_labels[89:]
train_x, train_y, test_x, test_y = np.asarray(train_x), np.asarray(train_y), np.asarray(test_x), np.asarray(test_y)
train_x, means, stds = standardize(train_x)
test_x = standardize(test_x, means, stds)
lr = LogisticRegression(learning_rate=0.1, epochs=50)
lr.fit(train_x, train_y)
plt.plot(range(1, len(lr.cost_) + 1), np.log10(lr.cost_))
plt.xlabel('Epochs')
plt.ylabel('Cost')
plt.title('Logistic Regression - Learning rate 0.1')
plt.tight_layout()
plt.show()
predicted_test = lr.predict(test_x)
print("Test Accuracy: " + str(((sum([predicted_test[i] == test_y[i] for i in range(0, len(predicted_test))]) / len(predicted_test)) * 100.0)) + "%")
==== wavefront_reader/reading/readobjfile.py | SimLeek/wavefront_reader | MIT | 1,413 bytes | b75eb4207857101d04d38eb0f52b4294fd616690 ====
from wavefront_reader.wavefront_classes.objfile import ObjFile
from .readface import read_face
def read_objfile(fname):
"""Takes .obj filename and return an ObjFile class."""
obj_file = ObjFile()
with open(fname) as f:
lines = f.read().splitlines()
if 'OBJ' not in lines[0]:
raise ValueError("File not .obj-formatted.")
# todo: assumes one object per .obj file, which is wrong
# todo: doesn't properly ignore comments
for line in lines:
if line:
prefix, value = line.split(' ', 1)
if prefix == 'o':
obj_file.add_prop(value)
if obj_file.has_prop():
if prefix == 'v':
obj_file.last_obj_prop.vertices.append([float(val) for val in value.split(' ')])
elif prefix == 'vn':
obj_file.last_obj_prop.vertex_normals.append([float(val) for val in value.split(' ')])
elif prefix == 'vt':
obj_file.last_obj_prop.vertex_textures.append([float(val) for val in value.split(' ')])
elif prefix == 'usemtl':
obj_file.last_obj_prop.material_name = value
elif prefix == 'f':
obj_file.last_obj_prop.faces.append(read_face(value, obj_file.last_obj_prop))
else:
obj_file.misc[prefix] = value
return obj_file
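Editor's note: a minimal usage sketch for read_objfile, writing a tiny OBJ to a temporary file first (the first line must contain 'OBJ' to pass the format check above; assumes read_face accepts plain vertex indices):

import tempfile

obj_text = """# Blender v2.79 OBJ File
o cube
v 0.0 0.0 0.0
v 1.0 0.0 0.0
v 0.0 1.0 0.0
f 1 2 3
"""
with tempfile.NamedTemporaryFile('w', suffix='.obj', delete=False) as tmp:
    tmp.write(obj_text)
obj = read_objfile(tmp.name)
print(obj.last_obj_prop.vertices)   # [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]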
==== constants.py | Guedelho/snake-ai | MIT | 157 bytes | b760116d8d8fe2d046e6af340b2d6bd9cb6fc8e2 ====
# Directions
UP = 'UP'
DOWN = 'DOWN'
LEFT = 'LEFT'
RIGHT = 'RIGHT'
# Colors
RED = (255, 0, 0)
BLACK = (0, 0, 0)
GREEN = (0, 255, 0)
WHITE = (255, 255, 255)
==== Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/openedx/core/djangoapps/video_pipeline/utils.py | osoco/better-ways-of-thinking-about-software | MIT | 520 bytes | b76026927b6eb058284eefad5002a87c72c21db0 ====
"""
Utils for video_pipeline app.
"""
from django.conf import settings
from edx_rest_api_client.client import OAuthAPIClient
def create_video_pipeline_api_client(api_client_id, api_client_secret):
"""
Returns an API client which can be used to make Video Pipeline API requests.
Arguments:
api_client_id(unicode): Video pipeline client id.
api_client_secret(unicode): Video pipeline client secret.
"""
return OAuthAPIClient(settings.LMS_ROOT_URL, api_client_id, api_client_secret)
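Editor's note: a minimal usage sketch; OAuthAPIClient behaves like a requests.Session that transparently handles the OAuth token exchange, so the returned client issues ordinary HTTP calls (the credentials and endpoint path below are placeholders):

client = create_video_pipeline_api_client('my-client-id', 'my-client-secret')
response = client.get(settings.LMS_ROOT_URL + '/api/some/endpoint/')
response.raise_for_status()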
==== run.py | Galaxy-SynBioCAD/extractTaxonomy | MIT | 2,695 bytes | b76161b7b67049e769a1af4d2aa06f728082679c ====
#!/usr/bin/env python3
"""
Created on March 18 2020
@author: Melchior du Lac
@description: Extract the taxonomy ID from an SBML file
"""
import argparse
import tempfile
import os
import logging
import shutil
import docker
def main(inputfile, output):
"""Call the extractTaxonomy docker to return the JSON file
:param inputfile: The path to the SBML file
:param output: The path to the output json file
:type inputfile: str
:type output: str
:rtype: None
:return: None
"""
docker_client = docker.from_env()
image_str = 'brsynth/extracttaxonomy-standalone'
try:
image = docker_client.images.get(image_str)
except docker.errors.ImageNotFound:
logging.warning('Could not find the image, trying to pull it')
try:
docker_client.images.pull(image_str)
image = docker_client.images.get(image_str)
except docker.errors.ImageNotFound:
logging.error('Cannot pull image: '+str(image_str))
exit(1)
with tempfile.TemporaryDirectory() as tmpOutputFolder:
if os.path.exists(inputfile):
shutil.copy(inputfile, tmpOutputFolder+'/input.dat')
command = ['/home/tool_extractTaxonomy.py',
'-input',
'/home/tmp_output/input.dat',
'-output',
'/home/tmp_output/output.dat']
container = docker_client.containers.run(image_str,
command,
detach=True,
stderr=True,
volumes={tmpOutputFolder+'/': {'bind': '/home/tmp_output', 'mode': 'rw'}})
container.wait()
err = container.logs(stdout=False, stderr=True)
err_str = err.decode('utf-8')
if 'ERROR' in err_str:
print(err_str)
elif 'WARNING' in err_str:
print(err_str)
if not os.path.exists(tmpOutputFolder+'/output.dat'):
print('ERROR: Cannot find the output file: '+str(tmpOutputFolder+'/output.dat'))
else:
shutil.copy(tmpOutputFolder+'/output.dat', output)
container.remove()
else:
logging.error('Cannot find the input file: '+str(inputfile))
exit(1)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Extract the taxonomy ID from an SBML file')
parser.add_argument('-input', type=str)
parser.add_argument('-output', type=str)
params = parser.parse_args()
main(params.input, params.output)
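Editor's note: an example invocation of the script above (file names are placeholders); Docker must be running, and the brsynth/extracttaxonomy-standalone image is pulled automatically on first use:

python run.py -input model.sbml -output taxonomy.json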
==== src/ebay_rest/api/buy_browse/models/refinement.py | matecsaj/ebay_rest (also jdavv/ebay_rest) | MIT | 10,460 bytes | b761fb951040af2347c2dd2aa478c82dca9ff08e ====
# coding: utf-8
"""
Browse API
<p>The Browse API has the following resources:</p> <ul> <li><b> item_summary: </b> Lets shoppers search for specific items by keyword, GTIN, category, charity, product, or item aspects and refine the results by using filters, such as aspects, compatibility, and fields values.</li> <li><b> search_by_image: </b><a href=\"https://developer.ebay.com/api-docs/static/versioning.html#experimental\" target=\"_blank\"><img src=\"/cms/img/docs/experimental-icon.svg\" class=\"legend-icon experimental-icon\" alt=\"Experimental Release\" title=\"Experimental Release\" /> (Experimental)</a> Lets shoppers search for specific items by image. You can refine the results by using URI parameters and filters.</li> <li><b> item: </b> <ul><li>Lets you retrieve the details of a specific item or all the items in an item group, which is an item with variations such as color and size and check if a product is compatible with the specified item, such as if a specific car is compatible with a specific part.</li> <li>Provides a bridge between the eBay legacy APIs, such as <b> Finding</b>, and the RESTful APIs, which use different formats for the item IDs.</li> </ul> </li> <li> <b> shopping_cart: </b> <a href=\"https://developer.ebay.com/api-docs/static/versioning.html#experimental\" target=\"_blank\"><img src=\"/cms/img/docs/experimental-icon.svg\" class=\"legend-icon experimental-icon\" alt=\"Experimental Release\" title=\"Experimental Release\" /> (Experimental)</a> <a href=\"https://developer.ebay.com/api-docs/static/versioning.html#limited\" target=\"_blank\"> <img src=\"/cms/img/docs/partners-api.svg\" class=\"legend-icon partners-icon\" title=\"Limited Release\" alt=\"Limited Release\" />(Limited Release)</a> Provides the ability for eBay members to see the contents of their eBay cart, and add, remove, and change the quantity of items in their eBay cart. <b> Note: </b> This resource is not available in the eBay API Explorer.</li></ul> <p>The <b> item_summary</b>, <b> search_by_image</b>, and <b> item</b> resource calls require an <a href=\"/api-docs/static/oauth-client-credentials-grant.html\">Application access token</a>. The <b> shopping_cart</b> resource calls require a <a href=\"/api-docs/static/oauth-authorization-code-grant.html\">User access token</a>.</p> # noqa: E501
OpenAPI spec version: v1.11.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class Refinement(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'aspect_distributions': 'list[AspectDistribution]',
'buying_option_distributions': 'list[BuyingOptionDistribution]',
'category_distributions': 'list[CategoryDistribution]',
'condition_distributions': 'list[ConditionDistribution]',
'dominant_category_id': 'str'
}
attribute_map = {
'aspect_distributions': 'aspectDistributions',
'buying_option_distributions': 'buyingOptionDistributions',
'category_distributions': 'categoryDistributions',
'condition_distributions': 'conditionDistributions',
'dominant_category_id': 'dominantCategoryId'
}
def __init__(self, aspect_distributions=None, buying_option_distributions=None, category_distributions=None, condition_distributions=None, dominant_category_id=None): # noqa: E501
"""Refinement - a model defined in Swagger""" # noqa: E501
self._aspect_distributions = None
self._buying_option_distributions = None
self._category_distributions = None
self._condition_distributions = None
self._dominant_category_id = None
self.discriminator = None
if aspect_distributions is not None:
self.aspect_distributions = aspect_distributions
if buying_option_distributions is not None:
self.buying_option_distributions = buying_option_distributions
if category_distributions is not None:
self.category_distributions = category_distributions
if condition_distributions is not None:
self.condition_distributions = condition_distributions
if dominant_category_id is not None:
self.dominant_category_id = dominant_category_id
@property
def aspect_distributions(self):
"""Gets the aspect_distributions of this Refinement. # noqa: E501
        An array of containers for all the aspect refinements. # noqa: E501
:return: The aspect_distributions of this Refinement. # noqa: E501
:rtype: list[AspectDistribution]
"""
return self._aspect_distributions
@aspect_distributions.setter
def aspect_distributions(self, aspect_distributions):
"""Sets the aspect_distributions of this Refinement.
        An array of containers for all the aspect refinements. # noqa: E501
:param aspect_distributions: The aspect_distributions of this Refinement. # noqa: E501
:type: list[AspectDistribution]
"""
self._aspect_distributions = aspect_distributions
@property
def buying_option_distributions(self):
"""Gets the buying_option_distributions of this Refinement. # noqa: E501
        An array of containers for all the buying option refinements. # noqa: E501
:return: The buying_option_distributions of this Refinement. # noqa: E501
:rtype: list[BuyingOptionDistribution]
"""
return self._buying_option_distributions
@buying_option_distributions.setter
def buying_option_distributions(self, buying_option_distributions):
"""Sets the buying_option_distributions of this Refinement.
        An array of containers for all the buying option refinements. # noqa: E501
:param buying_option_distributions: The buying_option_distributions of this Refinement. # noqa: E501
:type: list[BuyingOptionDistribution]
"""
self._buying_option_distributions = buying_option_distributions
@property
def category_distributions(self):
"""Gets the category_distributions of this Refinement. # noqa: E501
        An array of containers for all the category refinements. # noqa: E501
:return: The category_distributions of this Refinement. # noqa: E501
:rtype: list[CategoryDistribution]
"""
return self._category_distributions
@category_distributions.setter
def category_distributions(self, category_distributions):
"""Sets the category_distributions of this Refinement.
        An array of containers for all the category refinements. # noqa: E501
:param category_distributions: The category_distributions of this Refinement. # noqa: E501
:type: list[CategoryDistribution]
"""
self._category_distributions = category_distributions
@property
def condition_distributions(self):
"""Gets the condition_distributions of this Refinement. # noqa: E501
        An array of containers for all the condition refinements. # noqa: E501
:return: The condition_distributions of this Refinement. # noqa: E501
:rtype: list[ConditionDistribution]
"""
return self._condition_distributions
@condition_distributions.setter
def condition_distributions(self, condition_distributions):
"""Sets the condition_distributions of this Refinement.
        An array of containers for all the condition refinements. # noqa: E501
:param condition_distributions: The condition_distributions of this Refinement. # noqa: E501
:type: list[ConditionDistribution]
"""
self._condition_distributions = condition_distributions
@property
def dominant_category_id(self):
"""Gets the dominant_category_id of this Refinement. # noqa: E501
The identifier of the category that most of the items are part of. # noqa: E501
:return: The dominant_category_id of this Refinement. # noqa: E501
:rtype: str
"""
return self._dominant_category_id
@dominant_category_id.setter
def dominant_category_id(self, dominant_category_id):
"""Sets the dominant_category_id of this Refinement.
The identifier of the category that most of the items are part of. # noqa: E501
:param dominant_category_id: The dominant_category_id of this Refinement. # noqa: E501
:type: str
"""
self._dominant_category_id = dominant_category_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Refinement, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Refinement):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
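Editor's note: a minimal usage sketch for the generated model above; '9355' is an arbitrary example category id, and unset fields stay None in the dict:

refinement = Refinement(dominant_category_id='9355')
print(refinement.dominant_category_id)   # '9355'
print(refinement.to_dict())              # every swagger field, unset ones as None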
==== tests/dataio_tests/test_import_data_filter_empty_directories.py | cdeitrick/Lolipop (also cdeitrick/muller_diagrams) | MIT | 1,878 bytes | b76571e31217da708c1ce0ba259ecc1d18b070d9 ====
from pathlib import Path
import pandas
from muller.dataio import import_tables
from loguru import logger
DATA_FOLDER = Path(__file__).parent.parent / "data"
def test_filter_empty_trajectories():
input_column_0 = ['genotype-1', 'genotype-2', 'genotype-3', 'genotype-4', 'genotype-5', 'genotype-6']
input_column_1 = [0.000, 0.000, 0.000, 0.111, 0.000, 0.000]
input_column_2 = [0.000, 0.380, 0.000, 0.222, 0.000, 0.000]
input_column_3 = [0.261, 0.432, 0.000, 0.333, 0.000, 0.000]
input_column_4 = [1.000, 0.432, 0.000, 0.444, 1.470, 0.272]
expected_column_0 = ['genotype-1', 'genotype-2', 'genotype-4', 'genotype-5', 'genotype-6']
expected_column_1 = [0.000, 0.000, 0.111, 0.000, 0.000]
expected_column_2 = [0.000, 0.380, 0.222, 0.000, 0.000]
expected_column_3 = [0.261, 0.432, 0.333, 0.000, 0.000]
expected_column_4 = [1.000, 0.432, 0.444, 1.470, 0.272]
# Convert to a dataframe
input_dataframe_definition = {
'Genotype': input_column_0,
0:input_column_1,
1:input_column_2,
2:input_column_3,
3:input_column_4,
}
expected_dataframe_definition = {
'Genotype': expected_column_0,
0: expected_column_1,
1: expected_column_2,
2: expected_column_3,
3: expected_column_4
}
logger.debug(input_dataframe_definition)
input_dataframe = pandas.DataFrame(input_dataframe_definition)
input_dataframe = input_dataframe.set_index('Genotype')
logger.debug(input_dataframe)
expected_dataframe = pandas.DataFrame(expected_dataframe_definition).set_index('Genotype')
logger.debug(input_dataframe.to_string())
result = import_tables.filter_empty_trajectories(input_dataframe)
logger.debug(result.to_string())
assert list(result.columns) == list(expected_dataframe.columns)
assert len(result) == len(expected_dataframe)
assert list(result.index) == list(expected_dataframe.index)
#pandas.testing.assert_frame_equal(result, expected_dataframe)
==== posthog/apps.py | adamb70/posthog | MIT | 1,548 bytes | b765d8a9e197945a8d8e649b5d2aab25a4ba41c1 ====
import os
import sys
import posthoganalytics
from django.apps import AppConfig
from django.conf import settings
from posthog.utils import get_git_branch, get_git_commit, get_machine_id
from posthog.version import VERSION
class PostHogConfig(AppConfig):
name = "posthog"
verbose_name = "PostHog"
def ready(self):
posthoganalytics.api_key = "sTMFPsFhdP1Ssg"
posthoganalytics.personal_api_key = os.environ.get("POSTHOG_PERSONAL_API_KEY")
# Skip plugin sync in manage.py scripts and in tests
# (the database tables might not yet be created)
        if (
            not settings.TEST
            and "makemigrations" not in sys.argv
            and "migrate" not in sys.argv
            and "manage.py" not in " ".join(sys.argv)
            and "/mypy" not in sys.argv[0]
        ):
from posthog.plugins import sync_plugin_config
# syncs posthog.json['plugins'] and the Plugin/PluginConfig models
sync_plugin_config()
if settings.DEBUG:
# log development server launch to posthog
if os.getenv("RUN_MAIN") == "true":
posthoganalytics.capture(
get_machine_id(),
"development server launched",
{"posthog_version": VERSION, "git_rev": get_git_commit(), "git_branch": get_git_branch(),},
)
posthoganalytics.disabled = True
elif settings.TEST or os.environ.get("OPT_OUT_CAPTURE"):
posthoganalytics.disabled = True
==== src/task_timer.py | dlb-rl/pulse-rl | MIT | 836 bytes | b765edd3d7064dd1a82224294d673c2cee6bca1a ====
import os
import json
import time
from datetime import timedelta
class TaskTimer:
def __init__(self):
self.time_performance = {}
self.start_times = {}
def start(self, task):
self.start_times[task] = time.time()
print('--- [{}] Start "{}"'.format(time.ctime(self.start_times[task]), task))
def end(self, task):
saving_end = time.time()
self.time_performance[task] = str(
timedelta(seconds=(saving_end - self.start_times[task]))
)
print(
'--- [{}] End "{}" in {} seconds'.format(
time.ctime(saving_end), task, self.time_performance[task]
)
)
def save(self, folder):
with open(os.path.join(folder, "time_performance.json"), "w") as fp:
json.dump(self.time_performance, fp)
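Editor's note: a minimal usage sketch for TaskTimer; the task label and output folder are arbitrary:

timer = TaskTimer()
timer.start('training')
time.sleep(1)        # stand-in for real work
timer.end('training')
timer.save('.')      # writes ./time_performance.json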
==== tasks-deploy/terminal/check.py | chankruze/qctf-school-2018 | MIT | 8,718 bytes | b7660ef0961ba8b4931477e766e7019f01819220 ====
#!/usr/bin/env python3
flags = ['QCTF{2a5576bc51a5c3feb82c96fe80d3a520}', 'QCTF{eb2ddbf0e318812ede843e8ecec6144f}', 'QCTF{5cdf65c6069a6b815352c3f1b4d09a56}', 'QCTF{69d7b7deb23746b8bd18b22f3eb92b50}', 'QCTF{44e37938c0bc05393b5b33a70c5a70db}', 'QCTF{3b37a953391e38ce0b8e168e9eaa6ec5}', 'QCTF{ee2848cb73236007d36cb5ae75c4d2bf}', 'QCTF{8dceee7e583b81a844a58ae72bf0671f}', 'QCTF{1e3dbf7253eaa1454ba21806dd1fc14c}', 'QCTF{da8f9cc7d7f1b1481f87f6390fa25e0c}', 'QCTF{ef7e72c36017f57e3f6b940b25bcd3b8}', 'QCTF{41342b5fdd954b2ff55773096c335f52}', 'QCTF{35fa8fbb26685d098fc776e0a36f49aa}', 'QCTF{168ab402de637d3d309d32d5290e1e31}', 'QCTF{f184a2e0373a244ed3a1b84a9bdb3a23}', 'QCTF{f586fdb5119afc82250ed69fe9bdc7dd}', 'QCTF{1f13c89d8c9eb5573c15ecd50d2f9e50}', 'QCTF{11dfd6714cf59973c28b29c27b3f84df}', 'QCTF{210050152f6adf890bc663580cbca181}', 'QCTF{8200b5ef385ca392651e186a436e0609}', 'QCTF{552eb20e1897c751adf2e320f47d9179}', 'QCTF{cb6efd12b769ea8f1c38c98f1977d5ce}', 'QCTF{004072519dd358254b1de59133ba8386}', 'QCTF{9c4b21c7c3a0fcc3f391695968b7c7e9}', 'QCTF{5a940d17a3b9afa668381947469dd9f7}', 'QCTF{7a3891890abaf4921b8fb2e4a940eb2d}', 'QCTF{3527635e90dc57fe164d5f8d88dbcb38}', 'QCTF{3c5c5abd09bdcfdf09e65eba1eb80b24}', 'QCTF{6f46a5846a40d68d6a035a2ad60a2622}', 'QCTF{794223155a79df0d44a13687b1ed2003}', 'QCTF{43d4cb8f632f779975d6060b5193153f}', 'QCTF{775427be397d5bf852a046e65884d7f9}', 'QCTF{f9fd2dff056c2d47d14bdb9a417d99d5}', 'QCTF{300e2128e66389b5d3c3e057c1ade37f}', 'QCTF{25ee8ba64e6f578a14065aef15fb0028}', 'QCTF{7b0a45a53a059e55dcccf3b7e975ff2e}', 'QCTF{ebff50288af7c8a2a7bd8b1e11a8a612}', 'QCTF{b9a51b35b496056c4fbce56d3c6368e5}', 'QCTF{e1c9872464c4a27cbab39abcd86203b9}', 'QCTF{249589aeb27f9215b209dd933b6dccd8}', 'QCTF{55edfaf50d7e83ade9a98655fca33b31}', 'QCTF{597ab70595c974e07e31a3fa1277bf11}', 'QCTF{7c5023239ef7f22c41aa606ce9aaba5b}', 'QCTF{72257f0fb4a980bcbd9a6f41f52a370b}', 'QCTF{fc09caef8f7c55cd878344568f47a382}', 'QCTF{1dc7595bdbd4c4c60ceda6e21709e6f2}', 'QCTF{d19d9769e8c4262f3a66c95f63fe7f34}', 'QCTF{dec3d0c80f0df0e5af9a97d665dbdc37}', 'QCTF{02bafaf609aaa269bd8abf417208bb54}', 'QCTF{67507d62a48f4e06275f3363b7876d3c}', 'QCTF{8a5a2752374ef22803d1e600177b1bb4}', 'QCTF{a353e9e2be2070249cd297715ea6b10a}', 'QCTF{80225cecfb252b774fa338313e9430b7}', 'QCTF{fa584956ef689e8b341e1d734c734719}', 'QCTF{63015e092eb29efb121f3253cd21c379}', 'QCTF{d62443dfd293d1e412afe00676e437d5}', 'QCTF{59e40e54a8c5a04526e545bbee435249}', 'QCTF{30702aa0f1f56f7c6b8f738602e498e0}', 'QCTF{efec73d04226055571e8f18c96ee3c96}', 'QCTF{9d1dc2ea33d36391531c5555bf39e433}', 'QCTF{d367af673f3c9cf0fdb2893b00689b7c}', 'QCTF{1e24ece8acce5ad1902f412338b01e53}', 'QCTF{88293463e852a19b1c5009bbb1d6ed5f}', 'QCTF{319a0909fd0df476109c9bab65b5202a}', 'QCTF{59f28c649809dd3e4287c72f617428a4}', 'QCTF{ce7df959f53a8dea0ce1e3aa0c2eb8f8}', 'QCTF{5386d6256232979494c48252ee1dd640}', 'QCTF{5ad8efd6bb44d0604cba3800600b1d0e}', 'QCTF{087ca88149d4323d1f00be02a882a79b}', 'QCTF{ce4abbce715a72951061f7adcf15ea1b}', 'QCTF{301e2306bc2849fb53fff0595afc3ed1}', 'QCTF{206d3753e0070c66561a16abfa9c845b}', 'QCTF{05e734b3544679475050326e11441d6e}', 'QCTF{f45249ac1b299ac8a9f430a791e20ceb}', 'QCTF{3433fc22bbc389ba386c1d21f532ed3b}', 'QCTF{15edbf8aaa728eb81ba6f555997f4815}', 'QCTF{580fe8c58bf434bc5f2eef7420649673}', 'QCTF{e41bfb2090daf3773fa230eee97c2d90}', 'QCTF{654380c696844545896584334e208184}', 'QCTF{3946d70c84bf4810cf7e9a31e0b12cff}', 'QCTF{97b67f280a1b2bf58f8bd54ad8ceff66}', 'QCTF{3278f990d9dd5f5dd67c2d4b9d230753}', 'QCTF{f966c22475a0964eaa834830bfe338bd}', 'QCTF{3f913b97b23c854fcdf6fddc41743c62}', 
'QCTF{e5dc135b13c2c5b5371d9c24b7715a90}', 'QCTF{70cfc6a3e2f98e98f3488d5180fd3d1c}', 'QCTF{1dfea28b554380e772736d0b3e2e060d}', 'QCTF{4cf742197c83c13a76ffd38fcd9394c7}', 'QCTF{0389931bc1415676698558259dd33911}', 'QCTF{2c6a7d2d0cb06b7050164c593b6eff88}', 'QCTF{43bd14ac279307f5370ae6ec6e50404e}', 'QCTF{9dc8266201ea1ba7072d3a717099bab1}', 'QCTF{f35148de32676d50e1d7b2482f800a2c}', 'QCTF{70c01985b0e31b6915a4e42936dedf25}', 'QCTF{231cdcc8bbf386a10e93530cb4595991}', 'QCTF{543a9e949ff8f72b7388b39d44288b55}', 'QCTF{2bb32c8b3e2468bc6f3f1140e02e1e33}', 'QCTF{b5f4dd820207d40769649252979ce450}', 'QCTF{1432aeb99dc6560626d075c9666973e2}', 'QCTF{7520b882081e987b73e728409c38bb55}', 'QCTF{8748557fa294f0c49ef93016ce1ca4ab}', 'QCTF{51e68ed7cbeae5ef716ff98a95ea783a}', 'QCTF{6a2643309f4ffd5856aeb90e6c68267e}', 'QCTF{9eb6102744e6c0e9994d7886ed125f8e}', 'QCTF{0731061397ccd2de5b324ce6c13dc611}', 'QCTF{3b106d379fa05638d90ed9a5a1dfdcbe}', 'QCTF{e539d324465979cb4c54773139ba833f}', 'QCTF{ac0e2a0b56508dfa6b253823697e842d}', 'QCTF{6b020c80f9b62510caec2ec17a07b640}', 'QCTF{00a1c7c11aedc76428ded7dd254adc31}', 'QCTF{2be348ddb0c9d391c114b5878ac1a3ae}', 'QCTF{5875aa9d30aca2e4590534016342aba7}', 'QCTF{07c01bde7f54d222d59e1f45781e8acc}', 'QCTF{1e3f24b6cd7066ac673048656d7d2fc9}', 'QCTF{0a75b22e9f94f3dfc19ab4b934b2961d}', 'QCTF{6a6564ca624d02b152987d5a595cf7bc}', 'QCTF{52463184224982ccf2f9346438f92268}', 'QCTF{ab014a22c6ea08979323fd8a467f1f4a}', 'QCTF{4a3e06c87bb6c0580195db33b39776f7}', 'QCTF{395eb9a839f59f933e135b02d9543c3b}', 'QCTF{c4fd0005bf0d339ff0f7e1860baa5f36}', 'QCTF{aa15b27e2f0485ca04ae787530c1ed18}', 'QCTF{10e844c23ec931932ea6bd4e3e728004}', 'QCTF{b8b86c195cb7dcb041e9dfb3d6b94573}', 'QCTF{932ff0cf891fe181b78a8763eb82a6c4}', 'QCTF{d6f4a756a5206321737afb1e0a183844}', 'QCTF{8c93d71184c17684c92e409e39cb7c00}', 'QCTF{dc4e8fb58fa2ed616d123a73b1a81467}', 'QCTF{8ee320c8dea80ef18d04534ff798caba}', 'QCTF{6a11a92d100293a4835f54dde05cbe34}', 'QCTF{9a79fff31abe7e4718070fa517e64fc0}', 'QCTF{ef93a88f1454894b3940fd2aa93e048d}', 'QCTF{0bed196ea7b09a5db9f31ef3c7f6848d}', 'QCTF{cee84fc49b89ffa988f52d0797baa73d}', 'QCTF{9e80c3d1bba93faa3b45eda0b912a02c}', 'QCTF{cac025aa9e5f4394744f12db0f3e5f71}', 'QCTF{5ee478bf634afe72ae0d3b9e95faf021}', 'QCTF{a5903dad242c4413fa99381d340226d1}', 'QCTF{abcd7d765c23353e38dc09a616bd9bde}', 'QCTF{c13ed258f4775efa54d5b166dd3240ed}', 'QCTF{e08e0a83b3a96cbf379380139b65cc7e}', 'QCTF{7b3803fdb94ff47cf63b7526ac388bc1}', 'QCTF{352ca5de54ffbe8515635118c787b8af}', 'QCTF{50da9ea0ae8ad773bde9a1577ceee9ff}', 'QCTF{7564194acd79f1eb36e46b33143361eb}', 'QCTF{05e594d4643b5b256084d33e51789029}', 'QCTF{36ba558d89ae53394cbd7eaf497601e3}', 'QCTF{33c50dc466f2cfaf68d173a7af16377a}', 'QCTF{903fa3b7125aa29dd2edcb1f2c5da381}', 'QCTF{4bc7246bfdd22e310bbfa25046b20d2c}', 'QCTF{baf8274e1ad81a53c9ca3a1c4762d718}', 'QCTF{9066fe52443e5978e9b82fe2b0801b03}', 'QCTF{39acd1bd34c6a258f3f7167dc21c7a85}', 'QCTF{deca3ebff2319c1e1df6fe96ffb2ff7b}', 'QCTF{0324d544b5e46489943574b02061ebed}', 'QCTF{d08bde8d29c4de77c3439eed7eeb677a}', 'QCTF{16337906f4fa89ee4dd04e6ad1fec14f}', 'QCTF{44de9c99a06af160110e707b93c3dd79}', 'QCTF{87a3543161d6d0ac1497ef1e43de70ec}', 'QCTF{41460476cb83e4544c1b0018c4b20980}', 'QCTF{c0242fbcf990f3a62e6be743a65d07f8}', 'QCTF{a865da86e6d2a17ab3b848f47f997704}', 'QCTF{51ac3d05e0fd1cdc6c9a31c47d2a3a88}', 'QCTF{943ca3ce0cc488be0589ca207806738d}', 'QCTF{f24460a3ce62c641236671b66f193054}', 'QCTF{87980f7c165f4d392c6d377ef6706027}', 'QCTF{2b3ef8a0e956b8d1b9c0373e5686147e}', 'QCTF{97fc9edc359c3ac1f368ea9d6f707ee2}', 
'QCTF{f39d636b8d20fdb174bf627c94af8308}', 'QCTF{3eb03e255dacfff2f34afd34f0791f06}', 'QCTF{b819ac459aa4570528e102929befec96}', 'QCTF{7cbf5260678a762738a45a9a08a0d049}', 'QCTF{c58971009a765417bf73916323eb68e1}', 'QCTF{77444071adcf5221c83ad35ebc6c4cdb}', 'QCTF{53a01ebed36320ab8a71084c94ba585f}', 'QCTF{c89f06767a21bdd15a463a92897b0cae}', 'QCTF{6a4cf33cc6fcea2fab3e9612a57994fd}', 'QCTF{0bb8a72a683c35fa835226f7c5ebe3a9}', 'QCTF{206b39742a866fed482a03ce5b6dbd1c}', 'QCTF{4f027a70b064eac7ff2e9ca3d2e03b1d}', 'QCTF{4c39da37e2580f8deb5eba08ed7d5a57}', 'QCTF{f99cfb8878971c21b243f818e505b61e}', 'QCTF{e7daf2f64180b615c1c4ebbecadd0518}', 'QCTF{3304f198c3435e0d49a67f1672a08209}', 'QCTF{e720953183625b8a69a2f8e3e8a8d93a}', 'QCTF{350655c7d8bbaa6495d9ebfa1c0d76a7}', 'QCTF{d74759b2ad78fa941ef19257976392d4}', 'QCTF{146730e07ca858bbf709c2f5bee0a41e}', 'QCTF{db1962db2dc72006c551f14912f3a81a}', 'QCTF{0273d4d040f7de7daa2cea88b86f8f62}', 'QCTF{56ff70053c7a1dba014711c73f3f0486}', 'QCTF{90548bc64e69a21ace8d622e0c21d485}', 'QCTF{052d0c62d05b258b6e4d0fbb64d2c837}', 'QCTF{9fbf42a0d9c53b2b78dc9abef52dd368}', 'QCTF{cf50c6918c393ab19066d688fee7f3d1}', 'QCTF{c7a5a9ca5a29f3bdcd13bc38b4960506}', 'QCTF{c92b7a3db35070eda9793a250a8b8373}', 'QCTF{04d0261d7237329eeabe658967e752ca}', 'QCTF{f52a7914bb09c7cfea77ac0cc91c68b1}', 'QCTF{cc2c170948663de9d7dd25e4e6ce8834}']
def check(attempt, context):
if attempt.answer == flags[attempt.participant.id % len(flags)]:
return Checked(True)
if attempt.answer in flags:
return CheckedPlagiarist(False, flags.index(attempt.answer))
return Checked(False)
==== bioshareX/api/views.py | amschaal/bioshare | MIT | 18,186 bytes | b76732831186e479ef5311c8854a1d2a5b3efda3 ====
# Create your views here.
from django.core.urlresolvers import reverse
from django.http.response import JsonResponse, HttpResponse
from settings.settings import AUTHORIZED_KEYS_FILE, SITE_URL
from bioshareX.models import Share, SSHKey, MetaData, Tag
from bioshareX.forms import MetaDataForm, json_form_validate
from guardian.shortcuts import get_perms, get_users_with_perms, remove_perm, assign_perm
from bioshareX.utils import JSONDecorator, json_response, json_error, share_access_decorator, safe_path_decorator, validate_email, fetchall,\
test_path, du
from django.contrib.auth.models import User, Group
from django.db.models import Q
import os
from rest_framework.decorators import api_view, detail_route, throttle_classes,\
action
from bioshareX.forms import ShareForm
from guardian.decorators import permission_required
from bioshareX.utils import ajax_login_required, email_users
from rest_framework import generics, viewsets, status
from bioshareX.models import ShareLog, Message
from bioshareX.api.serializers import ShareLogSerializer, ShareSerializer,\
GroupSerializer, UserSerializer, MessageSerializer
from rest_framework.permissions import DjangoModelPermissions, IsAuthenticated
from bioshareX.permissions import ManageGroupPermission
from rest_framework.response import Response
from guardian.models import UserObjectPermission
from django.contrib.contenttypes.models import ContentType
import datetime
from bioshareX.api.filters import UserShareFilter, ShareTagFilter,\
GroupShareFilter, ActiveMessageFilter
from rest_framework.throttling import UserRateThrottle
from django.utils import timezone
import csv
@ajax_login_required
def get_user(request):
query = request.GET.get('query')
try:
user = User.objects.get(Q(username=query)|Q(email=query))
return JsonResponse({'user':UserSerializer(user).data})
except Exception, e:
return JsonResponse({'status':'error','query':query,'errors':[e.message]},status=status.HTTP_404_NOT_FOUND)
@ajax_login_required
def get_address_book(request):
try:
emails = User.objects.filter(shareuserobjectpermission__content_object__in=Share.objects.filter(owner=request.user).values_list('id')).values_list('email').distinct().order_by('email')
groups = Group.objects.all().order_by('name')
return json_response({'emails':[email[0] for email in emails], 'groups':[g.name for g in groups]})
except Exception, e:
return json_error([e.message])
@ajax_login_required
def get_tags(request):
try:
tags = Tag.objects.filter(name__icontains=request.GET.get('tag'))
return json_response({'tags':[tag.name for tag in tags]})
except Exception, e:
return json_error([e.message])
@share_access_decorator(['admin'])
def share_with(request,share):
query = request.POST.get('query',request.GET.get('query'))
exists = []
new_users = []
groups = []
invalid = []
try:
emails = [email.strip().lower() for email in query.split(',')]
for email in emails:
if email == '':
continue
if email.startswith('group:'):
name = email.split('group:')[1].lower()
try:
group = Group.objects.get(name__iexact=name)
groups.append({'group':{'id':group.id,'name':group.name}})
except:
invalid.append(name)
elif validate_email(email):
try:
user = User.objects.get(email=email)
exists.append({'user':{'username':email}})
except:
new_users.append({'user':{'username':email}})
else:
invalid.append(email)
return json_response({'exists':exists, 'groups':groups,'new_users':new_users,'invalid':invalid})
except Exception, e:
return json_error([e.message])
@ajax_login_required
def share_autocomplete(request):
terms = [term.strip() for term in request.GET.get('query').split()]
query = reduce(lambda q,value: q&Q(name__icontains=value), terms , Q())
try:
share_objs = Share.user_queryset(request.user).filter(query).order_by('-created')[:10]
shares = [{'id':s.id,'url':reverse('list_directory',kwargs={'share':s.id}),'name':s.name,'notes':s.notes} for s in share_objs]
return json_response({'status':'success','shares':shares})
except Exception, e:
return json_error([e.message])
def get_group(request):
query = request.GET.get('query')
try:
group = Group.objects.get(name=query)
return json_response({'group':{'name':group.name}})
except Exception, e:
return json_error([e.message])
@api_view(['GET'])
@share_access_decorator(['admin'])
def get_permissions(request,share):
data = share.get_permissions(user_specific=True)
return json_response(data)
@share_access_decorator(['admin'])
@JSONDecorator
def update_share(request,share,json=None):
share.secure = json['secure']
share.save()
return json_response({'status':'okay'})
@api_view(['POST'])
@share_access_decorator(['admin'])
@JSONDecorator
def set_permissions(request,share,json=None):
from smtplib import SMTPException
emailed=[]
created=[]
failed=[]
# if not request.user.has_perm('admin',share):
# return json_response({'status':'error','error':'You do not have permission to write to this share.'})
if json.has_key('groups'):
for group, permissions in json['groups'].iteritems():
g = Group.objects.get(id__iexact=group)
current_perms = get_perms(g,share)
removed_perms = list(set(current_perms) - set(permissions))
added_perms = list(set(permissions) - set(current_perms))
for u in g.user_set.all():
if len(share.get_user_permissions(u,user_specific=True)) == 0 and len(added_perms) > 0 and json['email']:
email_users([u],'share/share_subject.txt','share/share_email_body.txt',{'user':u,'share':share,'sharer':request.user,'site_url':SITE_URL})
emailed.append(u.username)
for perm in removed_perms:
remove_perm(perm,g,share)
for perm in added_perms:
assign_perm(perm,g,share)
if json.has_key('users'):
for username, permissions in json['users'].iteritems():
username = username.lower()
try:
u = User.objects.get(username__iexact=username)
if len(share.get_user_permissions(u,user_specific=True)) == 0 and json['email']:
try:
email_users([u],'share/share_subject.txt','share/share_email_body.txt',{'user':u,'share':share,'sharer':request.user,'site_url':SITE_URL})
emailed.append(username)
except:
failed.append(username)
except:
if len(permissions) > 0:
password = User.objects.make_random_password()
u = User(username=username,email=username)
u.set_password(password)
u.save()
try:
email_users([u],'share/share_subject.txt','share/share_new_email_body.txt',{'user':u,'password':password,'share':share,'sharer':request.user,'site_url':SITE_URL})
created.append(username)
except:
failed.append(username)
u.delete()
current_perms = share.get_user_permissions(u,user_specific=True)
print 'CURRENT'
print current_perms
print 'PERMISSIONS'
print permissions
removed_perms = list(set(current_perms) - set(permissions))
added_perms = list(set(permissions) - set(current_perms))
print 'ADDING: '
print added_perms
print 'REMOVING: '
print removed_perms
for perm in removed_perms:
if u.username not in failed:
remove_perm(perm,u,share)
for perm in added_perms:
if u.username not in failed:
assign_perm(perm,u,share)
data = share.get_permissions(user_specific=True)
data['messages']=[]
if len(emailed) > 0:
data['messages'].append({'type':'info','content':'%s has/have been emailed'%', '.join(emailed)})
if len(created) > 0:
        data['messages'].append({'type':'info','content':'Accounts have been created and emails have been sent to the following email addresses: %s'%', '.join(created)})
if len(failed) > 0:
data['messages'].append({'type':'info','content':'Delivery has failed to the following addresses: %s'%', '.join(failed)})
data['json']=json
return json_response(data)
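# Illustrative sketch (an addition, not from the original source): the JSON
# body set_permissions() expects, with hypothetical usernames, group ids and
# permission codenames (the view only requires lists of permission strings).
# {
#     "email": true,
#     "users": {"alice@example.com": ["view_share_files", "write_to_share"]},
#     "groups": {"7": ["view_share_files"]}
# }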
@share_access_decorator(['view_share_files'])
def search_share(request,share,subdir=None):
from bioshareX.utils import find
query = request.GET.get('query',False)
response={}
if query:
response['results'] = find(share,"*%s*"%query,subdir)
else:
response = {'status':'error'}
return json_response(response)
@safe_path_decorator()
@share_access_decorator(['write_to_share'])
def edit_metadata(request, share, subpath):
try:
if share.get_path_type(subpath) is None:
raise Exception('The specified file or folder does not exist in this share.')
metadata = MetaData.objects.get_or_create(share=share, subpath=subpath)[0]
form = MetaDataForm(request.POST if request.method == 'POST' else request.GET)
data = json_form_validate(form)
if not form.is_valid():
return json_response(data)#return json_error(form.errors)
tags = []
for tag in form.cleaned_data['tags'].split(','):
tag = tag.strip()
if len(tag) >2 :
tags.append(Tag.objects.get_or_create(name=tag)[0])
metadata.tags = tags
metadata.notes = form.cleaned_data['notes']
metadata.save()
name = os.path.basename(os.path.normpath(subpath))
return json_response({'name':name,'notes':metadata.notes,'tags':[tag.name for tag in tags]})
except Exception, e:
return json_error([str(e)])
@ajax_login_required
def delete_ssh_key(request):
try:
id = request.POST.get('id')
key = SSHKey.objects.get(user=request.user,id=id)
# subprocess.call(['/bin/chmod','600',AUTHORIZED_KEYS_FILE])
keystring = key.get_key()
# remove_me = keystring.replace('/','\\/')#re.escape(key.extract_key())
# command = ['/bin/sed','-i','/%s/d'%remove_me,AUTHORIZED_KEYS_FILE]
# subprocess.check_call(command)
f = open(AUTHORIZED_KEYS_FILE,"r")
lines = f.readlines()
f.close()
f = open(AUTHORIZED_KEYS_FILE,"w")
for line in lines:
if line.find(keystring) ==-1:
f.write(line)
f.close()
# subprocess.call(['/bin/chmod','400',AUTHORIZED_KEYS_FILE])
key.delete()
SSHKey.objects.filter(key__contains=keystring).delete()
response = {'status':'success','deleted':id}
except Exception, e:
        response = {'status':'error','message':'Unable to delete ssh key: '+str(e)}
return json_response(response)
"""
Requires: "name", "notes", "filesystem" arguments.
Optional: "link_to_path", "read_only"
"""
@api_view(['POST'])
@permission_required('bioshareX.add_share', return_403=True)
def create_share(request):
form = ShareForm(request.user,request.data)
if form.is_valid():
share = form.save(commit=False)
share.owner=request.user
link_to_path = request.data.get('link_to_path',None)
if link_to_path:
if not request.user.has_perm('bioshareX.link_to_path'):
return JsonResponse({'error':"You do not have permission to link to a specific path."},status=400)
try:
share.save()
except Exception, e:
share.delete()
return JsonResponse({'error':e.message},status=400)
return JsonResponse({'url':"%s%s"%(SITE_URL,reverse('list_directory',kwargs={'share':share.id})),'id':share.id})
else:
return JsonResponse({'errors':form.errors},status=400)
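# Illustrative sketch (an addition, not from the original source): a request
# body for create_share(), following the docstring above; all values are
# hypothetical.
# {
#     "name": "RNA-seq run 42",
#     "notes": "Raw fastq files",
#     "filesystem": 1,
#     "link_to_path": "/archive/runs/42",  # optional, needs the link_to_path permission
#     "read_only": true                    # optional
# }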
@ajax_login_required
@share_access_decorator(['view_share_files'])
def email_participants(request,share,subdir=None):
try:
subject = request.POST.get('subject')
emails = request.POST.getlist('emails',[])
users = [u for u in get_users_with_perms(share, attach_perms=False, with_superusers=False, with_group_users=True)]
if len(emails) > 0:
users = [u for u in User.objects.filter(id__in=[u.id for u in users]).filter(email__in=emails)]
body = request.POST.get('body')
users.append(share.owner)
email_users(users, ctx_dict={}, subject=subject, body=body,from_email=request.user.email,content_subtype='plain')
response = {'status':'success','sent_to':[u.email for u in users]}
return json_response(response)
except Exception, e:
return JsonResponse({'errors':[str(e)]},status=400)
class ShareLogList(generics.ListAPIView):
serializer_class = ShareLogSerializer
permission_classes = (IsAuthenticated,)
filter_fields = {'action':['icontains'],'user__username':['icontains'],'text':['icontains'],'paths':['icontains'],'share':['exact']}
def get_queryset(self):
shares = Share.user_queryset(self.request.user,include_stats=False)
return ShareLog.objects.filter(share__in=shares)
class ShareViewset(viewsets.ReadOnlyModelViewSet):
serializer_class = ShareSerializer
permission_classes = (IsAuthenticated,)
filter_backends = generics.ListAPIView.filter_backends + [UserShareFilter,ShareTagFilter,GroupShareFilter]
filter_fields = {'name':['icontains'],'notes':['icontains'],'owner__username':['icontains'],'path_exists':['exact']}
ordering_fields = ('name','owner__username','created','updated','stats__num_files','stats__bytes')
def get_queryset(self):
return Share.user_queryset(self.request.user,include_stats=False).select_related('owner','stats').prefetch_related('tags','user_permissions__user','group_permissions__group')
@detail_route(['GET'])
@throttle_classes([UserRateThrottle])
def directory_size(self, request, *args, **kwargs):
share = self.get_object()
subdir = request.query_params.get('subdir','')
test_path(subdir,share=share)
size = du(os.path.join(share.get_path(),subdir))
return Response({'share':share.id,'subdir':subdir,'size':size})
@action(detail=False, methods=['GET'], permission_classes=[IsAuthenticated])
def export(self, request):
queryset = self.get_queryset()
serializer = self.get_serializer(queryset, many=True)
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="shares_{}.csv"'.format(str(timezone.now())[:19].replace(' ','_'))
writer = csv.writer(response, delimiter='\t')
writer.writerow(['id','name','url','users','groups','bytes','tags','owner','slug','created','updated','secure','read_only','notes','path_exists'])
for r in serializer.data:
row = [r['id'],r['name'],r['url'],', '.join(r['users']),', '.join(r['groups']),r['stats'].get('bytes') if r['stats'] else '',', '.join([t['name'] for t in r['tags']]),r['owner'].get('username'),r['slug'],r['created'],r['updated'],r['secure'],r['read_only'],r['notes'],r['path_exists'] ]
writer.writerow([c.encode('ascii', 'replace') if hasattr(c,'decode') else c for c in row])
return response
class GroupViewSet(viewsets.ReadOnlyModelViewSet):
serializer_class = GroupSerializer
permission_classes = (IsAuthenticated,DjangoModelPermissions,)
filter_fields = {'name':['icontains']}
model = Group
def get_queryset(self):
if self.request.user.is_superuser or self.request.user.is_staff:
return Group.objects.all()
else:
return self.request.user.groups.all()
@detail_route(['POST'],permission_classes=[ManageGroupPermission])
def update_users(self, request, *args, **kwargs):
users = request.data.get('users')
group = self.get_object()
# old_users = GroupSerializer(group).data['users']
# old_user_ids = [u['id'] for u in old_users]
# remove_users = set(old_user_ids) - set(user_ids)
# add_users = set(user_ids) - set(old_user_ids)
group.user_set = [u['id'] for u in users]
#clear permissions
ct = ContentType.objects.get_for_model(Group)
UserObjectPermission.objects.filter(content_type=ct,object_pk=group.id).delete()
#assign permissions
for user in users:
if 'manage_group' in user['permissions']:
user = User.objects.get(id=user['id'])
assign_perm('manage_group', user, group)
return self.retrieve(request,*args,**kwargs)#Response({'status':'success'})
# @detail_route(['POST'])
# def remove_user(self,request,*args,**kwargs):
# # user = request.query_params.get('user')
# # self.get_object().user_set.remove(user)
# return Response({'status':'success'})
class MessageViewSet(viewsets.ReadOnlyModelViewSet):
serializer_class = MessageSerializer
permission_classes = (IsAuthenticated,)
filter_backends = (ActiveMessageFilter,)
model = Message
def get_queryset(self):
return Message.objects.all().order_by('-created')
# return Message.objects.filter(active=True).filter(Q(expires__gte=datetime.datetime.today())|Q(expires=None)).exclude(viewed_by__id=self.request.user.id)
@detail_route(['POST','GET'],permission_classes=[IsAuthenticated])
def dismiss(self, request, pk=None):
message = self.get_object()
message.viewed_by.add(request.user)
message.save()
return Response({'status':'Message dismissed'})
| 47.11399
| 298
| 0.651875
| 4,662
| 0.256351
| 0
| 0
| 13,939
| 0.766469
| 0
| 0
| 3,739
| 0.205598
|
b769a2ddbb714e86c922fd616f0acb57b564206d
| 5,192
|
py
|
Python
|
db_query.py
|
UiOHive/FinseDashboard
|
14184fbc425e87a4b9a13b3a9b31159b181cf357
|
[
"MIT"
] | null | null | null |
db_query.py
|
UiOHive/FinseDashboard
|
14184fbc425e87a4b9a13b3a9b31159b181cf357
|
[
"MIT"
] | null | null | null |
db_query.py
|
UiOHive/FinseDashboard
|
14184fbc425e87a4b9a13b3a9b31159b181cf357
|
[
"MIT"
] | null | null | null |
import datetime
import os, sys
import pprint
import requests
from pandas.io.json import json_normalize
import pandas as pd
URL = 'https://wsn.latice.eu/api/query/v2/'
#URL = 'http://localhost:8000/wsn/api/query/v2/'
TOKEN = os.getenv('WSN_TOKEN')
path = os.getcwd()
def query(
limit=100, # Pagination
fields=None, # Fields to return (all by default)
tags=None, # Tags to return (all by default)
interval=None, # If given will return the average in the interval
debug=False, # Not sent to the API
# Filters
time__gte=None, time__lte=None, # Time is special
**kw):
# Parameters
if time__gte:
time__gte = time__gte.timestamp()
if time__lte:
time__lte = time__lte.timestamp()
params = {
'limit': limit, # Pagination
'time__gte': time__gte, 'time__lte': time__lte, # Time filter
'fields': fields,
'tags': tags,
'interval': interval,
}
# Filter inside json
for key, value in kw.items():
if value is None:
params[key] = None
continue
if type(value) is datetime.datetime:
value = int(value.timestamp())
if isinstance(value, int):
key += ':int'
params[key] = value
# Query
headers = {'Authorization': 'Token %s' % TOKEN}
response = requests.get(URL, headers=headers, params=params)
response.raise_for_status()
json = response.json()
# Debug
if debug:
pprint.pprint(params)
pprint.pprint(json)
print()
return json
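# Illustrative note (an addition): integer filter values (and datetimes, which
# are converted to integer timestamps) are sent with an ":int" suffix on the
# key, so the API can compare numerically inside the JSON payload, e.g.
#     query(serial=408520806)  ->  params["serial:int"] = 408520806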
def get_token():
try:
token = os.environ['WSN_TOKEN']
return token
except KeyError:
print("Please set the environment variable WSN_TOKEN in .bashrc as follow: \n\t export WSN_TOKEN=xxxxxxxxxxxxxxxxx ")
sys.exit(1)
def query_df(
limit=100, # Pagination
fields=None, # Fields to return (all by default)
tags=None, # Tags to return (all by default)
interval=None, # If given will return the average in the interval
debug=False, # Not sent to the API
# Filters
time__gte=None, time__lte=None, # Time is special
**kw):
# Parameters
if time__gte:
time__gte = time__gte.timestamp()
if time__lte:
time__lte = time__lte.timestamp()
params = {
'limit': limit, # Pagination
'time__gte': time__gte, 'time__lte': time__lte, # Time filter
'fields': fields,
'tags': tags,
'interval': interval,
}
# Filter inside json
for key, value in kw.items():
if value is None:
params[key] = None
continue
if type(value) is datetime.datetime:
value = int(value.timestamp())
if isinstance(value, int):
key += ':int'
params[key] = value
# Query
headers = {'Authorization': 'Token %s' % TOKEN}
response = requests.get(URL, headers=headers, params=params)
response.raise_for_status()
json = response.json()
# Debug
if debug:
pprint.pprint(params)
pprint.pprint(json)
print()
df = json_normalize(json['results']) # convert json object to pandas dataframe
try:
df.time = pd.to_datetime(df.time)
    except Exception:
print('WARNING: no timestamp')
return df
def biomet_metadata():
meta = pd.read_csv(path + '/FINSE-stationary_variables_biomet.csv', sep=';')
return meta
if __name__ == '__main__':
# We need an authentication token
TOKEN = os.getenv('WSN_TOKEN')
# Number of elements to return in every query
limit = 100
# Example 1: Get all the fields and tags of a given mote from a given time.
# This is good to explore the data, but bad on performance.
response = query(limit=limit,
serial=0x1F566F057C105487,
time__gte=datetime.datetime(2017, 11, 15),
debug=True,
)
# Example 2: Get the RSSI of an Xbee module identified by its address
print('==============================================')
response = query(limit=limit,
source_addr_long=0x0013A2004105D4B6,
fields=['rssi'],
debug=True,
)
# Example 3: Get the battery and internal temperature from all motes,
# include the serial tag to tell them apart.
# Frames that don't have at least one of the fields we ask for will not be
# included.
print('==============================================')
response = query(limit=limit,
fields=['bat', 'in_temp'],
tags=['serial'],
debug=True,
)
# Example 4: Get the time the frame was received by the Pi
print('==============================================')
response = query(limit=limit,
serial=408520806,
fields=['received'],
debug=True,
)
# Example 5: Get the battery once every hour
response = query(limit=10,
serial=0x1F566F057C105487,
fields=['bat'],
interval=3600,
debug=True,
)
| 28.217391
| 125
| 0.573575
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,817
| 0.349961
|
b76bc1e49faa136f8b812dcedd6dc878d0d8aa05
| 4,716
|
py
|
Python
|
src/olympia/reviews/tests/test_models.py
|
leplatrem/addons-server
|
8b5ebda6f33194aa9fce12c0453574e7f850e6ad
|
[
"BSD-3-Clause"
] | null | null | null |
src/olympia/reviews/tests/test_models.py
|
leplatrem/addons-server
|
8b5ebda6f33194aa9fce12c0453574e7f850e6ad
|
[
"BSD-3-Clause"
] | null | null | null |
src/olympia/reviews/tests/test_models.py
|
leplatrem/addons-server
|
8b5ebda6f33194aa9fce12c0453574e7f850e6ad
|
[
"BSD-3-Clause"
] | null | null | null |
from django.utils import translation
from olympia import amo
from olympia.amo.tests import TestCase, ESTestCase
from olympia.addons.models import Addon
from olympia.reviews import tasks
from olympia.reviews.models import (
check_spam, GroupedRating, Review, ReviewFlag, Spam)
from olympia.users.models import UserProfile
class TestReviewModel(TestCase):
fixtures = ['reviews/test_models']
def test_translations(self):
translation.activate('en-US')
        # There are en-US and de translations. We should get en-US.
r1 = Review.objects.get(id=1)
self.trans_eq(r1.title, 'r1 title en', 'en-US')
# There's only a de translation, so we get that.
r2 = Review.objects.get(id=2)
self.trans_eq(r2.title, 'r2 title de', 'de')
translation.activate('de')
# en and de exist, we get de.
r1 = Review.objects.get(id=1)
self.trans_eq(r1.title, 'r1 title de', 'de')
# There's only a de translation, so we get that.
r2 = Review.objects.get(id=2)
self.trans_eq(r2.title, 'r2 title de', 'de')
def test_soft_delete(self):
assert Review.objects.count() == 2
assert Review.unfiltered.count() == 2
Review.objects.get(id=1).delete()
assert Review.objects.count() == 1
assert Review.unfiltered.count() == 2
Review.objects.filter(id=2).delete()
assert Review.objects.count() == 0
assert Review.unfiltered.count() == 2
def test_filter_for_many_to_many(self):
# Check https://bugzilla.mozilla.org/show_bug.cgi?id=1142035.
review = Review.objects.get(id=1)
addon = review.addon
assert review in addon._reviews.all()
# Delete the review: it shouldn't be listed anymore.
review.update(deleted=True)
addon = Addon.objects.get(pk=addon.pk)
assert review not in addon._reviews.all()
def test_no_filter_for_relations(self):
# Check https://bugzilla.mozilla.org/show_bug.cgi?id=1142035.
review = Review.objects.get(id=1)
flag = ReviewFlag.objects.create(review=review,
flag='review_flag_reason_spam')
assert flag.review == review
# Delete the review: reviewflag.review should still work.
review.update(deleted=True)
flag = ReviewFlag.objects.get(pk=flag.pk)
assert flag.review == review
class TestGroupedRating(TestCase):
fixtures = ['reviews/dev-reply']
grouped_ratings = [(1, 0), (2, 0), (3, 0), (4, 1), (5, 0)]
def test_get_none(self):
assert GroupedRating.get(3, update_none=False) is None
def test_set(self):
assert GroupedRating.get(1865, update_none=False) is None
GroupedRating.set(1865)
assert GroupedRating.get(1865, update_none=False) == (
self.grouped_ratings)
def test_cron(self):
assert GroupedRating.get(1865, update_none=False) is None
tasks.addon_grouped_rating(1865)
assert GroupedRating.get(1865, update_none=False) == (
self.grouped_ratings)
def test_update_none(self):
assert GroupedRating.get(1865, update_none=False) is None
assert GroupedRating.get(1865, update_none=True) == (
self.grouped_ratings)
class TestSpamTest(TestCase):
fixtures = ['reviews/test_models']
def test_create_not_there(self):
Review.objects.all().delete()
assert Review.objects.count() == 0
check_spam(1)
def test_add(self):
assert Spam().add(Review.objects.all()[0], 'numbers')
class TestRefreshTest(ESTestCase):
fixtures = ['base/users']
def setUp(self):
super(TestRefreshTest, self).setUp()
self.addon = Addon.objects.create(type=amo.ADDON_EXTENSION)
self.user = UserProfile.objects.all()[0]
self.refresh()
assert self.get_bayesian_rating() == 0.0
def get_bayesian_rating(self):
q = Addon.search().filter(id=self.addon.id)
return list(q.values_dict('bayesian_rating'))[0]['bayesian_rating'][0]
def test_created(self):
assert self.get_bayesian_rating() == 0.0
Review.objects.create(addon=self.addon, user=self.user, rating=4)
self.refresh()
assert self.get_bayesian_rating() == 4.0
def test_edited(self):
self.test_created()
r = self.addon.reviews.all()[0]
r.rating = 1
r.save()
self.refresh()
assert self.get_bayesian_rating() == 2.5
def test_deleted(self):
self.test_created()
r = self.addon.reviews.all()[0]
r.delete()
self.refresh()
assert self.get_bayesian_rating() == 0.0
| 31.44
| 78
| 0.636556
| 4,378
| 0.928329
| 0
| 0
| 0
| 0
| 0
| 0
| 637
| 0.135072
|
b76c4833b7dafb63e3cda3a938dba6519b8c5061
| 1,544
|
py
|
Python
|
snpy/spline2/__init__.py
|
emirkmo/snpy
|
2a0153c84477ba8a30310d7dbca3d5a8f24de3c6
|
[
"MIT"
] | 6
|
2019-01-14T19:40:45.000Z
|
2021-06-05T12:19:39.000Z
|
snpy/spline2/__init__.py
|
emirkmo/snpy
|
2a0153c84477ba8a30310d7dbca3d5a8f24de3c6
|
[
"MIT"
] | 3
|
2017-04-25T20:06:22.000Z
|
2021-06-09T20:46:41.000Z
|
snpy/spline2/__init__.py
|
emirkmo/snpy
|
2a0153c84477ba8a30310d7dbca3d5a8f24de3c6
|
[
"MIT"
] | 8
|
2017-04-25T19:57:57.000Z
|
2021-11-12T11:54:19.000Z
|
''' Spline2.py: wrapper for B. Thijsse et al.'s hyper-spline routines.
Yet another spline interpolation routine. The problem: given a set of
experimental data with noise, find the spline with the optimal number of
knots.
Solution : They use the usual kind of routines to determine least-squares
splines from a given set of knot points. The problem REALLY
boils down to: how many knots do you use? There are two
extremes: put a knot point on each data point to get an
interpolating spline (which sucks for experimental data with
noise). The other extreme is to have the minimal set of knots
to define a polynomial of order k (e.g., a cubic). This also
sucks. Somewhere between the two extremes is a number of
knots that optimally recovers the information in the data and
smooths out the noise.
spline2 starts with a large number of knots (interpolating
spline) and iteratively removes knots until a figure of merit
reaches some prescribed value. In this case, this figure of
merit is the Durbin-Watson statistic, which measures the auto-
correlation between the residuals of the spline fit.
For more details, see:
* Barend J. Thijsse et al., "A Practical Algorithm for Least-Squares
spline Approximation of Data Containing Noise", Computers in Physics,
vol 12 no. 4 July 1998
* http://structureandchange.3me.tudelft.nl/
'''
from .spline2 import *
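# Illustrative sketch (an addition, not part of the original module): the
# Durbin-Watson figure of merit described above, for fit residuals e_1..e_n:
#     DW = sum_{i>1} (e_i - e_{i-1})^2 / sum_i e_i^2
# Values near 2 indicate uncorrelated residuals (noise-dominated); values near
# 0 indicate strongly autocorrelated residuals (an underfitting spline).
#
#     import numpy as np
#
#     def durbin_watson(residuals):
#         e = np.asarray(residuals, dtype=float)
#         return float(np.sum(np.diff(e) ** 2) / np.sum(e ** 2))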
| 49.806452
| 74
| 0.687824
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,520
| 0.984456
|
b76e06eb55845d82a11e8d62864a8001b125af43
| 703
|
py
|
Python
|
halo_app/infra/cache.py
|
halo-framework/halo-app
|
98e057b2f433d97d903589ac75a6c2544174bac8
|
[
"MIT"
] | null | null | null |
halo_app/infra/cache.py
|
halo-framework/halo-app
|
98e057b2f433d97d903589ac75a6c2544174bac8
|
[
"MIT"
] | null | null | null |
halo_app/infra/cache.py
|
halo-framework/halo-app
|
98e057b2f433d97d903589ac75a6c2544174bac8
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
import elasticache_auto_discovery
from pymemcache.client.hash import HashClient
# elasticache settings
elasticache_config_endpoint = "your-elasticache-cluster-endpoint:port"
nodes = elasticache_auto_discovery.discover(elasticache_config_endpoint)
nodes = map(lambda x: (x[1], int(x[2])), nodes)
memcache_client = HashClient(nodes)
def put(requestId, event):
"""
This function puts into memcache and get from it.
Memcache is hosted using elasticache
"""
# Put the UUID to the cache.
memcache_client.set(requestId, event)
def get(requestId):
# Get item (UUID) from the cache.
item = memcache_client.get(requestId)
return item
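# Illustrative usage sketch (an addition; the request id and payload are
# hypothetical):
#
#     put("req-123", '{"status": "queued"}')
#     event = get("req-123")  # returns the cached payload, or None on a miss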
| 25.107143
| 72
| 0.752489
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 229
| 0.325747
|
b76e3c3a78b500e4bd43645ac8426138d20f7491
| 659
|
py
|
Python
|
uniclass_to_nf_ea_com_source/b_code/migrators/uniclass_raw_to_domain/evolve/evolve_stage_4/domain_table_data_processor/parent_code_column_adder.py
|
boro-alpha/uniclass_to_nf_ea_com
|
753e97467ce53c25bc86341b915489c2eeeb3f49
|
[
"MIT"
] | 2
|
2021-08-09T14:40:04.000Z
|
2022-03-13T22:22:19.000Z
|
uniclass_to_nf_ea_com_source/b_code/migrators/uniclass_raw_to_domain/evolve/evolve_stage_4/domain_table_data_processor/parent_code_column_adder.py
|
boro-alpha/uniclass_to_nf_ea_com
|
753e97467ce53c25bc86341b915489c2eeeb3f49
|
[
"MIT"
] | null | null | null |
uniclass_to_nf_ea_com_source/b_code/migrators/uniclass_raw_to_domain/evolve/evolve_stage_4/domain_table_data_processor/parent_code_column_adder.py
|
boro-alpha/uniclass_to_nf_ea_com
|
753e97467ce53c25bc86341b915489c2eeeb3f49
|
[
"MIT"
] | null | null | null |
import numpy as np
from uniclass_to_nf_ea_com_source.b_code.configurations.common_constants.uniclass_bclearer_constants import PARENT_CODE_COLUMN_NAME, \
UNICLASS2015_OBJECT_TABLE_NAME
def add_parent_code_column_to_uniclass_objects_table(
dictionary_of_dataframes: dict)\
-> dict:
uniclass_2015_object_table = \
dictionary_of_dataframes[
UNICLASS2015_OBJECT_TABLE_NAME]
uniclass_2015_object_table[
PARENT_CODE_COLUMN_NAME] = \
np.NaN
dictionary_of_dataframes[
UNICLASS2015_OBJECT_TABLE_NAME] = \
uniclass_2015_object_table
return \
dictionary_of_dataframes
| 27.458333
| 134
| 0.752656
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
b771162a3330c2472592fd5f42c757bebf6eb7a4
| 12,046
|
py
|
Python
|
ext/ANTsPyNet/antspynet/utilities/mixture_density_utilities.py
|
tsmonteiro/fmri_proc
|
ee740cfa3c3a7ef8e1ee1ebd3b286a66712e0ec1
|
[
"MIT"
] | 2
|
2021-11-16T10:00:33.000Z
|
2021-12-13T02:57:40.000Z
|
ext/ANTsPyNet/antspynet/utilities/mixture_density_utilities.py
|
tsmonteiro/fmri_proc
|
ee740cfa3c3a7ef8e1ee1ebd3b286a66712e0ec1
|
[
"MIT"
] | null | null | null |
ext/ANTsPyNet/antspynet/utilities/mixture_density_utilities.py
|
tsmonteiro/fmri_proc
|
ee740cfa3c3a7ef8e1ee1ebd3b286a66712e0ec1
|
[
"MIT"
] | 1
|
2021-12-13T02:57:27.000Z
|
2021-12-13T02:57:27.000Z
|
import keras.backend as K
from keras.engine import Layer, InputSpec
from keras.layers import Concatenate
from keras import initializers
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
class MixtureDensityLayer(Layer):
"""
Layer for modeling arbitrary functions using neural networks.
Arguments
---------
output_dimension : integer
Dimensionality of the output.
number_of_mixtures : integer
Number of gaussians used.
Returns
-------
Layer
A keras layer
"""
def __init__(self, output_dimension, number_of_mixtures, **kwargs):
if K.backend() != 'tensorflow':
raise ValueError("Tensorflow required as the backend.")
self.output_dimension = output_dimension
self.number_of_mixtures = number_of_mixtures
super(MixtureDensityLayer, self).__init__(**kwargs)
def build(self, input_shape):
input_dimension = input_shape[-1]
units1 = self.output_dimension * self.number_of_mixtures
self.mu_kernel = self.add_weight(name="mu_kernel",
shape = shape(input_dimension, units1),
initializer=initializers.random_normal(),
trainable=True)
self.mu_bias = self.add_weight(name="mu_bias",
shape = shape(units1),
initializer=initializers.zeros(),
trainable=True)
self.sigma_kernel = self.add_weight(name="sigma_kernel",
shape = shape(input_dimension, units1),
initializer=initializers.random_normal(),
trainable=True)
self.sigma_bias = self.add_weight(name="sigma_bias",
shape = shape(units1),
initializer=initializers.zeros(),
trainable=True)
units2 = self.number_of_mixtures
self.pi_kernel = self.add_weight(name="pi_kernel",
shape = shape(input_dimension, units2),
initializer=initializers.random_normal(),
trainable=True)
self.pi_bias = self.add_weight(name="pi_bias",
shape = shape(units2),
initializer=initializers.zeros(),
trainable=True)
def call(self, inputs, mask=None):
# dense layer for mu (mean) of the gaussians
mu_output = K.dot(inputs, self.mu_kernel)
mu_output = K.bias_add(mu_output, self.mu_bias, data_format='channels_last')
# dense layer for sigma (variance) of the gaussians
sigma_output = K.dot(inputs, self.sigma_kernel)
sigma_output = K.bias_add(sigma_output, self.sigma_bias, data_format='channels_last')
# Avoid NaN's by pushing sigma through the following custom activation
sigma_output = K.elu(sigma_output) + 1 + K.epsilon()
# dense layer for pi (amplitude) of the gaussians
pi_output = K.dot( inputs, self.pi_kernel)
pi_output = K.bias_add(pi_output, self.pi_bias, data_format='channels_last')
        output = Concatenate(name="mdn_outputs")([mu_output, sigma_output, pi_output])
return(output)
    def compute_output_shape(self, input_shape):
units = self.number_of_mixtures * (2 * self.output_dimension + 1)
return((input_shape[0], units))
def get_config(self):
config = {"output_dimension": self.output_dimension,
"axis": self.number_of_mixtures}
base_config = super(MixtureDensityLayer, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def get_mixture_density_loss_function(output_dimension, number_of_mixtures):
"""
Returns a loss function for the mixture density.
Arguments
---------
output_dimension : integer
Dimensionality of the output.
number_of_mixtures : integer
Number of gaussians used.
Returns
-------
Function
        A function providing the mixture density loss
"""
def loss_function(y_true, y_pred):
dimension = number_of_mixtures * output_dimension
y_pred = tf.reshape(y_pred, [-1, 2 * dimension + number_of_mixtures],
name='reshape_ypred_loss')
y_true = tf.reshape(y_true, [-1, 2 * dimension + number_of_mixtures],
name='reshape_ytrue_loss')
output_mu, output_sigma, output_pi = tf.split(y_pred, axis=-1, name='mdn_coef_split',
num_or_size_splits=[dimension, dimension, number_of_mixtures])
# Construct the mixture models
tfd = tfp.distributions
categorical_distribution = tfd.Categorical(logits=output_pi)
component_splits = [output_dimension] * number_of_mixtures
mu = tf.split(output_mu, num_or_size_splits=component_splits, axis=1)
sigma = tf.split(output_sigma, num_or_size_splits=component_splits, axis=1)
components = []
for i in range(len(mu)):
components.append(tfd.MultivariateNormalDiag(loc = mu[i], scale_diag=sigma[i]))
mixture = tfd.Mixture(cat=categorical_distribution, components=components)
loss = mixture.log_prob(y_true)
loss = tf.negative(loss)
loss = tf.reduce_mean(loss)
return(loss)
with tf.name_scope("MixtureDensityNetwork"):
return(loss_function)
def get_mixture_density_sampling_function(output_dimension, number_of_mixtures):
"""
Returns a sampling function for the mixture density.
Arguments
---------
output_dimension : integer
Dimensionality of the output.
number_of_mixtures : integer
Number of gaussians used.
Returns
-------
Function
        A function that draws a sample from the mixture density
"""
def sampling_function(y_pred):
dimension = number_of_mixtures * output_dimension
y_pred = tf.reshape(y_pred, [-1, 2 * dimension + number_of_mixtures],
name='reshape_ypred')
output_mu, output_sigma, output_pi = tf.split(y_pred, axis=-1, name='mdn_coef_split',
num_or_size_splits=[dimension, dimension, number_of_mixtures])
# Construct the mixture models
tfd = tfp.distributions
categorical_distribution = tfd.Categorical(logits=output_pi)
component_splits = [output_dimension] * number_of_mixtures
mu = tf.split(output_mu, num_or_size_splits=component_splits, axis=1)
sigma = tf.split(output_sigma, num_or_size_splits=component_splits, axis=1)
components = []
for i in range(len(mu)):
components.append(tfd.MultivariateNormalDiag(loc = mu[i], scale_diag=sigma[i]))
mixture = tfd.Mixture(cat=categorical_distribution, components=components)
sample = mixture.sample()
return(sample)
with tf.name_scope("MixtureDensityNetwork"):
return(sampling_function)
def get_mixture_density_mse_function(output_dimension, number_of_mixtures):
"""
Returns a mse function for the mixture density.
Arguments
---------
output_dimension : integer
Dimensionality of the output.
number_of_mixtures : integer
Number of gaussians used.
Returns
-------
Function
A function providing the mean square error accuracy
"""
def mse_accuracy_function(y_true, y_pred):
dimension = number_of_mixtures * output_dimension
y_pred = tf.reshape(y_pred, [-1, 2 * dimension + number_of_mixtures],
name='reshape_ypred_mse')
y_true = tf.reshape(y_true, [-1, output_dimension],
name='reshape_ytrue_mse')
output_mu, output_sigma, output_pi = tf.split(y_pred, axis=-1, name='mdn_coef_split',
num_or_size_splits=[dimension, dimension, number_of_mixtures])
# Construct the mixture models
tfd = tfp.distributions
categorical_distribution = tfd.Categorical(logits=output_pi)
component_splits = [output_dimension] * number_of_mixtures
mu = tf.split(output_mu, num_or_size_splits=component_splits, axis=1)
sigma = tf.split(output_sigma, num_or_size_splits=component_splits, axis=1)
components = []
for i in range(len(mu)):
components.append(tfd.MultivariateNormalDiag(loc = mu[i], scale_diag=sigma[i]))
mixture = tfd.Mixture(cat=categorical_distribution, components=components)
sample = mixture.sample()
mse = tf.reduce_mean(tf.square(sample-y_true), axis=-1)
return(mse)
with tf.name_scope("MixtureDensityNetwork"):
return(mse_accuracy_function)
def split_mixture_parameters(parameters, output_dimension, number_of_mixtures):
"""
Splits the mixture parameters.
Arguments
---------
parameters : tuple
Parameter to split
output_dimension : integer
Dimensionality of the output.
number_of_mixtures : integer
Number of gaussians used.
Returns
-------
List of arrays
Separate mixture parameters
"""
dimension = number_of_mixtures * output_dimension
mu = parameters[:dimension]
sigma = parameters[dimension:(2 * dimension)]
pi_logits = parameters[-number_of_mixtures:]
return([mu, sigma, pi_logits])
def mixture_density_software_max(logits, temperature=1.0):
"""
Softmax function for mixture density with temperature adjustment.
Arguments
---------
logits : list or numpy array
input
temperature :
        The temperature used to adjust the distribution (default 1.0)
Returns
-------
Scalar
Softmax loss value.
"""
e = np.array(logits) / temperature
e -= np.max(e)
e = np.exp(e)
distribution = e / np.sum(e)
return(distribution)
def sample_from_categorical_distribution(distribution):
"""
    Draw a single sample from a categorical distribution.
Arguments
---------
distribution :
input categorical distribution from which to sample.
Returns
-------
Scalar
A single sample.
"""
r = np.random.rand(1)
accumulate = 0
for i in range(len(distribution)):
accumulate += distribution[i]
if accumulate >= r:
return(i)
tf.logging.info('Error: sampling categorical model.')
return(-1)
def sample_from_output(parameters, output_dimension, number_of_mixtures,
temperature=1.0, sigma_temperature=1.0):
"""
    Draw a single sample from the output parameters of the mixture density.
Arguments
---------
output_dimension : integer
Dimensionality of the output.
number_of_mixtures : integer
Number of gaussians used.
temperature :
        The temperature used to adjust the mixture weights (default 1.0)
    sigma_temperature :
        The temperature used to scale the selected sigma (default 1.0)
Returns
-------
Scalar
A single sample.
"""
mu, sigma, pi = split_mixture_parameters(parameters, output_dimension, number_of_mixtures)
pi_softmax = mixture_density_software_max(pi, temperature=temperature)
m = sample_from_categorical_distribution(pi_softmax)
mu_vector = mu[m * output_dimension:(m + 1) * output_dimension]
sigma_vector = sigma[m * output_dimension:(m + 1) * output_dimension] * sigma_temperature
covariance_matrix = np.identity(output_dimension) * sigma_vector
sample = np.random.multivariate_normal(mu_vector, covariance_matrix, 1)
return(sample)
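# Illustrative usage sketch (an addition, not part of the original module).
# The network shape and input dimension are hypothetical; only the layer and
# helpers defined above are assumed.
#
#     from keras.models import Sequential
#     from keras.layers import Dense
#
#     output_dimension, number_of_mixtures = 2, 3
#     model = Sequential()
#     model.add(Dense(64, activation='relu', input_shape=(10,)))
#     model.add(MixtureDensityLayer(output_dimension, number_of_mixtures))
#     model.compile(optimizer='adam',
#                   loss=get_mixture_density_loss_function(output_dimension,
#                                                          number_of_mixtures))
#     # after model.fit(...), draw from the predicted mixture for one input:
#     # parameters = model.predict(x)[0]
#     # sample = sample_from_output(parameters, output_dimension,
#     #                             number_of_mixtures)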
| 30.651399
| 94
| 0.628341
| 3,803
| 0.315706
| 0
| 0
| 0
| 0
| 0
| 0
| 3,438
| 0.285406
|
b7712de7485aecf34c54bcaa9134a8c65cfc7aa6
| 1,481
|
py
|
Python
|
test.py
|
pnawalramka/cowin
|
f8ce6286d38fc0616a385ac0a971c8947f71348c
|
[
"MIT"
] | null | null | null |
test.py
|
pnawalramka/cowin
|
f8ce6286d38fc0616a385ac0a971c8947f71348c
|
[
"MIT"
] | null | null | null |
test.py
|
pnawalramka/cowin
|
f8ce6286d38fc0616a385ac0a971c8947f71348c
|
[
"MIT"
] | null | null | null |
import json
from unittest import mock, TestCase
import check_availability
json_data = \
"""
{
"centers": [
{
"center_id": 1234,
"name": "District General Hostpital",
"name_l": "",
"address": "45 M G Road",
"address_l": "",
"state_name": "Maharashtra",
"state_name_l": "",
"district_name": "Satara",
"district_name_l": "",
"block_name": "Jaoli",
"block_name_l": "",
"pincode": "413608",
"lat": 28.7,
"long": 77.1,
"from": "09:00:00",
"to": "18:00:00",
"fee_type": "Free",
"vaccine_fees": [
{
"vaccine": "COVISHIELD",
"fee": "250"
}
],
"sessions": [
{
"session_id": "3fa85f64-5717-4562-b3fc-2c963f66afa6",
"date": "31-05-2021",
"available_capacity": 50,
"available_capacity_dose1": 25,
"available_capacity_dose2": 25,
"min_age_limit": 18,
"vaccine": "COVISHIELD",
"slots": [
"FORENOON",
"AFTERNOON"
]
}
]
}
]
}
"""
def mock_get(*args, **kwargs):
mock_res = mock.Mock()
mock_res.json.return_value = json.loads(json_data)
return mock_res
class TestCheck(TestCase):
@mock.patch('requests.get', side_effect=mock_get)
def test_check(self, mock_get):
got = check_availability.check('123', 18)
self.assertEqual(1, len(got))
self.assertEqual(1234, got[0]['center_id'])
| 21.779412
| 63
| 0.532073
| 233
| 0.157326
| 0
| 0
| 205
| 0.13842
| 0
| 0
| 1,058
| 0.714382
|
b771d6a65389f019399e4105e7ca9559208f9b9c
| 271
|
py
|
Python
|
pycon_project/apps/proposals/admin.py
|
mitsuhiko/pycon
|
73688a82080539a1c0d575cf3248f55fefb6b9ba
|
[
"BSD-3-Clause"
] | 1
|
2017-09-04T08:19:08.000Z
|
2017-09-04T08:19:08.000Z
|
pycon_project/apps/proposals/admin.py
|
mitsuhiko/pycon
|
73688a82080539a1c0d575cf3248f55fefb6b9ba
|
[
"BSD-3-Clause"
] | null | null | null |
pycon_project/apps/proposals/admin.py
|
mitsuhiko/pycon
|
73688a82080539a1c0d575cf3248f55fefb6b9ba
|
[
"BSD-3-Clause"
] | null | null | null |
from django.contrib import admin
from proposals.models import Proposal, ProposalSessionType
admin.site.register(ProposalSessionType)
admin.site.register(Proposal,
list_display = ["title", "session_type", "audience_level", "cancelled", "extreme_pycon", "invited"]
)
| 30.111111
| 103
| 0.785978
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 72
| 0.265683
|
b7720c668afa28b45e212a7ec9a950d7f27725bb
| 12,805
|
py
|
Python
|
cemc/mcmc/cov_reaction_crd.py
|
davidkleiven/WangLandau
|
0b253dd98033c53560fe95c76f5e38257834bdf6
|
[
"MIT"
] | 2
|
2022-02-10T00:38:53.000Z
|
2022-03-17T22:08:40.000Z
|
cemc/mcmc/cov_reaction_crd.py
|
davidkleiven/CEMC
|
0b253dd98033c53560fe95c76f5e38257834bdf6
|
[
"MIT"
] | 30
|
2018-05-21T14:52:00.000Z
|
2021-02-24T07:45:09.000Z
|
cemc/mcmc/cov_reaction_crd.py
|
davidkleiven/WangLandau
|
0b253dd98033c53560fe95c76f5e38257834bdf6
|
[
"MIT"
] | 3
|
2018-10-09T14:03:32.000Z
|
2022-02-09T05:36:05.000Z
|
import sys
from cemc.mcmc import ReactionCrdInitializer, ReactionCrdRangeConstraint
import numpy as np
from itertools import product
import time
from numpy.linalg import inv
class CouldNotFindValidStateError(Exception):
pass
class CovarianceCrdInitializer(ReactionCrdInitializer):
"""Initializer for various version of principal moment of covariance matrix.
    :param FixedNucleusMC fixed_nucl_mc: Monte Carlo object
:param str matrix_element: Matrix element
:param list cluster_elements: Elements in the clusters
:param int num_matrix_atoms_surface: Number of neighboring matrix atoms
        required for a cluster atom to be considered to be on the
surface
:param str traj_file: Trajectory file when the system is evolved towards
a target value for the reaction coordinate
:param str traj_file_clst: Trajectory file containing only the cluster
:param int output_every: Interval in seconds for how often status
messages should be printed
"""
def __init__(self, fixed_nucl_mc=None, matrix_element=None,
cluster_elements=[], num_matrix_atoms_surface=1,
traj_file="full_system_insertia.traj",
traj_file_clst="clusters_covl.traj",
output_every=10, formula="I1/I3"):
from cemc.mcmc import CovarianceMatrixObserver
if matrix_element in cluster_elements:
raise ValueError("CovarianceCrdInitializer works only when "
"the matrix element is not present in the "
"clustering element!")
allowed_types = ["I1/I3", "2*I1/(I2+I3)", "(I1+I2)/(2*I3)"]
if formula not in allowed_types:
raise ValueError("formula has to be one of {}"
"".format(allowed_types))
self.formula = formula
self.matrix_element = matrix_element
self.cluster_elements = cluster_elements
self.fixed_nucl_mc = fixed_nucl_mc
self.num_matrix_atoms_surface = num_matrix_atoms_surface
self.output_every = output_every
self.cov_obs = CovarianceMatrixObserver(atoms=fixed_nucl_mc.atoms, cluster_elements=cluster_elements)
# Attach the covariance matrix observer to the
# fixed nucleation sampler
self.fixed_nucl_mc.attach(self.cov_obs)
self.traj_file = traj_file
self.traj_file_clst = traj_file_clst
def covariance_matrix(self, atoms, system_changes):
"""Calculate the covariance matrix of the cluster.
:return: Covariance matrix
:rtype: Numpy 3x3 matrix
"""
if system_changes:
self.cov_obs(system_changes)
elif atoms is not None:
# Perform a new calculation from scratch
self.cov_obs.set_atoms(atoms)
cov = self.cov_obs.cov_matrix
        # This class should not alter the covariance matrix
# so we undo the changes
if system_changes:
self.cov_obs.undo_last()
return cov
def principal_variance(self, atoms, system_changes):
"""Calculate the covariance of the atoms in cluster elements.
:return: Principal variances
:rtype: numpy 1D array of length 3
"""
eigv = np.linalg.eigvals(self.covariance_matrix(atoms, system_changes))
return eigv
@property
def indices_in_cluster(self):
"""Find the indices of the atoms belonding to the cluster.
:return: Indices of the atoms in the cluster
:rtype: list of int
"""
include = []
for symb in self.cluster_elements:
include += self.fixed_nucl_mc.atoms_tracker.tracker[symb]
return include
def normalized_principal_variance(self, atoms, system_changes):
"""Principal covariance normalized by the largest component.
:return: Normalized principal variance
:rtype: 1D numpy array of length 3
"""
princ_var = self.principal_variance(atoms, system_changes)
return princ_var / np.max(princ_var)
@property
def dist_all_to_all(self):
"""Get distance between all atoms.
        :return: All distances between atoms in the cluster
:rtype: list of numpy 1D arrays
"""
indx = self.indices_in_cluster
cluster = self.fixed_nucl_mc.atoms[indx]
all_distances = []
for indx in range(len(cluster)):
all_indx = list(range(len(cluster)))
del all_indx[indx]
dists = cluster.get_distances(indx, all_indx, mic=True)
all_distances.append(dists)
return all_distances
@property
def dist_all_to_all_flattened(self):
"""Get a flattened list of all distances.
:return: Flattened distance list
:rtype: list of float
"""
dists = self.dist_all_to_all
flat_list = []
for sublist in dists:
flat_list += list(sublist)
return flat_list
def get(self, atoms, system_changes=[]):
"""Get the covariance reaction coordinate.
:param Atoms atoms: Not used. Using the atoms object of fixed_nucl_mc.
:return: The reaction coordinate
:rtype: float
"""
princ = self.principal_variance(atoms, system_changes)
princ = np.sort(princ)
# Make sure they are sorted in the correct order
assert princ[0] <= princ[2]
if self.formula == "I1/I3":
return 1.0 - np.min(princ)/np.max(princ)
elif self.formula == "2*I1/(I2+I3)":
return 1.0 - 2.0 * princ[0]/(princ[1] + princ[2])
elif self.formula == "(I1+I2)/(2*I3)":
return 1.0 - (princ[0] + princ[1])/(2.0*princ[2])
else:
raise ValueError("Unknown formula {}".format(self.formula))
@property
def surface_atoms(self):
"""Return a list of atoms on a surface.
:return: Indices of the atoms on the surface
:rtype: list of int
"""
indx = np.array(self.indices_in_cluster)
neighbors = self.fixed_nucl_mc.network_clust_indx
num_matrix_atoms = np.zeros(len(indx))
for j, i in enumerate(indx):
for t in neighbors:
tr_indx = self.fixed_nucl_mc.get_translated_indx(i, t)
symb = self.fixed_nucl_mc.atoms[tr_indx].symbol
if symb == self.matrix_element:
num_matrix_atoms[j] += 1
return indx[num_matrix_atoms >= self.num_matrix_atoms_surface]
def log(self, msg):
print(msg)
def set(self, atoms, value):
"""Create an atoms object with the correct reaction coordinate.
        :param Atoms atom: Atoms object (not used, using the one attached
            to the MC object). Argument only included because the parent class
            has it.
        :param float value: Target value for the reaction coordinate
"""
from random import choice, shuffle
# Make sure that the observer is initialized correctly
self.cov_obs.init_com_and_covariance()
self.fixed_nucl_mc.network([])
max_attempts = 1000 * len(self.fixed_nucl_mc.atoms)
attempt = 0
neighbors = self.fixed_nucl_mc.network_clust_indx
atoms = self.fixed_nucl_mc.atoms
calc = atoms.get_calculator()
current_value = self.get(atoms)
current_diff = abs(value - current_value)
        should_increase_value = current_value < value
        should_decrease_value = not should_increase_value
mc = self.fixed_nucl_mc
output_every = 15
now = time.time()
while attempt < max_attempts:
if self.fixed_nucl_mc.network.num_root_nodes() > 1:
raise RuntimeError("For some unknown reason there are "
"more than one cluster!")
attempt += 1
surf_atoms = self.surface_atoms
rand_surf_atom = choice(surf_atoms)
rand_surf_atom2 = choice(surf_atoms)
shuffle(neighbors)
found_swap_candidate = False
for indx in neighbors:
t_indx = mc.get_translated_indx(rand_surf_atom2, indx)
symb = mc.atoms[t_indx].symbol
if symb == self.matrix_element:
old_symb = mc.atoms[rand_surf_atom].symbol
ch1 = (rand_surf_atom, old_symb, symb)
ch2 = (t_indx, symb, old_symb)
system_changes = [ch1, ch2]
if self.fixed_nucl_mc.network.move_creates_new_cluster(system_changes):
continue
assert self.fixed_nucl_mc.network.num_root_nodes() == 1
if mc._no_constraint_violations(system_changes):
calc.calculate(atoms, ["energy"], system_changes)
found_swap_candidate = True
break
if not found_swap_candidate:
continue
            # self.get() bases its calculation on the atom tracker
new_value = self.get(atoms, system_changes=system_changes)
new_diff = abs(new_value - value)
if time.time() - now > output_every:
print("Current value: {} Target value: {}"
"".format(new_value, value))
sys.stdout.flush()
now = time.time()
if new_diff < current_diff:
# The candidate trial moves brings the system closer to the
# target value, so we accept this move
current_diff = new_diff
# We need to update the covariance observer
self.cov_obs(system_changes)
# Update the network
assert self.fixed_nucl_mc.network.num_root_nodes() == 1
self.fixed_nucl_mc.network(system_changes)
assert self.fixed_nucl_mc.network.num_root_nodes() == 1
# Update the symbol tracker
self.fixed_nucl_mc._update_tracker(system_changes)
calc.clear_history()
else:
calc.undo_changes()
assert self.fixed_nucl_mc.network.num_root_nodes() == 1
if should_increase_value and new_value > value:
break
            elif should_decrease_value and new_value < value:
break
if attempt == max_attempts:
raise CouldNotFindValidStateError("Did not manage to find a state "
"with reaction coordinate "
"{}!".format(value))
class CovarianceRangeConstraint(ReactionCrdRangeConstraint):
"""Constraint to ensure that the system stays without its bounds.
:param FixedNucleusMC fixed_nuc_mc: Monte Carlo object
:param list range: Upper and lower bound of the reaction coordinate
:param CovarianceCrdInitializer cov_init: Initializer
:param bool verbose: If True print messages every 10 sec
if the constraint is violated
"""
def __init__(self, fixed_nuc_mc=None, range=[0.0, 1.0], cov_init=None,
verbose=False):
super(CovarianceRangeConstraint, self).__init__()
self.update_range(range)
self.mc = fixed_nuc_mc
self._cov_init = cov_init
self.last_print = time.time()
self.verbose = verbose
def get_new_value(self, system_changes):
"""Get new value for reaction coordinate.
:param list system_changes: List with the proposed changes
        :return: Reaction coordinate after the change
:rtype: float
"""
# Get the new value of the observer
new_val = self._cov_init.get(None, system_changes=system_changes)
return new_val
def __call__(self, system_changes):
"""Check the system is in a valid state after the changes.
:param list system_changes: Proposed changes
:return: True/False, if True the system is still within the bounds
:rtype: bool
"""
new_val = self.get_new_value(system_changes)
ok = (new_val >= self.range[0] and new_val < self.range[1])
if not ok and self.verbose:
# The evaluation of this constraint can be time consuming
# so let the user know at regular intervals
if time.time() - self.last_print > 10:
print("Move violates constraint")
self.last_print = time.time()
return ok
| 38.338323
| 109
| 0.617415
| 12,617
| 0.985318
| 0
| 0
| 1,970
| 0.153846
| 0
| 0
| 4,385
| 0.342444
|
b77285d78a57f47f11411ca9d0f97da9f2e1f31c
| 3,810
|
py
|
Python
|
mysql/main.py
|
migachevalexey/BigQuery-integrations
|
d44546fbd3ff116e35ef90800ade2399e7266d9e
|
[
"MIT"
] | null | null | null |
mysql/main.py
|
migachevalexey/BigQuery-integrations
|
d44546fbd3ff116e35ef90800ade2399e7266d9e
|
[
"MIT"
] | null | null | null |
mysql/main.py
|
migachevalexey/BigQuery-integrations
|
d44546fbd3ff116e35ef90800ade2399e7266d9e
|
[
"MIT"
] | null | null | null |
from google.cloud import bigquery
from mysql.connector import connect
import json
import os
# writeable part of the filesystem for Cloud Functions instance
gc_write_dir = "/tmp"
def get_file_mysql(mysql_configuration):
"""
Querying data using Connector/Python via *host* MySQL server.
The function return the full path to the file that has been downloaded.
"""
# construct MySQLConnection object and query table on a server
try:
cnx = connect(user = mysql_configuration["user"], password = mysql_configuration["psswd"], host = mysql_configuration["host"],
database = mysql_configuration["database"], port = mysql_configuration["port"] )
cursor = cnx.cursor(dictionary = True)
cursor.execute(mysql_configuration["query"])
results = cursor.fetchall()
file_name = "mysql.txt"
with open(file_name, "w") as output_file:
for row in results:
output_file.write(json.dumps(row) + "\n")
file_location = gc_write_dir + "/" + file_name
print("Query <" + mysql_configuration["query"] + "> has completed successfully.")
finally:
try:
cursor.close()
cnx.close()
except:
print("Connection has not been established.")
return file_location
def give_file_gbq(path_to_file, bq_configuration):
"""
Download file from *path_to_file* to BigQuery table using *bq_configuration* settings.
"""
# construct Client object with the path to the table in which data will be stored
client = bigquery.Client(project = bq_configuration["project_id"])
dataset_ref = client.dataset(bq_configuration["dataset_id"])
table_ref = dataset_ref.table(bq_configuration["table_id"])
# determine uploading options
job_config = bigquery.LoadJobConfig()
job_config.source_format = "NEWLINE_DELIMITED_JSON"
job_config.write_disposition = bq_configuration["write_disposition"]
job_config.autodetect = True
# upload the file to BigQuery table
with open(path_to_file, "rb") as source_file:
job = client.load_table_from_file(source_file, table_ref, location = bq_configuration["location"], job_config = job_config)
job.result()
print("The Job " + job.job_id + " in status " + job.state + " for table " + bq_configuration["project_id"] + "." + bq_configuration["dataset_id"] + "." + bq_configuration["table_id"] + ".")
def mysql(request):
"""
Function to execute.
"""
try:
# get POST data from Flask.request object
request_json = request.get_json()
mysql_configuration = request_json["mysql"]
bq_configuration = request_json["bq"]
if not mysql_configuration.get("query"):
mysql_configuration["query"] = "SELECT * FROM " + mysql_configuration["table_id"]
if not mysql_configuration.get("port"):
mysql_configuration["port"] = 3306
if not bq_configuration.get("location"):
bq_configuration["location"] = "US"
bq_configuration["write_disposition"] = "WRITE_TRUNCATE"
except Exception as error:
print("An error occured with POST request data.")
print(str(error))
raise SystemExit
# go to writable directory
os.chdir(gc_write_dir)
# get the file from MySQL server
try:
mysql_file = get_file_mysql(mysql_configuration)
except Exception as error:
print("An error occured trying to get file from MySQL server.")
print(str(error))
raise SystemExit
# upload the file to BigQuery
try:
give_file_gbq(mysql_file, bq_configuration)
except Exception as error:
print("An error occured trying to upload file to Google BigQuery.")
print(str(error))
| 39.6875
| 193
| 0.664829
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,309
| 0.34357
|
b7728a4f2303e9918fa06fb3ec419829f1f73d23
| 2,413
|
py
|
Python
|
arthur.carvalho/snakepro/game.py
|
LUDUSLab/stem-games
|
347afa8b1511d76f8070fa69f27a49b57e551376
|
[
"MIT"
] | 2
|
2021-01-24T01:04:34.000Z
|
2021-05-06T16:25:53.000Z
|
arthur.carvalho/snakepro/game.py
|
LUDUSLab/stem-games
|
347afa8b1511d76f8070fa69f27a49b57e551376
|
[
"MIT"
] | null | null | null |
arthur.carvalho/snakepro/game.py
|
LUDUSLab/stem-games
|
347afa8b1511d76f8070fa69f27a49b57e551376
|
[
"MIT"
] | 3
|
2021-01-26T21:35:43.000Z
|
2021-05-06T16:06:47.000Z
|
from config import *
from fruit import *
from snakebody import *
from wall import *
def scoring(sp, ap):
global score
background_score = pygame.Surface(rectangle)
background_score.fill(color_gray)
score_text = score_font.render(f'Score: {score}', True, color_black, color_gray)
screen.blit(background_score, (0, 0))
screen.blit(score_text, (0, 0))
if sp == ap:
score += 1
def game_over():
global play_sound
score_text = score_font.render(f'Score: {score}', True, color_black, color_73ED73)
screen.fill(color_73ED73)
screen.blit(game_over_text, (130, 200))
screen.blit(score_text, (280, 300))
if play_sound == 0:
game_over_sound.play()
play_sound += 1
pygame.display.update()
def main_loop():
global snake_direction
while game_loop:
game_clock.tick(15)
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
if event.type == pygame.KEYDOWN:
if snake_direction != RIGHT:
if event.key == pygame.K_LEFT:
snake_direction = LEFT
if snake_direction != DOWN:
if event.key == pygame.K_UP:
snake_direction = UP
if snake_direction != LEFT:
if event.key == pygame.K_RIGHT:
snake_direction = RIGHT
if snake_direction != UP:
if event.key == pygame.K_DOWN:
snake_direction = DOWN
if snake_position[0][0] < 32 or snake_position[0][1] < 64 or snake_position[0][0] > 736 or \
snake_position[0][1] > 576 or \
snake_position[0] in [(128, 160), (384, 384), (640, 160), (128, 512), (640, 512), (384, 256)]:
game_over()
else:
snake_move(snake_direction)
screen.fill(color_73ED73)
wall_draw()
snake_draw()
fruit_draw()
scoring(snake_position[0], fruit_pos)
pygame.display.update()
# score
score_font = pygame.font.Font('assets/PressStart2P.ttf', 30)
score = 0
# game over
game_over_font = pygame.font.Font('assets/PressStart2P.ttf', 60)
game_over_text = game_over_font.render('Game Over', True, color_black, color_73ED73)
play_sound = 0
snake_direction = 0
| 25.135417
| 110
| 0.574803
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 113
| 0.04683
|
b7728d4f3bcf2508223268aec9d55c9c7919b6a4
| 821
|
py
|
Python
|
preprocessing_pipeline/so/util/log.py
|
sotorrent/preprocessing-pipeline
|
d407a56760fa5ab975dc2043c89f478bd7c89e99
|
[
"Apache-2.0"
] | null | null | null |
preprocessing_pipeline/so/util/log.py
|
sotorrent/preprocessing-pipeline
|
d407a56760fa5ab975dc2043c89f478bd7c89e99
|
[
"Apache-2.0"
] | null | null | null |
preprocessing_pipeline/so/util/log.py
|
sotorrent/preprocessing-pipeline
|
d407a56760fa5ab975dc2043c89f478bd7c89e99
|
[
"Apache-2.0"
] | 1
|
2021-09-22T16:07:18.000Z
|
2021-09-22T16:07:18.000Z
|
import logging
from preprocessing_pipeline.so.util.config import LOG_LEVEL
def initialize_logger(logger_name):
"""
Configure a named logger (see https://stackoverflow.com/a/43794480).
"""
# create logger for module
module_logger = logging.getLogger(logger_name)
# set lowest log level the logger will handle (but not necessarily output)
module_logger.setLevel(LOG_LEVEL)
# disable propagation to root logger
module_logger.propagate = False
log_formatter = logging.Formatter(fmt='%(asctime)s [%(levelname)s] [%(name)s]: %(message)s')
# write log messages to console
console_handler = logging.StreamHandler()
console_handler.setFormatter(log_formatter)
console_handler.setLevel(LOG_LEVEL)
module_logger.addHandler(console_handler)
return module_logger
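# Illustrative usage sketch (an addition; the logger name and message are
# hypothetical):
#
#     logger = initialize_logger(__name__)
#     logger.info("pipeline step finished")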
| 30.407407
| 96
| 0.742996
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 304
| 0.37028
|
b7734741f7d6376de18287d5b5965809361df47f
| 995
|
py
|
Python
|
utilityFunction/CommandFunc.py
|
The-Fragment/FragmentFembot
|
bca0027b423753eb162590e8fd440a2c1e65d133
|
[
"MIT"
] | null | null | null |
utilityFunction/CommandFunc.py
|
The-Fragment/FragmentFembot
|
bca0027b423753eb162590e8fd440a2c1e65d133
|
[
"MIT"
] | 3
|
2021-03-23T04:58:47.000Z
|
2021-04-02T02:40:54.000Z
|
utilityFunction/CommandFunc.py
|
The-Fragment/FragmentFembot
|
bca0027b423753eb162590e8fd440a2c1e65d133
|
[
"MIT"
] | null | null | null |
import random
# Ex. takes in 2d20 and outputs the string Rolling 2 d20
def roll_str(rolls):
numDice = rolls.split('d')[0]
diceVal = rolls.split('d')[1]
if numDice == '':
numDice = int(1)
return "Rolling %s d%s" % (numDice, diceVal)
# Ex. takes in 2d20 and outputs resultString = 11, 19 results = 30 numDice = 2
def roll(rolls):
results = 0
resultString = ''
try:
numDice = rolls.split('d')[0]
except Exception as e:
print(e)
return "Use proper format!"
rolls, limit = map(str, rolls.split('d'))
if rolls == '':
rolls = int(1)
rolls = int(rolls)
limit = int(limit)
for r in range(rolls):
number = random.randint(1, limit)
results = results + number
if resultString == '':
resultString += str(number)
else:
resultString += ', ' + str(number)
# Returns 3 variables, make sure to store in 3 variables
return resultString, results, numDice
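# --- Usage sketch (editor's addition; the dice strings are illustrative) ---
# A minimal sketch of the "NdM" convention the two helpers above share:
if __name__ == '__main__':
    print(roll_str('2d20'))                       # -> "Rolling 2 d20"
    result_string, total, num_dice = roll('2d20')
    print(result_string, total, num_dice)         # e.g. "11, 19" 30 "2"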
| 27.638889
| 78
| 0.582915
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 250
| 0.251256
|
b773ef3748302d9fb0e80bd06beb0fda6a677835
| 12,339
|
py
|
Python
|
src/reviews/tests/test_forms.py
|
Talengi/phase
|
60ff6f37778971ae356c5b2b20e0d174a8288bfe
|
[
"MIT"
] | 8
|
2016-01-29T11:53:40.000Z
|
2020-03-02T22:42:02.000Z
|
src/reviews/tests/test_forms.py
|
Talengi/phase
|
60ff6f37778971ae356c5b2b20e0d174a8288bfe
|
[
"MIT"
] | 289
|
2015-03-23T07:42:52.000Z
|
2022-03-11T23:26:10.000Z
|
src/reviews/tests/test_forms.py
|
Talengi/phase
|
60ff6f37778971ae356c5b2b20e0d174a8288bfe
|
[
"MIT"
] | 7
|
2015-12-08T09:03:20.000Z
|
2020-05-11T15:36:51.000Z
|
# -*- coding: utf-8 -*-
import datetime
from django.test import TestCase
from django.contrib.contenttypes.models import ContentType
from accounts.factories import UserFactory
from categories.factories import CategoryFactory
from documents.factories import DocumentFactory
from default_documents.models import ContractorDeliverable
from default_documents.forms import ContractorDeliverableRevisionForm
from default_documents.factories import (ContractorDeliverableFactory,
ContractorDeliverableRevisionFactory)
from reviews.models import Review
class BaseReviewFormMixinTests(TestCase):
def setUp(self):
Model = ContentType.objects.get_for_model(ContractorDeliverable)
self.category = CategoryFactory(category_template__metadata_model=Model)
self.user = UserFactory(
email='testadmin@phase.fr',
password='pass',
is_superuser=True,
category=self.category)
self.user2 = UserFactory(
email='user2@phase.fr',
password='pass',
is_superuser=True,
category=self.category)
self.user3 = UserFactory(
email='user3@phase.fr',
password='pass',
is_superuser=True,
category=self.category)
self.data = {
'docclass': 3,
'purpose_of_issue': 'FR',
'created_on': '2015-01-01',
'received_date': '2015-01-01'
}
class ReviewFormMixinTest(BaseReviewFormMixinTests):
def test_review_form_is_valid(self):
form = ContractorDeliverableRevisionForm(self.data, category=self.category)
self.assertTrue(form.is_valid())
def test_user_is_both_leader_and_reviewer(self):
"""A single user cannot appear twice in the same distribution list."""
self.data.update({
'leader': self.user.id,
'approver': self.user2.id,
'reviewers': str(self.user.id),
})
form = ContractorDeliverableRevisionForm(self.data, category=self.category)
self.assertFalse(form.is_valid())
def test_user_is_both_approver_and_reviewer(self):
"""A single user cannot appear twice in the same distribution list."""
self.data.update({
'leader': self.user2.id,
'approver': self.user.id,
'reviewers': str(self.user.id),
})
form = ContractorDeliverableRevisionForm(self.data, category=self.category)
self.assertFalse(form.is_valid())
def test_user_is_both_leader_and_approver(self):
"""A single user cannot appear twice in the same distribution list."""
self.data.update({
'leader': self.user.id,
'approver': self.user.id
})
form = ContractorDeliverableRevisionForm(self.data, category=self.category)
self.assertFalse(form.is_valid())
class UpdateDistribListTests(BaseReviewFormMixinTests):
"""Test distribution list updates during review.
When reviewers, leader or approver are modified during a review, the
actual distribution list must be updated accordingly.
"""
def setUp(self):
super(UpdateDistribListTests, self).setUp()
self.user4 = UserFactory(
email='user4@phase.fr',
password='pass',
is_superuser=True,
category=self.category)
self.doc = DocumentFactory(
metadata_factory_class=ContractorDeliverableFactory,
revision_factory_class=ContractorDeliverableRevisionFactory,
category=self.category,
revision={
'reviewers': [self.user],
'leader': self.user2,
'approver': self.user3,
'received_date': datetime.datetime.today(),
})
self.rev = self.doc.get_latest_revision()
self.data.update({
'reviewers': str(self.user.id),
'leader': self.user2.id,
'approver': self.user3.id,
'review_start_date': datetime.datetime.today(),
'review_due_date': datetime.datetime.today() + datetime.timedelta(days=14)
})
def test_form_is_valid(self):
form = ContractorDeliverableRevisionForm(
self.data,
instance=self.rev,
category=self.category)
self.assertTrue(form.is_valid())
def test_reviewers_cannot_be_added_after_reviewers_step(self):
self.rev.start_review()
self.rev.end_reviewers_step()
reviewers = '{},{}'.format(self.user.id, self.user4.id)
self.data.update({'reviewers': reviewers})
form = ContractorDeliverableRevisionForm(
self.data,
instance=self.rev,
category=self.category)
self.assertFalse(form.is_valid())
self.assertTrue('reviewers' in form.errors)
def test_reviewers_cannot_be_deleted_after_reviewers_step(self):
self.rev.start_review()
self.rev.end_reviewers_step()
self.data.update({'reviewers': ''})
form = ContractorDeliverableRevisionForm(
self.data,
instance=self.rev,
category=self.category)
self.assertFalse(form.is_valid())
self.assertTrue('reviewers' in form.errors)
def test_reviewer_can_be_added_during_reviewers_step(self):
self.rev.start_review()
# Count initial Reviews
qs = Review.objects \
.filter(document=self.rev.document) \
.filter(revision=self.rev.revision) \
.filter(role='reviewer')
self.assertEqual(qs.count(), 1)
# Add a reviewer
reviewers = '{},{}'.format(self.user.id, self.user4.id)
self.data.update({'reviewers': reviewers})
form = ContractorDeliverableRevisionForm(
self.data,
instance=self.rev,
category=self.category)
self.assertTrue(form.is_valid())
form.save()
# Count updated Reviews
self.assertEqual(qs.count(), 2)
def test_reviewer_may_be_deleted_during_reviewers_step(self):
"""A reviewer can be deleted if they didn't submit a review yet."""
self.rev.start_review()
# Count initial Reviews
qs = Review.objects \
.filter(document=self.rev.document) \
.filter(revision=self.rev.revision) \
.filter(role='reviewer')
self.assertEqual(qs.count(), 1)
# Remove a reviewer
reviewers = ''
self.data.update({'reviewers': reviewers})
form = ContractorDeliverableRevisionForm(
self.data,
instance=self.rev,
category=self.category)
self.assertTrue(form.is_valid())
form.save()
# Count updated Reviews
self.assertEqual(qs.count(), 0)
def test_reviewer_may_not_be_deleted_during_reviewers_step(self):
"""Reviewers that submitted a review cannot be removed."""
self.rev.reviewers.add(self.user4)
self.rev.start_review()
# Post a review
review = self.rev.get_review(self.user)
review.post_review(comments=None)
# Assert the reviewers stop is still open
self.rev.refresh_from_db()
self.assertIsNone(self.rev.reviewers_step_closed)
# Try to remove the initial reviewer
reviewers = str(self.user4.id)
self.data.update({'reviewers': reviewers})
form = ContractorDeliverableRevisionForm(
self.data,
instance=self.rev,
category=self.category)
self.assertFalse(form.is_valid())
self.assertTrue('reviewers' in form.errors)
def test_removing_reviewers_can_end_reviewers_step(self):
"""Remove all reviewers, and the review goes up to leader step."""
self.rev.reviewers.add(self.user4)
self.rev.start_review()
leader_review = self.rev.get_review(self.user2)
self.assertEqual(leader_review.status, 'pending')
# Count Review objects
qs = Review.objects \
.filter(document=self.rev.document) \
.filter(revision=self.rev.revision) \
.filter(role='reviewer')
self.assertEqual(qs.count(), 2)
# Remove one reviewer
self.data.update({'reviewers': str(self.user.id)})
form = ContractorDeliverableRevisionForm(
self.data,
instance=self.rev,
category=self.category)
self.assertTrue(form.is_valid())
form.save()
# Assert the reviewers step is still open
self.rev.refresh_from_db()
self.assertIsNone(self.rev.reviewers_step_closed)
self.assertEqual(qs.count(), 1)
# Remove second reviewer
self.data.update({'reviewers': ''})
form = ContractorDeliverableRevisionForm(
self.data,
instance=self.rev,
category=self.category)
self.assertTrue(form.is_valid())
form.save()
# Assert the reviewers step is closed
self.rev.refresh_from_db()
self.assertEqual(qs.count(), 0)
self.assertIsNotNone(self.rev.reviewers_step_closed)
leader_review.refresh_from_db()
self.assertEqual(leader_review.status, 'progress')
def test_leader_cannot_be_changed_after_leader_step(self):
self.rev.start_review()
self.rev.end_leader_step()
self.data.update({'leader': self.user4.id})
form = ContractorDeliverableRevisionForm(
self.data,
instance=self.rev,
category=self.category)
self.assertFalse(form.is_valid())
self.assertTrue('leader' in form.errors)
def test_update_leader_updates_distrib_list(self):
self.rev.start_review()
review = self.rev.get_review(self.user2)
self.assertEqual(review.role, 'leader')
self.data.update({'leader': self.user4.id})
form = ContractorDeliverableRevisionForm(
self.data,
instance=self.rev,
category=self.category)
rev = form.save()
review = rev.get_review(self.user2)
self.assertIsNone(review)
review = rev.get_review(self.user4)
self.assertEqual(review.role, 'leader')
def test_approver_cannot_be_changed_after_approver_step(self):
self.rev.start_review()
self.rev.end_review()
self.data.update({'approver': self.user4.id})
form = ContractorDeliverableRevisionForm(
self.data,
instance=self.rev,
category=self.category)
self.assertFalse(form.is_valid())
self.assertTrue('approver' in form.errors)
def test_update_approver_updates_distrib_list(self):
self.rev.start_review()
review = self.rev.get_review(self.user3)
self.assertEqual(review.role, 'approver')
self.data.update({'approver': self.user4.id})
form = ContractorDeliverableRevisionForm(
self.data,
instance=self.rev,
category=self.category)
rev = form.save()
review = rev.get_review(self.user3)
self.assertIsNone(review)
review = rev.get_review(self.user4)
self.assertEqual(review.role, 'approver')
def test_removing_approver_during_approver_step_ends_review(self):
self.rev.start_review()
self.rev.end_leader_step()
self.assertIsNone(self.rev.review_end_date)
self.data.update({'approver': ''})
form = ContractorDeliverableRevisionForm(
self.data,
instance=self.rev,
category=self.category)
rev = form.save()
review = rev.get_review(self.user3)
self.assertIsNone(review)
self.assertIsNotNone(self.rev.review_end_date)
def test_removing_approver_before_approver_step_doesnt_end_review(self):
self.rev.start_review()
self.assertIsNone(self.rev.review_end_date)
self.data.update({'approver': ''})
form = ContractorDeliverableRevisionForm(
self.data,
instance=self.rev,
category=self.category)
rev = form.save()
review = rev.get_review(self.user3)
self.assertIsNone(review)
self.assertIsNone(self.rev.review_end_date)
| 34.275
| 86
| 0.630359
| 11,741
| 0.951536
| 0
| 0
| 0
| 0
| 0
| 0
| 1,630
| 0.132101
|
b77448adc21f21e754acd95d28d79c117e90042e
| 2,814
|
py
|
Python
|
WWW/pycopia/WWW/HTML5simple.py
|
kdart/pycopia
|
1446fabaedf8c6bdd4ab1fc3f0ea731e0ef8da9d
|
[
"Apache-2.0"
] | 89
|
2015-03-26T11:25:20.000Z
|
2022-01-12T06:25:14.000Z
|
WWW/pycopia/WWW/HTML5simple.py
|
kdart/pycopia
|
1446fabaedf8c6bdd4ab1fc3f0ea731e0ef8da9d
|
[
"Apache-2.0"
] | 1
|
2015-07-05T03:27:43.000Z
|
2015-07-11T06:21:20.000Z
|
WWW/pycopia/WWW/HTML5simple.py
|
kdart/pycopia
|
1446fabaedf8c6bdd4ab1fc3f0ea731e0ef8da9d
|
[
"Apache-2.0"
] | 30
|
2015-04-30T01:35:54.000Z
|
2022-01-12T06:19:49.000Z
|
#!/usr/bin/python2.7
# -*- coding: utf-8 -*-
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Simple HTML5 top-level boilerplate generator.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
# Currently using this subset of HTML5 features shared by both FF 3.6.x and Chrome 10
BROWSER_FEATURES = ['applicationcache', 'backgroundsize', 'borderimage', 'borderradius',
'boxshadow', 'canvas', 'canvastext', 'csscolumns', 'cssgradients',
'csstransforms', 'draganddrop', 'flexbox', 'fontface', 'geolocation',
'hashchange', 'hsla', 'js', 'localstorage', 'multiplebgs', 'opacity',
'postmessage', 'rgba', 'sessionstorage', 'svg', 'svgclippaths', 'textshadow',
'webworkers']
NO_BROWSER_FEATURES = ['no-audio', 'no-cssanimations', 'no-cssreflections',
'no-csstransforms3d', 'no-csstransitions', 'no-history', 'no-indexeddb',
'no-inlinesvg', 'no-smil', 'no-touch', 'no-video', 'no-webgl', 'no-websockets',
'no-websqldatabase']
FEATURE_CLASS = " ".join(BROWSER_FEATURES) + " " + " ".join(NO_BROWSER_FEATURES)
#### simple templates for use by mostly client-side apps.
SIMPLE_TEMPLATE = """<?xml version="1.0" encoding="{charset}"?>
<!DOCTYPE html>
<html lang="en" xmlns="http://www.w3.org/1999/xhtml" class="{features}">
<head>
<meta charset="{charset}" />
<meta name="robots" content="noindex" />
<meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">
<title>{title}</title>
<link href="/media/css/{appname}.css" type="text/css" rel="stylesheet" />
<!-- <script src="/media/js/modernizr-1.7.min.js" type="text/javascript"></script> -->
<script src="/media/js/packed.js" type="text/javascript"></script>
<script src="/media/js/{appname}.js" type="text/javascript"></script>
</head>
<body>
{body}
</body>
</html>
"""
def new_simple_document(appname, title, charset="utf-8", body=""):
return SIMPLE_TEMPLATE.format(
charset=charset,
features=FEATURE_CLASS,
appname=appname,
title=title,
body=body,
)
if __name__ == "__main__":
docs = new_simple_document("myapp", "MyApp", "utf-8")
print (docs)
| 35.620253
| 90
| 0.678749
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,058
| 0.731343
|
b77490b49e8e303cfb2f69ab8e30192b9d37bd8f
| 2,077
|
py
|
Python
|
zairachem/reports/report.py
|
ersilia-os/ersilia-automl-chem
|
fabb1f05d17cff11ec0e084495eed4c0152f2f63
|
[
"MIT"
] | null | null | null |
zairachem/reports/report.py
|
ersilia-os/ersilia-automl-chem
|
fabb1f05d17cff11ec0e084495eed4c0152f2f63
|
[
"MIT"
] | null | null | null |
zairachem/reports/report.py
|
ersilia-os/ersilia-automl-chem
|
fabb1f05d17cff11ec0e084495eed4c0152f2f63
|
[
"MIT"
] | null | null | null |
from .plots import (
ActivesInactivesPlot,
ConfusionPlot,
RocCurvePlot,
ProjectionPlot,
RegressionPlotRaw,
HistogramPlotRaw,
RegressionPlotTransf,
HistogramPlotTransf,
Transformation,
IndividualEstimatorsAurocPlot,
InidvidualEstimatorsR2Plot,
)
from .. import ZairaBase
from ..vars import REPORT_SUBFOLDER
class Reporter(ZairaBase):
def __init__(self, path):
ZairaBase.__init__(self)
if path is None:
self.path = self.get_output_dir()
else:
self.path = path
def _actives_inactives_plot(self):
ActivesInactivesPlot(ax=None, path=self.path).save()
def _confusion_matrix_plot(self):
ConfusionPlot(ax=None, path=self.path).save()
def _roc_curve_plot(self):
RocCurvePlot(ax=None, path=self.path).save()
def _projection_plot(self):
ProjectionPlot(ax=None, path=self.path).save()
def _regression_plot_raw(self):
RegressionPlotRaw(ax=None, path=self.path).save()
def _histogram_plot_raw(self):
HistogramPlotRaw(ax=None, path=self.path).save()
def _regression_plot_transf(self):
RegressionPlotTransf(ax=None, path=self.path).save()
def _histogram_plot_transf(self):
HistogramPlotTransf(ax=None, path=self.path).save()
def _transformation_plot(self):
Transformation(ax=None, path=self.path).save()
def _individual_estimators_auroc_plot(self):
IndividualEstimatorsAurocPlot(ax=None, path=self.path).save()
def _individual_estimators_r2_plot(self):
InidvidualEstimatorsR2Plot(ax=None, path=self.path).save()
def run(self):
self._actives_inactives_plot()
self._confusion_matrix_plot()
self._roc_curve_plot()
self._projection_plot()
self._regression_plot_transf()
self._histogram_plot_transf()
self._regression_plot_raw()
self._histogram_plot_raw()
self._transformation_plot()
self._individual_estimators_auroc_plot()
self._individual_estimators_r2_plot()
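# --- Usage sketch (editor's addition; the path below is hypothetical) ---
# A minimal sketch, assuming a completed ZairaChem output directory exists:
# Reporter(path='/tmp/zaira_run').run()   # renders and saves every plot for that run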
| 28.847222
| 69
| 0.692826
| 1,723
| 0.829562
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
b777765b36345a7cbdc249bcc3d7922fbe5e7398
| 2,198
|
py
|
Python
|
tests/backend/configobj.py
|
edyan/python-anyconfig
|
d237909a6e4848737539b80951b710238c72052f
|
[
"MIT"
] | null | null | null |
tests/backend/configobj.py
|
edyan/python-anyconfig
|
d237909a6e4848737539b80951b710238c72052f
|
[
"MIT"
] | null | null | null |
tests/backend/configobj.py
|
edyan/python-anyconfig
|
d237909a6e4848737539b80951b710238c72052f
|
[
"MIT"
] | null | null | null |
#
# Copyright (C) 2013 - 2017 Satoru SATOH <ssato @ redhat.com>
# License: MIT
#
# pylint: disable=missing-docstring,invalid-name,too-few-public-methods
# pylint: disable=ungrouped-imports
from __future__ import absolute_import
import anyconfig.backend.configobj as TT
import tests.backend.common as TBC
from anyconfig.compat import OrderedDict as ODict
CNF_0_S = """\
# This is the 'initial_comment'
# Which may be several lines
keyword1 = value1
'keyword 2' = 'value 2'
[ "section 1" ]
# This comment goes with keyword 3
keyword 3 = value 3
'keyword 4' = value4, value 5, 'value 6'
[[ sub-section ]] # an inline comment
# sub-section is inside "section 1"
'keyword 5' = 'value 7'
'keyword 6' = '''A multiline value,
that spans more than one line :-)
The line breaks are included in the value.'''
[[[ sub-sub-section ]]]
# sub-sub-section is *in* 'sub-section'
# which is in 'section 1'
'keyword 7' = 'value 8'
[section 2] # an inline comment
keyword8 = "value 9"
keyword9 = value10 # an inline comment
# The 'final_comment'
# Which also may be several lines
"""
_ML_0 = """A multiline value,
that spans more than one line :-)
The line breaks are included in the value."""
CNF_0 = ODict((('keyword1', 'value1'),
('keyword 2', 'value 2'),
('section 1',
ODict((('keyword 3', 'value 3'),
('keyword 4', ['value4', 'value 5', 'value 6']),
('sub-section',
ODict((('keyword 5', 'value 7'),
('keyword 6', _ML_0),
('sub-sub-section',
ODict((('keyword 7', 'value 8'), ))))))))),
('section 2',
ODict((('keyword8', 'value 9'), ('keyword9', 'value10'))))))
class HasParserTrait(TBC.HasParserTrait):
psr = TT.Parser()
cnf = CNF_0
cnf_s = CNF_0_S
class Test_10(TBC.Test_10_dumps_and_loads, HasParserTrait):
load_options = dict(raise_errors=True)
dump_options = dict(indent_type=" ")
class Test_20(TBC.Test_10_dumps_and_loads, HasParserTrait):
pass
# vim:sw=4:ts=4:et:
| 27.135802
| 76
| 0.589172
| 314
| 0.142857
| 0
| 0
| 0
| 0
| 0
| 0
| 1,304
| 0.593267
|
b777b683adb530c1229387728fa21c1239e09fa6
| 23
|
py
|
Python
|
backend/lorre/__init__.py
|
nhurman/Lorre
|
9a19d00809bd3850d288c9ef1166da016d29b819
|
[
"MIT"
] | null | null | null |
backend/lorre/__init__.py
|
nhurman/Lorre
|
9a19d00809bd3850d288c9ef1166da016d29b819
|
[
"MIT"
] | null | null | null |
backend/lorre/__init__.py
|
nhurman/Lorre
|
9a19d00809bd3850d288c9ef1166da016d29b819
|
[
"MIT"
] | null | null | null |
__author__ = 'nhurman'
| 11.5
| 22
| 0.73913
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 9
| 0.391304
|
b778baead541bab3544aa4b5bfec40bbfb0e7118
| 1,794
|
py
|
Python
|
tests/test_spider.py
|
aezhov/sw_downloader
|
325f6fe98579dda4f98613e4513fc46c7bd962c7
|
[
"MIT"
] | 1
|
2020-01-23T06:56:34.000Z
|
2020-01-23T06:56:34.000Z
|
tests/test_spider.py
|
aezhov/sw_downloader
|
325f6fe98579dda4f98613e4513fc46c7bd962c7
|
[
"MIT"
] | null | null | null |
tests/test_spider.py
|
aezhov/sw_downloader
|
325f6fe98579dda4f98613e4513fc46c7bd962c7
|
[
"MIT"
] | 2
|
2020-01-23T06:56:36.000Z
|
2020-01-24T05:34:25.000Z
|
import requests
from scrapy.http import HtmlResponse
from sw_downloader.sw_downloader.spiders.smashing_magazine \
import SmashingMagazineSpider
class TestSmashingMagazineSpider:
def test_ruleset(self):
url = ('https://www.smashingmagazine.com/category/wallpapers')
response = requests.get(url)
spider = SmashingMagazineSpider(year=2018, month=2,
resolution='1024x768')
scrapy_response = HtmlResponse(body=response.content, url=url)
links_gen = spider._requests_to_follow(scrapy_response)
links = list(links_gen)
assert len(links) == 3
def test_image_link_extraction(self):
url = ('https://www.smashingmagazine.com/'
'2018/01/desktop-wallpaper-'
'calendars-february-2018/')
spider = SmashingMagazineSpider(year=2018, month=1,
resolution='1024x768')
response = requests.get(url)
scrapy_response = HtmlResponse(body=response.content, url=url)
items_gen = spider.parse_wallpaper_page(scrapy_response)
items = list(items_gen)
assert len(items) == 26
for item in items:
assert '1024x768.jpg' in item['file_urls'][0] or \
'1024x768.png' in item['file_urls'][0]
def test_extract_posts(self):
url = ('https://www.smashingmagazine.com/category/wallpapers')
spider = SmashingMagazineSpider(year=2018, month=2,
resolution='1024x768')
response = requests.get(url)
scrapy_response = HtmlResponse(body=response.content, url=url)
posts_gen = spider.parse_posts_page(scrapy_response)
posts = list(posts_gen)
assert len(posts) == 2
| 39.866667
| 70
| 0.628763
| 1,642
| 0.915273
| 0
| 0
| 0
| 0
| 0
| 0
| 277
| 0.154404
|
b778ede17d0ff72b747e93fc5f260c19e384862d
| 160
|
py
|
Python
|
1_PROGI/Exe 5.py
|
Julymusso/IFES
|
939277c375dacc7750705c5593537d80ab4cbc0e
|
[
"MIT"
] | null | null | null |
1_PROGI/Exe 5.py
|
Julymusso/IFES
|
939277c375dacc7750705c5593537d80ab4cbc0e
|
[
"MIT"
] | null | null | null |
1_PROGI/Exe 5.py
|
Julymusso/IFES
|
939277c375dacc7750705c5593537d80ab4cbc0e
|
[
"MIT"
] | null | null | null |
# variables
# num, media, i: integer
media = 0
for i in range(1, 11, 1):
    num = int(input("Digite um número: "))
    media = media + num
# i is 10 once the loop ends, so this yields the average of the ten inputs
media = media / i
print(media)
| 17.777778
| 40
| 0.61875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 48
| 0.298137
|
b7794cab91544e360a3b3517961177ee512565ea
| 4,289
|
py
|
Python
|
rio/tasks.py
|
soasme/rio
|
e6b89634db8d3ad75ac7f7b25ddec5b19d4f66e2
|
[
"MIT"
] | null | null | null |
rio/tasks.py
|
soasme/rio
|
e6b89634db8d3ad75ac7f7b25ddec5b19d4f66e2
|
[
"MIT"
] | 14
|
2016-04-14T04:18:41.000Z
|
2016-05-12T03:46:37.000Z
|
rio/tasks.py
|
soasme/rio
|
e6b89634db8d3ad75ac7f7b25ddec5b19d4f66e2
|
[
"MIT"
] | 1
|
2016-04-06T08:54:20.000Z
|
2016-04-06T08:54:20.000Z
|
# -*- coding: utf-8 -*-
"""
rio.tasks
~~~~~~~~~~
Implement of rio tasks based on celery.
"""
from time import time
from celery import chord
from requests import ConnectionError
from celery.utils.log import get_task_logger
from rio.core import celery
from rio.core import sentry
from rio.utils.http import dispatch_webhook_request
from rio.utils.http import raven_context
from rio.utils.http import FailureWebhookError
from rio.utils.template import format_template
from rio.signals import webhook_ran
logger = get_task_logger(__name__)
def _build_request_for_calling_webhook(event, webhook, payload):
event_identity = 'uuid=%s,project=%s,action=%s' % (
str(event['uuid']), event['project'], event['action']
)
raw_url = webhook['url']
url = format_template(raw_url, dict(payload=payload))
request = {
'url': url,
'method': webhook['method'],
'headers': {
'X-RIO-EVENT': event_identity,
}
}
if webhook['method'] == 'GET':
request['params'] = payload
elif webhook['headers'].get('Content-Type') == 'application/json':
request['json'] = payload
else:
request['data'] = payload
return request
@celery.task()
def call_webhook(event, webhook, payload):
"""Build request from event,webhook,payoad and parse response."""
started_at = time()
request = _build_request_for_calling_webhook(event, webhook, payload)
logger.info('REQUEST %(uuid)s %(method)s %(url)s %(payload)s' % dict(
uuid=str(event['uuid']),
url=request['url'],
method=request['method'],
payload=payload,
))
try:
content = dispatch_webhook_request(**request)
logger.debug('RESPONSE %(uuid)s %(method)s %(url)s %(data)s' % dict(
uuid=str(event['uuid']),
url=request['url'],
method=request['method'],
data=content,
))
data = dict(
parent=str(event['uuid']),
content=content,
started_at=started_at,
ended_at=time()
)
except (FailureWebhookError, ConnectionError) as exception:
if sentry.client:
http_context = raven_context(**request)
sentry.captureException(data={'request': http_context})
        logger.error('RESPONSE %(uuid)s %(method)s %(url)s %(error)s' % dict(
            uuid=str(event['uuid']),
            method=request['method'],
            url=request['url'],
            error=str(exception),))
        data = dict(
            parent=str(event['uuid']),
            error=str(exception),
            started_at=started_at,
            ended_at=time(),
        )
webhook_ran.send(None, data=data)
return data
@celery.task()
def merge_webhooks_runset(runset):
"""Make some statistics on the run set.
"""
min_started_at = min([w['started_at'] for w in runset])
max_ended_at = max([w['ended_at'] for w in runset])
ellapse = max_ended_at - min_started_at
errors_count = sum(1 for w in runset if 'error' in w)
total_count = len(runset)
data = dict(
ellapse=ellapse,
errors_count=errors_count,
total_count=total_count,
)
return data
def exec_event(event, webhooks, payload):
"""Execute event.
Merge webhooks run set to do some stats after all
of the webhooks been responded successfully.
+---------+
|webhook-1+--------------------+
+---------+ |
|
+---------+ |
|webhook-2+-------------+ |
+---------+ +------+-----+
|merge runset+------>
+---------+ +------+-----+
|webhook-3+-------------+ |
+---------+ |
|
+---------+ |
|... +--------------------+
+---------+
Error webhook will be propagated. Note that other webhook
calls will still execute.
"""
calls = (
call_webhook.s(event, webhook, payload)
for webhook in webhooks
)
callback = merge_webhooks_runset.s()
call_promise = chord(calls)
promise = call_promise(callback)
return promise
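# --- Usage sketch (editor's addition; the event and webhook dicts are hypothetical) ---
# A minimal sketch of driving the chord above, assuming a running celery
# broker and worker; kept as comments because it needs that infrastructure:
# event = {'uuid': uuid.uuid4(), 'project': 'demo', 'action': 'user.created'}
# webhooks = [{'url': 'http://example.com/hook', 'method': 'GET', 'headers': {}}]
# promise = exec_event(event, webhooks, payload={'id': 1})
# stats = promise.get()  # e.g. {'ellapse': 0.2, 'errors_count': 0, 'total_count': 1}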
| 26.312883
| 77
| 0.55211
| 0
| 0
| 0
| 0
| 2,024
| 0.471905
| 0
| 0
| 1,393
| 0.324784
|
b77a241d42c4accce70a300b51e344bbc1b51e81
| 2,932
|
py
|
Python
|
security_app/createuserpoolgroup.py
|
jagalembu/chalice_cognito_multi_tenancy
|
b1a6569e716848adfd5b443239fca1ae883d4f4b
|
[
"MIT"
] | null | null | null |
security_app/createuserpoolgroup.py
|
jagalembu/chalice_cognito_multi_tenancy
|
b1a6569e716848adfd5b443239fca1ae883d4f4b
|
[
"MIT"
] | null | null | null |
security_app/createuserpoolgroup.py
|
jagalembu/chalice_cognito_multi_tenancy
|
b1a6569e716848adfd5b443239fca1ae883d4f4b
|
[
"MIT"
] | null | null | null |
import os
import uuid
import json
import argparse
import boto3
USERPOOL = {
'env_var_poolid': 'APPUSERPOOLID',
'env_var_cognito_url': 'COGNITOJWKSURL',
'env_var_pool_client': 'APPCLIENTID',
}
def create_user_pool(pool_name, region, stage):
client = boto3.client('cognito-idp')
    resp = client.create_user_pool(
        PoolName=pool_name, UsernameAttributes=['email'])
record_as_env_var(USERPOOL['env_var_poolid'], resp['UserPool']['Id'],
stage)
record_as_env_var(
USERPOOL['env_var_cognito_url'],
f"https://cognito-idp.{region}.amazonaws.com/{resp['UserPool']['Id']}/.well-known/jwks.json",
stage)
def create_user_pool_client(user_pool_id, client_name, stage):
client = boto3.client('cognito-idp')
resp = client.create_user_pool_client(
UserPoolId=user_pool_id,
ClientName=client_name,
        # Need to work on this because AWS SRP does not work with a client secret...
GenerateSecret=False,
)
record_as_env_var(USERPOOL['env_var_pool_client'],
resp['UserPoolClient']['ClientId'], stage)
def create_user_pool_group(user_pool_id, group_name):
client = boto3.client('cognito-idp')
resp = client.create_group(
GroupName=group_name,
UserPoolId=user_pool_id,
)
def create_admin_user(username, user_pool_id, group_name):
client = boto3.client('cognito-idp')
resp = client.admin_create_user(
UserPoolId=user_pool_id,
Username=username,
UserAttributes=[
{
'Name': 'email',
'Value': username
},
],
ForceAliasCreation=False,
DesiredDeliveryMediums=[
'EMAIL',
])
response = client.admin_add_user_to_group(
UserPoolId=user_pool_id, Username=username, GroupName=group_name)
def get_env_var_from_config(key, stage):
    with open(os.path.join('.chalice', 'config.json')) as f:
        data = json.load(f)
    try:
        return data['stages'][stage]['environment_variables'][key]
    except KeyError:
        raise Exception('problem loading config')
def record_as_env_var(key, value, stage):
with open(os.path.join('.chalice', 'config.json')) as f:
data = json.load(f)
data['stages'].setdefault(stage, {}).setdefault(
'environment_variables', {})[key] = value
with open(os.path.join('.chalice', 'config.json'), 'w') as f:
serialized = json.dumps(data, indent=2, separators=(',', ': '))
f.write(serialized + '\n')
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--stage', default='dev')
parser.add_argument('-g', '--groupname', help='Specify group name to be added to the user pool')
args = parser.parse_args()
create_user_pool_group(
get_env_var_from_config(USERPOOL['env_var_poolid'], args.stage), args.groupname)
if __name__ == '__main__':
main()
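# --- Usage sketch (editor's addition; stage and group names are hypothetical) ---
# A minimal sketch, assuming AWS credentials are configured and
# .chalice/config.json already records APPUSERPOOLID for the target stage:
#   python createuserpoolgroup.py --stage dev --groupname tenant-admins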
| 30.863158
| 104
| 0.645634
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 723
| 0.246589
|
b77c8918ea2f71cb1258eb1a156531c01f3b83b2
| 269
|
py
|
Python
|
Chapter 07/hmac-md5.py
|
Prakshal2607/Effective-Python-Penetration-Testing
|
f49fedc172a1add45edb21f66f74746dfa9c944a
|
[
"MIT"
] | 346
|
2016-06-21T11:39:39.000Z
|
2022-01-26T03:19:29.000Z
|
Chapter 07/hmac-md5.py
|
liceaga/Effective-Python-Penetration-Testing
|
0b043885231662efd63402eec3c9cb413b9693e2
|
[
"MIT"
] | 1
|
2016-06-21T11:44:42.000Z
|
2016-11-17T05:10:08.000Z
|
Chapter 07/hmac-md5.py
|
liceaga/Effective-Python-Penetration-Testing
|
0b043885231662efd63402eec3c9cb413b9693e2
|
[
"MIT"
] | 210
|
2016-06-22T12:08:47.000Z
|
2022-03-16T15:54:30.000Z
|
import hashlib
import hmac
hmac_md5 = hmac.new(b'secret-key', digestmod=hashlib.md5)
f = open('sample-file.txt', 'rb')
try:
    while True:
        block = f.read(1024)
        if not block:
            break
        hmac_md5.update(block)
finally:
    f.close()
digest = hmac_md5.hexdigest()
print(digest)
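# --- Verification sketch (editor's addition; received_digest is hypothetical) ---
# A minimal sketch: when checking a digest sent by a peer, compare it in
# constant time with the stdlib helper instead of ==:
#   hmac.compare_digest(digest, received_digest)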
| 16.8125
| 33
| 0.598513
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 33
| 0.122677
|
b77cac8f40a35a229bb7b41f7b04619f55a2baf4
| 31,686
|
py
|
Python
|
dingtalk/python/alibabacloud_dingtalk/alitrip_1_0/models.py
|
yndu13/dingtalk-sdk
|
700fb7bb49c4d3167f84afc5fcb5e7aa5a09735f
|
[
"Apache-2.0"
] | null | null | null |
dingtalk/python/alibabacloud_dingtalk/alitrip_1_0/models.py
|
yndu13/dingtalk-sdk
|
700fb7bb49c4d3167f84afc5fcb5e7aa5a09735f
|
[
"Apache-2.0"
] | null | null | null |
dingtalk/python/alibabacloud_dingtalk/alitrip_1_0/models.py
|
yndu13/dingtalk-sdk
|
700fb7bb49c4d3167f84afc5fcb5e7aa5a09735f
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# This file is auto-generated, don't edit it. Thanks.
from Tea.model import TeaModel
from typing import Dict, List
class AddCityCarApplyHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.common_headers is not None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commonHeaders') is not None:
self.common_headers = m.get('commonHeaders')
if m.get('x-acs-dingtalk-access-token') is not None:
self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
return self
class AddCityCarApplyRequest(TeaModel):
def __init__(
self,
cause: str = None,
city: str = None,
corp_id: str = None,
date: str = None,
project_code: str = None,
project_name: str = None,
status: int = None,
third_part_apply_id: str = None,
third_part_cost_center_id: str = None,
third_part_invoice_id: str = None,
times_total: int = None,
times_type: int = None,
times_used: int = None,
title: str = None,
user_id: str = None,
ding_suite_key: str = None,
ding_corp_id: str = None,
ding_token_grant_type: int = None,
finished_date: str = None,
):
        # Business trip reason
        self.cause = cause
        # City where the car will be used
        self.city = city
        # Third-party enterprise ID
        self.corp_id = corp_id
        # Car-use date, controlled per day; e.g. passing 2021-03-18 20:26:56 means the car may be used on 2021-03-18; for multi-day use, combine with the finishedDate parameter
        self.date = date
        # Project code associated with the approval form
        self.project_code = project_code
        # Project name associated with the approval form
        self.project_name = project_name
        # Approval form status: 0 - applied, 1 - approved, 2 - rejected
        self.status = status
        # Third-party approval form ID
        self.third_part_apply_id = third_part_apply_id
        # Third-party cost center ID associated with the approval form
        self.third_part_cost_center_id = third_part_cost_center_id
        # Third-party invoice title ID associated with the approval form
        self.third_part_invoice_id = third_part_invoice_id
        # Total number of times the approval form may be used
        self.times_total = times_total
        # Usage-count type: 1 - unlimited, 2 - user-specified count, 3 - admin-limited count; if the enterprise does not need to limit usage, pass 1 (unlimited) and set both times_total and times_used to 0
        self.times_type = times_type
        # Number of times the approval form has already been used
        self.times_used = times_used
        # Approval form title
        self.title = title
        # Third-party employee ID who initiated the approval
        self.user_id = user_id
        # suiteKey
        self.ding_suite_key = ding_suite_key
        # account
        self.ding_corp_id = ding_corp_id
        # tokenGrantType
        self.ding_token_grant_type = ding_token_grant_type
        # Car-use end date, controlled per day; e.g. date=2021-03-18 20:26:56 with finished_date=2021-03-30 20:26:56 means the car may be used from 2021-03-18 (inclusive) to 2021-03-30 (inclusive); if omitted, date is used as the end date
self.finished_date = finished_date
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.cause is not None:
result['cause'] = self.cause
if self.city is not None:
result['city'] = self.city
if self.corp_id is not None:
result['corpId'] = self.corp_id
if self.date is not None:
result['date'] = self.date
if self.project_code is not None:
result['projectCode'] = self.project_code
if self.project_name is not None:
result['projectName'] = self.project_name
if self.status is not None:
result['status'] = self.status
if self.third_part_apply_id is not None:
result['thirdPartApplyId'] = self.third_part_apply_id
if self.third_part_cost_center_id is not None:
result['thirdPartCostCenterId'] = self.third_part_cost_center_id
if self.third_part_invoice_id is not None:
result['thirdPartInvoiceId'] = self.third_part_invoice_id
if self.times_total is not None:
result['timesTotal'] = self.times_total
if self.times_type is not None:
result['timesType'] = self.times_type
if self.times_used is not None:
result['timesUsed'] = self.times_used
if self.title is not None:
result['title'] = self.title
if self.user_id is not None:
result['userId'] = self.user_id
if self.ding_suite_key is not None:
result['dingSuiteKey'] = self.ding_suite_key
if self.ding_corp_id is not None:
result['dingCorpId'] = self.ding_corp_id
if self.ding_token_grant_type is not None:
result['dingTokenGrantType'] = self.ding_token_grant_type
if self.finished_date is not None:
result['finishedDate'] = self.finished_date
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('cause') is not None:
self.cause = m.get('cause')
if m.get('city') is not None:
self.city = m.get('city')
if m.get('corpId') is not None:
self.corp_id = m.get('corpId')
if m.get('date') is not None:
self.date = m.get('date')
if m.get('projectCode') is not None:
self.project_code = m.get('projectCode')
if m.get('projectName') is not None:
self.project_name = m.get('projectName')
if m.get('status') is not None:
self.status = m.get('status')
if m.get('thirdPartApplyId') is not None:
self.third_part_apply_id = m.get('thirdPartApplyId')
if m.get('thirdPartCostCenterId') is not None:
self.third_part_cost_center_id = m.get('thirdPartCostCenterId')
if m.get('thirdPartInvoiceId') is not None:
self.third_part_invoice_id = m.get('thirdPartInvoiceId')
if m.get('timesTotal') is not None:
self.times_total = m.get('timesTotal')
if m.get('timesType') is not None:
self.times_type = m.get('timesType')
if m.get('timesUsed') is not None:
self.times_used = m.get('timesUsed')
if m.get('title') is not None:
self.title = m.get('title')
if m.get('userId') is not None:
self.user_id = m.get('userId')
if m.get('dingSuiteKey') is not None:
self.ding_suite_key = m.get('dingSuiteKey')
if m.get('dingCorpId') is not None:
self.ding_corp_id = m.get('dingCorpId')
if m.get('dingTokenGrantType') is not None:
self.ding_token_grant_type = m.get('dingTokenGrantType')
if m.get('finishedDate') is not None:
self.finished_date = m.get('finishedDate')
return self
class AddCityCarApplyResponseBody(TeaModel):
def __init__(
self,
apply_id: int = None,
):
        # Internal business-travel (BTrip) approval form ID
self.apply_id = apply_id
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.apply_id is not None:
result['applyId'] = self.apply_id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('applyId') is not None:
self.apply_id = m.get('applyId')
return self
class AddCityCarApplyResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: AddCityCarApplyResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = AddCityCarApplyResponseBody()
self.body = temp_model.from_map(m['body'])
return self
class ApproveCityCarApplyHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.common_headers is not None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commonHeaders') is not None:
self.common_headers = m.get('commonHeaders')
if m.get('x-acs-dingtalk-access-token') is not None:
self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
return self
class ApproveCityCarApplyRequest(TeaModel):
def __init__(
self,
corp_id: str = None,
operate_time: str = None,
remark: str = None,
status: int = None,
third_part_apply_id: str = None,
user_id: str = None,
ding_suite_key: str = None,
ding_corp_id: str = None,
ding_token_grant_type: int = None,
):
        # Third-party enterprise ID
        self.corp_id = corp_id
        # Approval time
        self.operate_time = operate_time
        # Approval remark
        self.remark = remark
        # Approval result: 1 - approved, 2 - rejected
        self.status = status
        # Third-party approval form ID
        self.third_part_apply_id = third_part_apply_id
        # Third-party employee ID of the approver
        self.user_id = user_id
# suiteKey
self.ding_suite_key = ding_suite_key
# account
self.ding_corp_id = ding_corp_id
# tokenGrantType
self.ding_token_grant_type = ding_token_grant_type
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.corp_id is not None:
result['corpId'] = self.corp_id
if self.operate_time is not None:
result['operateTime'] = self.operate_time
if self.remark is not None:
result['remark'] = self.remark
if self.status is not None:
result['status'] = self.status
if self.third_part_apply_id is not None:
result['thirdPartApplyId'] = self.third_part_apply_id
if self.user_id is not None:
result['userId'] = self.user_id
if self.ding_suite_key is not None:
result['dingSuiteKey'] = self.ding_suite_key
if self.ding_corp_id is not None:
result['dingCorpId'] = self.ding_corp_id
if self.ding_token_grant_type is not None:
result['dingTokenGrantType'] = self.ding_token_grant_type
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('corpId') is not None:
self.corp_id = m.get('corpId')
if m.get('operateTime') is not None:
self.operate_time = m.get('operateTime')
if m.get('remark') is not None:
self.remark = m.get('remark')
if m.get('status') is not None:
self.status = m.get('status')
if m.get('thirdPartApplyId') is not None:
self.third_part_apply_id = m.get('thirdPartApplyId')
if m.get('userId') is not None:
self.user_id = m.get('userId')
if m.get('dingSuiteKey') is not None:
self.ding_suite_key = m.get('dingSuiteKey')
if m.get('dingCorpId') is not None:
self.ding_corp_id = m.get('dingCorpId')
if m.get('dingTokenGrantType') is not None:
self.ding_token_grant_type = m.get('dingTokenGrantType')
return self
class ApproveCityCarApplyResponseBody(TeaModel):
def __init__(
self,
approve_result: bool = None,
):
        # Approval result
self.approve_result = approve_result
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.approve_result is not None:
result['approveResult'] = self.approve_result
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('approveResult') is not None:
self.approve_result = m.get('approveResult')
return self
class ApproveCityCarApplyResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: ApproveCityCarApplyResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = ApproveCityCarApplyResponseBody()
self.body = temp_model.from_map(m['body'])
return self
class QueryCityCarApplyHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.common_headers is not None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commonHeaders') is not None:
self.common_headers = m.get('commonHeaders')
if m.get('x-acs-dingtalk-access-token') is not None:
self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
return self
class QueryCityCarApplyRequest(TeaModel):
def __init__(
self,
corp_id: str = None,
created_end_at: str = None,
created_start_at: str = None,
page_number: int = None,
page_size: int = None,
third_part_apply_id: str = None,
user_id: str = None,
):
        # Third-party enterprise ID
        self.corp_id = corp_id
        # Approval forms created strictly before this time
        self.created_end_at = created_end_at
        # Approval forms created at or after this time
        self.created_start_at = created_start_at
        # Page number, must be >= 1, defaults to 1
        self.page_number = page_number
        # Page size, must be >= 1, defaults to 20
        self.page_size = page_size
        # Third-party approval form ID
        self.third_part_apply_id = third_part_apply_id
        # Third-party employee ID
self.user_id = user_id
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.corp_id is not None:
result['corpId'] = self.corp_id
if self.created_end_at is not None:
result['createdEndAt'] = self.created_end_at
if self.created_start_at is not None:
result['createdStartAt'] = self.created_start_at
if self.page_number is not None:
result['pageNumber'] = self.page_number
if self.page_size is not None:
result['pageSize'] = self.page_size
if self.third_part_apply_id is not None:
result['thirdPartApplyId'] = self.third_part_apply_id
if self.user_id is not None:
result['userId'] = self.user_id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('corpId') is not None:
self.corp_id = m.get('corpId')
if m.get('createdEndAt') is not None:
self.created_end_at = m.get('createdEndAt')
if m.get('createdStartAt') is not None:
self.created_start_at = m.get('createdStartAt')
if m.get('pageNumber') is not None:
self.page_number = m.get('pageNumber')
if m.get('pageSize') is not None:
self.page_size = m.get('pageSize')
if m.get('thirdPartApplyId') is not None:
self.third_part_apply_id = m.get('thirdPartApplyId')
if m.get('userId') is not None:
self.user_id = m.get('userId')
return self
class QueryCityCarApplyResponseBodyApplyListApproverList(TeaModel):
def __init__(
self,
note: str = None,
operate_time: str = None,
order: int = None,
status: int = None,
status_desc: str = None,
user_id: str = None,
user_name: str = None,
):
        # Approval remark
        self.note = note
        # Approval time
        self.operate_time = operate_time
        # Approver sort order
        self.order = order
        # Approval status: 0 - in review, 1 - approved, 2 - rejected
        self.status = status
        # Approval status description
        self.status_desc = status_desc
        # Approving employee ID
        self.user_id = user_id
        # Approving employee name
self.user_name = user_name
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.note is not None:
result['note'] = self.note
if self.operate_time is not None:
result['operateTime'] = self.operate_time
if self.order is not None:
result['order'] = self.order
if self.status is not None:
result['status'] = self.status
if self.status_desc is not None:
result['statusDesc'] = self.status_desc
if self.user_id is not None:
result['userId'] = self.user_id
if self.user_name is not None:
result['userName'] = self.user_name
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('note') is not None:
self.note = m.get('note')
if m.get('operateTime') is not None:
self.operate_time = m.get('operateTime')
if m.get('order') is not None:
self.order = m.get('order')
if m.get('status') is not None:
self.status = m.get('status')
if m.get('statusDesc') is not None:
self.status_desc = m.get('statusDesc')
if m.get('userId') is not None:
self.user_id = m.get('userId')
if m.get('userName') is not None:
self.user_name = m.get('userName')
return self
class QueryCityCarApplyResponseBodyApplyListItineraryList(TeaModel):
def __init__(
self,
arr_city: str = None,
arr_city_code: str = None,
arr_date: str = None,
cost_center_id: int = None,
cost_center_name: str = None,
dep_city: str = None,
dep_city_code: str = None,
dep_date: str = None,
invoice_id: int = None,
invoice_name: str = None,
itinerary_id: str = None,
project_code: str = None,
project_title: str = None,
traffic_type: int = None,
):
        # Destination city
        self.arr_city = arr_city
        # Destination city three-letter code
        self.arr_city_code = arr_city_code
        # Arrival time at the destination city
        self.arr_date = arr_date
        # Internal BTrip cost center ID
        self.cost_center_id = cost_center_id
        # Cost center name
        self.cost_center_name = cost_center_name
        # Departure city
        self.dep_city = dep_city
        # Departure city three-letter code
        self.dep_city_code = dep_city_code
        # Departure time
        self.dep_date = dep_date
        # Internal BTrip invoice title ID
        self.invoice_id = invoice_id
        # Invoice title name
        self.invoice_name = invoice_name
        # Internal BTrip itinerary ID
        self.itinerary_id = itinerary_id
        # Project code
        self.project_code = project_code
        # Project name
        self.project_title = project_title
        # Traffic type: 4 - intra-city transport
self.traffic_type = traffic_type
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.arr_city is not None:
result['arrCity'] = self.arr_city
if self.arr_city_code is not None:
result['arrCityCode'] = self.arr_city_code
if self.arr_date is not None:
result['arrDate'] = self.arr_date
if self.cost_center_id is not None:
result['costCenterId'] = self.cost_center_id
if self.cost_center_name is not None:
result['costCenterName'] = self.cost_center_name
if self.dep_city is not None:
result['depCity'] = self.dep_city
if self.dep_city_code is not None:
result['depCityCode'] = self.dep_city_code
if self.dep_date is not None:
result['depDate'] = self.dep_date
if self.invoice_id is not None:
result['invoiceId'] = self.invoice_id
if self.invoice_name is not None:
result['invoiceName'] = self.invoice_name
if self.itinerary_id is not None:
result['itineraryId'] = self.itinerary_id
if self.project_code is not None:
result['projectCode'] = self.project_code
if self.project_title is not None:
result['projectTitle'] = self.project_title
if self.traffic_type is not None:
result['trafficType'] = self.traffic_type
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('arrCity') is not None:
self.arr_city = m.get('arrCity')
if m.get('arrCityCode') is not None:
self.arr_city_code = m.get('arrCityCode')
if m.get('arrDate') is not None:
self.arr_date = m.get('arrDate')
if m.get('costCenterId') is not None:
self.cost_center_id = m.get('costCenterId')
if m.get('costCenterName') is not None:
self.cost_center_name = m.get('costCenterName')
if m.get('depCity') is not None:
self.dep_city = m.get('depCity')
if m.get('depCityCode') is not None:
self.dep_city_code = m.get('depCityCode')
if m.get('depDate') is not None:
self.dep_date = m.get('depDate')
if m.get('invoiceId') is not None:
self.invoice_id = m.get('invoiceId')
if m.get('invoiceName') is not None:
self.invoice_name = m.get('invoiceName')
if m.get('itineraryId') is not None:
self.itinerary_id = m.get('itineraryId')
if m.get('projectCode') is not None:
self.project_code = m.get('projectCode')
if m.get('projectTitle') is not None:
self.project_title = m.get('projectTitle')
if m.get('trafficType') is not None:
self.traffic_type = m.get('trafficType')
return self
class QueryCityCarApplyResponseBodyApplyList(TeaModel):
def __init__(
self,
approver_list: List[QueryCityCarApplyResponseBodyApplyListApproverList] = None,
depart_id: str = None,
depart_name: str = None,
gmt_create: str = None,
gmt_modified: str = None,
itinerary_list: List[QueryCityCarApplyResponseBodyApplyListItineraryList] = None,
status: int = None,
status_desc: str = None,
third_part_apply_id: str = None,
trip_cause: str = None,
trip_title: str = None,
user_id: str = None,
user_name: str = None,
):
        # List of approvers
        self.approver_list = approver_list
        # ID of the department the employee belongs to
        self.depart_id = depart_id
        # Name of the department the employee belongs to
        self.depart_name = depart_name
        # Creation time
        self.gmt_create = gmt_create
        # Last modified time
        self.gmt_modified = gmt_modified
        # Itineraries associated with the approval form
        self.itinerary_list = itinerary_list
        # Approval form status: 0 - applied, 1 - approved, 2 - rejected
        self.status = status
        # Approval form status description
        self.status_desc = status_desc
        # Third-party approval form ID
        self.third_part_apply_id = third_part_apply_id
        # Application reason
        self.trip_cause = trip_cause
        # Approval form title
        self.trip_title = trip_title
        # ID of the employee who initiated the approval
        self.user_id = user_id
        # Name of the employee who initiated the approval
self.user_name = user_name
def validate(self):
if self.approver_list:
for k in self.approver_list:
if k:
k.validate()
if self.itinerary_list:
for k in self.itinerary_list:
if k:
k.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
result['approverList'] = []
if self.approver_list is not None:
for k in self.approver_list:
result['approverList'].append(k.to_map() if k else None)
if self.depart_id is not None:
result['departId'] = self.depart_id
if self.depart_name is not None:
result['departName'] = self.depart_name
if self.gmt_create is not None:
result['gmtCreate'] = self.gmt_create
if self.gmt_modified is not None:
result['gmtModified'] = self.gmt_modified
result['itineraryList'] = []
if self.itinerary_list is not None:
for k in self.itinerary_list:
result['itineraryList'].append(k.to_map() if k else None)
if self.status is not None:
result['status'] = self.status
if self.status_desc is not None:
result['statusDesc'] = self.status_desc
if self.third_part_apply_id is not None:
result['thirdPartApplyId'] = self.third_part_apply_id
if self.trip_cause is not None:
result['tripCause'] = self.trip_cause
if self.trip_title is not None:
result['tripTitle'] = self.trip_title
if self.user_id is not None:
result['userId'] = self.user_id
if self.user_name is not None:
result['userName'] = self.user_name
return result
def from_map(self, m: dict = None):
m = m or dict()
self.approver_list = []
if m.get('approverList') is not None:
for k in m.get('approverList'):
temp_model = QueryCityCarApplyResponseBodyApplyListApproverList()
self.approver_list.append(temp_model.from_map(k))
if m.get('departId') is not None:
self.depart_id = m.get('departId')
if m.get('departName') is not None:
self.depart_name = m.get('departName')
if m.get('gmtCreate') is not None:
self.gmt_create = m.get('gmtCreate')
if m.get('gmtModified') is not None:
self.gmt_modified = m.get('gmtModified')
self.itinerary_list = []
if m.get('itineraryList') is not None:
for k in m.get('itineraryList'):
temp_model = QueryCityCarApplyResponseBodyApplyListItineraryList()
self.itinerary_list.append(temp_model.from_map(k))
if m.get('status') is not None:
self.status = m.get('status')
if m.get('statusDesc') is not None:
self.status_desc = m.get('statusDesc')
if m.get('thirdPartApplyId') is not None:
self.third_part_apply_id = m.get('thirdPartApplyId')
if m.get('tripCause') is not None:
self.trip_cause = m.get('tripCause')
if m.get('tripTitle') is not None:
self.trip_title = m.get('tripTitle')
if m.get('userId') is not None:
self.user_id = m.get('userId')
if m.get('userName') is not None:
self.user_name = m.get('userName')
return self
class QueryCityCarApplyResponseBody(TeaModel):
def __init__(
self,
apply_list: List[QueryCityCarApplyResponseBodyApplyList] = None,
total: int = None,
):
        # List of approval forms
        self.apply_list = apply_list
        # Total count
self.total = total
def validate(self):
if self.apply_list:
for k in self.apply_list:
if k:
k.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
result['applyList'] = []
if self.apply_list is not None:
for k in self.apply_list:
result['applyList'].append(k.to_map() if k else None)
if self.total is not None:
result['total'] = self.total
return result
def from_map(self, m: dict = None):
m = m or dict()
self.apply_list = []
if m.get('applyList') is not None:
for k in m.get('applyList'):
temp_model = QueryCityCarApplyResponseBodyApplyList()
self.apply_list.append(temp_model.from_map(k))
if m.get('total') is not None:
self.total = m.get('total')
return self
class QueryCityCarApplyResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: QueryCityCarApplyResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = QueryCityCarApplyResponseBody()
self.body = temp_model.from_map(m['body'])
return self
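# --- Round-trip sketch (editor's addition; the field values are hypothetical) ---
# A minimal sketch of the to_map()/from_map() convention shared by these
# generated TeaModel classes (snake_case attributes <-> camelCase wire keys),
# assuming the Tea runtime dependency is installed:
if __name__ == '__main__':
    req = AddCityCarApplyRequest(cause='client visit', city='Hangzhou', status=0)
    wire = req.to_map()                              # camelCase dict for the wire
    clone = AddCityCarApplyRequest().from_map(wire)  # rebuild a model from it
    assert clone.city == req.city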
| 33.636943
| 142
| 0.581424
| 32,704
| 0.994345
| 0
| 0
| 0
| 0
| 0
| 0
| 5,500
| 0.167224
|
b7800aafff5c8094f397d2e3b7573a73689dfdba
| 560
|
py
|
Python
|
src/while_exit.py
|
Alex9808/py101
|
18c585c1433e8ec6f5e4962e556a781e0c3c3cd5
|
[
"MIT"
] | 25
|
2018-08-14T22:13:13.000Z
|
2021-07-23T04:14:06.000Z
|
src/while_exit.py
|
Alex9808/py101
|
18c585c1433e8ec6f5e4962e556a781e0c3c3cd5
|
[
"MIT"
] | 1
|
2021-05-21T23:46:42.000Z
|
2021-05-21T23:46:42.000Z
|
src/while_exit.py
|
Alex9808/py101
|
18c585c1433e8ec6f5e4962e556a781e0c3c3cd5
|
[
"MIT"
] | 34
|
2018-07-30T20:48:17.000Z
|
2022-02-04T19:01:27.000Z
|
#!/usr/bin/env python
''' Este programa se repetirá 3 veces o hasta que se ingrese
la palabra "despedida" y desplegará sólo el número de intentos
fallidos hasta que cualquiera de los eventos ocurentradara. Al
ingresar la palabra "termina" el programa se detendrá.'''
entrada = ""
suma = 0
while suma < 3:
entrada = input("Clave:")
if entrada == "despedida":
break
elif entrada == "termina":
exit()
suma = suma + 1
print("Intento %d. \n " % suma)
print("Tuviste %d intentos fallidos." % suma)
| 29.473684
| 67
| 0.630357
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 364
| 0.644248
|
b78064bab0e579a79f199c581d29dfd0023d9a67
| 1,337
|
py
|
Python
|
src/kong/db.py
|
paulgessinger/kong
|
b1e2ec0c18f432fa2419b2b0dc95ee1e391cf7a5
|
[
"MIT"
] | 3
|
2020-02-14T09:23:56.000Z
|
2020-08-24T16:19:00.000Z
|
src/kong/db.py
|
paulgessinger/kong
|
b1e2ec0c18f432fa2419b2b0dc95ee1e391cf7a5
|
[
"MIT"
] | 159
|
2019-09-16T19:17:16.000Z
|
2022-03-29T19:12:37.000Z
|
src/kong/db.py
|
paulgessinger/kong
|
b1e2ec0c18f432fa2419b2b0dc95ee1e391cf7a5
|
[
"MIT"
] | null | null | null |
"""
Singleton database instance
"""
from typing import TYPE_CHECKING, Any, List, ContextManager, Tuple, Iterable
if not TYPE_CHECKING:
from playhouse.sqlite_ext import SqliteExtDatabase, AutoIncrementField
else: # pragma: no cover
class SqliteExtDatabase:
"""
Mypy stub for the not type-hinted SqliteExtDatabase class
"""
def __init__(self, *args: Any) -> None:
"""
Type stub
:param args:
"""
...
def init(self, *args: Any) -> None:
"""
Type stub
:param args:
:return:
"""
...
def connect(self) -> None:
"""
Type stub
:return:
"""
...
def create_tables(self, tables: List[Any]) -> None:
"""
Type stub
:param tables:
:return:
"""
...
def atomic(self) -> ContextManager[None]:
"""
Type stub
:return:
"""
...
def execute_sql(self, query: str, params: Tuple[Any]) -> Iterable[Tuple[Any]]:
            """
            Type stub
            :param query:
            :param params:
            :return:
            """
            ...
class AutoIncrementField:
"""
Type stub
"""
...
database = SqliteExtDatabase(None)
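# Editor's note (hedged): the module-level `database` above is deliberately
# created uninitialized. Call sites elsewhere in kong are assumed to look like:
#
#     from kong.db import database
#     database.init("kong.sqlite")
#     database.connect()
#     database.create_tables([...])  # peewee model classes bound to it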
| 20.890625
| 86
| 0.449514
| 1,051
| 0.786088
| 0
| 0
| 0
| 0
| 0
| 0
| 533
| 0.398654
|
b7828d9ca80482a07e76866d18c12b99668834d3
| 2,582
|
py
|
Python
|
selenium_wrapper/factory.py
|
dfeeley/selenium-wrapper
|
f69576802e7847d120a051a2902da5750b9c6203
|
[
"MIT"
] | 1
|
2022-01-14T20:21:19.000Z
|
2022-01-14T20:21:19.000Z
|
selenium_wrapper/factory.py
|
dfeeley/selenium-wrapper
|
f69576802e7847d120a051a2902da5750b9c6203
|
[
"MIT"
] | null | null | null |
selenium_wrapper/factory.py
|
dfeeley/selenium-wrapper
|
f69576802e7847d120a051a2902da5750b9c6203
|
[
"MIT"
] | null | null | null |
import os
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.chrome.options import Options
from .driver import Driver
def remote(url, browser="chrome", **kwargs):
if browser != "chrome":
raise ValueError(f"Browser {browser} not currently supported")
options = _options(**kwargs)
prefs = {
"profile.default_content_settings.popups": 0,
"profile.default_content_setting_values.automatic_downloads": 1,
"download.default_directory": "/home/seluser/Downloads",
"download.prompt_for_download": False,
"download.directory_upgrade": True,
"safebrowsing.enabled": True,
}
options.add_experimental_option("prefs", prefs)
capabilities = getattr(DesiredCapabilities, browser.upper())
driver = webdriver.Remote(url, capabilities, options=options)
return Driver(driver, **kwargs)
def local(browser="chrome", **kwargs):
if browser == "chrome":
exec = "chromedriver"
else:
raise ValueError(f"Browser {browser} not currently supported")
options = _options(**kwargs)
    driver = webdriver.Chrome(executable, options=options)
if kwargs.get("headless", False):
download_dir = kwargs.get("download-dir", os.path.expanduser("~/Downloads"))
params = {"behavior": "allow", "downloadPath": download_dir}
driver.execute_cdp_cmd("Page.setDownloadBehavior", params)
return Driver(driver, **kwargs)
def _options(**kwargs):
options = Options()
options.headless = kwargs.get("headless", False)
default_user_agent = (
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/96.0.4664.110 Safari/537.36"
)
user_agent = kwargs.get("user_agent", default_user_agent)
data_dir = kwargs.get(
"user_data_dir", os.path.expanduser("~/.config/google-chrome-auto")
)
window_size = kwargs.get("window_size", "2560,1440")
profile = kwargs.get("profile", "Default")
options.add_argument("disable-gpu")
options.add_argument(f"window-size={window_size}")
options.add_argument(f"user-data-dir={data_dir}")
options.add_argument(f"user-agent={user_agent}")
options.add_argument(f"profile-directory={profile}")
options.add_argument("remote-debugging-port=9222")
options.add_argument("remote-debugging-address=0.0.0.0")
options.add_experimental_option("excludeSwitches", ["enable-automation"])
options.add_experimental_option("useAutomationExtension", False)
return options
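# Editor's addition: hedged usage sketch. It assumes chromedriver is on PATH
# and that Driver proxies the underlying selenium webdriver methods:
#
#     driver = local(headless=True, window_size="1920,1080")
#     driver.get("https://example.org")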
| 39.723077
| 84
| 0.703718
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 933
| 0.361348
|
b782afffa41a32debaad33cd5a697c066dc182f8
| 14,153
|
py
|
Python
|
networking_cisco/tests/unit/saf/agent/vdp/test_lldpad.py
|
arvindsharma16/networking-cisco
|
2f38c25742296f07ab80479c152504353da0a1d4
|
[
"Apache-2.0"
] | null | null | null |
networking_cisco/tests/unit/saf/agent/vdp/test_lldpad.py
|
arvindsharma16/networking-cisco
|
2f38c25742296f07ab80479c152504353da0a1d4
|
[
"Apache-2.0"
] | null | null | null |
networking_cisco/tests/unit/saf/agent/vdp/test_lldpad.py
|
arvindsharma16/networking-cisco
|
2f38c25742296f07ab80479c152504353da0a1d4
|
[
"Apache-2.0"
] | 1
|
2020-08-31T22:53:38.000Z
|
2020-08-31T22:53:38.000Z
|
# Copyright 2015 Cisco Systems.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import mock
from neutron.tests import base
from networking_cisco.apps.saf.agent.vdp import lldpad
from networking_cisco.apps.saf.agent.vdp import lldpad_constants as vdp_con
from networking_cisco.apps.saf.common import dfa_sys_lib as utils
try:
OrderedDict = collections.OrderedDict
except AttributeError:
import ordereddict
OrderedDict = ordereddict.OrderedDict
class LldpadDriverTest(base.BaseTestCase):
"""A test suite to exercise the Lldpad Driver. """
def setUp(self):
'''Setup for the test scripts '''
super(LldpadDriverTest, self).setUp()
self.root_helper = 'sudo'
self.port_name = "loc_veth"
self.uplink = "eth2"
self.port_str = "loc_veth_eth2"
self.execute = mock.patch.object(
utils, "execute", spec=utils.execute).start()
self.fill_default_vsi_params()
self._test_lldp_init()
def _test_lldp_init(self):
'''Tests the initialization '''
with mock.patch('networking_cisco.apps.saf.common.'
'utils.PeriodicTask') as period_fn:
period_obj = period_fn.return_value
parent = mock.MagicMock()
parent.attach_mock(period_obj.run, 'run')
self.lldpad = lldpad.LldpadDriver(self.port_str, self.uplink,
self.root_helper)
expected_calls = [mock.call.run()]
parent.assert_has_calls(expected_calls)
def test_init(self):
        '''Placeholder for init '''
pass
def _test_enable_lldp(self, is_ncb=True):
        '''Tests the routine that enables LLDP cfg '''
self.lldpad.enable_lldp()
if is_ncb is True:
self.execute.assert_called_with(
["lldptool", "-L", "-i", self.port_str, "-g", "ncb",
"adminStatus=rxtx"], root_helper=self.root_helper)
def test_enable_lldp(self):
        '''Tests the routine that enables LLDP cfg '''
self._test_enable_lldp(is_ncb=True)
def test_enable_evb(self):
'''Top level routine for EVB cfg test '''
parent = mock.MagicMock()
parent.attach_mock(self.execute, 'execute')
self.lldpad.enable_evb()
expected_calls = [mock.call.execute(["lldptool", "-T", "-i",
self.port_str, "-g", "ncb", "-V",
"evb", "enableTx=yes"],
root_helper=self.root_helper),
mock.call.execute(["lldptool", "-T", "-i",
self.port_str, "-g", "ncb", "-V",
"evb", "-c", "evbgpid=yes"],
root_helper=self.root_helper)]
parent.assert_has_calls(expected_calls)
def fill_default_vsi_params(self):
'''Mock VSI Params '''
self.uuid = "00000000-1111-2222-3333-444455556666"
self.vsiid = self.uuid
self.mgrid = 0
self.typeid = 0
self.typeidver = 0
self.gid = 20000
self.mac = "00:11:22:33:44:55"
self.vlan = 0
self.mgrid_str = "mgrid2=0"
self.typeid_str = "typeid=0"
self.typeidver_str = "typeidver=0"
self.vsiid_str = "uuid=00000000-1111-2222-3333-444455556666"
self.filter_str = "filter=0-00:11:22:33:44:55-20000"
self.mode_str = "mode=assoc"
def _test_vnic_assert(self, test_vlan, vlan_ret, filter_str, new_nwk,
parent, is_rest=0):
        '''Assertion helpers called by the other test functions '''
if new_nwk:
if is_rest == 1:
expected_calls = [mock.call.execute(["vdptool", "-t", "-i",
self.port_str, "-R",
"-V", "assoc", "-c",
self.mode_str, "-c",
self.mgrid_str, "-c",
self.typeid_str, "-c",
self.typeidver_str, "-c",
self.vsiid_str],
root_helper=(
self.root_helper))]
else:
expected_calls = [mock.call.execute(["vdptool", "-T", "-i",
self.port_str, "-W",
"-V", "assoc", "-c",
self.mode_str, "-c",
self.mgrid_str, "-c",
self.typeid_str, "-c",
self.typeidver_str, "-c",
self.vsiid_str, "-c",
"hints=none", "-c",
filter_str],
root_helper=(
self.root_helper))]
self.assertEqual(vlan_ret, test_vlan)
self.assertEqual(test_vlan,
self.lldpad.vdp_vif_map[self.uuid].
get('vdp_vlan'))
else:
expected_calls = [mock.call.execute(["vdptool", "-T", "-i",
self.port_str, "-V", "assoc",
"-c", self.mode_str,
"-c", self.mgrid_str, "-c",
self.typeid_str, "-c",
self.typeidver_str, "-c",
self.vsiid_str, "-c",
"hints=none", "-c",
filter_str],
root_helper=self.root_helper)]
parent.assert_has_calls(expected_calls)
self.assertEqual(self.mgrid,
self.lldpad.vdp_vif_map[self.uuid].get('mgrid'))
self.assertEqual(self.typeid,
self.lldpad.vdp_vif_map[self.uuid].get('typeid'))
self.assertEqual(self.typeidver,
self.lldpad.vdp_vif_map[self.uuid].get('typeid_ver'))
self.assertEqual(self.vsiid,
self.lldpad.vdp_vif_map[self.uuid].get('vsiid'))
self.assertEqual(vdp_con.VDP_FILTER_GIDMACVID,
self.lldpad.vdp_vif_map[self.uuid].get('filter_frmt'))
self.assertEqual(self.gid,
self.lldpad.vdp_vif_map[self.uuid].get('gid'))
self.assertEqual(self.mac,
self.lldpad.vdp_vif_map[self.uuid].get('mac'))
def test_vdp_port_up_new_nwk(self):
'''Tests the case when a VM comes for a new network '''
expected_vlan = 3003
parent = mock.MagicMock()
parent.attach_mock(self.execute, 'execute')
self.execute.return_value = ("Response from VDP\n\tmode = assoc\n\t"
"mgrid2 = 0\n\ttypeid = 0\n\t"
"typeidver = 0\n\tuuid = 00000000-1111-"
"2222-3333-444455556666\n\t"
"filter = 3003-00:12:22:33:44:55-0\n")
self.lldpad.send_vdp_query_msg = mock.Mock()
vlan_ret = self.lldpad.send_vdp_vnic_up(port_uuid=self.uuid,
vsiid=self.vsiid,
mgrid=self.mgrid,
typeid=self.typeid,
typeid_ver=self.typeidver,
gid=self.gid,
mac=self.mac,
new_network=True)
self._test_vnic_assert(expected_vlan, vlan_ret, self.filter_str, True,
parent)
def test_vdp_port_up_new_nwk_after_restart(self):
'''Tests the case when a VM comes for a new network after restart '''
expected_vlan = 3003
parent = mock.MagicMock()
parent.attach_mock(self.execute, 'execute')
self.execute.return_value = ("M000080c4C3010000001509LLDPLeth5"
"020000000304mode0005assoc06mgrid2"
"0001006typeid0001009typeidver0001004"
"uuid002400000000-1111-2222-3333-44445555"
"6666\nR00C3010000001509LLDPLeth500000003"
"010504mode0005assoc06mgrid20001006typeid"
"0001009typeidver0001004uuid00000000-1111"
"-2222-3333-44445555666605hints0001006"
"filter001c3003-00:12:22:33:44:55-2000003"
"oui006105cisco07vm_name000bFW_SRVC_RTR07"
"vm_uuid002467f338a6-0925-42aa-b2df-e8114"
"e9fd0da09ipv4_addr00020l\n")
vlan_ret = self.lldpad.send_vdp_vnic_up(port_uuid=self.uuid,
vsiid=self.vsiid,
mgrid=self.mgrid,
typeid=self.typeid,
typeid_ver=self.typeidver,
gid=self.gid,
mac=self.mac,
new_network=True)
self._test_vnic_assert(expected_vlan, vlan_ret, self.filter_str, True,
parent, is_rest=1)
def test_vdp_port_up_new_nwk_invalid_vlan(self):
'''
        Tests the case when an invalid VLAN is returned for a VM that comes
up for a new network
'''
expected_vlan = -1
parent = mock.MagicMock()
parent.attach_mock(self.execute, 'execute')
self.execute.return_value = "\nReturn from vsievt -11"
vlan_ret = self.lldpad.send_vdp_vnic_up(port_uuid=self.uuid,
vsiid=self.vsiid,
mgrid=self.mgrid,
typeid=self.typeid,
typeid_ver=self.typeidver,
gid=self.gid,
mac=self.mac,
new_network=True)
self._test_vnic_assert(expected_vlan, vlan_ret, self.filter_str, True,
parent)
def test_vdp_port_up_old_nwk(self):
'''Tests the case when a VM comes for an existing network '''
parent = mock.MagicMock()
parent.attach_mock(self.execute, 'execute')
self.execute.return_value = ("Response from VDP\n\tmode = assoc\n\t"
"mgrid2 = 0\n\ttypeid = 0\n\t"
"typeidver = 0\n\tuuid = 00000000-1111-"
"2222-3333-444455556666\n\t"
"filter = 3003-00:12:22:33:44:55-0\n")
filter_str = "filter=0-00:11:22:33:44:55-20000"
stored_vlan = 3003
self.lldpad.send_vdp_vnic_up(port_uuid=self.uuid, vsiid=self.vsiid,
mgrid=self.mgrid,
typeid=self.typeid,
typeid_ver=self.typeidver,
gid=self.gid,
mac=self.mac, vlan=0,
new_network=False)
self._test_vnic_assert(stored_vlan,
self.lldpad.vdp_vif_map[self.uuid].
get('vdp_vlan'), filter_str, False, parent)
def test_vdp_port_down(self):
'''Tests the case when a VM goes down '''
parent = mock.MagicMock()
parent.attach_mock(self.execute, 'execute')
filter_str = "filter=100-00:11:22:33:44:55-20000"
stored_vlan = 100
mode_str = "mode=deassoc"
self.lldpad.send_vdp_vnic_down(port_uuid=self.uuid, vsiid=self.vsiid,
mgrid=self.mgrid,
typeid=self.typeid,
typeid_ver=self.typeidver,
gid=self.gid,
mac=self.mac, vlan=stored_vlan)
self.execute.assert_called_with(
["vdptool", "-T", "-i", self.port_str,
"-V", "deassoc", "-c", mode_str, "-c", self.mgrid_str,
"-c", self.typeid_str, "-c", self.typeidver_str,
"-c", self.vsiid_str, "-c", "hints=none",
"-c", filter_str], root_helper=self.root_helper)
self.assertNotIn(self.uuid, self.lldpad.vdp_vif_map)
| 50.366548
| 79
| 0.466191
| 13,122
| 0.927153
| 0
| 0
| 0
| 0
| 0
| 0
| 3,057
| 0.215997
|
b7836e4485ed6acdd7e26cba6a9e54fccf62bd20
| 15,656
|
py
|
Python
|
py/prospect/viewer/cds.py
|
segasai/prospect
|
0237fad5b04f5a982ba4178320e1ca5bb5fa408f
|
[
"BSD-3-Clause"
] | 4
|
2018-10-22T21:12:43.000Z
|
2019-07-11T20:08:12.000Z
|
py/prospect/viewer/cds.py
|
segasai/prospect
|
0237fad5b04f5a982ba4178320e1ca5bb5fa408f
|
[
"BSD-3-Clause"
] | 54
|
2019-12-07T08:24:53.000Z
|
2022-02-25T10:17:59.000Z
|
py/prospect/viewer/cds.py
|
segasai/prospect
|
0237fad5b04f5a982ba4178320e1ca5bb5fa408f
|
[
"BSD-3-Clause"
] | 5
|
2019-12-20T21:47:40.000Z
|
2021-01-20T23:54:43.000Z
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
"""
===================
prospect.viewer.cds
===================
Class containing all bokeh's ColumnDataSource objects needed in viewer.py
"""
import numpy as np
from pkg_resources import resource_filename
import bokeh.plotting as bk
from bokeh.models import ColumnDataSource
_specutils_imported = True
try:
from specutils import Spectrum1D, SpectrumList
except ImportError:
_specutils_imported = False
from ..coaddcam import coaddcam_prospect
from ..utilities import supported_desitarget_masks, vi_file_fields
def _airtovac(w):
"""Convert air wavelengths to vacuum wavelengths. Don't convert less than 2000 Å.
Parameters
----------
w : :class:`float`
Wavelength [Å] of the line in air.
Returns
-------
:class:`float`
Wavelength [Å] of the line in vacuum.
"""
if w < 2000.0:
        return w
vac = w
for iter in range(2):
sigma2 = (1.0e4/vac)*(1.0e4/vac)
fact = 1.0 + 5.792105e-2/(238.0185 - sigma2) + 1.67917e-3/(57.362 - sigma2)
vac = w*fact
return vac
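# Editor's note: a quick check of the conversion above, e.g.
# _airtovac(6562.79) ~ 6564.61 (H-alpha, air -> vacuum wavelength in Angstrom).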
class ViewerCDS(object):
"""
Encapsulates Bokeh ColumnDataSource objects to be passed to js callback functions.
"""
def __init__(self):
self.cds_spectra = None
self.cds_median_spectra = None
self.cds_coaddcam_spec = None
self.cds_model = None
self.cds_model_2ndfit = None
self.cds_othermodel = None
self.cds_metadata = None
def load_spectra(self, spectra, with_noise=True):
""" Creates column data source for observed spectra """
self.cds_spectra = list()
is_desispec = False
if _specutils_imported and isinstance(spectra, SpectrumList):
s = spectra
bands = spectra.bands
elif _specutils_imported and isinstance(spectra, Spectrum1D):
s = [spectra]
bands = ['coadd']
else : # Assume desispec Spectra obj
is_desispec = True
s = spectra
bands = spectra.bands
for j, band in enumerate(bands):
input_wave = s.wave[band] if is_desispec else s[j].spectral_axis.value
input_nspec = spectra.num_spectra() if is_desispec else s[j].flux.shape[0]
cdsdata = dict(
origwave = input_wave.copy(),
plotwave = input_wave.copy(),
)
for i in range(input_nspec):
key = 'origflux'+str(i)
input_flux = spectra.flux[band][i] if is_desispec else s[j].flux.value[i, :]
cdsdata[key] = input_flux.copy()
if with_noise :
key = 'orignoise'+str(i)
input_ivar = spectra.ivar[band][i] if is_desispec else s[j].uncertainty.array[i, :]
noise = np.zeros(len(input_ivar))
w, = np.where( (input_ivar > 0) )
noise[w] = 1/np.sqrt(input_ivar[w])
cdsdata[key] = noise
cdsdata['plotflux'] = cdsdata['origflux0']
if with_noise :
cdsdata['plotnoise'] = cdsdata['orignoise0']
self.cds_spectra.append( ColumnDataSource(cdsdata, name=band) )
def compute_median_spectra(self, spectra):
""" Stores the median value for each spectrum into CDS.
Simple concatenation of all values from different bands.
"""
cdsdata = dict(median=[])
for i in range(spectra.num_spectra()):
flux_array = np.concatenate( tuple([spectra.flux[band][i] for band in spectra.bands]) )
w, = np.where( ~np.isnan(flux_array) )
if len(w)==0 :
cdsdata['median'].append(1)
else :
cdsdata['median'].append(np.median(flux_array[w]))
self.cds_median_spectra = ColumnDataSource(cdsdata)
def init_coaddcam_spec(self, spectra, with_noise=True):
""" Creates column data source for camera-coadded observed spectra
Do NOT store all coadded spectra in CDS obj, to reduce size of html files
Except for the first spectrum, coaddition is done later in javascript
"""
coadd_wave, coadd_flux, coadd_ivar = coaddcam_prospect(spectra)
cds_coaddcam_data = dict(
origwave = coadd_wave.copy(),
plotwave = coadd_wave.copy(),
plotflux = coadd_flux[0,:].copy(),
plotnoise = np.ones(len(coadd_wave))
)
if with_noise :
w, = np.where( (coadd_ivar[0,:] > 0) )
cds_coaddcam_data['plotnoise'][w] = 1/np.sqrt(coadd_ivar[0,:][w])
self.cds_coaddcam_spec = ColumnDataSource(cds_coaddcam_data)
def init_model(self, model, second_fit=False):
""" Creates a CDS for model spectrum """
mwave, mflux = model
cdsdata = dict(
origwave = mwave.copy(),
plotwave = mwave.copy(),
plotflux = np.zeros(len(mwave)),
)
for i in range(len(mflux)):
key = 'origflux'+str(i)
cdsdata[key] = mflux[i]
cdsdata['plotflux'] = cdsdata['origflux0']
if second_fit:
self.cds_model_2ndfit = ColumnDataSource(cdsdata)
else:
self.cds_model = ColumnDataSource(cdsdata)
def init_othermodel(self, zcatalog):
""" Initialize CDS for the 'other model' curve, from the best fit """
self.cds_othermodel = ColumnDataSource({
'plotwave' : self.cds_model.data['plotwave'],
'origwave' : self.cds_model.data['origwave'],
'origflux' : self.cds_model.data['origflux0'],
'plotflux' : self.cds_model.data['origflux0'],
'zref' : zcatalog['Z'][0]+np.zeros(len(self.cds_model.data['origflux0'])) # Track z reference in model
})
def load_metadata(self, spectra, mask_type=None, zcatalog=None, survey='DESI'):
""" Creates column data source for target-related metadata,
from fibermap, zcatalog and VI files
"""
if survey == 'DESI':
nspec = spectra.num_spectra()
# Optional metadata:
fibermap_keys = ['HPXPIXEL', 'MORPHTYPE', 'CAMERA',
'COADD_NUMEXP', 'COADD_EXPTIME',
'COADD_NUMNIGHT', 'COADD_NUMTILE']
# Optional metadata, will check matching FIRST/LAST/NUM keys in fibermap:
special_fm_keys = ['FIBER', 'NIGHT', 'EXPID', 'TILEID']
# Mandatory keys if zcatalog is set:
self.zcat_keys = ['Z', 'SPECTYPE', 'SUBTYPE', 'ZERR', 'ZWARN', 'DELTACHI2']
# Mandatory metadata:
self.phot_bands = ['G','R','Z', 'W1', 'W2']
supported_masks = supported_desitarget_masks
# Galactic extinction coefficients:
# - Wise bands from https://github.com/dstndstn/tractor/blob/master/tractor/sfd.py
# - Other bands from desiutil.dust (updated coefficients Apr 2021,
# matching https://desi.lbl.gov/trac/wiki/ImagingStandardBandpass)
R_extinction = {'W1':0.184, 'W2':0.113, 'W3':0.0241, 'W4':0.00910,
'G_N':3.258, 'R_N':2.176, 'Z_N':1.199,
'G_S':3.212, 'R_S':2.164, 'Z_S':1.211}
elif survey == 'SDSS':
nspec = spectra.flux.shape[0]
# Mandatory keys if zcatalog is set:
self.zcat_keys = ['Z', 'CLASS', 'SUBCLASS', 'Z_ERR', 'ZWARNING', 'RCHI2DIFF']
# Mandatory metadata:
self.phot_bands = ['u', 'g', 'r', 'i', 'z']
supported_masks = ['PRIMTARGET', 'SECTARGET',
'BOSS_TARGET1', 'BOSS_TARGET2',
'ANCILLARY_TARGET1', 'ANCILLARY_TARGET2',
'EBOSS_TARGET0', 'EBOSS_TARGET1', 'EBOSS_TARGET2']
else:
raise ValueError('Wrong survey')
self.cds_metadata = ColumnDataSource()
#- Generic metadata
if survey == 'DESI':
#- Special case for targetids: No int64 in js !!
self.cds_metadata.add([str(x) for x in spectra.fibermap['TARGETID']], name='TARGETID')
#- "Special" keys: check for FIRST/LAST/NUM
for fm_key in special_fm_keys:
use_first_last_num = False
if all([ (x+fm_key in spectra.fibermap.keys()) for x in ['FIRST_','LAST_','NUM_'] ]):
if np.any(spectra.fibermap['NUM_'+fm_key] > 1) : # if NUM==1, use fm_key only
use_first_last_num = True
self.cds_metadata.add(spectra.fibermap['FIRST_'+fm_key], name='FIRST_'+fm_key)
self.cds_metadata.add(spectra.fibermap['LAST_'+fm_key], name='LAST_'+fm_key)
self.cds_metadata.add(spectra.fibermap['NUM_'+fm_key], name='NUM_'+fm_key)
if (not use_first_last_num) and fm_key in spectra.fibermap.keys():
# Do not load placeholder metadata:
if not (np.all(spectra.fibermap[fm_key]==0) or np.all(spectra.fibermap[fm_key]==-1)):
self.cds_metadata.add(spectra.fibermap[fm_key], name=fm_key)
#- "Normal" keys
for fm_key in fibermap_keys:
# Arbitrary choice:
if fm_key == 'COADD_NUMEXP' and 'NUM_EXPID' in self.cds_metadata.data.keys():
continue
if fm_key == 'COADD_NUMNIGHT' and 'NUM_NIGHT' in self.cds_metadata.data.keys():
continue
if fm_key == 'COADD_NUMTILE' and 'NUM_TILEID' in self.cds_metadata.data.keys():
continue
if fm_key in spectra.fibermap.keys():
if not (np.all(spectra.fibermap[fm_key]==0) or np.all(spectra.fibermap[fm_key]==-1)):
self.cds_metadata.add(spectra.fibermap[fm_key], name=fm_key)
elif survey == 'SDSS':
#- Set 'TARGETID' name to OBJID for convenience
self.cds_metadata.add([str(x.tolist()) for x in spectra.meta['plugmap']['OBJID']], name='TARGETID')
#- Photometry
for i, bandname in enumerate(self.phot_bands) :
if survey == 'SDSS':
mag = spectra.meta['plugmap']['MAG'][:, i]
else :
mag = np.zeros(nspec)
flux = spectra.fibermap['FLUX_'+bandname]
extinction = np.ones(len(flux))
if ('MW_TRANSMISSION_'+bandname) in spectra.fibermap.keys():
extinction = spectra.fibermap['MW_TRANSMISSION_'+bandname]
elif ('EBV' in spectra.fibermap.keys()) and (bandname.upper() in ['W1','W2','W3','W4']):
extinction = 10**(- R_extinction[bandname.upper()] * spectra.fibermap['EBV'])
elif all(x in spectra.fibermap.keys() for x in ['EBV','PHOTSYS']) and (bandname.upper() in ['G','R','Z']):
for photsys in ['N', 'S']:
wphot, = np.where(spectra.fibermap['PHOTSYS'] == photsys)
a_band = R_extinction[bandname.upper()+"_"+photsys] * spectra.fibermap['EBV'][wphot]
extinction[wphot] = 10**(-a_band / 2.5)
w, = np.where( (flux>0) & (extinction>0) )
mag[w] = -2.5*np.log10(flux[w]/extinction[w])+22.5
self.cds_metadata.add(mag, name='mag_'+bandname)
#- Targeting masks
if mask_type is not None:
if survey == 'DESI':
if mask_type not in spectra.fibermap.keys():
mask_candidates = [x for x in spectra.fibermap.keys() if '_TARGET' in x]
raise ValueError(mask_type+" is not in spectra.fibermap.\n Hints of available masks: "+(' '.join(mask_candidates)))
mask_used = supported_masks[mask_type]
target_bits = spectra.fibermap[mask_type]
target_info = [ ' '.join(mask_used.names(x)) for x in target_bits ]
elif survey == 'SDSS':
assert mask_type in supported_masks
target_info = [ mask_type + ' (DUMMY)' for x in spectra.meta['plugmap'] ] # placeholder
self.cds_metadata.add(target_info, name='Targeting masks')
#- Software versions
#- TODO : get template version (from zcatalog...)
if survey == 'SDSS':
spec_version = 'SDSS'
else :
spec_version = '0'
for xx,yy in spectra.meta.items() :
if yy=="desispec" : spec_version = spectra.meta[xx.replace('NAM','VER')]
self.cds_metadata.add([spec_version for i in range(nspec)], name='spec_version')
redrock_version = ["-1" for i in range(nspec)]
if zcatalog is not None:
if 'RRVER' in zcatalog.keys(): redrock_version = zcatalog['RRVER'].data
self.cds_metadata.add(redrock_version, name='redrock_version')
self.cds_metadata.add(np.zeros(nspec)-1, name='template_version')
#- Redshift fit
if zcatalog is not None:
for zcat_key in self.zcat_keys:
if 'TYPE' in zcat_key or 'CLASS' in zcat_key:
data = zcatalog[zcat_key].astype('U{0:d}'.format(zcatalog[zcat_key].dtype.itemsize))
else :
data = zcatalog[zcat_key]
self.cds_metadata.add(data, name=zcat_key)
#- VI informations
default_vi_info = [ (x[1],x[3]) for x in vi_file_fields if x[0][0:3]=="VI_" ]
for vi_key, vi_value in default_vi_info:
self.cds_metadata.add([vi_value for i in range(nspec)], name=vi_key)
def load_spectral_lines(self, z=0):
line_data = dict(
restwave = [],
plotwave = [],
name = [],
longname = [],
plotname = [],
emission = [],
major = [],
#y = []
)
for line_category in ('emission', 'absorption'):
# encoding=utf-8 is needed to read greek letters
line_array = np.genfromtxt(resource_filename('prospect', "data/{0}_lines.txt".format(line_category)),
delimiter=",",
dtype=[("name", "|U20"),
("longname", "|U20"),
("wavelength", float),
("vacuum", bool),
("major", bool)],
encoding='utf-8')
vacuum_wavelengths = line_array['wavelength']
w, = np.where(line_array['vacuum']==False)
vacuum_wavelengths[w] = np.array([_airtovac(wave) for wave in line_array['wavelength'][w]])
line_data['restwave'].extend(vacuum_wavelengths)
line_data['plotwave'].extend(vacuum_wavelengths * (1+z))
line_data['name'].extend(line_array['name'])
line_data['longname'].extend(line_array['longname'])
line_data['plotname'].extend(line_array['name'])
            emission_flag = (line_category == 'emission')
line_data['emission'].extend([emission_flag for row in line_array])
line_data['major'].extend(line_array['major'])
self.cds_spectral_lines = ColumnDataSource(line_data)
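# Editor's note (hedged): the call order assumed from viewer.py is roughly
#     cds = ViewerCDS()
#     cds.load_spectra(spectra); cds.init_coaddcam_spec(spectra)
#     cds.load_metadata(spectra, zcatalog=zcat); cds.load_spectral_lines(z=0)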
| 45.37971
| 135
| 0.551993
| 14,500
| 0.925985
| 0
| 0
| 0
| 0
| 0
| 0
| 3,780
| 0.241395
|
b785c28b83b4fcf0dd190a336d9e93908aec9ec2
| 4,865
|
py
|
Python
|
tests/test_32_read_registration.py
|
jschlyter/oidcendpoint
|
3192e6043f00b22809f63329893c50bedbdeafbc
|
[
"Apache-2.0"
] | null | null | null |
tests/test_32_read_registration.py
|
jschlyter/oidcendpoint
|
3192e6043f00b22809f63329893c50bedbdeafbc
|
[
"Apache-2.0"
] | null | null | null |
tests/test_32_read_registration.py
|
jschlyter/oidcendpoint
|
3192e6043f00b22809f63329893c50bedbdeafbc
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import json
import pytest
from oidcendpoint.endpoint_context import EndpointContext
from oidcendpoint.oidc.authorization import Authorization
from oidcendpoint.oidc.read_registration import RegistrationRead
from oidcendpoint.oidc.registration import Registration
from oidcendpoint.oidc.token import AccessToken
from oidcendpoint.oidc.userinfo import UserInfo
from oidcmsg.oidc import RegistrationRequest
KEYDEFS = [
{"type": "RSA", "key": "", "use": ["sig"]},
{"type": "EC", "crv": "P-256", "use": ["sig"]},
]
RESPONSE_TYPES_SUPPORTED = [
["code"],
["token"],
["id_token"],
["code", "token"],
["code", "id_token"],
["id_token", "token"],
["code", "token", "id_token"],
["none"],
]
CAPABILITIES = {
"subject_types_supported": ["public", "pairwise"],
"grant_types_supported": [
"authorization_code",
"implicit",
"urn:ietf:params:oauth:grant-type:jwt-bearer",
"refresh_token",
],
}
msg = {
"application_type": "web",
"redirect_uris": [
"https://client.example.org/callback",
"https://client.example.org/callback2",
],
"client_name": "My Example",
"client_name#ja-Jpan-JP": "クライアント名",
"subject_type": "pairwise",
"token_endpoint_auth_method": "client_secret_basic",
"jwks_uri": "https://client.example.org/my_public_keys.jwks",
"userinfo_encrypted_response_alg": "RSA1_5",
"userinfo_encrypted_response_enc": "A128CBC-HS256",
"contacts": ["ve7jtb@example.org", "mary@example.org"],
"request_uris": [
"https://client.example.org/rf.txt#qpXaRLh_n93TT",
"https://client.example.org/rf.txt",
],
"post_logout_redirect_uris": [
"https://rp.example.com/pl?foo=bar",
"https://rp.example.com/pl",
],
}
CLI_REQ = RegistrationRequest(**msg)
class TestEndpoint(object):
@pytest.fixture(autouse=True)
def create_endpoint(self):
conf = {
"issuer": "https://example.com/",
"password": "mycket hemligt",
"token_expires_in": 600,
"grant_expires_in": 300,
"refresh_token_expires_in": 86400,
"verify_ssl": False,
"capabilities": CAPABILITIES,
"jwks": {"key_defs": KEYDEFS, "uri_path": "static/jwks.json"},
"endpoint": {
"registration": {
"path": "registration",
"class": Registration,
"kwargs": {"client_auth_method": None},
},
"registration_api": {
"path": "registration_api",
"class": RegistrationRead,
"kwargs": {"client_authn_method": ["bearer_header"]},
},
"authorization": {
"path": "authorization",
"class": Authorization,
"kwargs": {},
},
"token": {
"path": "token",
"class": AccessToken,
"kwargs": {
"client_authn_method": [
"client_secret_post",
"client_secret_basic",
"client_secret_jwt",
"private_key_jwt",
]
},
},
"userinfo": {"path": "userinfo", "class": UserInfo, "kwargs": {}},
},
"template_dir": "template",
}
endpoint_context = EndpointContext(conf)
self.registration_endpoint = endpoint_context.endpoint["registration"]
self.registration_api_endpoint = endpoint_context.endpoint["registration_read"]
def test_do_response(self):
_req = self.registration_endpoint.parse_request(CLI_REQ.to_json())
_resp = self.registration_endpoint.process_request(request=_req)
msg = self.registration_endpoint.do_response(**_resp)
assert isinstance(msg, dict)
_msg = json.loads(msg["response"])
assert _msg
_api_req = self.registration_api_endpoint.parse_request(
"client_id={}".format(_resp["response_args"]["client_id"]),
auth="Bearer {}".format(
_resp["response_args"]["registration_access_token"]
),
)
assert set(_api_req.keys()) == {"client_id"}
_info = self.registration_api_endpoint.process_request(request=_api_req)
assert set(_info.keys()) == {"response_args"}
assert _info["response_args"] == _resp["response_args"]
_endp_response = self.registration_api_endpoint.do_response(_info)
assert set(_endp_response.keys()) == {"response", "http_headers"}
assert ("Content-type", "application/json") in _endp_response["http_headers"]
| 35.510949
| 87
| 0.567523
| 3,013
| 0.617545
| 0
| 0
| 1,881
| 0.38553
| 0
| 0
| 1,850
| 0.379176
|
b786deb138ef01a31f9f64e2908250e1c39d81e6
| 253
|
py
|
Python
|
thippiproject/modelapp/admin.py
|
Anandgowda18/djangocomplete
|
c9eebe7834e404c73deca295289142a1e95ab573
|
[
"Apache-2.0"
] | null | null | null |
thippiproject/modelapp/admin.py
|
Anandgowda18/djangocomplete
|
c9eebe7834e404c73deca295289142a1e95ab573
|
[
"Apache-2.0"
] | null | null | null |
thippiproject/modelapp/admin.py
|
Anandgowda18/djangocomplete
|
c9eebe7834e404c73deca295289142a1e95ab573
|
[
"Apache-2.0"
] | 1
|
2021-08-31T10:20:49.000Z
|
2021-08-31T10:20:49.000Z
|
from django.contrib import admin
from modelapp.models import Project
# Register your models here.
class Projectadmin(admin.ModelAdmin):
    list_display = ['startdate', 'enddate', 'name', 'assignedto', 'priority']
admin.site.register(Project,Projectadmin)
| 31.625
| 73
| 0.786561
| 111
| 0.438735
| 0
| 0
| 0
| 0
| 0
| 0
| 76
| 0.300395
|
b786e3a7dfda3169fee4e233a099bad862bcb4ab
| 621
|
py
|
Python
|
2013.6/2015.6.py
|
luisalvaradoar/olimpiada.ct
|
aea479116036abe1c9cca3b482fed61f275ed319
|
[
"Apache-2.0"
] | null | null | null |
2013.6/2015.6.py
|
luisalvaradoar/olimpiada.ct
|
aea479116036abe1c9cca3b482fed61f275ed319
|
[
"Apache-2.0"
] | null | null | null |
2013.6/2015.6.py
|
luisalvaradoar/olimpiada.ct
|
aea479116036abe1c9cca3b482fed61f275ed319
|
[
"Apache-2.0"
] | null | null | null |
def transaccion(retiro, saldo):
if retiro % 5 != 0:
return(saldo)
elif (saldo - retiro) < 0:
return(saldo)
elif saldo == retiro:
return(saldo)
else:
return(saldo - retiro - 0.5)
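# Worked examples (editor's addition): transaccion(30, 120.0) -> 89.5
# (valid withdrawal: 120 - 30 - 0.50 fee); transaccion(42, 120.0) -> 120.0
# (rejected: 42 is not a multiple of 5, so the balance is returned unchanged).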
def main():
entrada = open("input.txt","r")
salida = open("output.txt","w")
T = int(entrada.readline())
for i in range(T):
tran = entrada.readline().split(' ')
M = float(tran[0])
S = float(tran[1])
res = transaccion(M, S)
salida.write("Caso #{}: {} \n".format(i + 1, res))
entrada.close()
salida.close()
main()
#15 min
| 19.40625
| 58
| 0.516908
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 56
| 0.090177
|
b78725b75722098cca7515b9adddc2dea857de17
| 538
|
py
|
Python
|
hash.py
|
STR-Coding-Club/blockchain-demo
|
fae29a52d97c7595fa1e9d99527c4b034b721b8f
|
[
"MIT"
] | null | null | null |
hash.py
|
STR-Coding-Club/blockchain-demo
|
fae29a52d97c7595fa1e9d99527c4b034b721b8f
|
[
"MIT"
] | null | null | null |
hash.py
|
STR-Coding-Club/blockchain-demo
|
fae29a52d97c7595fa1e9d99527c4b034b721b8f
|
[
"MIT"
] | null | null | null |
import hashlib, json
# def hash(to_hash):
# h = hashlib.md5()
# h.update(bytes(str(to_hash), 'utf-8'))
# return h.hexdigest()
def hash(to_hash, proof_of_work):
s = ''.join(str(_) for _ in (to_hash[:2])) #Add block number and previous hash to string
s += ''.join(str(_) for _ in (to_hash[2])) #Add list of transactions to string
if proof_of_work:
#if the hash function is a proof of work
s += str(proof_of_work)
hash = 32
for c in s:
hash = (hash+ord(c)) % 256
return hash
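# Editor's addition: hedged usage sketch. to_hash is assumed to be a
# (block_number, previous_hash, transactions) triple, matching the slicing
# above; the result is a deterministic int in [0, 255]:
#
#     digest = hash((1, 0, ['alice->bob:5']), proof_of_work=None)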
| 24.454545
| 92
| 0.60223
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 237
| 0.44052
|
b78782c19f15096aec39ba8d508066aa4591ae37
| 1,008
|
py
|
Python
|
covid-modeling-master/generate_ma_csv.py
|
rdkap42/caedus-covid
|
f64a833bdf386708fcb9394f94026c48f8d474ee
|
[
"MIT"
] | 10
|
2020-03-17T21:21:50.000Z
|
2020-04-30T02:30:47.000Z
|
covid-modeling-master/generate_ma_csv.py
|
rdkap42/caedus-covid
|
f64a833bdf386708fcb9394f94026c48f8d474ee
|
[
"MIT"
] | 5
|
2020-03-17T04:39:03.000Z
|
2021-04-30T21:11:14.000Z
|
covid-modeling-master/generate_ma_csv.py
|
rdkap42/caedus-covid
|
f64a833bdf386708fcb9394f94026c48f8d474ee
|
[
"MIT"
] | null | null | null |
import logging
import time
import numpy as np
from eda import ma_data, tx_data
from sir_fitting_us import seir_experiment, make_csv_from_tx_traj
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
logger.info("Fitting model.")
# initial values taken from previous fit, used to seed MH sampler efficiently.
x0 = np.array([ 0.393, -2.586, -3.241, -5.874, -24.999])
# ma_traj = seir_experiment(ma_data, x0, iterations=10000)
tx_traj = seir_experiment(tx_data, x0, iterations=10000)
# mean_ll = np.mean([ll for (x, ll) in ma_traj])
mean_ll = np.mean([ll for (x, ll) in tx_traj])
logger.info("Model fitting finished with mean log-likelihood: {}".format(mean_ll))
if mean_ll < -2000:
raise AssertionError(
"""Mean log-likelihood {} less than threshold of
        -2000. This is probably an error.""".format(mean_ll)
)
underscored_time = time.ctime().replace(" ", "_")
fname = "ma_seir_output_{}.csv".format(underscored_time)
make_csv_from_tx_traj(tx_traj, tx_data, fname)
| 31.5
| 82
| 0.727183
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 374
| 0.371032
|
b7886dbd5b5bd5591584039afc3ce54cfdba530a
| 6,877
|
py
|
Python
|
For_Simulator/learning/plot_gmm_nd.py
|
a-taniguchi/CSL-BGM
|
64bd803289e55b76a219c02ea040325a8a5b949e
|
[
"MIT"
] | 1
|
2018-09-27T12:19:05.000Z
|
2018-09-27T12:19:05.000Z
|
learning/plot_gmm_nd.py
|
neuronalX/CSL-BGM
|
2fc66611928783c9b65c675ec84d9c06e0b6cd8a
|
[
"MIT"
] | null | null | null |
learning/plot_gmm_nd.py
|
neuronalX/CSL-BGM
|
2fc66611928783c9b65c675ec84d9c06e0b6cd8a
|
[
"MIT"
] | 1
|
2018-09-27T12:19:21.000Z
|
2018-09-27T12:19:21.000Z
|
#coding:utf-8
#gaussian plot (position category)
#Akira Taniguchi 2016/06/16
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
from __init__ import *
from numpy.random import multinomial,uniform,dirichlet
from scipy.stats import multivariate_normal,invwishart,rv_discrete
trialname = "testss"#raw_input("trialname?(folder) >")
start = "1"#raw_input("start number?>")
end = "40"#raw_input("end number?>")
filename = raw_input("learning trial name?>")#"001"#
sn = int(start)
en = int(end)
Data = int(en) - int(sn) +1
foldername = datafolder + trialname+"("+str(sn).zfill(3)+"-"+str(en).zfill(3)+")"
Mu_p = [ np.array([0 for i in xrange(dim_p)]) for k in xrange(Kp) ]
Sig_p = [ np.eye(dim_p)*sig_p_init for k in xrange(Kp) ]
#p_dm = [[[-0.3945, 0.0165]], [[-0.3555, -0.006], [-0.336, 0.18]], [[-0.438, -0.0315], [-0.315, 0.0225], [-0.2355, 0.18]], [[-0.453, -0.018], [-0.3, -0.1005], [-0.258, -0.0255]], [[-0.438, 0.036], [-0.318, 0.1875], [-0.3, 0.0795]], [[-0.5535, 0.0675], [-0.336, -0.0465]], [[-0.3885, 0.0555], [-0.3465, -0.126]], [[-0.3555, -0.1425], [-0.324, -0.039], [-0.273, 0.0825]], [[-0.3885, 0.135]], [[-0.285, -0.0135]], [[-0.5265, 0.045], [-0.33, 0.18], [-0.2685, 0.0165]], [[-0.453, 0.015], [-0.3795, 0.231]], [[-0.3825, -0.231]], [[-0.327, -0.18], [-0.309, -0.0075]], [[-0.3735, -0.1455]], [[-0.2685, -0.0135]], [[-0.438, 0.033], [-0.36, 0.204], [-0.2955, 0.0855]], [[-0.45, 0.048]], [[-0.447, -0.006], [-0.3735, 0.1785]], [[-0.4005, 0.1755], [-0.2655, -0.0705]]]
p_temp = []
#for d in xrange(D):
# p_temp = p_temp + p_dm[d]
#[[-0.319936213, 0.117489433],[-0.345566772, -0.00810185],[-0.362990185, -0.042447971],[-0.277759177, 0.083363745]]
#Sig_p = [[] , [], [] ,[]]
#Sig_p[0] = [[0.010389635, 0.001709343],[0.001709343, 0.018386732]]
#[[0.005423979, 0.000652657],[0.000652657, 0.001134736]]
#Sig_p[1] = [[0.001920786, -0.001210214],[-0.001210214, 0.002644612]]
#Sig_p[2] = [[0.003648299, -0.000312398],[-0.000312398, 0.001518234]]
#Sig_p[3] = [[0.001851727, -0.000656013],[-0.000656013, 0.004825636]]
k=0
for line in open(foldername +'/' + filename + '/' + trialname + '_'+ filename +'_Mu_p.csv', 'r'):
itemList = line[:-1].split(',')
#for i in xrange(len(itemList)):
Mu_p[k] = [float(itemList[0]),float(itemList[1])]
k = k + 1
k=0
i=0
for line in open(foldername +'/' + filename + '/' + trialname + '_'+ filename +'_Sig_p.csv', 'r'):
itemList = line[:-1].split(',')
if k < Kp:
if (i == 0):
#for i in xrange(len(itemList)):
print itemList
Sig_p[k][0][0] = float(itemList[0])
Sig_p[k][0][1] = float(itemList[1])
i = i + 1
elif (i == 1):
#for i in xrange(len(itemList)):
print itemList
Sig_p[k][1][0] = float(itemList[0])
Sig_p[k][1][1] = float(itemList[1])
i = i + 1
elif (i == 2):
i = 0
k = k + 1
zp = []
pi_p = [0.0 for k in range(Kp)] #[0.017826621173443864,0.28554229470170217,0.041570976925928926,0.1265347852145472,0.52852532198437785]
dm = 0
for line in open(foldername +'/' + filename + '/' + trialname + '_'+ filename +'_zp.csv', 'r'):
itemList = line[:-1].split(',')
for i in range(len(itemList)):
if itemList[i] != '':
#print dm,itemList[i]
zp = zp + [int(itemList[i])]
dm = dm + 1
for line in open(foldername +'/' + filename + '/' + trialname + '_'+ filename +'_pi_p.csv', 'r'):
itemList = line[:-1].split(',')
for i in range(len(pi_p)):
pi_p[i] = float(itemList[i])
colors = ['b', 'g', 'm', 'r', 'c', 'y', 'k', 'orange', 'purple', 'brown']
color_iter = itertools.cycle(colors)
splot = plt.subplot(1, 1,1)
for k,(mean,covar,color) in enumerate(zip(Mu_p,Sig_p,color_iter)):
v, w = linalg.eigh(covar)
u = w[0] / linalg.norm(w[0])
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
ell = mpl.patches.Ellipse([mean[1],mean[0]], v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
#splot.add_artist(ell)
    # Case: draw many samples from each Gaussian and scatter-plot them
for i in range(int(5000*2*pi_p[k])):#)):#
X = multivariate_normal.rvs(mean=mean, cov=covar)
plt.scatter(X[1],X[0], s=5, marker='.', color=color, alpha=0.2)
# Case: plot the data points color-coded by class
#for i in range(len(p_temp)):
# plt.scatter(p_temp[i][1],p_temp[i][0], marker='x', c=colors[zp[i]])
"""
# Number of samples per component
n_samples = 500
# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
.7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
# Fit a mixture of Gaussians with EM using five components
#gmm = mixture.GMM(n_components=5, covariance_type='full')
#gmm.fit(X)
# Fit a Dirichlet process mixture of Gaussians using five components
dpgmm = mixture.DPGMM(n_components=5, covariance_type='full')
dpgmm.fit(X)
#for i, (clf, title) in enumerate([#(gmm, 'GMM'),
# (dpgmm, 'Dirichlet Process GMM')]):
"""
#clf=dpgmm
title = 'Position category'#data'
#Y_ = clf.predict(X)
#print Y_
"""
for i, (mean, covar, color) in enumerate(zip(
clf.means_, clf._get_covars(), color_iter)):
v, w = linalg.eigh(covar)
print covar
u = w[0] / linalg.norm(w[0])
# as the DP will not use every component it has access to
# unless it needs it, we shouldn't plot the redundant
# components.
#if not np.any(Y_ == i):
# continue
#plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
"""
plt.ylim(-0.2, -0.8)
plt.xlim(-0.3, 0.3)
#plt.xticks([-0.8+0.1*i for i in range(7)])
#plt.yticks([-0.3+0.1*i for i in range(7)])
plt.title(title)
#w, h = plt.get_figwidth(), plt.get_figheight()
#ax = plt.add_axes((0.5 - 0.5 * 0.8 * h / w, 0.1, 0.8 * h / w, 0.8))
#aspect = (ax.get_xlim()[1] - ax.get_xlim()[0]) / (ax.get_ylim()[1] - ax.get_ylim()[0])
#ax.set_aspect(aspect)
plt.gca().set_aspect('equal', adjustable='box')
plt.savefig(foldername +'/' + filename + '/' + trialname + '_'+ filename +'_position_data_plot_p1nd.eps', dpi=150)
plt.savefig(foldername +'/' + filename + '/' + trialname + '_'+ filename +'_position_data_plot_p1nd.png', dpi=150)
plt.show()
| 38.418994
| 757
| 0.574378
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 4,198
| 0.601864
|
b789e2f11f57608cb00f55b63d916cf77099aa18
| 338
|
py
|
Python
|
condiment/tests/test_simple.py
|
WeilerWebServices/Kivy
|
54e3438156eb0c853790fd3cecc593f09123f892
|
[
"MIT"
] | 3
|
2020-09-13T09:55:02.000Z
|
2021-01-23T11:13:59.000Z
|
condiment/tests/test_simple.py
|
WeilerWebServices/Kivy
|
54e3438156eb0c853790fd3cecc593f09123f892
|
[
"MIT"
] | 2
|
2018-03-16T22:01:12.000Z
|
2020-01-13T10:33:22.000Z
|
condiment/tests/test_simple.py
|
WeilerWebServices/Kivy
|
54e3438156eb0c853790fd3cecc593f09123f892
|
[
"MIT"
] | 2
|
2015-04-26T15:31:40.000Z
|
2017-09-12T12:36:14.000Z
|
#exclude
import condiment; condiment.install()
#endexclude
if WITH_TIMEBOMB:
timebomb = int(WITH_TIMEBOMB)
print 'timebomb feature is activated, and set to', timebomb
if WITH_INAPP_PURCHASE:
print 'inapp purchase feature is activated'
if WITH_TIMEBOMB and WITH_INAPP_PURCHASE:
print 'both features have been activated'
| 24.142857
| 63
| 0.775148
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 134
| 0.39645
|
b78d491f7594796538d954c219bf7266a1779707
| 1,096
|
py
|
Python
|
tests/init.py
|
touqir14/Emergent
|
0782e1304702f3c95092953ce5da1d8ed69bbe6f
|
[
"MIT"
] | null | null | null |
tests/init.py
|
touqir14/Emergent
|
0782e1304702f3c95092953ce5da1d8ed69bbe6f
|
[
"MIT"
] | null | null | null |
tests/init.py
|
touqir14/Emergent
|
0782e1304702f3c95092953ce5da1d8ed69bbe6f
|
[
"MIT"
] | null | null | null |
import pathlib
import os
import sys
from multiprocessing import resource_tracker
def modify_resource_tracker():
# See discussion: https://bugs.python.org/issue39959
# See source code: https://github.com/python/cpython/blob/master/Lib/multiprocessing/resource_tracker.py
rt = resource_tracker._resource_tracker
def register(name, rtype):
if rtype == 'shared_memory':
# print("Do nothing for shared_memory")
return
else:
rt._send('REGISTER', name, rtype)
def unregister(name, rtype):
if rtype == 'shared_memory':
# print("Do nothing for shared_memory")
return
else:
rt._send('UNREGISTER', name, rtype)
resource_tracker.register = register
    resource_tracker.unregister = unregister
def add_Emergent_paths():
Emergent_path = pathlib.Path(__file__).absolute().parent.parent
test_path = os.path.join(Emergent_path, 'tests')
EmergentMain_path = os.path.join(Emergent_path, 'Emergent')
sys.path.append(test_path)
sys.path.append(EmergentMain_path)
modify_resource_tracker()
add_Emergent_paths()
| 27.4
| 106
| 0.718978
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 304
| 0.277372
|
b78f243f121a8fb7c7eb83f7fd3d5745810817b9
| 12,586
|
py
|
Python
|
data-processor/modules/wikia_handler.py
|
stephanietuerk/art-history-jobs
|
d10718ce7c8eb591cea9808945d01a2273425a58
|
[
"MIT"
] | null | null | null |
data-processor/modules/wikia_handler.py
|
stephanietuerk/art-history-jobs
|
d10718ce7c8eb591cea9808945d01a2273425a58
|
[
"MIT"
] | null | null | null |
data-processor/modules/wikia_handler.py
|
stephanietuerk/art-history-jobs
|
d10718ce7c8eb591cea9808945d01a2273425a58
|
[
"MIT"
] | null | null | null |
import csv
import re
import unicodedata
import bs4
import wikia
from modules import utils
from modules.config import Config
class WikiaHandler():
def __init__(self):
config = Config()
self.scraping_config = config.get_scraping_config()
self.parsing_config = config.get_parsing_config()
self.out_config = config.get_out_config()
self.make_page_names()
def make_page_names(self):
years = self.scraping_config['years']
page_name = self.scraping_config['sub_page']
names = [f'{page_name} {year}-{year+1}' for year in years]
names = [x.replace('Art History 2018-2019','Art History 2018-19') for x in names]
names = [x.replace('Art History 2019-2020','Art History Jobs 2019-20') for x in names]
names = [x.replace('Art History 2020-2021','Art History 2020-21') for x in names]
names = [x.replace('Art History 2021-2022','Art History 2021-22') for x in names]
self.page_names = names
def create_fields_file(self):
data = []
for page_name in self.page_names:
print(f'Begin processing {page_name}')
year = self.get_year_from_page_name(page_name)
html = self.get_html_for_page(page_name)
sections = utils.get_sections_for_tag(html, 'h2')
fields_in_page = []
for section in sections:
soup = bs4.BeautifulSoup(section, 'html.parser')
section_title_list = soup.select('h2 .mw-headline')
if self.is_field_title(section_title_list):
field = self.clean_text(section_title_list[0].text)
if field not in fields_in_page:
fields_in_page.append(field)
if len(data) > 0:
fields_in_list = []
for item in data:
fields_in_list.append(item['field'])
fields_in_list = set(fields_in_list)
if field not in fields_in_list:
item = {'field': field, 'years': year}
data.append(item)
else:
for item in data:
if field == item['field']:
item['years'] = item['years'] + ',' + year
else:
item = {'field': field, 'years': year}
data.append(item)
self.write_fields_file(data)
def get_year_from_page_name(self, page_name):
year_regex = re.compile(r'\d{4,}(?=-)')
return year_regex.search(page_name).group()
def get_html_for_page(self, page_name):
page_content = wikia.page(self.scraping_config['main_wiki'], page_name)
return page_content.html()
def get_sections_for_tag(self, html, tag):
sections = html.split(f'<{tag}>')
sections = [f'<{tag}>' + section for section in sections][1:]
return sections
def write_fields_file(self, data):
keys = data[0].keys()
with open(f"{self.out_config}/{self.out_config['fields_file']}", 'w', newline='') as csvfile:
writer = csv.DictWriter(csvfile, keys)
writer.writeheader()
writer.writerows(data)
def is_field_title(self, section_title_list):
return len(section_title_list) > 0 and not self.section_is_excluded(section_title_list[0].text) and not self.scraping_config['tt_key'] in section_title_list[0].text and not self.scraping_config['non_tt_key'] in section_title_list[0].text
def create_scrape_file(self):
self.get_fields_dict()
data = []
for page in self.page_names:
print(f'Begin processing {page}')
page_data = self.get_page_data(page)
data.extend(page_data)
keys = data[0].keys()
print(keys)
print(data[0])
self.weight_jobs(data)
with open(f"{self.out_config['path']}/{self.out_config['new_scrape_file']}", 'w', newline='', encoding='utf-8') as csvfile:
writer = csv.DictWriter(csvfile, keys)
writer.writeheader()
writer.writerows(data)
def get_fields_dict(self):
with open(self.parsing_config['fields_dictionary'], mode='r') as infile:
reader = csv.reader(infile)
self.fields_dict = {rows[0]:rows[1] for rows in reader}
def get_page_data(self, page_name):
html = self.get_html_for_page(page_name)
self.current_year = self.get_year_from_page_name(page_name)
if self.page_is_segmented_by_tt_status(html):
data = self.process_page_segmented_by_tt_status(html)
else:
data = self.process_unsegmented_page(html)
return data
def page_is_segmented_by_tt_status(self, html):
soup = bs4.BeautifulSoup(html, 'html.parser')
return soup.find(id='TENURE_TRACK_JOBS') is not None
def process_page_segmented_by_tt_status(self, html):
sections = utils.get_sections_for_tag(html, 'h2')
jobs = []
grouped_sections = self.group_sections_by_tt_status(sections)
for section in grouped_sections['tt']:
tt_jobs = self.get_jobs_from_field_section(section, True)
jobs.extend(tt_jobs)
for section in grouped_sections['non_tt']:
non_tt_jobs = self.get_jobs_from_field_section(section, False)
jobs.extend(non_tt_jobs)
return jobs
def process_unsegmented_page(self, html):
sections = utils.get_sections_for_tag(html, 'h2')
jobs = []
for section in sections:
section_title_list = utils.get_selection_from_content(section, 'h2 .mw-headline')
if len(section_title_list) > 0 and not self.section_is_excluded(section_title_list[0].text):
data = self.get_jobs_from_field_section(section)
jobs.extend(data)
return jobs
def group_sections_by_tt_status(self, sections):
tt_sections = []
non_tt_sections = []
section_type = None
for section in sections:
section_title_list = utils.get_selection_from_content(section, 'h2 .mw-headline')
if len(section_title_list) > 0 and not self.section_is_excluded(section_title_list[0].text):
section_title = section_title_list[0].text
if self.scraping_config['tt_key'] in section_title or self.scraping_config['non_tt_key'] in section_title:
if self.scraping_config['tt_key'] in section_title:
section_type = 'tt'
if self.scraping_config['non_tt_key'] in section_title:
section_type = 'non_tt'
else:
if section_type == 'tt':
tt_sections.append(section)
if section_type == 'non_tt':
non_tt_sections.append(section)
return {'tt': tt_sections, 'non_tt': non_tt_sections}
def section_is_excluded(self, section_title):
excluded_sections = self.scraping_config['excluded_sections']
for excluded_section in excluded_sections:
if excluded_section in section_title:
return True
return False
def get_jobs_from_field_section(self, html, isTt = None):
original_field = self.get_field(html)
normalized_field = self.normalize_field(original_field)
job_listings = utils.get_sections_for_tag(html, 'h3')
jobs = []
if len(job_listings) > 0:
for job in job_listings:
title_list = utils.get_selection_from_content(job, 'h3 .mw-headline')
body = bs4.BeautifulSoup(job, 'html.parser').get_text()
if self.scraping_config['end_marker'] in body:
body = body.split(self.scraping_config['end_marker'])[0]
if len(title_list) > 0:
headline = self.clean_text(title_list[0].get_text())
if not 'see also' in headline.lower():
body = self.clean_text(body)
job_type_keys = self.get_job_type_keys(headline, body)
# print('job_type_keys', job_type_keys)
if len(job_type_keys) == 0:
print('No job type keys found', headline)
if original_field == 'Fellowships':
isTt = False
data = {
'year': self.current_year,
'field': normalized_field,
'original_field': self.clean_text(original_field),
'keys': ', '.join(job_type_keys),
'is_tt': self.get_tenure_status(job_type_keys, isTt),
'rank': self.get_rank(job_type_keys),
'headline': headline,
# 'school': None,
# 'department': None,
# 'location': self.get_location_from_headline(headline),
'text': self.clean_body(body),
}
jobs.append(data)
return jobs
def get_field(self, section):
field_header = utils.get_selection_from_content(section, 'h2 .mw-headline')
return self.clean_text(field_header[0].text)
def normalize_field(self, field):
return self.fields_dict[field]
def clean_text(self, text):
for string in self.parsing_config['strip']:
if string == '\xa0':
print(text, string)
text = text.replace(string, ' ')
text = unicodedata.normalize('NFKD', text)
return text
def clean_body(self, body):
        # Python's re module does not support POSIX classes like [:cntrl:];
        # strip literal brackets and ASCII control characters explicitly.
        weird_stuff_regex = r'[\[\]\x00-\x1f\x7f]'
return re.sub(weird_stuff_regex, '', body)
def get_location_from_headline(self, headline):
location_regex = re.compile(r'\([^\n)]*[A-Z]{2}\)')
return location_regex.search(headline).group().replace('(', '').replace(')', '') if location_regex.search(headline) else None
def get_job_type_keys(self, headline, body):
title_keys = self.get_matching_keys(headline, self.parsing_config['HEADLINE'])
text_keys = self.get_matching_keys(body, self.parsing_config['BODY'])
return title_keys + text_keys
def get_tenure_status(self, keys, isTt):
if isTt == False:
return False
elif len(keys) == 0:
return 'manual'
elif 'tt' in keys or isTt == True:
if 'non_tt' in keys or 'vap' in keys or 'lecturer' in keys or 'postdoc' in keys or 'contract' in keys or isTt == False:
return 'manual'
else:
return True
elif ('tt' not in keys) and ('assistant_prof' in keys or 'associate_prof' in keys or 'full_prof' in keys):
if ('vap' not in keys and 'lecturer' not in keys and 'postdoc' not in keys and 'contract' not in keys):
return True
else:
return 'manual'
elif len(keys) == 1 and 'open_rank' in keys:
return 'manual'
else:
return False
def get_rank(self, keys):
keys = list(set(keys))
if 'tt' in keys:
keys.remove('tt')
if 'non_tt' in keys:
keys.remove('non_tt')
return ', '.join(keys)
def get_matching_keys(self, text, job_type_dict):
types = []
for key, value in job_type_dict.items():
if self.any_in_list_in_text(value, text):
types.append(key)
return types
def any_in_list_in_text(self, list, text):
match = False
for word in list:
if word in text.lower():
match = True
return match
def weight_jobs(self, jobs):
for job in jobs:
matches = []
matches.append(job)
for job_to_compare in jobs:
if job_to_compare['year'] == job['year'] and job_to_compare['headline'] == job['headline'] and job_to_compare['text'] == job['text'] and job_to_compare['original_field'] is not job['original_field']:
matches.append(job_to_compare)
job['count'] = 1 / len(matches)
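# Editor's addition: hedged usage sketch, assuming the config files that
# Config() reads are present:
#
#     handler = WikiaHandler()
#     handler.create_fields_file()   # one-off: enumerate fields per year
#     handler.create_scrape_file()   # full scrape -> CSV for downstream steps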
| 44.161404
| 245
| 0.571667
| 12,443
| 0.988638
| 0
| 0
| 0
| 0
| 0
| 0
| 1,473
| 0.117035
|
b78f91bde4f8867fdadaaee058845f1ee62f598c
| 3,949
|
py
|
Python
|
ucsmsdk/mometa/cimcvmedia/CimcvmediaExtMgmtRuleEntry.py
|
anoop1984/python_sdk
|
c4a226bad5e10ad233eda62bc8f6d66a5a82b651
|
[
"Apache-2.0"
] | null | null | null |
ucsmsdk/mometa/cimcvmedia/CimcvmediaExtMgmtRuleEntry.py
|
anoop1984/python_sdk
|
c4a226bad5e10ad233eda62bc8f6d66a5a82b651
|
[
"Apache-2.0"
] | null | null | null |
ucsmsdk/mometa/cimcvmedia/CimcvmediaExtMgmtRuleEntry.py
|
anoop1984/python_sdk
|
c4a226bad5e10ad233eda62bc8f6d66a5a82b651
|
[
"Apache-2.0"
] | null | null | null |
"""This module contains the general information for CimcvmediaExtMgmtRuleEntry ManagedObject."""
import sys, os
from ...ucsmo import ManagedObject
from ...ucscoremeta import UcsVersion, MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class CimcvmediaExtMgmtRuleEntryConsts():
MOUNT_PROTOCOL_CIFS = "cifs"
MOUNT_PROTOCOL_HTTP = "http"
MOUNT_PROTOCOL_HTTPS = "https"
MOUNT_PROTOCOL_NFS = "nfs"
MOUNT_PROTOCOL_UNKNOWN = "unknown"
class CimcvmediaExtMgmtRuleEntry(ManagedObject):
"""This is CimcvmediaExtMgmtRuleEntry class."""
consts = CimcvmediaExtMgmtRuleEntryConsts()
naming_props = set([u'mappingName'])
mo_meta = MoMeta("CimcvmediaExtMgmtRuleEntry", "cimcvmediaExtMgmtRuleEntry", "ext-mgmt-rule-[mapping_name]", VersionMeta.Version222c, "InputOutput", 0x3f, [], ["read-only"], [u'cimcvmediaActualMountList'], [], ["Get"])
prop_meta = {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version222c, MoPropertyMeta.INTERNAL, 0x2, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version222c, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
"ext_mgmt_ip_addr": MoPropertyMeta("ext_mgmt_ip_addr", "extMgmtIpAddr", "string", VersionMeta.Version222c, MoPropertyMeta.READ_ONLY, None, 0, 256, r"""((([0-9]){1,3}\.){3}[0-9]{1,3})""", [], []),
"mapping_name": MoPropertyMeta("mapping_name", "mappingName", "string", VersionMeta.Version222c, MoPropertyMeta.NAMING, 0x8, None, None, r"""[a-zA-Z0-9][a-zA-Z0-9_.:-]{0,63}""", [], []),
"mgmt_if_ip_addr": MoPropertyMeta("mgmt_if_ip_addr", "mgmtIfIpAddr", "string", VersionMeta.Version222c, MoPropertyMeta.READ_ONLY, None, 0, 256, r"""((([0-9]){1,3}\.){3}[0-9]{1,3})""", [], []),
"mount_protocol": MoPropertyMeta("mount_protocol", "mountProtocol", "string", VersionMeta.Version222c, MoPropertyMeta.READ_ONLY, None, None, None, None, ["cifs", "http", "https", "nfs", "unknown"], []),
"remote_ip_addr": MoPropertyMeta("remote_ip_addr", "remoteIpAddr", "string", VersionMeta.Version222c, MoPropertyMeta.READ_ONLY, None, 0, 256, r"""((([0-9]){1,3}\.){3}[0-9]{1,3})""", [], []),
"remote_port": MoPropertyMeta("remote_port", "remotePort", "uint", VersionMeta.Version222c, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version222c, MoPropertyMeta.READ_ONLY, 0x10, 0, 256, None, [], []),
"sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302a, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version222c, MoPropertyMeta.READ_WRITE, 0x20, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
}
prop_map = {
"childAction": "child_action",
"dn": "dn",
"extMgmtIpAddr": "ext_mgmt_ip_addr",
"mappingName": "mapping_name",
"mgmtIfIpAddr": "mgmt_if_ip_addr",
"mountProtocol": "mount_protocol",
"remoteIpAddr": "remote_ip_addr",
"remotePort": "remote_port",
"rn": "rn",
"sacl": "sacl",
"status": "status",
}
def __init__(self, parent_mo_or_dn, mapping_name, **kwargs):
self._dirty_mask = 0
self.mapping_name = mapping_name
self.child_action = None
self.ext_mgmt_ip_addr = None
self.mgmt_if_ip_addr = None
self.mount_protocol = None
self.remote_ip_addr = None
self.remote_port = None
self.sacl = None
self.status = None
ManagedObject.__init__(self, "CimcvmediaExtMgmtRuleEntry", parent_mo_or_dn, **kwargs)
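# Hedged usage sketch (assumes a reachable UCS Manager; the address and
# credentials below are placeholders, not part of this module). Per the
# mo_meta above the MO is read-only, so querying is the natural use:
#   from ucsmsdk.ucshandle import UcsHandle
#   handle = UcsHandle("192.0.2.10", "admin", "password")
#   handle.login()
#   rules = handle.query_classid("cimcvmediaExtMgmtRuleEntry")
#   handle.logout()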
| 58.940299
| 248
| 0.66042
| 3,697
| 0.936186
| 0
| 0
| 0
| 0
| 0
| 0
| 1,519
| 0.384654
|
b78fac6287214282885abd8ffbebb076e0bdd37a
| 3,672
|
py
|
Python
|
paddlescience/pde/pde_navier_stokes.py
|
juneweng/PaddleScience
|
f30ce908b6fbec2403936007d12d9701f74fd00e
|
[
"Apache-2.0"
] | null | null | null |
paddlescience/pde/pde_navier_stokes.py
|
juneweng/PaddleScience
|
f30ce908b6fbec2403936007d12d9701f74fd00e
|
[
"Apache-2.0"
] | null | null | null |
paddlescience/pde/pde_navier_stokes.py
|
juneweng/PaddleScience
|
f30ce908b6fbec2403936007d12d9701f74fd00e
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .pde_base import PDE
class NavierStokes(PDE):
"""
Two-dimensional time-independent Navier-Stokes equation
.. math::
:nowrap:
\\begin{eqnarray*}
\\frac{\\partial u}{\\partial x} + \\frac{\\partial v}{\\partial y} & = & 0, \\\\
u \\frac{\\partial u}{\\partial x} + v \\frac{\\partial u}{\\partial y} - \\frac{\\nu}{\\rho} \\frac{\\partial^2 u}{\\partial x^2} - \\frac{\\nu}{\\rho} \\frac{\\partial^2 u}{\\partial y^2} + \\frac{1}{\\rho} \\frac{\\partial p}{\\partial x} & = & 0, \\\\
u \\frac{\\partial v}{\\partial x} + v \\frac{\\partial v}{\\partial y} - \\frac{\\nu}{\\rho} \\frac{\\partial^2 v}{\\partial x^2} - \\frac{\\nu}{\\rho} \\frac{\\partial^2 v}{\\partial y^2} + \\frac{1}{\\rho} \\frac{\\partial p}{\\partial y} & = & 0.
\\end{eqnarray*}
Parameters
----------
nu : float
Kinematic viscosity
rho : float
Density
Example:
>>> import paddlescience as psci
>>> pde = psci.pde.NavierStokes(0.01, 1.0)
"""
def __init__(self, nu=0.01, rho=1.0):
dim = 2
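# NOTE: dim is hardcoded to 2 here, so the dim == 3 branch below is
# unreachable until dim is made a constructor parameter. In the 2-D branch
# the third unknown is named "w"; its 1/rho coefficient matches the
# pressure-gradient term in the docstring, i.e. "w" stands in for the pressure p.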
super(NavierStokes, self).__init__(dim + 1)
if dim == 2:
# continuity
self.add_item(0, 1.0, "du/dx")
self.add_item(0, 1.0, "dv/dy")
# momentum x
self.add_item(1, 1.0, "u", "du/dx")
self.add_item(1, 1.0, "v", "du/dy")
self.add_item(1, -nu / rho, "d2u/dx2")
self.add_item(1, -nu / rho, "d2u/dy2")
self.add_item(1, 1.0 / rho, "dw/dx")
# momentum y
self.add_item(2, 1.0, "u", "dv/dx")
self.add_item(2, 1.0, "v", "dv/dy")
self.add_item(2, -nu / rho, "d2v/dx2")
self.add_item(2, -nu / rho, "d2v/dy2")
self.add_item(2, 1.0 / rho, "dw/dy")
elif dim == 3:
# continuity
self.add_item(0, 1.0, "du/dx")
self.add_item(0, 1.0, "dv/dy")
self.add_item(0, 1.0, "dw/dz")
# momentum x
self.add_item(1, 1.0, "u", "du/dx")
self.add_item(1, 1.0, "v", "du/dy")
self.add_item(1, 1.0, "w", "du/dz")
self.add_item(1, -nu / rho, "d2u/dx2")
self.add_item(1, -nu / rho, "d2u/dy2")
self.add_item(1, -nu / rho, "d2u/dz2")
self.add_item(1, 1.0 / rho, "dp/dx")
# momentum y
self.add_item(2, 1.0, "u", "dv/dx")
self.add_item(2, 1.0, "v", "dv/dy")
self.add_item(2, 1.0, "w", "dv/dz")
self.add_item(2, -nu / rho, "d2v/dx2")
self.add_item(2, -nu / rho, "d2v/dy2")
self.add_item(2, -nu / rho, "d2v/dz2")
self.add_item(2, 1.0 / rho, "dp/dy")
# momentum z
self.add_item(3, 1.0, "u", "dw/dx")
self.add_item(3, 1.0, "v", "dw/dy")
self.add_item(3, 1.0, "w", "dw/dz")
self.add_item(3, -nu / rho, "d2w/dx2")
self.add_item(3, -nu / rho, "d2w/dy2")
self.add_item(3, -nu / rho, "d2w/dz2")
self.add_item(3, 1.0 / rho, "dp/dz")
| 40.351648
| 223
| 0.510349
| 3,032
| 0.825708
| 0
| 0
| 0
| 0
| 0
| 0
| 1,924
| 0.523965
|
b7907344916d1e840e9f663cfdac58234f01e739
| 327
|
py
|
Python
|
tests/strict/it_mod_double_fun.py
|
Euromance/pycopy
|
540cfcc52d17667a5f6b2a176427cc031029b78f
|
[
"MIT"
] | 663
|
2018-12-30T00:17:59.000Z
|
2022-03-14T05:03:41.000Z
|
tests/strict/it_mod_double_fun.py
|
Euromance/pycopy
|
540cfcc52d17667a5f6b2a176427cc031029b78f
|
[
"MIT"
] | 41
|
2019-06-06T08:31:19.000Z
|
2022-02-13T16:53:41.000Z
|
tests/strict/it_mod_double_fun.py
|
Euromance/pycopy
|
540cfcc52d17667a5f6b2a176427cc031029b78f
|
[
"MIT"
] | 60
|
2019-06-01T04:25:00.000Z
|
2022-02-25T01:47:31.000Z
|
import mod
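# Hedged reading of this strict-mode test: assigning to attributes of an
# imported module is expected to raise RuntimeError; the prints record which
# assignments are rejected and what mod.foo holds afterwards.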
def foo():
return 1
try:
mod.foo = foo
except RuntimeError:
print("RuntimeError1")
print(mod.foo())
try:
mod.foo = 1
except RuntimeError:
print("RuntimeError2")
print(mod.foo)
try:
mod.foo = 2
except RuntimeError:
print("RuntimeError3")
print(mod.foo)
def __main__():
pass
| 10.21875
| 26
| 0.642202
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 45
| 0.137615
|
b791735533ee827f9ea58cc60b007fcd8bb18dbd
| 2,295
|
py
|
Python
|
LungCancer.py
|
SpiroGanas/Lung-Cancer-Machine-Learning
|
4e38b3df9b560194f67ab03e9aaefad6c84216db
|
[
"MIT"
] | 1
|
2019-04-28T02:25:32.000Z
|
2019-04-28T02:25:32.000Z
|
LungCancer.py
|
SpiroGanas/Lung-Cancer-Machine-Learning
|
4e38b3df9b560194f67ab03e9aaefad6c84216db
|
[
"MIT"
] | null | null | null |
LungCancer.py
|
SpiroGanas/Lung-Cancer-Machine-Learning
|
4e38b3df9b560194f67ab03e9aaefad6c84216db
|
[
"MIT"
] | null | null | null |
# Spiro Ganas
# 9/27/17
#
# Python 3 script to load the Stage 1 CT scans and report per-patient slice counts
######### NOTES ########################################
# 1. Each "slice" is a 512x512 image, stored in a single .dcm file.
# 2. A 3-dimensional CT Scan consists of between 94 and 541 slices (according to the Stage 1 data).
# 3. We need to rescale all the CT scans so they have the same number of slices.
#########################################################
####### CONSTANTS ##########################
# This folder contains one subfolder per patient
data_dir = 'D:\\Lung Cancer Dataset\\stage1'
# This CSV lists all patients and shows which ones have cancer
truth_file = 'D:\\Lung Cancer Dataset\\stage1_labels.csv'
# These values are used to downscale the images to a uniform size
IMAGE_DIMENSION = 512 # Downscale the images so the size of a slice is IMAGE_DIMENSIONxIMAGE_DIMENSION
NUMBER_OF_SLICES = 20 # A patient can have between 94 and 541 slices (see note 2). This downscales all patients to the same number of slices.
#############################################
import dicom #http://pydicom.readthedocs.io/en/stable/getting_started.html
#print("pydicom version: ", dicom.__version__)
import os
import math
import numpy as np
import pandas as pd
import tensorflow as tf
# Change this to wherever you are storing your data:
# IF YOU ARE FOLLOWING ON KAGGLE, YOU CAN ONLY PLAY WITH THE SAMPLE DATA, WHICH IS MUCH SMALLER
labels_df = pd.read_csv(truth_file, index_col=0)
#patients = list(labels_df.index.values)
patients = os.listdir(data_dir)
print(labels_df.head())
print(patients)
print(len(patients))
temp_min = 9999
temp_max = 0
for patient in patients:
try:
label = labels_df.get_value(patient, 'cancer')
path = data_dir +'/'+ patient
# a couple great 1-liners from: https://www.kaggle.com/gzuidhof/data-science-bowl-2017/full-preprocessing-tutorial
slices = [dicom.read_file(path + '/' + s) for s in os.listdir(path)]
slices.sort(key=lambda x: int(x.ImagePositionPatient[2]))
print(slices[0].pixel_array.shape, len(slices))
if len(slices)>temp_max: temp_max=len(slices)
if len(slices) < temp_min: temp_min = len(slices)
except Exception:
    # skip patients missing from the labels CSV or with unreadable DICOM files
    pass
print("Minimum number of slices:", temp_min)
print("Maximum number of slices:", temp_max)
| 27.650602
| 127
| 0.662309
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,406
| 0.612636
|
b791ac3d3021c73d2b06aeec225eba82f7ecf599
| 852
|
py
|
Python
|
setup.py
|
LEGO-Robotics/aistorms
|
abfb6b978215507d331d9ade764310ee9d8c9448
|
[
"MIT"
] | 1
|
2020-06-06T22:47:09.000Z
|
2020-06-06T22:47:09.000Z
|
setup.py
|
AntoniLuongPham/LEGO-Mindstorms-AI
|
abfb6b978215507d331d9ade764310ee9d8c9448
|
[
"MIT"
] | null | null | null |
setup.py
|
AntoniLuongPham/LEGO-Mindstorms-AI
|
abfb6b978215507d331d9ade764310ee9d8c9448
|
[
"MIT"
] | 1
|
2020-06-08T15:53:42.000Z
|
2020-06-08T15:53:42.000Z
|
import json
import os
from setuptools import find_packages, setup
PACKAGE_NAMESPACE_NAME = 'aistorms'
METADATA_FILE_NAME = 'metadata.json'
REQUIREMENTS_FILE_NAME = 'requirements.txt'
with open(os.path.join(
        os.path.dirname(__file__),
        PACKAGE_NAMESPACE_NAME,
        METADATA_FILE_NAME)) as _metadata_file:
    _metadata = json.load(_metadata_file)
setup(
name=_metadata['PACKAGE'],
author=_metadata['AUTHOR'],
author_email=_metadata['AUTHOR_EMAIL'],
url=_metadata['URL'],
version=_metadata['VERSION'],
description=_metadata['DESCRIPTION'],
long_description=_metadata['DESCRIPTION'],
keywords=_metadata['DESCRIPTION'],
packages=find_packages(),
include_package_data=True,
install_requires=[
    s.strip() for s in open(REQUIREMENTS_FILE_NAME)
    if s.strip() and not s.startswith('#')])
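# For reference, an assumption inferred from the keys read above (not a file
# shipped in this snippet): aistorms/metadata.json would look roughly like
#   {"PACKAGE": "...", "AUTHOR": "...", "AUTHOR_EMAIL": "...",
#    "URL": "...", "VERSION": "...", "DESCRIPTION": "..."}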
| 24.342857
| 60
| 0.676056
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 130
| 0.152582
|
b791e622824560b49e99db6ea638309e033920f8
| 3,357
|
py
|
Python
|
scripts/merge.py
|
jgonzalezdemendibil/movie_publisher
|
0e58cb616e6e6c2c5cac7cb5016e0874c0409b42
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/merge.py
|
jgonzalezdemendibil/movie_publisher
|
0e58cb616e6e6c2c5cac7cb5016e0874c0409b42
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/merge.py
|
jgonzalezdemendibil/movie_publisher
|
0e58cb616e6e6c2c5cac7cb5016e0874c0409b42
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/python
# Copied from https://raw.githubusercontent.com/srv/srv_tools/kinetic/bag_tools/scripts/merge.py since this script is
# not released for indigo.
"""
Copyright (c) 2015,
Enrique Fernandez Perdomo
Clearpath Robotics, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Systems, Robotics and Vision Group, University of
the Balearican Islands nor the names of its contributors may be used to
endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from __future__ import print_function
import rosbag
import argparse
import os
import sys
def merge(inbags, outbag='output.bag', topics=None, exclude_topics=[], raw=True):
# Open output bag file:
try:
out = rosbag.Bag(outbag, 'a' if os.path.exists(outbag) else 'w')
except IOError as e:
print('Failed to open output bag file %s!: %s' % (outbag, e), file=sys.stderr)
return 127
# Write the messages from the input bag files into the output one:
for inbag in inbags:
try:
print(' Processing input bagfile: %s' % inbag)
for topic, msg, t in rosbag.Bag(inbag, 'r').read_messages(topics=topics, raw=raw):
if topic not in exclude_topics:
out.write(topic, msg, t, raw=raw)
except IOError as e:
print('Failed to open input bag file %s!: %s' % (inbag, e), file=sys.stderr)
return 127
out.close()
return 0
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Merge multiple bag files into a single one.')
parser.add_argument('inbag', help='input bagfile(s)', nargs='+')
parser.add_argument('--output', help='output bag file', default='output.bag')
parser.add_argument('--topics', help='topics to merge from the input bag files', nargs='+', default=None)
parser.add_argument('--exclude_topics', help='topics not to merge from the input bag files', nargs='+', default=[])
args = parser.parse_args()
try:
sys.exit(merge(args.inbag, args.output, args.topics, args.exclude_topics))
except Exception as e:
import traceback
traceback.print_exc()
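# Example invocation (file names are illustrative), grounded in the argparse
# options defined above:
#   python merge.py run1.bag run2.bag --output combined.bag --exclude_topics /tf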
| 42.493671
| 117
| 0.739053
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,230
| 0.664284
|
b7925483ba95f2bb530529066114343ba1164af4
| 1,567
|
py
|
Python
|
src/pyforest/auto_import.py
|
tnwei/pyforest
|
73b36298e8cbce9a861c13c01509e34f0e3397fe
|
[
"MIT"
] | 1,002
|
2019-08-13T15:00:39.000Z
|
2022-03-29T19:14:41.000Z
|
src/pyforest/auto_import.py
|
tnwei/pyforest
|
73b36298e8cbce9a861c13c01509e34f0e3397fe
|
[
"MIT"
] | 40
|
2019-08-13T19:17:49.000Z
|
2022-02-14T08:46:09.000Z
|
src/pyforest/auto_import.py
|
tnwei/pyforest
|
73b36298e8cbce9a861c13c01509e34f0e3397fe
|
[
"MIT"
] | 202
|
2019-08-13T19:37:25.000Z
|
2022-03-21T20:05:27.000Z
|
from pathlib import Path
IPYTHON_STARTUP_FOLDER = Path.home() / ".ipython" / "profile_default" / "startup"
STARTUP_FILE = IPYTHON_STARTUP_FOLDER / "pyforest_autoimport.py"
def _create_or_reset_startup_file():
if STARTUP_FILE.exists():
STARTUP_FILE.unlink() # deletes the old file
# this is important if someone messed around with the file
# if he calls our method, he expects that we repair everything
# therefore, we delete the old file and write a new, valid version
STARTUP_FILE.touch() # create a new file
def _write_into_startup_file():
with STARTUP_FILE.open("w") as file:
file.write(
f"""
# HOW TO DEACTIVATE AUTO-IMPORT:
# if you don't want to auto-import pyforest, you have two options:
# 0) if you only want to disable the auto-import temporarily and activate it later,
#    you can comment out the import statement below
# 1) if you never want to auto-import pyforest again, you can delete this file
try:
    import pyforest  # comment out this line if you temporarily don't want to auto-import pyforest
pass
except:
pass
"""
)
def setup():
if not IPYTHON_STARTUP_FOLDER.exists():
print(
f"Error: Could not find the default IPython startup folder at {IPYTHON_STARTUP_FOLDER}"
)
return False
_create_or_reset_startup_file()
_write_into_startup_file()
print(
"Success: pyforest is now available in Jupyter Notebook, Jupyter Lab and IPython because it was added to the IPython auto import"
)
return True
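# Usage (grounded in this module): run once from any Python session
#   >>> from pyforest.auto_import import setup
#   >>> setup()
# which writes the startup file shown above into the default IPython profile.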
| 31.34
| 137
| 0.699426
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 954
| 0.608807
|
b7936727533c1b6e64a5988a2f89fc499bcee832
| 23,430
|
py
|
Python
|
tests/torch/pruning/test_model_pruning_analysis.py
|
sarthakpati/nncf
|
29ad62c664c1dd53b3c8c50fc001a1b36bd1e8ac
|
[
"Apache-2.0"
] | null | null | null |
tests/torch/pruning/test_model_pruning_analysis.py
|
sarthakpati/nncf
|
29ad62c664c1dd53b3c8c50fc001a1b36bd1e8ac
|
[
"Apache-2.0"
] | 1
|
2021-07-23T07:46:52.000Z
|
2021-07-23T07:46:52.000Z
|
tests/torch/pruning/test_model_pruning_analysis.py
|
sarthakpati/nncf
|
29ad62c664c1dd53b3c8c50fc001a1b36bd1e8ac
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright (c) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from typing import Callable
from typing import Dict
from typing import List
from typing import Tuple
from typing import Type
import pytest
import torch
from torch import nn
from nncf.common.graph import NNCFNodeName
from nncf.common.pruning.clusterization import Cluster
from nncf.common.pruning.clusterization import Clusterization
from nncf.common.pruning.model_analysis import ModelAnalyzer
from nncf.common.pruning.model_analysis import cluster_special_ops
from nncf.torch.dynamic_graph.graph_tracer import ModelInputInfo
from nncf.torch.layers import NNCF_PRUNING_MODULES_DICT
from nncf.torch.nncf_network import NNCFNetwork
from nncf.torch.pruning.export_helpers import PTElementwise
from nncf.torch.pruning.export_helpers import PTIdentityMaskForwardOps
from nncf.torch.pruning.export_helpers import PT_PRUNING_OPERATOR_METATYPES
from nncf.common.pruning.utils import is_depthwise_conv
from nncf.torch.pruning.filter_pruning.algo import FilterPruningBuilder
from tests.torch.helpers import create_compressed_model_and_algo_for_test
from tests.torch.helpers import create_nncf_model_and_single_algo_builder
from tests.torch.pruning.helpers import PruningTestModelEltwise
from tests.torch.pruning.helpers import PruningTestModelSharedConvs
from tests.torch.pruning.helpers import TestModelBranching
from tests.torch.pruning.helpers import TestModelDiffConvs
from tests.torch.pruning.helpers import TestModelEltwiseCombination
from tests.torch.pruning.helpers import TestModelResidualConnection
from tests.torch.pruning.helpers import TestModelShuffleNetUnit
from tests.torch.pruning.helpers import TestModelShuffleNetUnitDW
from tests.torch.pruning.helpers import get_basic_pruning_config
# pylint: disable=protected-access
def create_nncf_model_and_pruning_builder(model: torch.nn.Module, config_params: Dict) -> Tuple[NNCFNetwork,
FilterPruningBuilder]:
nncf_config = get_basic_pruning_config(input_sample_size=[1, 1, 8, 8])
nncf_config['compression']['algorithm'] = 'filter_pruning'
for key, value in config_params.items():
nncf_config['compression']['params'][key] = value
nncf_model, pruning_builder = create_nncf_model_and_single_algo_builder(model, nncf_config)
return nncf_model, pruning_builder
class GroupPruningModulesTestStruct:
def __init__(self, model: Type[torch.nn.Module],
non_pruned_module_nodes: List[NNCFNodeName],
pruned_groups: List[List[NNCFNodeName]],
pruned_groups_by_node_id: List[List[int]],
prune_params: Tuple[bool, bool, bool]):
self.model = model
self.non_pruned_module_nodes = non_pruned_module_nodes
self.pruned_groups = pruned_groups
self.pruned_groups_by_node_id = pruned_groups_by_node_id
self.prune_params = prune_params
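# prune_params packs (prune_first_conv, prune_last_conv, prune_downsample_convs),
# in the order the tests below unpack it.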
GROUP_PRUNING_MODULES_TEST_CASES = [
GroupPruningModulesTestStruct(model=PruningTestModelEltwise,
non_pruned_module_nodes=['PruningTestModelEltwise/NNCFConv2d[conv1]/conv2d_0',
'PruningTestModelEltwise/NNCFConv2d[conv4]/conv2d_0'],
pruned_groups=[['PruningTestModelEltwise/NNCFConv2d[conv2]/conv2d_0',
'PruningTestModelEltwise/NNCFConv2d[conv3]/conv2d_0']],
pruned_groups_by_node_id=[[3, 4]],
prune_params=(False, False, False)),
GroupPruningModulesTestStruct(model=PruningTestModelEltwise,
non_pruned_module_nodes=[],
pruned_groups=[['PruningTestModelEltwise/NNCFConv2d[conv1]/conv2d_0'],
['PruningTestModelEltwise/NNCFConv2d[conv4]/conv2d_0'],
['PruningTestModelEltwise/NNCFConv2d[conv2]/conv2d_0',
'PruningTestModelEltwise/NNCFConv2d[conv3]/conv2d_0']],
pruned_groups_by_node_id=[[1], [7], [3, 4]],
prune_params=(True, True, False)),
GroupPruningModulesTestStruct(model=TestModelBranching,
non_pruned_module_nodes=[],
pruned_groups=[['TestModelBranching/NNCFConv2d[conv1]/conv2d_0',
'TestModelBranching/NNCFConv2d[conv2]/conv2d_0',
'TestModelBranching/NNCFConv2d[conv3]/conv2d_0']],
pruned_groups_by_node_id=[[1, 2, 4]],
prune_params=(True, True, False)),
GroupPruningModulesTestStruct(model=TestModelBranching,
non_pruned_module_nodes=['TestModelBranching/NNCFConv2d[conv1]/conv2d_0',
'TestModelBranching/NNCFConv2d[conv2]/conv2d_0',
'TestModelBranching/NNCFConv2d[conv3]/conv2d_0'],
pruned_groups=[['TestModelBranching/NNCFConv2d[conv4]/conv2d_0',
'TestModelBranching/NNCFConv2d[conv5]/conv2d_0']],
pruned_groups_by_node_id=[[7, 8]],
prune_params=(False, True, False)),
GroupPruningModulesTestStruct(model=TestModelBranching,
non_pruned_module_nodes=['TestModelBranching/NNCFConv2d[conv4]/conv2d_0',
'TestModelBranching/NNCFConv2d[conv5]/conv2d_0'],
pruned_groups=[['TestModelBranching/NNCFConv2d[conv1]/conv2d_0',
'TestModelBranching/NNCFConv2d[conv2]/conv2d_0',
'TestModelBranching/NNCFConv2d[conv3]/conv2d_0']],
pruned_groups_by_node_id=[[1, 2, 4]],
prune_params=(True, False, False)),
GroupPruningModulesTestStruct(model=TestModelResidualConnection,
non_pruned_module_nodes=['TestModelResidualConnection/NNCFConv2d[conv4]/conv2d_0',
'TestModelResidualConnection/NNCFConv2d[conv5]/conv2d_0'],
pruned_groups=[['TestModelResidualConnection/NNCFConv2d[conv1]/conv2d_0',
'TestModelResidualConnection/NNCFConv2d[conv2]/conv2d_0',
'TestModelResidualConnection/NNCFConv2d[conv3]/conv2d_0']],
pruned_groups_by_node_id=[[1, 2, 4]],
prune_params=(True, True, False)),
GroupPruningModulesTestStruct(model=TestModelEltwiseCombination,
non_pruned_module_nodes=[],
pruned_groups=[['TestModelEltwiseCombination/NNCFConv2d[conv1]/conv2d_0',
'TestModelEltwiseCombination/NNCFConv2d[conv2]/conv2d_0',
'TestModelEltwiseCombination/NNCFConv2d[conv4]/conv2d_0',
'TestModelEltwiseCombination/NNCFConv2d[conv3]/conv2d_0',],
['TestModelEltwiseCombination/NNCFConv2d[conv5]/conv2d_0',
'TestModelEltwiseCombination/NNCFConv2d[conv6]/conv2d_0']],
pruned_groups_by_node_id=[[1, 2, 4, 6], [8, 9]],
prune_params=(True, True, False)),
GroupPruningModulesTestStruct(model=PruningTestModelSharedConvs,
non_pruned_module_nodes=['PruningTestModelSharedConvs/NNCFConv2d[conv1]/conv2d_0',
'PruningTestModelSharedConvs/NNCFConv2d[conv3]/conv2d_0',
'PruningTestModelSharedConvs/NNCFConv2d[conv3]/conv2d_1'],
pruned_groups=[['PruningTestModelSharedConvs/NNCFConv2d[conv2]/conv2d_0',
'PruningTestModelSharedConvs/NNCFConv2d[conv2]/conv2d_1']],
pruned_groups_by_node_id=[[3, 4]],
prune_params=(False, False, False))
]
@pytest.fixture(params=GROUP_PRUNING_MODULES_TEST_CASES, name='test_input_info_struct_')
def test_input_info_struct(request):
return request.param
def test_groups(test_input_info_struct_: GroupPruningModulesTestStruct):
model = test_input_info_struct_.model
non_pruned_module_nodes = test_input_info_struct_.non_pruned_module_nodes
pruned_groups = test_input_info_struct_.pruned_groups
prune_first, prune_last, prune_downsample = test_input_info_struct_.prune_params
model = model()
nncf_config = get_basic_pruning_config(input_sample_size=[1, 1, 8, 8])
nncf_config['compression']['algorithm'] = 'filter_pruning'
nncf_config['compression']['params']['prune_first_conv'] = prune_first
nncf_config['compression']['params']['prune_last_conv'] = prune_last
nncf_config['compression']['params']['prune_downsample_convs'] = prune_downsample
compressed_model, compression_ctrl = create_compressed_model_and_algo_for_test(model, nncf_config)
# 1. Check all not pruned modules
clusters = compression_ctrl.pruned_module_groups_info
all_pruned_modules_info = clusters.get_all_nodes()
all_pruned_modules = [info.module for info in all_pruned_modules_info]
print([minfo.node_name for minfo in all_pruned_modules_info])
for node_name in non_pruned_module_nodes:
module = compressed_model.get_containing_module(node_name)
assert module is not None and module not in all_pruned_modules
# 2. Check that all pruned groups are valid
for group in pruned_groups:
first_node_name = group[0]
cluster = clusters.get_cluster_containing_element(first_node_name)
cluster_modules = [n.module for n in cluster.elements]
group_modules = [compressed_model.get_containing_module(node_name) for node_name in group]
assert cluster_modules == group_modules
def test_pruning_node_selector(test_input_info_struct_: GroupPruningModulesTestStruct):
model = test_input_info_struct_.model
non_pruned_module_nodes = test_input_info_struct_.non_pruned_module_nodes
pruned_groups_by_node_id = test_input_info_struct_.pruned_groups_by_node_id
prune_first, prune_last, prune_downsample = test_input_info_struct_.prune_params
pruning_operations = [v.op_func_name for v in NNCF_PRUNING_MODULES_DICT]
grouping_operations = PTElementwise.get_all_op_aliases()
from nncf.common.pruning.pruning_node_selector import PruningNodeSelector
pruning_node_selector = PruningNodeSelector(PT_PRUNING_OPERATOR_METATYPES,
pruning_operations,
grouping_operations,
None,
None,
prune_first,
prune_last,
prune_downsample)
model = model()
model.eval()
nncf_network = NNCFNetwork(model, input_infos=[ModelInputInfo([1, 1, 8, 8])])
graph = nncf_network.get_original_graph()
pruning_groups = pruning_node_selector.create_pruning_groups(graph)
# 1. Check all not pruned modules
all_pruned_nodes = pruning_groups.get_all_nodes()
all_pruned_modules = [nncf_network.get_containing_module(node.node_name)
for node in all_pruned_nodes]
for node_name in non_pruned_module_nodes:
module = nncf_network.get_containing_module(node_name)
assert module is not None and module not in all_pruned_modules
# 2. Check that all pruned groups are valid
for group_by_id in pruned_groups_by_node_id:
first_node_id = group_by_id[0]
cluster = pruning_groups.get_cluster_containing_element(first_node_id)
cluster_node_ids = [n.node_id for n in cluster.elements]
cluster_node_ids.sort()
assert cluster_node_ids == group_by_id
class GroupSpecialModulesTestStruct:
def __init__(self, model: Callable, eltwise_clusters):
self.model = model
self.eltwise_clusters = eltwise_clusters
GROUP_SPECIAL_MODULES_TEST_CASES = [
GroupSpecialModulesTestStruct(
model=TestModelBranching,
eltwise_clusters=[[3, 5], [9]],
),
GroupSpecialModulesTestStruct(
model=TestModelResidualConnection,
eltwise_clusters=[[3, 5], [9]],
),
GroupSpecialModulesTestStruct(
model=TestModelEltwiseCombination,
eltwise_clusters=[[3, 5, 7], [10]]
)
]
@pytest.fixture(params=GROUP_SPECIAL_MODULES_TEST_CASES, name='test_special_ops_struct')
def test_special_ops_struct_(request):
return request.param
def test_group_special_nodes(test_special_ops_struct: GroupSpecialModulesTestStruct):
model = test_special_ops_struct.model()
nncf_model, algo_builder = create_nncf_model_and_pruning_builder(model,
{'prune_first_conv': True,
'prune_last_conv': True})
special_ops_clusterization = cluster_special_ops(nncf_model.get_original_graph(),
algo_builder.get_types_of_grouping_ops(),
PTIdentityMaskForwardOps.get_all_op_aliases())
for ref_cluster in test_special_ops_struct.eltwise_clusters:
cluster = special_ops_clusterization.get_cluster_containing_element(ref_cluster[0])
assert sorted([node.node_id for node in cluster.elements]) == sorted(ref_cluster)
class ModelAnalyserTestStruct:
def __init__(self, model: nn.Module, ref_can_prune: dict):
self.model = model
self.ref_can_prune = ref_can_prune
MODEL_ANALYSER_TEST_CASES = [
ModelAnalyserTestStruct(
model=TestModelResidualConnection,
ref_can_prune={0: True, 1: True, 2: True, 3: True, 4: True, 5: True, 6: True, 7: False, 8: False, 9: False,
10: False, 11: False, 12: False}
)
]
@pytest.fixture(params=MODEL_ANALYSER_TEST_CASES, name='test_struct')
def test_struct_(request):
return request.param
def test_model_analyzer(test_struct: GroupSpecialModulesTestStruct):
model = test_struct.model()
nncf_model, _ = create_nncf_model_and_pruning_builder(model, {'prune_first_conv': True, 'prune_last_conv': True})
model_analyser = ModelAnalyzer(nncf_model.get_original_graph(), PT_PRUNING_OPERATOR_METATYPES, is_depthwise_conv)
can_prune_analysis = model_analyser.analyse_model_before_pruning()
for node_id in can_prune_analysis.keys():
assert can_prune_analysis[node_id] == test_struct.ref_can_prune[node_id]
class ModulePrunableTestStruct:
def __init__(self, model: NNCFNetwork, config_params: dict, is_module_prunable: Dict[NNCFNodeName, bool]):
self.model = model
self.config_params = config_params
self.is_module_prunable = is_module_prunable
IS_MODULE_PRUNABLE_TEST_CASES = [
ModulePrunableTestStruct(
model=TestModelDiffConvs,
config_params={},
is_module_prunable={'TestModelDiffConvs/NNCFConv2d[conv1]/conv2d_0': False,
'TestModelDiffConvs/NNCFConv2d[conv2]/conv2d_0': True,
'TestModelDiffConvs/NNCFConv2d[conv3]/conv2d_0': False,
'TestModelDiffConvs/NNCFConv2d[conv4]/conv2d_0': False},
),
ModulePrunableTestStruct(
model=TestModelDiffConvs,
config_params={'prune_first_conv': True, 'prune_last_conv': True},
is_module_prunable={'TestModelDiffConvs/NNCFConv2d[conv1]/conv2d_0': True,
'TestModelDiffConvs/NNCFConv2d[conv2]/conv2d_0': True,
'TestModelDiffConvs/NNCFConv2d[conv3]/conv2d_0': False,
'TestModelDiffConvs/NNCFConv2d[conv4]/conv2d_0': False},
),
ModulePrunableTestStruct(
model=TestModelDiffConvs,
config_params={'prune_first_conv': True, 'prune_last_conv': True, 'prune_downsample_convs': True},
is_module_prunable={'TestModelDiffConvs/NNCFConv2d[conv1]/conv2d_0': True,
'TestModelDiffConvs/NNCFConv2d[conv2]/conv2d_0': True,
'TestModelDiffConvs/NNCFConv2d[conv3]/conv2d_0': True,
'TestModelDiffConvs/NNCFConv2d[conv4]/conv2d_0': False},
),
ModulePrunableTestStruct(
model=TestModelBranching,
config_params={},
is_module_prunable={'TestModelBranching/NNCFConv2d[conv1]/conv2d_0': False,
'TestModelBranching/NNCFConv2d[conv2]/conv2d_0': False,
'TestModelBranching/NNCFConv2d[conv3]/conv2d_0': False,
'TestModelBranching/NNCFConv2d[conv4]/conv2d_0': False,
'TestModelBranching/NNCFConv2d[conv5]/conv2d_0': False},
),
ModulePrunableTestStruct(
model=TestModelBranching,
config_params={'prune_first_conv': True, 'prune_last_conv': True, },
is_module_prunable={'TestModelBranching/NNCFConv2d[conv1]/conv2d_0': True,
'TestModelBranching/NNCFConv2d[conv2]/conv2d_0': True,
'TestModelBranching/NNCFConv2d[conv3]/conv2d_0': True,
'TestModelBranching/NNCFConv2d[conv4]/conv2d_0': True,
'TestModelBranching/NNCFConv2d[conv5]/conv2d_0': True},
),
ModulePrunableTestStruct(
model=TestModelShuffleNetUnitDW,
config_params={'prune_first_conv': True, 'prune_last_conv': True, },
is_module_prunable={
'TestModelShuffleNetUnitDW/NNCFConv2d[conv]/conv2d_0': True,
'TestModelShuffleNetUnitDW/TestShuffleUnit[unit1]/NNCFConv2d[dw_conv4]/conv2d_0': False,
'TestModelShuffleNetUnitDW/TestShuffleUnit[unit1]/NNCFConv2d[expand_conv5]/conv2d_0': True,
'TestModelShuffleNetUnitDW/TestShuffleUnit[unit1]/NNCFConv2d[compress_conv1]/conv2d_0': True,
'TestModelShuffleNetUnitDW/TestShuffleUnit[unit1]/NNCFConv2d[dw_conv2]/conv2d_0': False,
'TestModelShuffleNetUnitDW/TestShuffleUnit[unit1]/NNCFConv2d[expand_conv3]/conv2d_0': True},
),
ModulePrunableTestStruct(
model=TestModelShuffleNetUnit,
config_params={'prune_first_conv': True, 'prune_last_conv': True, },
is_module_prunable={'TestModelShuffleNetUnit/NNCFConv2d[conv]/conv2d_0': True,
'TestModelShuffleNetUnit/TestShuffleUnit[unit1]/NNCFConv2d[compress_conv1]/conv2d_0': True,
'TestModelShuffleNetUnit/TestShuffleUnit[unit1]/NNCFConv2d[dw_conv2]/conv2d_0': True,
'TestModelShuffleNetUnit/TestShuffleUnit[unit1]/NNCFConv2d[expand_conv3]/conv2d_0': True},
)
]
@pytest.fixture(params=IS_MODULE_PRUNABLE_TEST_CASES, name='test_prunable_struct')
def test_prunable_struct_(request):
return request.param
def test_is_module_prunable(test_prunable_struct: ModulePrunableTestStruct):
model = test_prunable_struct.model()
nncf_model, algo_builder = create_nncf_model_and_pruning_builder(model, test_prunable_struct.config_params)
graph = nncf_model.get_original_graph()
for module_node_name in test_prunable_struct.is_module_prunable:
nncf_node = graph.get_node_by_name(module_node_name)
is_prunable, _ = algo_builder.pruning_node_selector._is_module_prunable(graph, nncf_node)
assert is_prunable == test_prunable_struct.is_module_prunable[module_node_name]
class SimpleNode:
def __init__(self, id_):
self.id = id_
def test_nodes_cluster():
# test creating
cluster_id = 0
nodes = [SimpleNode(0)]
nodes_orders = [0]
cluster = Cluster[SimpleNode](cluster_id, nodes, nodes_orders)
assert cluster.id == cluster_id
assert cluster.elements == nodes
assert cluster.importance == max(nodes_orders)
# test add nodes
new_nodes = [SimpleNode(1), SimpleNode(2)]
new_importance = 4
cluster.add_elements(new_nodes, new_importance)
assert cluster.importance == new_importance
assert cluster.elements == nodes + new_nodes
# test clean
cluster.clean_cluster()
assert cluster.elements == []
assert cluster.importance == 0
def test_clusterization():
nodes_1 = [SimpleNode(0), SimpleNode(1)]
nodes_2 = [SimpleNode(2), SimpleNode(3)]
cluster_1 = Cluster[SimpleNode](1, nodes_1, [node.id for node in nodes_1])
cluster_2 = Cluster[SimpleNode](2, nodes_2, [node.id for node in nodes_2])
clusterization = Clusterization()
# test adding of clusters
clusterization.add_cluster(cluster_1)
assert 1 in clusterization.clusters
assert 0 in clusterization._element_to_cluster and 1 in clusterization._element_to_cluster
# test get_cluster_by_id
assert clusterization.get_cluster_by_id(1) == cluster_1
with pytest.raises(IndexError) as err:
clusterization.get_cluster_by_id(5)
assert 'No cluster with id' in str(err.value)
# test get_cluster_containing_element
assert clusterization.get_cluster_containing_element(1) == cluster_1
with pytest.raises(IndexError) as err:
clusterization.get_cluster_containing_element(10)
assert 'No cluster for node' in str(err.value)
# test deleting
clusterization.delete_cluster(1)
with pytest.raises(IndexError) as err:
clusterization.get_cluster_by_id(1)
clusterization.add_cluster(cluster_1)
clusterization.add_cluster(cluster_2)
# test get_all_clusters
assert clusterization.get_all_clusters() == [cluster_1, cluster_2]
# test get_all_nodes
assert clusterization.get_all_nodes() == nodes_1 + nodes_2
# test merge clusters
clusterization.merge_clusters(1, 2)
assert 2 in clusterization.clusters
with pytest.raises(IndexError) as err:
clusterization.get_cluster_by_id(1)
assert set(clusterization.get_all_nodes()) == set(nodes_1 + nodes_2)
| 51.721854
| 119
| 0.660606
| 1,234
| 0.052668
| 0
| 0
| 566
| 0.024157
| 0
| 0
| 5,308
| 0.226547
|
b7937e5f465626651bc0a037a47d5bc1cfd62cdf
| 261
|
py
|
Python
|
tests/future_division_eval.py
|
mayl8822/onelinerizer
|
bad341f261d35e56872b4c22297a44dc6d5cfab3
|
[
"MIT"
] | 1,062
|
2015-11-18T01:04:33.000Z
|
2022-03-29T07:13:30.000Z
|
tests/future_division_eval.py
|
CoDeRgAnEsh/1line
|
507ef35b0006fc2998463dee92c2fdae53fe0694
|
[
"MIT"
] | 26
|
2015-11-17T06:58:07.000Z
|
2022-01-15T18:11:16.000Z
|
tests/future_division_eval.py
|
CoDeRgAnEsh/1line
|
507ef35b0006fc2998463dee92c2fdae53fe0694
|
[
"MIT"
] | 100
|
2015-11-17T09:01:22.000Z
|
2021-09-12T13:58:28.000Z
|
from __future__ import division
print eval('1/2')
exec('print 1/2')
eval(compile('print 1/2', 'wat.py', 'exec'))
print eval(compile('1/2', 'wat.py', 'eval'))
print eval(compile('1/2', 'wat.py', 'eval', 0, 0))
print eval(compile('1/2', 'wat.py', 'eval', 0, ~0))
| 32.625
| 51
| 0.62069
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 98
| 0.375479
|
b7939b77631def2ec27e1370419a519e33e2db7a
| 180
|
py
|
Python
|
matchms/similarity/spec2vec/__init__.py
|
fossabot/matchms
|
090db31b22829dc224e5ed78fb4901e8c14a8e92
|
[
"Apache-2.0"
] | null | null | null |
matchms/similarity/spec2vec/__init__.py
|
fossabot/matchms
|
090db31b22829dc224e5ed78fb4901e8c14a8e92
|
[
"Apache-2.0"
] | null | null | null |
matchms/similarity/spec2vec/__init__.py
|
fossabot/matchms
|
090db31b22829dc224e5ed78fb4901e8c14a8e92
|
[
"Apache-2.0"
] | null | null | null |
from .Document import Document
from .SpectrumDocument import SpectrumDocument
from .Spec2Vec import Spec2Vec
__all__ = [
"Document",
"SpectrumDocument",
"Spec2Vec"
]
| 16.363636
| 46
| 0.738889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 38
| 0.211111
|
b794d1e8ea63cc3bf3146f5e45ab36528d92826b
| 22,826
|
py
|
Python
|
cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_wanphy_ui_oper.py
|
tkamata-test/ydk-py
|
b637e7853a8edbbd31fbc05afa3aa4110b31c5f9
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_wanphy_ui_oper.py
|
tkamata-test/ydk-py
|
b637e7853a8edbbd31fbc05afa3aa4110b31c5f9
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_wanphy_ui_oper.py
|
tkamata-test/ydk-py
|
b637e7853a8edbbd31fbc05afa3aa4110b31c5f9
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
""" Cisco_IOS_XR_wanphy_ui_oper
This module contains a collection of YANG definitions
for Cisco IOS\-XR wanphy\-ui package operational data.
This module contains definitions
for the following management objects\:
wanphy\: WANPHY operational data
Copyright (c) 2013\-2016 by Cisco Systems, Inc.
All rights reserved.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
class WanphyAlarmRepStatusEnum(Enum):
"""
WanphyAlarmRepStatusEnum
WANPHY Alarm Report Status
.. data:: disable = 0
Alarm reporting is disable
.. data:: enable = 1
Alarm reporting is enable
"""
disable = 0
enable = 1
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_wanphy_ui_oper as meta
return meta._meta_table['WanphyAlarmRepStatusEnum']
class WanphyModeInfoEnum(Enum):
"""
WanphyModeInfoEnum
WANPHY Modes
.. data:: lan = 0
LAN mode
.. data:: wan = 1
WAN mode
"""
lan = 0
wan = 1
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_wanphy_ui_oper as meta
return meta._meta_table['WanphyModeInfoEnum']
class Wanphy(object):
"""
WANPHY operational data
.. attribute:: controllers
All WANPHY controller operational data
**type**\: :py:class:`Controllers <ydk.models.cisco_ios_xr.Cisco_IOS_XR_wanphy_ui_oper.Wanphy.Controllers>`
"""
_prefix = 'wanphy-ui-oper'
_revision = '2015-11-09'
def __init__(self):
self.controllers = Wanphy.Controllers()
self.controllers.parent = self
class Controllers(object):
"""
All WANPHY controller operational data
.. attribute:: controller
WANPHY controller operational data
**type**\: list of :py:class:`Controller <ydk.models.cisco_ios_xr.Cisco_IOS_XR_wanphy_ui_oper.Wanphy.Controllers.Controller>`
"""
_prefix = 'wanphy-ui-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.controller = YList()
self.controller.parent = self
self.controller.name = 'controller'
class Controller(object):
"""
WANPHY controller operational data
.. attribute:: controller_name <key>
Controller name
**type**\: str
**pattern:** (([a\-zA\-Z0\-9\_]\*\\d+/){3,4}\\d+)\|(([a\-zA\-Z0\-9\_]\*\\d+/){3,4}\\d+\\.\\d+)\|(([a\-zA\-Z0\-9\_]\*\\d+/){2}([a\-zA\-Z0\-9\_]\*\\d+))\|(([a\-zA\-Z0\-9\_]\*\\d+/){2}([a\-zA\-Z0\-9\_]+))\|([a\-zA\-Z0\-9\_\-]\*\\d+)\|([a\-zA\-Z0\-9\_\-]\*\\d+\\.\\d+)\|(mpls)\|(dwdm)
.. attribute:: info
WANPHY controller operational data
**type**\: :py:class:`Info <ydk.models.cisco_ios_xr.Cisco_IOS_XR_wanphy_ui_oper.Wanphy.Controllers.Controller.Info>`
"""
_prefix = 'wanphy-ui-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.controller_name = None
self.info = Wanphy.Controllers.Controller.Info()
self.info.parent = self
class Info(object):
"""
WANPHY controller operational data
.. attribute:: admin_mode
Configuration Mode
**type**\: :py:class:`WanphyModeInfoEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_wanphy_ui_oper.WanphyModeInfoEnum>`
.. attribute:: line_ais
Line AIS
**type**\: int
**range:** 0..4294967295
.. attribute:: line_bip
Line BIP(B2)
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: line_febe
Line FEBE
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: line_rdi
Line RDI
**type**\: int
**range:** 0..4294967295
.. attribute:: operational_mode
Operational Mode
**type**\: :py:class:`WanphyModeInfoEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_wanphy_ui_oper.WanphyModeInfoEnum>`
.. attribute:: path_ais
Path AIS
**type**\: int
**range:** 0..4294967295
.. attribute:: path_bip
Path BIP(B3)
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: path_febe
Path FEBE
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: path_lop
Path LOP
**type**\: int
**range:** 0..4294967295
.. attribute:: path_newptr
Path NEWPTR
**type**\: int
**range:** 0..4294967295
.. attribute:: path_nse
Path NSE
**type**\: int
**range:** 0..4294967295
.. attribute:: path_pse
Path PSE
**type**\: int
**range:** 0..4294967295
.. attribute:: path_rdi
Path RDI
**type**\: int
**range:** 0..4294967295
.. attribute:: port_state
Port State
**type**\: int
**range:** 0..4294967295
.. attribute:: register_j1_rx0
Register J1\-Rx0
**type**\: int
**range:** 0..4294967295
.. attribute:: register_j1_rx1
Register J1\-Rx1
**type**\: int
**range:** 0..4294967295
.. attribute:: register_j1_rx2
Register J1\-Rx2
**type**\: int
**range:** 0..4294967295
.. attribute:: register_j1_rx3
Register J1\-Rx3
**type**\: int
**range:** 0..4294967295
.. attribute:: register_j1_rx4
Register J1\-Rx4
**type**\: int
**range:** 0..4294967295
.. attribute:: register_j1_rx5
Register J1\-Rx5
**type**\: int
**range:** 0..4294967295
.. attribute:: register_j1_rx6
Register J1\-Rx6
**type**\: int
**range:** 0..4294967295
.. attribute:: register_j1_rx7
Register J1\-Rx7
**type**\: int
**range:** 0..4294967295
.. attribute:: register_l_bip
Register L\_BIP
**type**\: int
**range:** 0..4294967295
.. attribute:: register_l_fe_bip
Register L\_FE\_BIP
**type**\: int
**range:** 0..4294967295
.. attribute:: register_p_bec
Register P\_BEC
**type**\: int
**range:** 0..4294967295
.. attribute:: register_p_febe
Register P\_FEBE
**type**\: int
**range:** 0..4294967295
.. attribute:: register_s_bip
Register S\_BIP
**type**\: int
**range:** 0..4294967295
.. attribute:: remote_ip
Remote IP Address
**type**\: str
.. attribute:: sd_ber_report
SD\_BER Report
**type**\: :py:class:`WanphyAlarmRepStatusEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_wanphy_ui_oper.WanphyAlarmRepStatusEnum>`
.. attribute:: sd_ber_threshold
BER thresholds\: SD. Value 'd' in 10e\-%d
**type**\: int
**range:** 0..4294967295
.. attribute:: section_bip
Section BIP(B1)
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: section_lof
Section LOF
**type**\: int
**range:** 0..4294967295
.. attribute:: section_los
Section LOS
**type**\: int
**range:** 0..4294967295
.. attribute:: sf_ber_report
SF\_BER Report
**type**\: :py:class:`WanphyAlarmRepStatusEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_wanphy_ui_oper.WanphyAlarmRepStatusEnum>`
.. attribute:: sf_ber_threshold
BER thresholds\: SF. Value 'd' in 10e\-%d
**type**\: int
**range:** 0..4294967295
.. attribute:: wanphy_poll_timer
wanphy poll timer
**type**\: int
**range:** 0..4294967295
.. attribute:: wis_alarms_feaisp
WIS Alarms FEAISP
**type**\: int
**range:** 0..4294967295
.. attribute:: wis_alarms_felcdp
WIS Alarms FELCDP
**type**\: int
**range:** 0..4294967295
.. attribute:: wis_alarms_lfebip
WIS Alarms LFEBIP
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: wis_alarms_pbec
WIS Alarms PBEC
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: wis_alarms_plcd
WIS Alarms PLCD
**type**\: int
**range:** 0..4294967295
.. attribute:: wis_alarms_plmp
WIS Alarms PLMP
**type**\: int
**range:** 0..4294967295
.. attribute:: wis_alarms_ser
WIS Alarms SER
**type**\: int
**range:** 0..4294967295
.. attribute:: wis_alarms_wlos
WIS Alarms WLOS
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'wanphy-ui-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.admin_mode = None
self.line_ais = None
self.line_bip = None
self.line_febe = None
self.line_rdi = None
self.operational_mode = None
self.path_ais = None
self.path_bip = None
self.path_febe = None
self.path_lop = None
self.path_newptr = None
self.path_nse = None
self.path_pse = None
self.path_rdi = None
self.port_state = None
self.register_j1_rx0 = None
self.register_j1_rx1 = None
self.register_j1_rx2 = None
self.register_j1_rx3 = None
self.register_j1_rx4 = None
self.register_j1_rx5 = None
self.register_j1_rx6 = None
self.register_j1_rx7 = None
self.register_l_bip = None
self.register_l_fe_bip = None
self.register_p_bec = None
self.register_p_febe = None
self.register_s_bip = None
self.remote_ip = None
self.sd_ber_report = None
self.sd_ber_threshold = None
self.section_bip = None
self.section_lof = None
self.section_los = None
self.sf_ber_report = None
self.sf_ber_threshold = None
self.wanphy_poll_timer = None
self.wis_alarms_feaisp = None
self.wis_alarms_felcdp = None
self.wis_alarms_lfebip = None
self.wis_alarms_pbec = None
self.wis_alarms_plcd = None
self.wis_alarms_plmp = None
self.wis_alarms_ser = None
self.wis_alarms_wlos = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-wanphy-ui-oper:info'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.admin_mode is not None:
return True
if self.line_ais is not None:
return True
if self.line_bip is not None:
return True
if self.line_febe is not None:
return True
if self.line_rdi is not None:
return True
if self.operational_mode is not None:
return True
if self.path_ais is not None:
return True
if self.path_bip is not None:
return True
if self.path_febe is not None:
return True
if self.path_lop is not None:
return True
if self.path_newptr is not None:
return True
if self.path_nse is not None:
return True
if self.path_pse is not None:
return True
if self.path_rdi is not None:
return True
if self.port_state is not None:
return True
if self.register_j1_rx0 is not None:
return True
if self.register_j1_rx1 is not None:
return True
if self.register_j1_rx2 is not None:
return True
if self.register_j1_rx3 is not None:
return True
if self.register_j1_rx4 is not None:
return True
if self.register_j1_rx5 is not None:
return True
if self.register_j1_rx6 is not None:
return True
if self.register_j1_rx7 is not None:
return True
if self.register_l_bip is not None:
return True
if self.register_l_fe_bip is not None:
return True
if self.register_p_bec is not None:
return True
if self.register_p_febe is not None:
return True
if self.register_s_bip is not None:
return True
if self.remote_ip is not None:
return True
if self.sd_ber_report is not None:
return True
if self.sd_ber_threshold is not None:
return True
if self.section_bip is not None:
return True
if self.section_lof is not None:
return True
if self.section_los is not None:
return True
if self.sf_ber_report is not None:
return True
if self.sf_ber_threshold is not None:
return True
if self.wanphy_poll_timer is not None:
return True
if self.wis_alarms_feaisp is not None:
return True
if self.wis_alarms_felcdp is not None:
return True
if self.wis_alarms_lfebip is not None:
return True
if self.wis_alarms_pbec is not None:
return True
if self.wis_alarms_plcd is not None:
return True
if self.wis_alarms_plmp is not None:
return True
if self.wis_alarms_ser is not None:
return True
if self.wis_alarms_wlos is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_wanphy_ui_oper as meta
return meta._meta_table['Wanphy.Controllers.Controller.Info']['meta_info']
@property
def _common_path(self):
if self.controller_name is None:
raise YPYModelError('Key property controller_name is None')
return '/Cisco-IOS-XR-wanphy-ui-oper:wanphy/Cisco-IOS-XR-wanphy-ui-oper:controllers/Cisco-IOS-XR-wanphy-ui-oper:controller[Cisco-IOS-XR-wanphy-ui-oper:controller-name = ' + str(self.controller_name) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.controller_name is not None:
return True
if self.info is not None and self.info._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_wanphy_ui_oper as meta
return meta._meta_table['Wanphy.Controllers.Controller']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-wanphy-ui-oper:wanphy/Cisco-IOS-XR-wanphy-ui-oper:controllers'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.controller is not None:
for child_ref in self.controller:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_wanphy_ui_oper as meta
return meta._meta_table['Wanphy.Controllers']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-wanphy-ui-oper:wanphy'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.controllers is not None and self.controllers._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_wanphy_ui_oper as meta
return meta._meta_table['Wanphy']['meta_info']
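# Hedged usage sketch (assumes a reachable NETCONF-enabled IOS-XR device and
# the ydk-py runtime; the address and credentials are placeholders):
#   from ydk.services import CRUDService
#   from ydk.providers import NetconfServiceProvider
#   provider = NetconfServiceProvider(address="192.0.2.1", username="admin", password="admin")
#   wanphy_state = CRUDService().read(provider, Wanphy())  # fetch operational data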
| 30.434667
| 293
| 0.423596
| 22,304
| 0.977131
| 0
| 0
| 2,097
| 0.091869
| 0
| 0
| 12,474
| 0.546482
|
b79500b1906f3f8e77f736bcb5d3e403aa5d9a7d
| 773
|
py
|
Python
|
test/unit/solver/test_lamb.py
|
megvii-research/basecls
|
6b395a0a888370b4523764afb78a5a7634a3f6cd
|
[
"Apache-2.0"
] | 23
|
2021-12-08T02:35:01.000Z
|
2022-03-16T02:23:19.000Z
|
test/unit/solver/test_lamb.py
|
megvii-research/basecls
|
6b395a0a888370b4523764afb78a5a7634a3f6cd
|
[
"Apache-2.0"
] | 4
|
2021-12-23T11:31:17.000Z
|
2022-02-28T01:35:31.000Z
|
test/unit/solver/test_lamb.py
|
megvii-research/basecls
|
6b395a0a888370b4523764afb78a5a7634a3f6cd
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
import megengine as mge
import numpy as np
import pytest
from megengine.autodiff import GradManager
from basecls.solver.optimizer import LAMB
@pytest.mark.parametrize("weight_decay", [0.0, 0.001])
@pytest.mark.parametrize("betas", [(0.9, 0.999)])
def test_lamb(weight_decay, betas):
w = np.random.rand(4, 8).astype("float32")
x = np.random.rand(8, 2).astype("float32")
lr = 0.1
n = 5
w = mge.Parameter(w)
x = mge.Tensor(x)
gm = GradManager()
gm.attach([w])
lamb = LAMB([w], lr=lr, weight_decay=weight_decay, betas=betas)
for _ in range(n):
with gm:
y = (w @ x).sum()
gm.backward(y)
lamb.step().clear_grad()
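# Note: this is a smoke test; it only verifies that LAMB update steps run
# without error across the parametrized weight_decay/betas values and does
# not compare the updated parameters against reference values.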
| 26.655172
| 67
| 0.633894
| 0
| 0
| 0
| 0
| 545
| 0.705045
| 0
| 0
| 119
| 0.153946
|
b7961cc47beaac8f9cce44564f475f951b32b24b
| 2,180
|
py
|
Python
|
onenote-dump/__main__.py
|
genericmoniker/onenote-dump
|
78b2ee2dcf854f6bc989d3081f42c97f245e49cc
|
[
"MIT"
] | 39
|
2019-07-17T06:08:56.000Z
|
2022-03-22T09:48:58.000Z
|
onenote-dump/__main__.py
|
nicknickel/onenote-dump
|
84607f89f8c14c51796ab660dc0e741af3ccf55c
|
[
"MIT"
] | 5
|
2020-07-02T02:44:38.000Z
|
2021-11-11T18:16:13.000Z
|
onenote-dump/__main__.py
|
nicknickel/onenote-dump
|
84607f89f8c14c51796ab660dc0e741af3ccf55c
|
[
"MIT"
] | 6
|
2020-11-01T12:46:56.000Z
|
2022-01-12T22:24:50.000Z
|
import argparse
import logging
import os
import pathlib
import time
import log
import onenote_auth
import onenote
import pipeline
logger = logging.getLogger()
def main():
args = parse_args()
if args.verbose:
log.setup_logging(logging.DEBUG)
else:
log.setup_logging(logging.INFO)
# Allow a redirect URI over plain HTTP (no TLS):
os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = "1"
# Authorize the app:
s = onenote_auth.get_session(args.new_session)
output_dir = pathlib.Path(args.output_dir)
output_dir.mkdir(parents=True, exist_ok=True)
logger.info('Writing to "%s"', output_dir)
start_time = time.perf_counter()
pipe = pipeline.Pipeline(s, args.notebook, output_dir)
pages = 0
try:
for page_count, page in enumerate(
onenote.get_notebook_pages(s, args.notebook), 1
):
log_msg = f'Page {page_count}: {page["title"]}'
if args.start_page is None or page_count >= args.start_page:
logger.info(log_msg)
pipe.add_page(page)
pages += 1
else:
logger.info(log_msg + ' [skipped]')
if args.max_pages and page_count > args.max_pages:
break
except onenote.NotebookNotFound as e:
logger.error(str(e))
pipe.done()
stop_time = time.perf_counter()
logger.info('Done!')
logger.info('%s pages in %.1f seconds', pages, stop_time - start_time)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('notebook', help='display name of notebook to dump')
parser.add_argument('output_dir', help='directory to which to output')
parser.add_argument(
'-m', '--max-pages', type=int, help='max pages to dump'
)
parser.add_argument(
'-s', '--start-page', type=int, help='start page number to dump'
)
parser.add_argument(
'-n',
'--new-session',
action="store_true",
help='ignore saved auth token',
)
parser.add_argument(
'-v', '--verbose', action="store_true", help='show verbose output'
)
return parser.parse_args()
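# Example invocation (notebook name and output path are illustrative),
# grounded in the argparse options defined above:
#   python onenote-dump "My Notebook" ./dump -v --max-pages 10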
if __name__ == '__main__':
    main()
| 27.25
| 76
| 0.624771
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 470
| 0.215596
|
b7971aac602f7d60f9d06ecc4d523b5988cef546
| 10,391
|
py
|
Python
|
venv/lib/python3.8/site-packages/ansible_collections/community/aws/tests/unit/plugins/modules/test_data_pipeline.py
|
saeedya/docker-ansible
|
6fb0cfc6bc4a5925b21380952a5a4502ec02119a
|
[
"Apache-2.0"
] | 7
|
2021-11-16T04:05:42.000Z
|
2022-02-19T21:14:29.000Z
|
venv/lib/python3.8/site-packages/ansible_collections/community/aws/tests/unit/plugins/modules/test_data_pipeline.py
|
saeedya/docker-ansible
|
6fb0cfc6bc4a5925b21380952a5a4502ec02119a
|
[
"Apache-2.0"
] | 1
|
2022-03-12T02:25:26.000Z
|
2022-03-12T02:25:26.000Z
|
venv/lib/python3.8/site-packages/ansible_collections/community/aws/tests/unit/plugins/modules/test_data_pipeline.py
|
saeedya/docker-ansible
|
6fb0cfc6bc4a5925b21380952a5a4502ec02119a
|
[
"Apache-2.0"
] | 1
|
2022-03-01T05:43:07.000Z
|
2022-03-01T05:43:07.000Z
|
# (c) 2017 Red Hat Inc.
#
# This file is part of Ansible
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import collections
import os
import json
import pytest
from ansible.module_utils._text import to_text
# Magic... Incorrectly identified by pylint as unused
from ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures import maybe_sleep # pylint: disable=unused-import
from ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures import placeboify # pylint: disable=unused-import
from ansible_collections.community.aws.plugins.modules import data_pipeline
# test_data_pipeline.py requires the `boto3` and `botocore` modules
boto3 = pytest.importorskip('boto3')
@pytest.fixture(scope='module')
def dp_setup():
"""
Yield a FakeModule object, data pipeline id of a vanilla data pipeline, and data pipeline objects
This fixture is module-scoped, since this can be reused for multiple tests.
"""
Dependencies = collections.namedtuple("Dependencies", ["module", "data_pipeline_id", "objects"])
# get objects to use to test populating and activating the data pipeline
if not os.getenv('PLACEBO_RECORD'):
objects = [{"name": "Every 1 day",
"id": "DefaultSchedule",
"fields": []},
{"name": "Default",
"id": "Default",
"fields": []}]
else:
s3 = boto3.client('s3')
data = s3.get_object(Bucket="ansible-test-datapipeline", Key="pipeline-object/new.json")
objects = json.loads(to_text(data['Body'].read()))
# create a module with vanilla data pipeline parameters
params = {'name': 'ansible-test-create-pipeline',
'description': 'ansible-datapipeline-unit-test',
'state': 'present',
'timeout': 300,
'objects': [],
'tags': {},
'parameters': [],
'values': []}
module = FakeModule(**params)
# yield a module, the data pipeline id, and the data pipeline objects (that are not yet defining the vanilla data pipeline)
if not os.getenv('PLACEBO_RECORD'):
yield Dependencies(module=module, data_pipeline_id='df-0590406117G8DPQZY2HA', objects=objects)
else:
connection = boto3.client('datapipeline')
changed, result = data_pipeline.create_pipeline(connection, module)
data_pipeline_id = result['data_pipeline']['pipeline_id']
yield Dependencies(module=module, data_pipeline_id=data_pipeline_id, objects=objects)
# remove data pipeline
if os.getenv('PLACEBO_RECORD'):
module.params.update(state='absent')
data_pipeline.delete_pipeline(connection, module)
class FakeModule(object):
def __init__(self, **kwargs):
self.params = kwargs
def fail_json(self, *args, **kwargs):
self.exit_args = args
self.exit_kwargs = kwargs
raise Exception('FAIL')
def exit_json(self, *args, **kwargs):
self.exit_args = args
self.exit_kwargs = kwargs
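# A minimal sketch (not part of the upstream test suite) of how FakeModule
# stands in for AnsibleModule: params are exposed directly, and
# exit_json()/fail_json() record their kwargs for later assertions.
def _fake_module_demo():
    m = FakeModule(name='demo-pipeline', state='present')
    m.exit_json(changed=True, msg='ok')
    assert m.exit_kwargs == {'changed': True, 'msg': 'ok'}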
def test_create_pipeline_already_exists(placeboify, maybe_sleep, dp_setup):
connection = placeboify.client('datapipeline')
changed, result = data_pipeline.create_pipeline(connection, dp_setup.module)
assert changed is False
assert "Data Pipeline ansible-test-create-pipeline is present" in result['msg']
def test_pipeline_field(placeboify, maybe_sleep, dp_setup):
connection = placeboify.client('datapipeline')
pipeline_field_info = data_pipeline.pipeline_field(connection, dp_setup.data_pipeline_id, "@pipelineState")
assert pipeline_field_info == "PENDING"
def test_define_pipeline(placeboify, maybe_sleep, dp_setup):
connection = placeboify.client('datapipeline')
changed, result = data_pipeline.define_pipeline(connection, dp_setup.module, dp_setup.objects, dp_setup.data_pipeline_id)
assert 'has been updated' in result
def test_deactivate_pipeline(placeboify, maybe_sleep, dp_setup):
connection = placeboify.client('datapipeline')
changed, result = data_pipeline.deactivate_pipeline(connection, dp_setup.module)
assert "Data Pipeline ansible-test-create-pipeline deactivated" in result['msg']
def test_activate_without_population(placeboify, maybe_sleep, dp_setup):
connection = placeboify.client('datapipeline')
    with pytest.raises(Exception):
        changed, result = data_pipeline.activate_pipeline(connection, dp_setup.module)
    # FakeModule.fail_json() raises a bare Exception('FAIL') and records its
    # kwargs, so the failure message is asserted on the module itself.
    assert dp_setup.module.exit_kwargs.get('msg') == "You need to populate your pipeline before activation."
def test_create_pipeline(placeboify, maybe_sleep):
connection = placeboify.client('datapipeline')
params = {'name': 'ansible-unittest-create-pipeline',
'description': 'ansible-datapipeline-unit-test',
'state': 'present',
'timeout': 300,
'tags': {}}
m = FakeModule(**params)
changed, result = data_pipeline.create_pipeline(connection, m)
assert changed is True
assert result['msg'] == "Data Pipeline ansible-unittest-create-pipeline created."
data_pipeline.delete_pipeline(connection, m)
def test_create_pipeline_with_tags(placeboify, maybe_sleep):
connection = placeboify.client('datapipeline')
params = {'name': 'ansible-unittest-create-pipeline_tags',
'description': 'ansible-datapipeline-unit-test',
'state': 'present',
'tags': {'ansible': 'test'},
'timeout': 300}
m = FakeModule(**params)
changed, result = data_pipeline.create_pipeline(connection, m)
assert changed is True
assert result['msg'] == "Data Pipeline ansible-unittest-create-pipeline_tags created."
data_pipeline.delete_pipeline(connection, m)
def test_delete_nonexistent_pipeline(placeboify, maybe_sleep):
connection = placeboify.client('datapipeline')
params = {'name': 'ansible-test-nonexistent',
'description': 'ansible-test-nonexistent',
'state': 'absent',
'objects': [],
'tags': {'ansible': 'test'},
'timeout': 300}
m = FakeModule(**params)
changed, result = data_pipeline.delete_pipeline(connection, m)
assert changed is False
def test_delete_pipeline(placeboify, maybe_sleep):
connection = placeboify.client('datapipeline')
params = {'name': 'ansible-test-nonexistent',
'description': 'ansible-test-nonexistent',
'state': 'absent',
'objects': [],
'tags': {'ansible': 'test'},
'timeout': 300}
m = FakeModule(**params)
data_pipeline.create_pipeline(connection, m)
changed, result = data_pipeline.delete_pipeline(connection, m)
assert changed is True
def test_build_unique_id_different():
m = FakeModule(**{'name': 'ansible-unittest-1', 'description': 'test-unique-id'})
m2 = FakeModule(**{'name': 'ansible-unittest-1', 'description': 'test-unique-id-different'})
assert data_pipeline.build_unique_id(m) != data_pipeline.build_unique_id(m2)
def test_build_unique_id_same():
m = FakeModule(**{'name': 'ansible-unittest-1', 'description': 'test-unique-id', 'tags': {'ansible': 'test'}})
m2 = FakeModule(**{'name': 'ansible-unittest-1', 'description': 'test-unique-id', 'tags': {'ansible': 'test'}})
assert data_pipeline.build_unique_id(m) == data_pipeline.build_unique_id(m2)
def test_build_unique_id_obj():
# check that the object can be different and the unique id should be the same; should be able to modify objects
m = FakeModule(**{'name': 'ansible-unittest-1', 'objects': [{'first': 'object'}]})
m2 = FakeModule(**{'name': 'ansible-unittest-1', 'objects': [{'second': 'object'}]})
assert data_pipeline.build_unique_id(m) == data_pipeline.build_unique_id(m2)
def test_format_tags():
unformatted_tags = {'key1': 'val1', 'key2': 'val2', 'key3': 'val3'}
formatted_tags = data_pipeline.format_tags(unformatted_tags)
for tag_set in formatted_tags:
assert unformatted_tags[tag_set['key']] == tag_set['value']
def test_format_empty_tags():
unformatted_tags = {}
formatted_tags = data_pipeline.format_tags(unformatted_tags)
assert formatted_tags == []
def test_pipeline_description(placeboify, maybe_sleep, dp_setup):
connection = placeboify.client('datapipeline')
dp_id = dp_setup.data_pipeline_id
pipelines = data_pipeline.pipeline_description(connection, dp_id)
assert dp_id == pipelines['pipelineDescriptionList'][0]['pipelineId']
def test_pipeline_description_nonexistent(placeboify, maybe_sleep):
hypothetical_pipeline_id = "df-015440025PF7YGLDK47C"
connection = placeboify.client('datapipeline')
with pytest.raises(Exception) as error:
data_pipeline.pipeline_description(connection, hypothetical_pipeline_id)
assert error == data_pipeline.DataPipelineNotFound
def test_check_dp_exists_true(placeboify, maybe_sleep, dp_setup):
connection = placeboify.client('datapipeline')
exists = data_pipeline.check_dp_exists(connection, dp_setup.data_pipeline_id)
assert exists is True
def test_check_dp_exists_false(placeboify, maybe_sleep):
hypothetical_pipeline_id = "df-015440025PF7YGLDK47C"
connection = placeboify.client('datapipeline')
exists = data_pipeline.check_dp_exists(connection, hypothetical_pipeline_id)
assert exists is False
def test_check_dp_status(placeboify, maybe_sleep, dp_setup):
inactive_states = ['INACTIVE', 'PENDING', 'FINISHED', 'DELETING']
connection = placeboify.client('datapipeline')
state = data_pipeline.check_dp_status(connection, dp_setup.data_pipeline_id, inactive_states)
assert state is True
def test_activate_pipeline(placeboify, maybe_sleep, dp_setup):
# use objects to define pipeline before activating
connection = placeboify.client('datapipeline')
data_pipeline.define_pipeline(connection,
module=dp_setup.module,
objects=dp_setup.objects,
dp_id=dp_setup.data_pipeline_id)
changed, result = data_pipeline.activate_pipeline(connection, dp_setup.module)
assert changed is True
| 40.909449
| 128
| 0.698008
| 334
| 0.032143
| 1,991
| 0.191608
| 2,023
| 0.194688
| 0
| 0
| 3,072
| 0.29564
|
b7977b33c0e4a7ba789fa84d50fd2df4ed4339db
| 3,433
|
py
|
Python
|
Janela/Cadastro_clientes/Clientes.py
|
marcosj046/Treinando-Python-Tkinter
|
7b88a6b6afba46e6c013b65b952723047c173901
|
[
"MIT"
] | null | null | null |
Janela/Cadastro_clientes/Clientes.py
|
marcosj046/Treinando-Python-Tkinter
|
7b88a6b6afba46e6c013b65b952723047c173901
|
[
"MIT"
] | null | null | null |
Janela/Cadastro_clientes/Clientes.py
|
marcosj046/Treinando-Python-Tkinter
|
7b88a6b6afba46e6c013b65b952723047c173901
|
[
"MIT"
] | null | null | null |
import sqlite3
import tkinter as tk
import pandas as pd
#----------------------------------------------
# To create the database, remove the comment markers (#) from the block below
# only the first time you run the script; after that, comment it out again.
# Creating the database
# conexao = sqlite3.connect('Clientes.db')
#
# c = conexao.cursor()
#
# c.execute(''' CREATE TABLE clientes (
# Nome text,
# Sobrenome text,
# Email text,
# Telefone text
# )
# ''')
#
# conexao.commit()
# conexao.close()
#-----------------------------------------------
# Creating the functions
def cadastrar_cliente():
conexao = sqlite3.connect('Clientes.db')
c = conexao.cursor()
c.execute("INSERT INTO clientes VALUES (:nome,:sobrenome,:email,:telefone)",
{
'nome': entry_nome.get(),
'sobrenome': entry_sobrenome.get(),
'email': entry_email.get(),
'telefone': entry_telefone.get()
}
)
conexao.commit()
conexao.close()
    # Clear the form fields after inserting a record
entry_nome.delete(0, "end")
entry_sobrenome.delete(0, "end")
entry_email.delete(0, "end")
entry_telefone.delete(0, "end")
# Function to export the database contents to Excel (xlsx) format
def exportar_cliente():
conexao = sqlite3.connect('Clientes.db')
c = conexao.cursor()
c.execute("SELECT *, oid FROM clientes") #Criando um select da tabela clientes
clientes_cadastrados = c.fetchall() #Onde eu utilizo a estrutura fetchall para retornar todos os dados da mesma
clientes_cadastrados = pd.DataFrame(clientes_cadastrados, columns=['Nome','Sobrenome','Email','Telefone','Id_banco']) #em seguida transformo a variável em um Dataframe
clientes_cadastrados.to_excel('banco_clientes.xlsx') #Para que assim eu possa exportar como Excel
conexao.commit()
conexao.close()
#-----------------------------------------------
janela = tk.Tk()  # starting the window
janela.title("Cadastro de Clientes")  # setting the window title
# Creating the labels:
label_nome = tk.Label(janela, text="Nome")
label_nome.grid(row=0, column=0, padx=10, pady=10)
label_sobrenome = tk.Label(janela, text="Sobrenome")
label_sobrenome.grid(row=1, column=0, padx=10, pady=10)
label_email = tk.Label(janela, text="Email")
label_email.grid(row=2, column=0, padx=10, pady=10)
label_telefone = tk.Label(janela, text="Telefone")
label_telefone.grid(row=3, column=0, padx=10, pady=10)
#-------------------------------------------------------
# Entries (tk.Entry does not accept a 'text' option, so it is omitted here)
entry_nome = tk.Entry(janela, width=30)
entry_nome.grid(row=0, column=2, padx=10, pady=10)
entry_sobrenome = tk.Entry(janela, width=30)
entry_sobrenome.grid(row=1, column=2, padx=10, pady=10)
entry_email = tk.Entry(janela, width=30)
entry_email.grid(row=2, column=2, padx=10, pady=10)
entry_telefone = tk.Entry(janela, width=30)
entry_telefone.grid(row=3, column=2, padx=10, pady=10)
# Buttons
botao_Cadastrar = tk.Button(janela, text="Cadastrar Cliente", command = cadastrar_cliente)
botao_Cadastrar.grid(row=4, column=0, padx=10, pady=10, columnspan=2, ipadx=80)
botao_exportar = tk.Button(janela, text="Exportar Cliente", command = exportar_cliente)
botao_exportar.grid(row=4, column=2, padx=10, pady=10, columnspan=2, ipadx=80)
# Note: ipadx=80 basically widens a specific widget horizontally
janela.mainloop()
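# Hypothetical helper (not in the original script) to inspect the saved rows:
#   conexao = sqlite3.connect('Clientes.db')
#   print(conexao.execute('SELECT *, oid FROM clientes').fetchall())
#   conexao.close()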
| 33.990099
| 171
| 0.669677
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,491
| 0.432049
|
b79823a97f320a9bd4990bfd614e964f079da76e
| 14,382
|
py
|
Python
|
pygitbucket/client.py
|
SimiCode/pygitbucket
|
46a22fcc0292231905eb4e0e2da47f8a85743bbd
|
[
"MIT"
] | 1
|
2021-07-04T19:45:05.000Z
|
2021-07-04T19:45:05.000Z
|
pygitbucket/client.py
|
SimiCode/pygitbucket
|
46a22fcc0292231905eb4e0e2da47f8a85743bbd
|
[
"MIT"
] | null | null | null |
pygitbucket/client.py
|
SimiCode/pygitbucket
|
46a22fcc0292231905eb4e0e2da47f8a85743bbd
|
[
"MIT"
] | null | null | null |
import math
import requests
from pygitbucket.exceptions import (
UnknownError,
InvalidIDError,
NotFoundIDError,
NotAuthenticatedError,
PermissionError,
)
class Client:
BASE_URL = "https://api.bitbucket.org/"
def __init__(self, user: str, password: str, owner=None):
"""Initial session with user/password, and setup repository owner
Args:
params:
Returns:
"""
self.user = user
self.password = password
user_data = self.get_user()
# for shared repo, set baseURL to owner
if owner is None:
owner = user_data.get("username")
self.username = owner
def get_user(self, params=None):
"""Returns the currently logged in user.
Args:
params:
Returns:
"""
return self._get("2.0/user", params=params)
def get_privileges(self, params=None):
"""Gets a list of all the privilege across all an account's repositories.
If a repository has no individual users with privileges, it does not appear in this list.
Only the repository owner, a team account administrator, or an account with administrative
rights on the repository can make this call. This method has the following parameters:
Args:
params:
Returns:
"""
return self._get(f"1.0/privileges/{self.username}", params=params)
def get_repositories(self, params=None):
"""Returns a paginated list of all repositories owned by the specified account or UUID.
The result can be narrowed down based on the authenticated user's role.
E.g. with ?role=contributor, only those repositories that the authenticated user has write access to are
returned (this includes any repo the user is an admin on, as that implies write access).
This endpoint also supports filtering and sorting of the results. See filtering and sorting for more details.
Args:
params:
Returns:
"""
return self._get(f"2.0/repositories/{self.username}", params=params)
def get_repository(self, repository_slug, params=None):
"""Returns the object describing this repository.
Args:
repository_slug:
params:
Returns:
"""
return self._get(
f"2.0/repositories/{self.username}/{repository_slug}",
params=params
)
def get_repository_pipelines(self, repository_slug, page=None, params=None):
"""Returns the object describing this repository's pipelines.
Args:
repository_slug:
page: page of the pipelines data
params:
Returns:
"""
page_num = str(page) if page else "1"
return self._get(
f"2.0/repositories/{self.username}/{repository_slug}/pipelines/?page={page_num}",
params=params,
)
def get_latest_pipelines(self, repository_slug, params=None):
"""Returns the object describing this repository's latest pipelines.
Args:
repository_slug:
params:
Returns:
"""
default_response = self.get_repository_pipelines(repository_slug)
num_pipelines = default_response["size"]
pages = math.ceil(num_pipelines / 10)
latest_pipelines = (
self.get_repository_pipelines(repository_slug, pages)["values"]
+ self.get_repository_pipelines(repository_slug, pages - 1)["values"]
)
return latest_pipelines
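    # Illustrative pagination math (an assumption implied by the division
    # above: the API returns 10 pipelines per page): 23 pipelines gives
    # math.ceil(23 / 10) == 3 pages, so the last two pages hold the most
    # recent results.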
# UNDER TESTING !!
def get_last_pipeline(self, repository_slug, branch=None, params=None):
"""Returns the object describing this repository's latest pipelines.
Args:
repository_slug:
params:
Returns:
"""
default_response = self.get_repository_pipelines(repository_slug)
num_pipelines = default_response["size"]
pages = math.ceil(num_pipelines / 10)
last_pipelines = self.get_repository_pipelines(repository_slug, pages)["values"]
if branch:
last_pipelines = [
value for value in last_pipelines
if value['target']['ref_name'] == branch
]
last_pipelines.sort(key=lambda x: x['created_on'])
return last_pipelines[-1]
def get_repository_branches(self, repository_slug, params=None):
return self._get(
f"2.0/repositories/{self.username}/{repository_slug}/refs/branches",
params=params,
)
def get_repository_tags(self, repository_slug, params=None):
return self._get(
f"2.0/repositories/{self.username}/{repository_slug}/refs/tags",
params=params,
)
def get_repository_components(self, repository_slug, params=None):
"""Returns the components that have been defined in the issue tracker.
This resource is only available on repositories that have the issue tracker enabled.
Args:
repository_slug:
params:
Returns:
"""
return self._get(
f"2.0/repositories/{self.username}/{repository_slug}/components",
params=params,
)
def get_repository_milestones(self, repository_slug, params=None):
"""Returns the milestones that have been defined in the issue tracker.
This resource is only available on repositories that have the issue tracker enabled.
Args:
repository_slug:
params:
Returns:
"""
return self._get(
f"2.0/repositories/{self.username}/{repository_slug}/milestones",
params=params,
)
def get_repository_versions(self, repository_slug, params=None):
"""Returns the versions that have been defined in the issue tracker.
This resource is only available on repositories that have the issue tracker enabled.
Args:
repository_slug:
params:
Returns:
"""
return self._get(
f"2.0/repositories/{self.username}/{repository_slug}/versions",
params=params,
)
def get_repository_source_code(self, repository_slug, params=None):
"""Returns data about the source code of given repository.
Args:
repository_slug:
params:
Returns:
"""
return self._get(
f"2.0/repositories/{self.username}/{repository_slug}/src",
params=params
)
def get_repository_commit_path_source_code(
self, repository_slug, commit_hash, path, params=None
):
"""Returns source code of given path at specified commit_hash of given repository.
Args:
repository_slug:
commit_hash:
path:
params:
Returns:
"""
return self._get(
f"2.0/repositories/{self.username}/{repository_slug}/src/{commit_hash}/{path}",
params=params,
)
def trigger_pipeline(self, repository_slug, branch_name, params=None):
"""Triggers the pipeline for a branch of the repo.
This call requires authentication. Private repositories or private issue trackers require
the caller to authenticate with an account that has appropriate authorisation.
Args:
repository_slug:
branch_name: name of repo branch being deployed
data:
params:
The post data should be in the format:
{
"target": {
"ref_type": "branch",
"type": "pipeline_ref_target",
"ref_name": "branch_name"
}
}
Returns:
"""
data = {
"target": {
"ref_type": "branch",
"type": "pipeline_ref_target",
"ref_name": branch_name,
}
}
return self._post(
f"2.0/repositories/{self.username}/{repository_slug}/pipelines/",
data=data,
params=params,
)
def create_issue(self, repository_slug, title, description="", params=None):
"""Creates a new issue.
This call requires authentication. Private repositories or private issue trackers require
the caller to authenticate with an account that has appropriate authorisation.
The authenticated user is used for the issue's reporter field.
Args:
repository_slug:
data:
params:
The post data should be in the format:
{
"title":"title of the issue",
"content":{
"raw":"this should be the description"
}
}
Returns:
"""
data = {"title": title, "content": {"raw": description}}
return self._post(
f"2.0/repositories/{self.username}/{repository_slug}/issues",
data=data,
params=params,
)
def get_issue(self, repository_slug, issue_id, params=None):
"""Returns the specified issue.
Args:
repository_slug:
issue_id:
params:
Returns:
"""
return self._get(
f"2.0/repositories/{self.username}/{repository_slug}/issues/{issue_id}",
params=params,
)
def get_issues(self, repository_slug, params=None):
"""Returns the issues in the issue tracker.
Args:
repository_slug:
params:
Returns:
"""
return self._get(
f"2.0/repositories/{self.username}/{repository_slug}/issues",
params=params
)
def delete_issue(self, repository_slug, issue_id, params=None):
"""Deletes the specified issue. This requires write access to the repository.
Args:
repository_slug:
issue_id:
params:
Returns:
"""
return self._delete(
f"2.0/repositories/{self.username}/{repository_slug}/issues/{issue_id}",
params=params,
)
def create_webhook(self, repository_slug, data, params=None):
"""Creates a new webhook on the specified repository.
Example:
{
"description": "Webhook Description",
"url": "https://example.com/",
"active": true,
"events": [
"repo:push",
"issue:created",
"issue:updated"
]
}
Note that this call requires the webhook scope, as well as any scope that applies to the events
that the webhook subscribes to. In the example above that means: webhook, repository and issue.
Also note that the url must properly resolve and cannot be an internal, non-routed address.
Args:
repository_slug:
data:
params:
Returns:
"""
return self._post(
f"2.0/repositories/{self.username}/{repository_slug}/hooks",
data=data,
params=params,
)
def get_webhook(self, repository_slug, webhook_uid, params=None):
"""Returns the webhook with the specified id installed on the specified repository.
Args:
repository_slug:
webhook_uid:
params:
Returns:
"""
return self._get(
f"2.0/repositories/{self.username}/{repository_slug}/hooks/{webhook_uid}",
params=params,
)
def get_webhooks(self, repository_slug, params=None):
"""Returns a paginated list of webhooks installed on this repository.
Args:
repository_slug:
params:
Returns:
"""
return self._get(
f"2.0/repositories/{self.username}/{repository_slug}/hooks",
params=params
)
def delete_webhook(self, repository_slug, webhook_uid, params=None):
"""Deletes the specified webhook subscription from the given repository.
Args:
repository_slug:
webhook_uid:
params:
Returns:
"""
return self._delete(
f"2.0/repositories/{self.username}/{repository_slug}/hooks/{webhook_uid}",
params=params,
)
def _get(self, endpoint, params=None):
response = requests.get(
self.BASE_URL + endpoint, params=params, auth=(self.user, self.password)
)
return self._parse(response)
def _post(self, endpoint, params=None, data=None):
response = requests.post(
self.BASE_URL + endpoint,
params=params,
json=data,
auth=(self.user, self.password),
)
return self._parse(response)
def _put(self, endpoint, params=None, data=None):
response = requests.put(
self.BASE_URL + endpoint,
params=params,
json=data,
auth=(self.user, self.password),
)
return self._parse(response)
def _delete(self, endpoint, params=None):
response = requests.delete(
self.BASE_URL + endpoint, params=params, auth=(self.user, self.password)
)
return self._parse(response)
def _parse(self, response):
status_code = response.status_code
if "application/json" in response.headers["Content-Type"]:
r = response.json()
else:
r = response.text
if status_code in (200, 201):
return r
if status_code == 204:
return None
message = None
try:
if "errorMessages" in r:
message = r["errorMessages"]
except Exception:
message = "No error message."
if status_code == 400:
raise InvalidIDError(message)
if status_code == 401:
raise NotAuthenticatedError(message)
if status_code == 403:
raise PermissionError(message)
if status_code == 404:
raise NotFoundIDError(message)
raise UnknownError(message)
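# Minimal usage sketch (credentials, owner and parameters are placeholders,
# not values from this repository):
#   client = Client('my-user', 'my-app-password')
#   repos = client.get_repositories(params={'role': 'member'})
#   pipelines = client.get_repository_pipelines('my-repo-slug')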
| 28.821643
| 117
| 0.578779
| 14,204
| 0.987623
| 0
| 0
| 0
| 0
| 0
| 0
| 7,741
| 0.538242
|
b79836d2b12f5a44dd3688e75af8cc0da3616913
| 3,982
|
py
|
Python
|
egg/zoo/basic_games/data_readers.py
|
renata-nerenata/EGG
|
b8532efc3569defabeba6851986cecb0c6640984
|
[
"MIT"
] | 1
|
2021-05-26T14:23:25.000Z
|
2021-05-26T14:23:25.000Z
|
egg/zoo/basic_games/data_readers.py
|
renata-nerenata/EGG
|
b8532efc3569defabeba6851986cecb0c6640984
|
[
"MIT"
] | 1
|
2019-10-31T16:21:01.000Z
|
2019-10-31T16:21:01.000Z
|
egg/zoo/basic_games/data_readers.py
|
renata-nerenata/EGG
|
b8532efc3569defabeba6851986cecb0c6640984
|
[
"MIT"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from torch.utils.data import Dataset
import numpy as np
# These input-data-processing classes take input data from a text file and convert them to the format
# appropriate for the recognition and discrimination games, so that they can be read by
# the standard pytorch DataLoader. The latter requires the data reading classes to support
# a __len__(self) method, returning the size of the dataset, and a __getitem__(self, idx)
# method, returning the idx-th item in the dataset. We also provide a get_n_features(self) method,
# returning the dimensionality of the Sender input vector after it is transformed to one-hot format.
# The AttValRecoDataset class is used in the reconstruction game. It takes an input file with a
# space-delimited attribute-value vector per line and creates a data-frame with the two mandatory
# fields expected in EGG games, namely sender_input and labels.
# In this case, the two fields contain the same information, namely the input attribute-value vectors,
# represented as one-hot in sender_input, and in the original integer-based format in
# labels.
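# For example (illustrative values, not from the repository): with
# n_attributes=3 and n_values=4, the input line "0 2 3" becomes the flattened
# one-hot sender_input [1,0,0,0, 0,0,1,0, 0,0,0,1] and the label tensor [0, 2, 3].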
class AttValRecoDataset(Dataset):
    def __init__(self, path, n_attributes, n_values):
        frame = np.loadtxt(path, dtype='S10')
        self.frame = []
        for row in frame:
            if n_attributes == 1:
                row = row.split()
            config = list(map(int, row))
            z = torch.zeros((n_attributes, n_values))
            for i in range(n_attributes):
                z[i, config[i]] = 1
            label = torch.tensor(list(map(int, row)))
            self.frame.append((z.view(-1), label))
def get_n_features(self):
return self.frame[0][0].size(0)
def __len__(self):
return len(self.frame)
def __getitem__(self, idx):
return self.frame[idx]
# The AttValDiscriDataset class, used in the discrimination game takes an input file with a variable
# number of period-delimited fields, where all fields but the last represent attribute-value vectors
# (with space-delimited attributes). The last field contains the index (counting from 0) of the target
# vector.
# Here, we create a data-frame containing 3 fields: sender_input, labels and receiver_input (these are
# expected by EGG, the first two mandatorily so).
# The sender_input corresponds to the target vector (in one-hot format), labels are the indices of the
# target vector location and receiver_input is a matrix with a row for each input vector (in input order).
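# For example (illustrative, not from the repository): with n_values=4, the
# line "0 1 . 2 3 . 1" encodes the vectors (0 1) and (2 3), with the vector at
# index 1, i.e. "2 3", as the target.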
class AttValDiscriDataset(Dataset):
    def __init__(self, path, n_values):
        frame = open(path, 'r')
        self.frame = []
        for row in frame:
            raw_info = row.split('.')
            index_vectors = [list(map(int, x.split())) for x in raw_info[:-1]]
            target_index = int(raw_info[-1])
            target_one_hot = []
            for index in index_vectors[target_index]:
                current = np.zeros(n_values)
                current[index] = 1
                target_one_hot = np.concatenate((target_one_hot, current))
            target_one_hot_tensor = torch.FloatTensor(target_one_hot)
            one_hot = []
            for index_vector in index_vectors:
                for index in index_vector:
                    current = np.zeros(n_values)
                    current[index] = 1
                    one_hot = np.concatenate((one_hot, current))
            one_hot_sequence = torch.FloatTensor(one_hot).view(len(index_vectors), -1)
            label = torch.tensor(target_index)
            self.frame.append((target_one_hot_tensor, label, one_hot_sequence))
        frame.close()
def get_n_features(self):
return self.frame[0][0].size(0)
def __len__(self):
return len(self.frame)
def __getitem__(self, idx):
return self.frame[idx]
| 45.770115
| 106
| 0.678805
| 2,019
| 0.507032
| 0
| 0
| 0
| 0
| 0
| 0
| 1,875
| 0.470869
|
b798b33e304086ef9c7efe2c716e2e87dfcb993a
| 3,594
|
py
|
Python
|
viroconcom/read_write.py
|
adrdrew/viroconcom
|
3eb748ba8e3e076eddd174a0fcdfee3917aa4045
|
[
"MIT"
] | null | null | null |
viroconcom/read_write.py
|
adrdrew/viroconcom
|
3eb748ba8e3e076eddd174a0fcdfee3917aa4045
|
[
"MIT"
] | 1
|
2020-05-18T11:06:28.000Z
|
2020-05-18T11:06:28.000Z
|
viroconcom/read_write.py
|
adrdrew/viroconcom
|
3eb748ba8e3e076eddd174a0fcdfee3917aa4045
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Reads datasets, reads and writes contour coordinates.
"""
import numpy as np
import csv
def read_ecbenchmark_dataset(path='datasets/1year_dataset_A.txt'):
"""
    Reads a 2D dataset that uses an ASCII format with ';' as a separator.
This format has been used in the EC benchmark,
see https://github.com/ec-benchmark-organizers/ec-benchmark .
Parameters
----------
path : string
        Path to dataset including the file name, defaults
        to 'datasets/1year_dataset_A.txt'.
Returns
-------
x : ndarray of doubles
Observations of the environmental variable 1.
y : ndarray of doubles
Observations of the environmental variable 2.
x_label : str
        Label of the environmental variable 1.
y_label : str
Label of the environmental variable 2.
"""
x = list()
y = list()
x_label = None
y_label = None
with open(path, newline='') as csv_file:
reader = csv.reader(csv_file, delimiter=';')
idx = 0
for row in reader:
if idx == 0:
x_label = row[1][1:] # Ignore first char (is a white space).
y_label = row[2][1:] # Ignore first char (is a white space).
if idx > 0: # Ignore the header
x.append(float(row[1]))
y.append(float(row[2]))
idx = idx + 1
x = np.asarray(x)
y = np.asarray(y)
return (x, y, x_label, y_label)
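# Example call (sketch; the path is the function's documented default):
#   x, y, x_label, y_label = read_ecbenchmark_dataset('datasets/1year_dataset_A.txt')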
def write_contour(x, y, path, label_x='Variable x (unit)',
label_y='Variable y (unit)'):
"""
    Writes 2D contour coordinates in an ASCII format with ';' as a separator.
Parameters
----------
x : ndarray of doubles
Values in the first dimensions of the contour's coordinates.
y : ndarray of doubles
Values in the second dimensions of the contour's coordinates.
path : string
Path including folder and file name where the contour should be saved.
label_x : str
Name and unit of the first environmental variable,
        defaults to 'Variable x (unit)', could be, for example,
'Significant wave height (m)'.
label_y : str
Name and unit of the second environmental variable,
defaults to 'Variable y (unit)', could be, for example,
'Zero-up-crossing period (s)'.
"""
with open(path, mode='w', newline='') as contour_file:
contour_writer = csv.writer(contour_file, delimiter=';', quotechar='"', quoting=csv.QUOTE_MINIMAL)
contour_writer.writerow([label_x, label_y])
        for xi, yi in zip(x, y):
            contour_writer.writerow([str(xi), str(yi)])
def read_contour(path):
"""
    Reads 2D contour coordinates in an ASCII format with ';' as a separator.
Parameters
----------
path : string
Path to contour including the file name.
Returns
-------
x : ndarray of doubles
Observations of the environmental variable 1.
y : ndarray of doubles
Observations of the environmental variable 2.
"""
x = list()
y = list()
with open(path, newline='') as csv_file:
reader = csv.reader(csv_file, delimiter=';')
idx = 0
for row in reader:
if idx > 0: # Ignore the header
x.append(float(row[0]))
y.append(float(row[1]))
idx = idx + 1
x = np.asarray(x)
y = np.asarray(y)
return (x, y)
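# Minimal round-trip sketch (hypothetical file name; labels follow the
# documented examples):
#   x = np.asarray([1.0, 2.0])
#   y = np.asarray([3.0, 4.0])
#   write_contour(x, y, 'contour.txt', 'Significant wave height (m)',
#                 'Zero-up-crossing period (s)')
#   x2, y2 = read_contour('contour.txt')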
| 31.526316
| 107
| 0.574012
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,185
| 0.607958
|
b798e6d01b3ed050802b1fbfc6ef1f267c2dd36a
| 601
|
py
|
Python
|
check_python_install/check_numba.py
|
sdpython/_check_python_install
|
06a34e3b8559370c8c41b655ed2acc72c7f1bf34
|
[
"MIT"
] | null | null | null |
check_python_install/check_numba.py
|
sdpython/_check_python_install
|
06a34e3b8559370c8c41b655ed2acc72c7f1bf34
|
[
"MIT"
] | 2
|
2019-11-07T18:11:18.000Z
|
2021-01-02T11:49:38.000Z
|
check_python_install/check_numba.py
|
sdpython/_check_python_install
|
06a34e3b8559370c8c41b655ed2acc72c7f1bf34
|
[
"MIT"
] | null | null | null |
"""
@file
@brief Test for :epkg:`numba`.
"""
import numpy
import numba
@numba.jit(nopython=True, parallel=True)
def logistic_regression(Y, X, w, iterations):
"Fits a logistic regression."
for _ in range(iterations):
w -= numpy.dot(((1.0 / (1.0 + numpy.exp(-Y * numpy.dot(X, w))) - 1.0) * Y), X)
return w
def check_numba():
"""
Runs a sample with :epkg:`numba`.
"""
Y = numpy.random.rand(10).astype(numpy.double)
X = numpy.random.rand(10, 2).astype(numpy.double)
w = numpy.random.rand(2).astype(numpy.double)
return logistic_regression(Y, X, w, 2)
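if __name__ == '__main__':
    # Hypothetical smoke test, not part of the original module: running the
    # file directly prints the fitted weight vector.
    print(check_numba())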
| 24.04
| 86
| 0.62396
| 0
| 0
| 0
| 0
| 252
| 0.419301
| 0
| 0
| 124
| 0.206323
|
b799b353c37ba22ae18a337e8553a686eb6d2908
| 793
|
py
|
Python
|
app/utils/to_file.py
|
MichelHanzenScheeren/SpectralClustering
|
be0a2645b965442d57c1fb016556d3003921f84a
|
[
"MIT"
] | null | null | null |
app/utils/to_file.py
|
MichelHanzenScheeren/SpectralClustering
|
be0a2645b965442d57c1fb016556d3003921f84a
|
[
"MIT"
] | null | null | null |
app/utils/to_file.py
|
MichelHanzenScheeren/SpectralClustering
|
be0a2645b965442d57c1fb016556d3003921f84a
|
[
"MIT"
] | null | null | null |
class ToFile:
""" Classe que recebe uma instância de dados e os salva em um arquivo de saída. """
def __init__(self, path, data):
maxs = [len(x) + 4 for x in data.legend]
with open(path, 'w') as file:
file.write('id' + (' ' * 6))
for index, element in enumerate(data.legend):
file.write(element + (' ' * (maxs[index] - len(element))))
file.write('group')
file.write('\n')
for index, element in enumerate(data.values):
spaces = ' ' * (8 - len(f'{index + 1}'))
file.write(f'{index + 1}{spaces}')
for i, e in enumerate(element):
spaces = (' ' * (maxs[i] - len(f'{e}')))
file.write(f'{e}{spaces}')
if len(data.groups) > 0:
file.write(f'{data.groups[index]}')
file.write('\n')
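# Minimal usage sketch (the _DemoData class is a hypothetical stand-in for the
# project's real data instance, which exposes legend, values and groups):
if __name__ == '__main__':
    class _DemoData:
        legend = ['x', 'y']
        values = [[1.0, 2.0], [3.0, 4.0]]
        groups = [0, 1]
    ToFile('demo_output.txt', _DemoData())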
| 37.761905
| 85
| 0.538462
| 794
| 0.998742
| 0
| 0
| 0
| 0
| 0
| 0
| 198
| 0.249057
|
b799cded976768ac0fb3f24b3d043843412ec29f
| 4,056
|
py
|
Python
|
Round 3/fence_design.py
|
e-ntro-py/GoogleCodeJam-2021
|
c42283480fa20a853c6d31d5faf0e83c6ad0f5f7
|
[
"MIT"
] | 30
|
2021-03-27T20:18:15.000Z
|
2022-03-19T06:18:58.000Z
|
Round 3/fence_design.py
|
e-ntro-py/GoogleCodeJam-2021
|
c42283480fa20a853c6d31d5faf0e83c6ad0f5f7
|
[
"MIT"
] | 1
|
2021-05-24T19:14:29.000Z
|
2021-05-25T04:14:10.000Z
|
Round 3/fence_design.py
|
e-ntro-py/GoogleCodeJam-2021
|
c42283480fa20a853c6d31d5faf0e83c6ad0f5f7
|
[
"MIT"
] | 7
|
2021-03-28T12:38:55.000Z
|
2021-09-19T15:30:39.000Z
|
# Copyright (c) 2021 kamyu. All rights reserved.
#
# Google Code Jam 2021 Round 3 - Problem C. Fence Design
# https://codingcompetitions.withgoogle.com/codejam/round/0000000000436142/0000000000813bc7
#
# Time: O(NlogN) on average; passes in PyPy2 but not in Python2
# Space: O(N)
#
from random import seed, randint
# Compute the cross product of vectors AB and AC
CW, COLLINEAR, CCW = range(-1, 2)
def ccw(A, B, C):
area = (B[0]-A[0])*(C[1]-A[1]) - (B[1]-A[1])*(C[0]-A[0])
return CCW if area > 0 else CW if area < 0 else COLLINEAR
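# Illustrative orientation check (not part of the contest solution):
# A=(0,0), B=(1,0), C=(0,1) turn counter-clockwise, so ccw(A, B, C) == CCW,
# while ccw(A, C, B) == CW and three collinear points give COLLINEAR.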
def same_side(A, B, C, D):
return ccw(A,C,D) == 0 or ccw(B,C,D) == 0 or ccw(A,C,D) == ccw(B,C,D)
def rotate(hull, split):
for i in xrange(len(hull)):
if hull[i] in split and hull[(i-1)%len(hull)] in split:
return hull[i:]+hull[:i]
return hull[:]
def add_result(result, x):
result.add(tuple(sorted(x)))
def add_triangle(P, left_ccw, right_cw, result, lookup):
p, q = 0, 1
while True:
p1 = (p+1)%len(left_ccw)
if ccw(P[left_ccw[p1]], P[left_ccw[p]], P[right_cw[q]]) == CCW:
add_result(result, [left_ccw[p1], right_cw[q]])
lookup.add(left_ccw[p]) # inside the convex hull
p = p1
continue
q1 = (q+1)%len(right_cw)
if ccw(P[left_ccw[p]], P[right_cw[q]], P[right_cw[q1]]) == CCW:
add_result(result, [right_cw[q1], left_ccw[p]])
lookup.add(right_cw[q]) # inside the convex hull
q = q1
continue
break
def conquer(P, left, right, split, result): # Time: O(N)
if len(left) == 2:
return right
if len(right) == 2:
return left
lookup = set()
left_ccw, right_cw = rotate(left, split), rotate(right[::-1], split)
add_triangle(P, left_ccw, right_cw, result, lookup)
right_ccw, left_cw = rotate(right, split), rotate(left[::-1], split)
add_triangle(P, right_ccw, left_cw, result, lookup)
return [x for x in left_ccw if x not in lookup] + \
[x for x in right_ccw[1:-1] if x not in lookup]
def divide(P, f, curr, split, result): # depth at most O(logN) on average => Time: O(NlogN)
if len(curr) == 2:
return curr
if len(curr) == 3: # terminal case
p = next(p for p in curr if p not in split)
for x in split:
add_result(result, [p, x])
return [p, split[0], split[1]] if ccw(P[p], P[split[0]], P[split[1]]) == CCW else [p, split[1], split[0]]
if f: # prefer to use pre-placed fence
new_split = f.pop()
else:
while True:
idx = randint(0, len(curr)-1)
p = curr[idx]
curr[idx], curr[-1] = curr[-1], curr[idx]
q = curr[randint(0, len(curr)-2)]
if p > q:
p, q = q, p
if (p, q) not in result:
break
new_split = (p, q)
add_result(result, new_split)
left = [x for x in curr if ccw(P[new_split[0]], P[new_split[1]], P[x]) != CCW]
right = [x for x in curr if ccw(P[new_split[0]], P[new_split[1]], P[x]) != CW]
return conquer(P,
divide(P, f if f and f[-1][0] in left and f[-1][1] in left else [], left, new_split, result),
divide(P, f if f and f[-1][0] in right and f[-1][1] in right else [], right, new_split, result),
new_split, result)
def fence_design():
N = input()
P = [map(int, raw_input().strip().split()) for _ in xrange(N)]
f = [map(lambda x: int(x)-1, raw_input().strip().split()) for _ in xrange(2)]
f = [tuple(sorted(x)) for x in f]
if not same_side(P[f[0][0]], P[f[0][1]], P[f[1][0]], P[f[1][1]]):
# make sure f[0] will be on the same side of f[1]
f[0], f[1] = f[1], f[0]
result = set()
hull = divide(P, f[:], range(len(P)), [], result)
assert(len(result) == 3*N-3-len(hull))
return "%s\n"%(len(result)-2)+"\n".join("%s %s"%(x[0]+1, x[1]+1) for x in [x for x in result if x not in f])
seed(0)
for case in xrange(input()):
print 'Case #%d: %s' % (case+1, fence_design())
| 38.264151
| 115
| 0.554487
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 553
| 0.136341
|
b79b05e86bdffa34c0cb47c7868d179ea550149c
| 50,505
|
py
|
Python
|
Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/pygsl/_block.py
|
poojavade/Genomics_Docker
|
829b5094bba18bbe03ae97daf925fee40a8476e8
|
[
"Apache-2.0"
] | 1
|
2019-07-29T02:53:51.000Z
|
2019-07-29T02:53:51.000Z
|
Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/pygsl/_block.py
|
poojavade/Genomics_Docker
|
829b5094bba18bbe03ae97daf925fee40a8476e8
|
[
"Apache-2.0"
] | 1
|
2021-09-11T14:30:32.000Z
|
2021-09-11T14:30:32.000Z
|
Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/pygsl/_block.py
|
poojavade/Genomics_Docker
|
829b5094bba18bbe03ae97daf925fee40a8476e8
|
[
"Apache-2.0"
] | 2
|
2016-12-19T02:27:46.000Z
|
2019-07-29T02:53:54.000Z
|
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 2.0.11
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info
if version_info >= (2,6,0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('__block', [dirname(__file__)])
except ImportError:
import __block
return __block
if fp is not None:
try:
_mod = imp.load_module('__block', fp, pathname, description)
finally:
fp.close()
return _mod
__block = swig_import_helper()
del swig_import_helper
else:
import __block
del version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
if (name == "thisown"): return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name,None)
if method: return method(self,value)
if (not static):
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
if (name == "thisown"): return self.this.own()
method = class_type.__swig_getmethods__.get(name,None)
if method: return method(self)
raise AttributeError(name)
def _swig_repr(self):
try: strthis = "proxy of " + self.this.__repr__()
except: strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except AttributeError:
class _object : pass
_newclass = 0
def gsl_vector_set_zero(*args, **kwargs):
return __block.gsl_vector_set_zero(*args, **kwargs)
gsl_vector_set_zero = __block.gsl_vector_set_zero
def gsl_vector_set_all(*args, **kwargs):
return __block.gsl_vector_set_all(*args, **kwargs)
gsl_vector_set_all = __block.gsl_vector_set_all
def gsl_vector_set_basis(*args, **kwargs):
return __block.gsl_vector_set_basis(*args, **kwargs)
gsl_vector_set_basis = __block.gsl_vector_set_basis
def gsl_vector_fread(*args, **kwargs):
return __block.gsl_vector_fread(*args, **kwargs)
gsl_vector_fread = __block.gsl_vector_fread
def gsl_vector_fwrite(*args, **kwargs):
return __block.gsl_vector_fwrite(*args, **kwargs)
gsl_vector_fwrite = __block.gsl_vector_fwrite
def gsl_vector_fscanf(*args, **kwargs):
return __block.gsl_vector_fscanf(*args, **kwargs)
gsl_vector_fscanf = __block.gsl_vector_fscanf
def gsl_vector_fprintf(*args, **kwargs):
return __block.gsl_vector_fprintf(*args, **kwargs)
gsl_vector_fprintf = __block.gsl_vector_fprintf
def gsl_vector_reverse(*args, **kwargs):
return __block.gsl_vector_reverse(*args, **kwargs)
gsl_vector_reverse = __block.gsl_vector_reverse
def gsl_vector_swap(*args, **kwargs):
return __block.gsl_vector_swap(*args, **kwargs)
gsl_vector_swap = __block.gsl_vector_swap
def gsl_vector_swap_elements(*args, **kwargs):
return __block.gsl_vector_swap_elements(*args, **kwargs)
gsl_vector_swap_elements = __block.gsl_vector_swap_elements
def gsl_vector_max(*args, **kwargs):
return __block.gsl_vector_max(*args, **kwargs)
gsl_vector_max = __block.gsl_vector_max
def gsl_vector_min(*args, **kwargs):
return __block.gsl_vector_min(*args, **kwargs)
gsl_vector_min = __block.gsl_vector_min
def gsl_vector_minmax(*args, **kwargs):
return __block.gsl_vector_minmax(*args, **kwargs)
gsl_vector_minmax = __block.gsl_vector_minmax
def gsl_vector_max_index(*args, **kwargs):
return __block.gsl_vector_max_index(*args, **kwargs)
gsl_vector_max_index = __block.gsl_vector_max_index
def gsl_vector_min_index(*args, **kwargs):
return __block.gsl_vector_min_index(*args, **kwargs)
gsl_vector_min_index = __block.gsl_vector_min_index
def gsl_vector_minmax_index(*args, **kwargs):
return __block.gsl_vector_minmax_index(*args, **kwargs)
gsl_vector_minmax_index = __block.gsl_vector_minmax_index
def gsl_vector_isnull(*args, **kwargs):
return __block.gsl_vector_isnull(*args, **kwargs)
gsl_vector_isnull = __block.gsl_vector_isnull
def gsl_matrix_set_zero(*args, **kwargs):
return __block.gsl_matrix_set_zero(*args, **kwargs)
gsl_matrix_set_zero = __block.gsl_matrix_set_zero
def gsl_matrix_set_all(*args, **kwargs):
return __block.gsl_matrix_set_all(*args, **kwargs)
gsl_matrix_set_all = __block.gsl_matrix_set_all
def gsl_matrix_set_identity(*args, **kwargs):
return __block.gsl_matrix_set_identity(*args, **kwargs)
gsl_matrix_set_identity = __block.gsl_matrix_set_identity
def gsl_matrix_fread(*args, **kwargs):
return __block.gsl_matrix_fread(*args, **kwargs)
gsl_matrix_fread = __block.gsl_matrix_fread
def gsl_matrix_fwrite(*args, **kwargs):
return __block.gsl_matrix_fwrite(*args, **kwargs)
gsl_matrix_fwrite = __block.gsl_matrix_fwrite
def gsl_matrix_fscanf(*args, **kwargs):
return __block.gsl_matrix_fscanf(*args, **kwargs)
gsl_matrix_fscanf = __block.gsl_matrix_fscanf
def gsl_matrix_fprintf(*args, **kwargs):
return __block.gsl_matrix_fprintf(*args, **kwargs)
gsl_matrix_fprintf = __block.gsl_matrix_fprintf
def gsl_matrix_swap(*args, **kwargs):
return __block.gsl_matrix_swap(*args, **kwargs)
gsl_matrix_swap = __block.gsl_matrix_swap
def gsl_matrix_swap_rows(*args, **kwargs):
return __block.gsl_matrix_swap_rows(*args, **kwargs)
gsl_matrix_swap_rows = __block.gsl_matrix_swap_rows
def gsl_matrix_swap_columns(*args, **kwargs):
return __block.gsl_matrix_swap_columns(*args, **kwargs)
gsl_matrix_swap_columns = __block.gsl_matrix_swap_columns
def gsl_matrix_swap_rowcol(*args, **kwargs):
return __block.gsl_matrix_swap_rowcol(*args, **kwargs)
gsl_matrix_swap_rowcol = __block.gsl_matrix_swap_rowcol
def gsl_matrix_transpose(*args, **kwargs):
return __block.gsl_matrix_transpose(*args, **kwargs)
gsl_matrix_transpose = __block.gsl_matrix_transpose
def gsl_matrix_max(*args, **kwargs):
return __block.gsl_matrix_max(*args, **kwargs)
gsl_matrix_max = __block.gsl_matrix_max
def gsl_matrix_min(*args, **kwargs):
return __block.gsl_matrix_min(*args, **kwargs)
gsl_matrix_min = __block.gsl_matrix_min
def gsl_matrix_minmax(*args, **kwargs):
return __block.gsl_matrix_minmax(*args, **kwargs)
gsl_matrix_minmax = __block.gsl_matrix_minmax
def gsl_matrix_max_index(*args, **kwargs):
return __block.gsl_matrix_max_index(*args, **kwargs)
gsl_matrix_max_index = __block.gsl_matrix_max_index
def gsl_matrix_min_index(*args, **kwargs):
return __block.gsl_matrix_min_index(*args, **kwargs)
gsl_matrix_min_index = __block.gsl_matrix_min_index
def gsl_matrix_minmax_index(*args, **kwargs):
return __block.gsl_matrix_minmax_index(*args, **kwargs)
gsl_matrix_minmax_index = __block.gsl_matrix_minmax_index
def gsl_matrix_isnull(*args, **kwargs):
return __block.gsl_matrix_isnull(*args, **kwargs)
gsl_matrix_isnull = __block.gsl_matrix_isnull
def gsl_matrix_diagonal(*args, **kwargs):
return __block.gsl_matrix_diagonal(*args, **kwargs)
gsl_matrix_diagonal = __block.gsl_matrix_diagonal
def gsl_matrix_subdiagonal(*args, **kwargs):
return __block.gsl_matrix_subdiagonal(*args, **kwargs)
gsl_matrix_subdiagonal = __block.gsl_matrix_subdiagonal
def gsl_matrix_superdiagonal(*args, **kwargs):
return __block.gsl_matrix_superdiagonal(*args, **kwargs)
gsl_matrix_superdiagonal = __block.gsl_matrix_superdiagonal
def gsl_vector_float_set_zero(*args, **kwargs):
return __block.gsl_vector_float_set_zero(*args, **kwargs)
gsl_vector_float_set_zero = __block.gsl_vector_float_set_zero
def gsl_vector_float_set_all(*args, **kwargs):
return __block.gsl_vector_float_set_all(*args, **kwargs)
gsl_vector_float_set_all = __block.gsl_vector_float_set_all
def gsl_vector_float_set_basis(*args, **kwargs):
return __block.gsl_vector_float_set_basis(*args, **kwargs)
gsl_vector_float_set_basis = __block.gsl_vector_float_set_basis
def gsl_vector_float_fread(*args, **kwargs):
return __block.gsl_vector_float_fread(*args, **kwargs)
gsl_vector_float_fread = __block.gsl_vector_float_fread
def gsl_vector_float_fwrite(*args, **kwargs):
return __block.gsl_vector_float_fwrite(*args, **kwargs)
gsl_vector_float_fwrite = __block.gsl_vector_float_fwrite
def gsl_vector_float_fscanf(*args, **kwargs):
return __block.gsl_vector_float_fscanf(*args, **kwargs)
gsl_vector_float_fscanf = __block.gsl_vector_float_fscanf
def gsl_vector_float_fprintf(*args, **kwargs):
return __block.gsl_vector_float_fprintf(*args, **kwargs)
gsl_vector_float_fprintf = __block.gsl_vector_float_fprintf
def gsl_vector_float_reverse(*args, **kwargs):
return __block.gsl_vector_float_reverse(*args, **kwargs)
gsl_vector_float_reverse = __block.gsl_vector_float_reverse
def gsl_vector_float_swap(*args, **kwargs):
return __block.gsl_vector_float_swap(*args, **kwargs)
gsl_vector_float_swap = __block.gsl_vector_float_swap
def gsl_vector_float_swap_elements(*args, **kwargs):
return __block.gsl_vector_float_swap_elements(*args, **kwargs)
gsl_vector_float_swap_elements = __block.gsl_vector_float_swap_elements
def gsl_vector_float_max(*args, **kwargs):
return __block.gsl_vector_float_max(*args, **kwargs)
gsl_vector_float_max = __block.gsl_vector_float_max
def gsl_vector_float_min(*args, **kwargs):
return __block.gsl_vector_float_min(*args, **kwargs)
gsl_vector_float_min = __block.gsl_vector_float_min
def gsl_vector_float_minmax(*args, **kwargs):
return __block.gsl_vector_float_minmax(*args, **kwargs)
gsl_vector_float_minmax = __block.gsl_vector_float_minmax
def gsl_vector_float_max_index(*args, **kwargs):
return __block.gsl_vector_float_max_index(*args, **kwargs)
gsl_vector_float_max_index = __block.gsl_vector_float_max_index
def gsl_vector_float_min_index(*args, **kwargs):
return __block.gsl_vector_float_min_index(*args, **kwargs)
gsl_vector_float_min_index = __block.gsl_vector_float_min_index
def gsl_vector_float_minmax_index(*args, **kwargs):
return __block.gsl_vector_float_minmax_index(*args, **kwargs)
gsl_vector_float_minmax_index = __block.gsl_vector_float_minmax_index
def gsl_vector_float_isnull(*args, **kwargs):
return __block.gsl_vector_float_isnull(*args, **kwargs)
gsl_vector_float_isnull = __block.gsl_vector_float_isnull
def gsl_matrix_float_set_zero(*args, **kwargs):
return __block.gsl_matrix_float_set_zero(*args, **kwargs)
gsl_matrix_float_set_zero = __block.gsl_matrix_float_set_zero
def gsl_matrix_float_set_all(*args, **kwargs):
return __block.gsl_matrix_float_set_all(*args, **kwargs)
gsl_matrix_float_set_all = __block.gsl_matrix_float_set_all
def gsl_matrix_float_set_identity(*args, **kwargs):
return __block.gsl_matrix_float_set_identity(*args, **kwargs)
gsl_matrix_float_set_identity = __block.gsl_matrix_float_set_identity
def gsl_matrix_float_fread(*args, **kwargs):
return __block.gsl_matrix_float_fread(*args, **kwargs)
gsl_matrix_float_fread = __block.gsl_matrix_float_fread
def gsl_matrix_float_fwrite(*args, **kwargs):
return __block.gsl_matrix_float_fwrite(*args, **kwargs)
gsl_matrix_float_fwrite = __block.gsl_matrix_float_fwrite
def gsl_matrix_float_fscanf(*args, **kwargs):
return __block.gsl_matrix_float_fscanf(*args, **kwargs)
gsl_matrix_float_fscanf = __block.gsl_matrix_float_fscanf
def gsl_matrix_float_fprintf(*args, **kwargs):
return __block.gsl_matrix_float_fprintf(*args, **kwargs)
gsl_matrix_float_fprintf = __block.gsl_matrix_float_fprintf
def gsl_matrix_float_swap(*args, **kwargs):
return __block.gsl_matrix_float_swap(*args, **kwargs)
gsl_matrix_float_swap = __block.gsl_matrix_float_swap
def gsl_matrix_float_swap_rows(*args, **kwargs):
return __block.gsl_matrix_float_swap_rows(*args, **kwargs)
gsl_matrix_float_swap_rows = __block.gsl_matrix_float_swap_rows
def gsl_matrix_float_swap_columns(*args, **kwargs):
return __block.gsl_matrix_float_swap_columns(*args, **kwargs)
gsl_matrix_float_swap_columns = __block.gsl_matrix_float_swap_columns
def gsl_matrix_float_swap_rowcol(*args, **kwargs):
return __block.gsl_matrix_float_swap_rowcol(*args, **kwargs)
gsl_matrix_float_swap_rowcol = __block.gsl_matrix_float_swap_rowcol
def gsl_matrix_float_transpose(*args, **kwargs):
return __block.gsl_matrix_float_transpose(*args, **kwargs)
gsl_matrix_float_transpose = __block.gsl_matrix_float_transpose
def gsl_matrix_float_max(*args, **kwargs):
return __block.gsl_matrix_float_max(*args, **kwargs)
gsl_matrix_float_max = __block.gsl_matrix_float_max
def gsl_matrix_float_min(*args, **kwargs):
return __block.gsl_matrix_float_min(*args, **kwargs)
gsl_matrix_float_min = __block.gsl_matrix_float_min
def gsl_matrix_float_minmax(*args, **kwargs):
return __block.gsl_matrix_float_minmax(*args, **kwargs)
gsl_matrix_float_minmax = __block.gsl_matrix_float_minmax
def gsl_matrix_float_max_index(*args, **kwargs):
return __block.gsl_matrix_float_max_index(*args, **kwargs)
gsl_matrix_float_max_index = __block.gsl_matrix_float_max_index
def gsl_matrix_float_min_index(*args, **kwargs):
return __block.gsl_matrix_float_min_index(*args, **kwargs)
gsl_matrix_float_min_index = __block.gsl_matrix_float_min_index
def gsl_matrix_float_minmax_index(*args, **kwargs):
return __block.gsl_matrix_float_minmax_index(*args, **kwargs)
gsl_matrix_float_minmax_index = __block.gsl_matrix_float_minmax_index
def gsl_matrix_float_isnull(*args, **kwargs):
return __block.gsl_matrix_float_isnull(*args, **kwargs)
gsl_matrix_float_isnull = __block.gsl_matrix_float_isnull
def gsl_matrix_float_diagonal(*args, **kwargs):
return __block.gsl_matrix_float_diagonal(*args, **kwargs)
gsl_matrix_float_diagonal = __block.gsl_matrix_float_diagonal
def gsl_matrix_float_subdiagonal(*args, **kwargs):
return __block.gsl_matrix_float_subdiagonal(*args, **kwargs)
gsl_matrix_float_subdiagonal = __block.gsl_matrix_float_subdiagonal
def gsl_matrix_float_superdiagonal(*args, **kwargs):
return __block.gsl_matrix_float_superdiagonal(*args, **kwargs)
gsl_matrix_float_superdiagonal = __block.gsl_matrix_float_superdiagonal
def gsl_vector_long_set_zero(*args, **kwargs):
return __block.gsl_vector_long_set_zero(*args, **kwargs)
gsl_vector_long_set_zero = __block.gsl_vector_long_set_zero
def gsl_vector_long_set_all(*args, **kwargs):
return __block.gsl_vector_long_set_all(*args, **kwargs)
gsl_vector_long_set_all = __block.gsl_vector_long_set_all
def gsl_vector_long_set_basis(*args, **kwargs):
return __block.gsl_vector_long_set_basis(*args, **kwargs)
gsl_vector_long_set_basis = __block.gsl_vector_long_set_basis
def gsl_vector_long_fread(*args, **kwargs):
return __block.gsl_vector_long_fread(*args, **kwargs)
gsl_vector_long_fread = __block.gsl_vector_long_fread
def gsl_vector_long_fwrite(*args, **kwargs):
return __block.gsl_vector_long_fwrite(*args, **kwargs)
gsl_vector_long_fwrite = __block.gsl_vector_long_fwrite
def gsl_vector_long_fscanf(*args, **kwargs):
return __block.gsl_vector_long_fscanf(*args, **kwargs)
gsl_vector_long_fscanf = __block.gsl_vector_long_fscanf
def gsl_vector_long_fprintf(*args, **kwargs):
return __block.gsl_vector_long_fprintf(*args, **kwargs)
gsl_vector_long_fprintf = __block.gsl_vector_long_fprintf
def gsl_vector_long_reverse(*args, **kwargs):
return __block.gsl_vector_long_reverse(*args, **kwargs)
gsl_vector_long_reverse = __block.gsl_vector_long_reverse
def gsl_vector_long_swap(*args, **kwargs):
return __block.gsl_vector_long_swap(*args, **kwargs)
gsl_vector_long_swap = __block.gsl_vector_long_swap
def gsl_vector_long_swap_elements(*args, **kwargs):
return __block.gsl_vector_long_swap_elements(*args, **kwargs)
gsl_vector_long_swap_elements = __block.gsl_vector_long_swap_elements
def gsl_vector_long_max(*args, **kwargs):
return __block.gsl_vector_long_max(*args, **kwargs)
gsl_vector_long_max = __block.gsl_vector_long_max
def gsl_vector_long_min(*args, **kwargs):
return __block.gsl_vector_long_min(*args, **kwargs)
gsl_vector_long_min = __block.gsl_vector_long_min
def gsl_vector_long_minmax(*args, **kwargs):
return __block.gsl_vector_long_minmax(*args, **kwargs)
gsl_vector_long_minmax = __block.gsl_vector_long_minmax
def gsl_vector_long_max_index(*args, **kwargs):
return __block.gsl_vector_long_max_index(*args, **kwargs)
gsl_vector_long_max_index = __block.gsl_vector_long_max_index
def gsl_vector_long_min_index(*args, **kwargs):
return __block.gsl_vector_long_min_index(*args, **kwargs)
gsl_vector_long_min_index = __block.gsl_vector_long_min_index
def gsl_vector_long_minmax_index(*args, **kwargs):
return __block.gsl_vector_long_minmax_index(*args, **kwargs)
gsl_vector_long_minmax_index = __block.gsl_vector_long_minmax_index
def gsl_vector_long_isnull(*args, **kwargs):
return __block.gsl_vector_long_isnull(*args, **kwargs)
gsl_vector_long_isnull = __block.gsl_vector_long_isnull
def gsl_matrix_long_set_zero(*args, **kwargs):
return __block.gsl_matrix_long_set_zero(*args, **kwargs)
gsl_matrix_long_set_zero = __block.gsl_matrix_long_set_zero
def gsl_matrix_long_set_all(*args, **kwargs):
return __block.gsl_matrix_long_set_all(*args, **kwargs)
gsl_matrix_long_set_all = __block.gsl_matrix_long_set_all
def gsl_matrix_long_set_identity(*args, **kwargs):
return __block.gsl_matrix_long_set_identity(*args, **kwargs)
gsl_matrix_long_set_identity = __block.gsl_matrix_long_set_identity
def gsl_matrix_long_fread(*args, **kwargs):
return __block.gsl_matrix_long_fread(*args, **kwargs)
gsl_matrix_long_fread = __block.gsl_matrix_long_fread
def gsl_matrix_long_fwrite(*args, **kwargs):
return __block.gsl_matrix_long_fwrite(*args, **kwargs)
gsl_matrix_long_fwrite = __block.gsl_matrix_long_fwrite
def gsl_matrix_long_fscanf(*args, **kwargs):
return __block.gsl_matrix_long_fscanf(*args, **kwargs)
gsl_matrix_long_fscanf = __block.gsl_matrix_long_fscanf
def gsl_matrix_long_fprintf(*args, **kwargs):
return __block.gsl_matrix_long_fprintf(*args, **kwargs)
gsl_matrix_long_fprintf = __block.gsl_matrix_long_fprintf
def gsl_matrix_long_swap(*args, **kwargs):
return __block.gsl_matrix_long_swap(*args, **kwargs)
gsl_matrix_long_swap = __block.gsl_matrix_long_swap
def gsl_matrix_long_swap_rows(*args, **kwargs):
return __block.gsl_matrix_long_swap_rows(*args, **kwargs)
gsl_matrix_long_swap_rows = __block.gsl_matrix_long_swap_rows
def gsl_matrix_long_swap_columns(*args, **kwargs):
return __block.gsl_matrix_long_swap_columns(*args, **kwargs)
gsl_matrix_long_swap_columns = __block.gsl_matrix_long_swap_columns
def gsl_matrix_long_swap_rowcol(*args, **kwargs):
return __block.gsl_matrix_long_swap_rowcol(*args, **kwargs)
gsl_matrix_long_swap_rowcol = __block.gsl_matrix_long_swap_rowcol
def gsl_matrix_long_transpose(*args, **kwargs):
return __block.gsl_matrix_long_transpose(*args, **kwargs)
gsl_matrix_long_transpose = __block.gsl_matrix_long_transpose
def gsl_matrix_long_max(*args, **kwargs):
return __block.gsl_matrix_long_max(*args, **kwargs)
gsl_matrix_long_max = __block.gsl_matrix_long_max
def gsl_matrix_long_min(*args, **kwargs):
return __block.gsl_matrix_long_min(*args, **kwargs)
gsl_matrix_long_min = __block.gsl_matrix_long_min
def gsl_matrix_long_minmax(*args, **kwargs):
return __block.gsl_matrix_long_minmax(*args, **kwargs)
gsl_matrix_long_minmax = __block.gsl_matrix_long_minmax
def gsl_matrix_long_max_index(*args, **kwargs):
return __block.gsl_matrix_long_max_index(*args, **kwargs)
gsl_matrix_long_max_index = __block.gsl_matrix_long_max_index
def gsl_matrix_long_min_index(*args, **kwargs):
return __block.gsl_matrix_long_min_index(*args, **kwargs)
gsl_matrix_long_min_index = __block.gsl_matrix_long_min_index
def gsl_matrix_long_minmax_index(*args, **kwargs):
return __block.gsl_matrix_long_minmax_index(*args, **kwargs)
gsl_matrix_long_minmax_index = __block.gsl_matrix_long_minmax_index
def gsl_matrix_long_isnull(*args, **kwargs):
return __block.gsl_matrix_long_isnull(*args, **kwargs)
gsl_matrix_long_isnull = __block.gsl_matrix_long_isnull
def gsl_matrix_long_diagonal(*args, **kwargs):
return __block.gsl_matrix_long_diagonal(*args, **kwargs)
gsl_matrix_long_diagonal = __block.gsl_matrix_long_diagonal
def gsl_matrix_long_subdiagonal(*args, **kwargs):
return __block.gsl_matrix_long_subdiagonal(*args, **kwargs)
gsl_matrix_long_subdiagonal = __block.gsl_matrix_long_subdiagonal
def gsl_matrix_long_superdiagonal(*args, **kwargs):
return __block.gsl_matrix_long_superdiagonal(*args, **kwargs)
gsl_matrix_long_superdiagonal = __block.gsl_matrix_long_superdiagonal
def gsl_vector_int_set_zero(*args, **kwargs):
return __block.gsl_vector_int_set_zero(*args, **kwargs)
gsl_vector_int_set_zero = __block.gsl_vector_int_set_zero
def gsl_vector_int_set_all(*args, **kwargs):
return __block.gsl_vector_int_set_all(*args, **kwargs)
gsl_vector_int_set_all = __block.gsl_vector_int_set_all
def gsl_vector_int_set_basis(*args, **kwargs):
return __block.gsl_vector_int_set_basis(*args, **kwargs)
gsl_vector_int_set_basis = __block.gsl_vector_int_set_basis
def gsl_vector_int_fread(*args, **kwargs):
return __block.gsl_vector_int_fread(*args, **kwargs)
gsl_vector_int_fread = __block.gsl_vector_int_fread
def gsl_vector_int_fwrite(*args, **kwargs):
return __block.gsl_vector_int_fwrite(*args, **kwargs)
gsl_vector_int_fwrite = __block.gsl_vector_int_fwrite
def gsl_vector_int_fscanf(*args, **kwargs):
return __block.gsl_vector_int_fscanf(*args, **kwargs)
gsl_vector_int_fscanf = __block.gsl_vector_int_fscanf
def gsl_vector_int_fprintf(*args, **kwargs):
return __block.gsl_vector_int_fprintf(*args, **kwargs)
gsl_vector_int_fprintf = __block.gsl_vector_int_fprintf
def gsl_vector_int_reverse(*args, **kwargs):
return __block.gsl_vector_int_reverse(*args, **kwargs)
gsl_vector_int_reverse = __block.gsl_vector_int_reverse
def gsl_vector_int_swap(*args, **kwargs):
return __block.gsl_vector_int_swap(*args, **kwargs)
gsl_vector_int_swap = __block.gsl_vector_int_swap
def gsl_vector_int_swap_elements(*args, **kwargs):
return __block.gsl_vector_int_swap_elements(*args, **kwargs)
gsl_vector_int_swap_elements = __block.gsl_vector_int_swap_elements
def gsl_vector_int_max(*args, **kwargs):
return __block.gsl_vector_int_max(*args, **kwargs)
gsl_vector_int_max = __block.gsl_vector_int_max
def gsl_vector_int_min(*args, **kwargs):
return __block.gsl_vector_int_min(*args, **kwargs)
gsl_vector_int_min = __block.gsl_vector_int_min
def gsl_vector_int_minmax(*args, **kwargs):
return __block.gsl_vector_int_minmax(*args, **kwargs)
gsl_vector_int_minmax = __block.gsl_vector_int_minmax
def gsl_vector_int_max_index(*args, **kwargs):
return __block.gsl_vector_int_max_index(*args, **kwargs)
gsl_vector_int_max_index = __block.gsl_vector_int_max_index
def gsl_vector_int_min_index(*args, **kwargs):
return __block.gsl_vector_int_min_index(*args, **kwargs)
gsl_vector_int_min_index = __block.gsl_vector_int_min_index
def gsl_vector_int_minmax_index(*args, **kwargs):
return __block.gsl_vector_int_minmax_index(*args, **kwargs)
gsl_vector_int_minmax_index = __block.gsl_vector_int_minmax_index
def gsl_vector_int_isnull(*args, **kwargs):
return __block.gsl_vector_int_isnull(*args, **kwargs)
gsl_vector_int_isnull = __block.gsl_vector_int_isnull
def gsl_matrix_int_set_zero(*args, **kwargs):
return __block.gsl_matrix_int_set_zero(*args, **kwargs)
gsl_matrix_int_set_zero = __block.gsl_matrix_int_set_zero
def gsl_matrix_int_set_all(*args, **kwargs):
return __block.gsl_matrix_int_set_all(*args, **kwargs)
gsl_matrix_int_set_all = __block.gsl_matrix_int_set_all
def gsl_matrix_int_set_identity(*args, **kwargs):
return __block.gsl_matrix_int_set_identity(*args, **kwargs)
gsl_matrix_int_set_identity = __block.gsl_matrix_int_set_identity
def gsl_matrix_int_fread(*args, **kwargs):
return __block.gsl_matrix_int_fread(*args, **kwargs)
gsl_matrix_int_fread = __block.gsl_matrix_int_fread
def gsl_matrix_int_fwrite(*args, **kwargs):
return __block.gsl_matrix_int_fwrite(*args, **kwargs)
gsl_matrix_int_fwrite = __block.gsl_matrix_int_fwrite
def gsl_matrix_int_fscanf(*args, **kwargs):
return __block.gsl_matrix_int_fscanf(*args, **kwargs)
gsl_matrix_int_fscanf = __block.gsl_matrix_int_fscanf
def gsl_matrix_int_fprintf(*args, **kwargs):
return __block.gsl_matrix_int_fprintf(*args, **kwargs)
gsl_matrix_int_fprintf = __block.gsl_matrix_int_fprintf
def gsl_matrix_int_swap(*args, **kwargs):
return __block.gsl_matrix_int_swap(*args, **kwargs)
gsl_matrix_int_swap = __block.gsl_matrix_int_swap
def gsl_matrix_int_swap_rows(*args, **kwargs):
return __block.gsl_matrix_int_swap_rows(*args, **kwargs)
gsl_matrix_int_swap_rows = __block.gsl_matrix_int_swap_rows
def gsl_matrix_int_swap_columns(*args, **kwargs):
return __block.gsl_matrix_int_swap_columns(*args, **kwargs)
gsl_matrix_int_swap_columns = __block.gsl_matrix_int_swap_columns
def gsl_matrix_int_swap_rowcol(*args, **kwargs):
return __block.gsl_matrix_int_swap_rowcol(*args, **kwargs)
gsl_matrix_int_swap_rowcol = __block.gsl_matrix_int_swap_rowcol
def gsl_matrix_int_transpose(*args, **kwargs):
return __block.gsl_matrix_int_transpose(*args, **kwargs)
gsl_matrix_int_transpose = __block.gsl_matrix_int_transpose
def gsl_matrix_int_max(*args, **kwargs):
return __block.gsl_matrix_int_max(*args, **kwargs)
gsl_matrix_int_max = __block.gsl_matrix_int_max
def gsl_matrix_int_min(*args, **kwargs):
return __block.gsl_matrix_int_min(*args, **kwargs)
gsl_matrix_int_min = __block.gsl_matrix_int_min
def gsl_matrix_int_minmax(*args, **kwargs):
return __block.gsl_matrix_int_minmax(*args, **kwargs)
gsl_matrix_int_minmax = __block.gsl_matrix_int_minmax
def gsl_matrix_int_max_index(*args, **kwargs):
return __block.gsl_matrix_int_max_index(*args, **kwargs)
gsl_matrix_int_max_index = __block.gsl_matrix_int_max_index
def gsl_matrix_int_min_index(*args, **kwargs):
return __block.gsl_matrix_int_min_index(*args, **kwargs)
gsl_matrix_int_min_index = __block.gsl_matrix_int_min_index
def gsl_matrix_int_minmax_index(*args, **kwargs):
return __block.gsl_matrix_int_minmax_index(*args, **kwargs)
gsl_matrix_int_minmax_index = __block.gsl_matrix_int_minmax_index
def gsl_matrix_int_isnull(*args, **kwargs):
return __block.gsl_matrix_int_isnull(*args, **kwargs)
gsl_matrix_int_isnull = __block.gsl_matrix_int_isnull
def gsl_matrix_int_diagonal(*args, **kwargs):
return __block.gsl_matrix_int_diagonal(*args, **kwargs)
gsl_matrix_int_diagonal = __block.gsl_matrix_int_diagonal
def gsl_matrix_int_subdiagonal(*args, **kwargs):
return __block.gsl_matrix_int_subdiagonal(*args, **kwargs)
gsl_matrix_int_subdiagonal = __block.gsl_matrix_int_subdiagonal
def gsl_matrix_int_superdiagonal(*args, **kwargs):
return __block.gsl_matrix_int_superdiagonal(*args, **kwargs)
gsl_matrix_int_superdiagonal = __block.gsl_matrix_int_superdiagonal
def gsl_vector_short_set_zero(*args, **kwargs):
return __block.gsl_vector_short_set_zero(*args, **kwargs)
gsl_vector_short_set_zero = __block.gsl_vector_short_set_zero
def gsl_vector_short_set_all(*args, **kwargs):
return __block.gsl_vector_short_set_all(*args, **kwargs)
gsl_vector_short_set_all = __block.gsl_vector_short_set_all
def gsl_vector_short_set_basis(*args, **kwargs):
return __block.gsl_vector_short_set_basis(*args, **kwargs)
gsl_vector_short_set_basis = __block.gsl_vector_short_set_basis
def gsl_vector_short_fread(*args, **kwargs):
return __block.gsl_vector_short_fread(*args, **kwargs)
gsl_vector_short_fread = __block.gsl_vector_short_fread
def gsl_vector_short_fwrite(*args, **kwargs):
return __block.gsl_vector_short_fwrite(*args, **kwargs)
gsl_vector_short_fwrite = __block.gsl_vector_short_fwrite
def gsl_vector_short_fscanf(*args, **kwargs):
return __block.gsl_vector_short_fscanf(*args, **kwargs)
gsl_vector_short_fscanf = __block.gsl_vector_short_fscanf
def gsl_vector_short_fprintf(*args, **kwargs):
return __block.gsl_vector_short_fprintf(*args, **kwargs)
gsl_vector_short_fprintf = __block.gsl_vector_short_fprintf
def gsl_vector_short_reverse(*args, **kwargs):
return __block.gsl_vector_short_reverse(*args, **kwargs)
gsl_vector_short_reverse = __block.gsl_vector_short_reverse
def gsl_vector_short_swap(*args, **kwargs):
return __block.gsl_vector_short_swap(*args, **kwargs)
gsl_vector_short_swap = __block.gsl_vector_short_swap
def gsl_vector_short_swap_elements(*args, **kwargs):
return __block.gsl_vector_short_swap_elements(*args, **kwargs)
gsl_vector_short_swap_elements = __block.gsl_vector_short_swap_elements
def gsl_vector_short_max(*args, **kwargs):
return __block.gsl_vector_short_max(*args, **kwargs)
gsl_vector_short_max = __block.gsl_vector_short_max
def gsl_vector_short_min(*args, **kwargs):
return __block.gsl_vector_short_min(*args, **kwargs)
gsl_vector_short_min = __block.gsl_vector_short_min
def gsl_vector_short_minmax(*args, **kwargs):
return __block.gsl_vector_short_minmax(*args, **kwargs)
gsl_vector_short_minmax = __block.gsl_vector_short_minmax
def gsl_vector_short_max_index(*args, **kwargs):
return __block.gsl_vector_short_max_index(*args, **kwargs)
gsl_vector_short_max_index = __block.gsl_vector_short_max_index
def gsl_vector_short_min_index(*args, **kwargs):
return __block.gsl_vector_short_min_index(*args, **kwargs)
gsl_vector_short_min_index = __block.gsl_vector_short_min_index
def gsl_vector_short_minmax_index(*args, **kwargs):
return __block.gsl_vector_short_minmax_index(*args, **kwargs)
gsl_vector_short_minmax_index = __block.gsl_vector_short_minmax_index
def gsl_vector_short_isnull(*args, **kwargs):
return __block.gsl_vector_short_isnull(*args, **kwargs)
gsl_vector_short_isnull = __block.gsl_vector_short_isnull
def gsl_matrix_short_set_zero(*args, **kwargs):
return __block.gsl_matrix_short_set_zero(*args, **kwargs)
gsl_matrix_short_set_zero = __block.gsl_matrix_short_set_zero
def gsl_matrix_short_set_all(*args, **kwargs):
return __block.gsl_matrix_short_set_all(*args, **kwargs)
gsl_matrix_short_set_all = __block.gsl_matrix_short_set_all
def gsl_matrix_short_set_identity(*args, **kwargs):
return __block.gsl_matrix_short_set_identity(*args, **kwargs)
gsl_matrix_short_set_identity = __block.gsl_matrix_short_set_identity
def gsl_matrix_short_fread(*args, **kwargs):
return __block.gsl_matrix_short_fread(*args, **kwargs)
gsl_matrix_short_fread = __block.gsl_matrix_short_fread
def gsl_matrix_short_fwrite(*args, **kwargs):
return __block.gsl_matrix_short_fwrite(*args, **kwargs)
gsl_matrix_short_fwrite = __block.gsl_matrix_short_fwrite
def gsl_matrix_short_fscanf(*args, **kwargs):
return __block.gsl_matrix_short_fscanf(*args, **kwargs)
gsl_matrix_short_fscanf = __block.gsl_matrix_short_fscanf
def gsl_matrix_short_fprintf(*args, **kwargs):
return __block.gsl_matrix_short_fprintf(*args, **kwargs)
gsl_matrix_short_fprintf = __block.gsl_matrix_short_fprintf
def gsl_matrix_short_swap(*args, **kwargs):
return __block.gsl_matrix_short_swap(*args, **kwargs)
gsl_matrix_short_swap = __block.gsl_matrix_short_swap
def gsl_matrix_short_swap_rows(*args, **kwargs):
return __block.gsl_matrix_short_swap_rows(*args, **kwargs)
gsl_matrix_short_swap_rows = __block.gsl_matrix_short_swap_rows
def gsl_matrix_short_swap_columns(*args, **kwargs):
return __block.gsl_matrix_short_swap_columns(*args, **kwargs)
gsl_matrix_short_swap_columns = __block.gsl_matrix_short_swap_columns
def gsl_matrix_short_swap_rowcol(*args, **kwargs):
return __block.gsl_matrix_short_swap_rowcol(*args, **kwargs)
gsl_matrix_short_swap_rowcol = __block.gsl_matrix_short_swap_rowcol
def gsl_matrix_short_transpose(*args, **kwargs):
return __block.gsl_matrix_short_transpose(*args, **kwargs)
gsl_matrix_short_transpose = __block.gsl_matrix_short_transpose
def gsl_matrix_short_max(*args, **kwargs):
return __block.gsl_matrix_short_max(*args, **kwargs)
gsl_matrix_short_max = __block.gsl_matrix_short_max
def gsl_matrix_short_min(*args, **kwargs):
return __block.gsl_matrix_short_min(*args, **kwargs)
gsl_matrix_short_min = __block.gsl_matrix_short_min
def gsl_matrix_short_minmax(*args, **kwargs):
return __block.gsl_matrix_short_minmax(*args, **kwargs)
gsl_matrix_short_minmax = __block.gsl_matrix_short_minmax
def gsl_matrix_short_max_index(*args, **kwargs):
return __block.gsl_matrix_short_max_index(*args, **kwargs)
gsl_matrix_short_max_index = __block.gsl_matrix_short_max_index
def gsl_matrix_short_min_index(*args, **kwargs):
return __block.gsl_matrix_short_min_index(*args, **kwargs)
gsl_matrix_short_min_index = __block.gsl_matrix_short_min_index
def gsl_matrix_short_minmax_index(*args, **kwargs):
return __block.gsl_matrix_short_minmax_index(*args, **kwargs)
gsl_matrix_short_minmax_index = __block.gsl_matrix_short_minmax_index
def gsl_matrix_short_isnull(*args, **kwargs):
return __block.gsl_matrix_short_isnull(*args, **kwargs)
gsl_matrix_short_isnull = __block.gsl_matrix_short_isnull
def gsl_matrix_short_diagonal(*args, **kwargs):
return __block.gsl_matrix_short_diagonal(*args, **kwargs)
gsl_matrix_short_diagonal = __block.gsl_matrix_short_diagonal
def gsl_matrix_short_subdiagonal(*args, **kwargs):
return __block.gsl_matrix_short_subdiagonal(*args, **kwargs)
gsl_matrix_short_subdiagonal = __block.gsl_matrix_short_subdiagonal
def gsl_matrix_short_superdiagonal(*args, **kwargs):
return __block.gsl_matrix_short_superdiagonal(*args, **kwargs)
gsl_matrix_short_superdiagonal = __block.gsl_matrix_short_superdiagonal
def gsl_vector_char_set_zero(*args, **kwargs):
return __block.gsl_vector_char_set_zero(*args, **kwargs)
gsl_vector_char_set_zero = __block.gsl_vector_char_set_zero
def gsl_vector_char_set_all(*args, **kwargs):
return __block.gsl_vector_char_set_all(*args, **kwargs)
gsl_vector_char_set_all = __block.gsl_vector_char_set_all
def gsl_vector_char_set_basis(*args, **kwargs):
return __block.gsl_vector_char_set_basis(*args, **kwargs)
gsl_vector_char_set_basis = __block.gsl_vector_char_set_basis
def gsl_vector_char_fread(*args, **kwargs):
return __block.gsl_vector_char_fread(*args, **kwargs)
gsl_vector_char_fread = __block.gsl_vector_char_fread
def gsl_vector_char_fwrite(*args, **kwargs):
return __block.gsl_vector_char_fwrite(*args, **kwargs)
gsl_vector_char_fwrite = __block.gsl_vector_char_fwrite
def gsl_vector_char_fscanf(*args, **kwargs):
return __block.gsl_vector_char_fscanf(*args, **kwargs)
gsl_vector_char_fscanf = __block.gsl_vector_char_fscanf
def gsl_vector_char_fprintf(*args, **kwargs):
return __block.gsl_vector_char_fprintf(*args, **kwargs)
gsl_vector_char_fprintf = __block.gsl_vector_char_fprintf
def gsl_vector_char_reverse(*args, **kwargs):
return __block.gsl_vector_char_reverse(*args, **kwargs)
gsl_vector_char_reverse = __block.gsl_vector_char_reverse
def gsl_vector_char_swap(*args, **kwargs):
return __block.gsl_vector_char_swap(*args, **kwargs)
gsl_vector_char_swap = __block.gsl_vector_char_swap
def gsl_vector_char_swap_elements(*args, **kwargs):
return __block.gsl_vector_char_swap_elements(*args, **kwargs)
gsl_vector_char_swap_elements = __block.gsl_vector_char_swap_elements
def gsl_vector_char_max(*args, **kwargs):
return __block.gsl_vector_char_max(*args, **kwargs)
gsl_vector_char_max = __block.gsl_vector_char_max
def gsl_vector_char_min(*args, **kwargs):
return __block.gsl_vector_char_min(*args, **kwargs)
gsl_vector_char_min = __block.gsl_vector_char_min
def gsl_vector_char_minmax(*args, **kwargs):
return __block.gsl_vector_char_minmax(*args, **kwargs)
gsl_vector_char_minmax = __block.gsl_vector_char_minmax
def gsl_vector_char_max_index(*args, **kwargs):
return __block.gsl_vector_char_max_index(*args, **kwargs)
gsl_vector_char_max_index = __block.gsl_vector_char_max_index
def gsl_vector_char_min_index(*args, **kwargs):
return __block.gsl_vector_char_min_index(*args, **kwargs)
gsl_vector_char_min_index = __block.gsl_vector_char_min_index
def gsl_vector_char_minmax_index(*args, **kwargs):
return __block.gsl_vector_char_minmax_index(*args, **kwargs)
gsl_vector_char_minmax_index = __block.gsl_vector_char_minmax_index
def gsl_vector_char_isnull(*args, **kwargs):
return __block.gsl_vector_char_isnull(*args, **kwargs)
gsl_vector_char_isnull = __block.gsl_vector_char_isnull
def gsl_matrix_char_set_zero(*args, **kwargs):
return __block.gsl_matrix_char_set_zero(*args, **kwargs)
gsl_matrix_char_set_zero = __block.gsl_matrix_char_set_zero
def gsl_matrix_char_set_all(*args, **kwargs):
return __block.gsl_matrix_char_set_all(*args, **kwargs)
gsl_matrix_char_set_all = __block.gsl_matrix_char_set_all
def gsl_matrix_char_set_identity(*args, **kwargs):
return __block.gsl_matrix_char_set_identity(*args, **kwargs)
gsl_matrix_char_set_identity = __block.gsl_matrix_char_set_identity
def gsl_matrix_char_fread(*args, **kwargs):
return __block.gsl_matrix_char_fread(*args, **kwargs)
gsl_matrix_char_fread = __block.gsl_matrix_char_fread
def gsl_matrix_char_fwrite(*args, **kwargs):
return __block.gsl_matrix_char_fwrite(*args, **kwargs)
gsl_matrix_char_fwrite = __block.gsl_matrix_char_fwrite
def gsl_matrix_char_fscanf(*args, **kwargs):
return __block.gsl_matrix_char_fscanf(*args, **kwargs)
gsl_matrix_char_fscanf = __block.gsl_matrix_char_fscanf
def gsl_matrix_char_fprintf(*args, **kwargs):
return __block.gsl_matrix_char_fprintf(*args, **kwargs)
gsl_matrix_char_fprintf = __block.gsl_matrix_char_fprintf
def gsl_matrix_char_swap(*args, **kwargs):
return __block.gsl_matrix_char_swap(*args, **kwargs)
gsl_matrix_char_swap = __block.gsl_matrix_char_swap
def gsl_matrix_char_swap_rows(*args, **kwargs):
return __block.gsl_matrix_char_swap_rows(*args, **kwargs)
gsl_matrix_char_swap_rows = __block.gsl_matrix_char_swap_rows
def gsl_matrix_char_swap_columns(*args, **kwargs):
return __block.gsl_matrix_char_swap_columns(*args, **kwargs)
gsl_matrix_char_swap_columns = __block.gsl_matrix_char_swap_columns
def gsl_matrix_char_swap_rowcol(*args, **kwargs):
return __block.gsl_matrix_char_swap_rowcol(*args, **kwargs)
gsl_matrix_char_swap_rowcol = __block.gsl_matrix_char_swap_rowcol
def gsl_matrix_char_transpose(*args, **kwargs):
return __block.gsl_matrix_char_transpose(*args, **kwargs)
gsl_matrix_char_transpose = __block.gsl_matrix_char_transpose
def gsl_matrix_char_max(*args, **kwargs):
return __block.gsl_matrix_char_max(*args, **kwargs)
gsl_matrix_char_max = __block.gsl_matrix_char_max
def gsl_matrix_char_min(*args, **kwargs):
return __block.gsl_matrix_char_min(*args, **kwargs)
gsl_matrix_char_min = __block.gsl_matrix_char_min
def gsl_matrix_char_minmax(*args, **kwargs):
return __block.gsl_matrix_char_minmax(*args, **kwargs)
gsl_matrix_char_minmax = __block.gsl_matrix_char_minmax
def gsl_matrix_char_max_index(*args, **kwargs):
return __block.gsl_matrix_char_max_index(*args, **kwargs)
gsl_matrix_char_max_index = __block.gsl_matrix_char_max_index
def gsl_matrix_char_min_index(*args, **kwargs):
return __block.gsl_matrix_char_min_index(*args, **kwargs)
gsl_matrix_char_min_index = __block.gsl_matrix_char_min_index
def gsl_matrix_char_minmax_index(*args, **kwargs):
return __block.gsl_matrix_char_minmax_index(*args, **kwargs)
gsl_matrix_char_minmax_index = __block.gsl_matrix_char_minmax_index
def gsl_matrix_char_isnull(*args, **kwargs):
return __block.gsl_matrix_char_isnull(*args, **kwargs)
gsl_matrix_char_isnull = __block.gsl_matrix_char_isnull
def gsl_matrix_char_diagonal(*args, **kwargs):
return __block.gsl_matrix_char_diagonal(*args, **kwargs)
gsl_matrix_char_diagonal = __block.gsl_matrix_char_diagonal
def gsl_matrix_char_subdiagonal(*args, **kwargs):
return __block.gsl_matrix_char_subdiagonal(*args, **kwargs)
gsl_matrix_char_subdiagonal = __block.gsl_matrix_char_subdiagonal
def gsl_matrix_char_superdiagonal(*args, **kwargs):
return __block.gsl_matrix_char_superdiagonal(*args, **kwargs)
gsl_matrix_char_superdiagonal = __block.gsl_matrix_char_superdiagonal
def gsl_vector_complex_set_zero(*args, **kwargs):
return __block.gsl_vector_complex_set_zero(*args, **kwargs)
gsl_vector_complex_set_zero = __block.gsl_vector_complex_set_zero
def gsl_vector_complex_set_all(*args, **kwargs):
return __block.gsl_vector_complex_set_all(*args, **kwargs)
gsl_vector_complex_set_all = __block.gsl_vector_complex_set_all
def gsl_vector_complex_set_basis(*args, **kwargs):
return __block.gsl_vector_complex_set_basis(*args, **kwargs)
gsl_vector_complex_set_basis = __block.gsl_vector_complex_set_basis
def gsl_vector_complex_fread(*args, **kwargs):
return __block.gsl_vector_complex_fread(*args, **kwargs)
gsl_vector_complex_fread = __block.gsl_vector_complex_fread
def gsl_vector_complex_fwrite(*args, **kwargs):
return __block.gsl_vector_complex_fwrite(*args, **kwargs)
gsl_vector_complex_fwrite = __block.gsl_vector_complex_fwrite
def gsl_vector_complex_fscanf(*args, **kwargs):
return __block.gsl_vector_complex_fscanf(*args, **kwargs)
gsl_vector_complex_fscanf = __block.gsl_vector_complex_fscanf
def gsl_vector_complex_fprintf(*args, **kwargs):
return __block.gsl_vector_complex_fprintf(*args, **kwargs)
gsl_vector_complex_fprintf = __block.gsl_vector_complex_fprintf
def gsl_vector_complex_reverse(*args, **kwargs):
return __block.gsl_vector_complex_reverse(*args, **kwargs)
gsl_vector_complex_reverse = __block.gsl_vector_complex_reverse
def gsl_vector_complex_swap(*args, **kwargs):
return __block.gsl_vector_complex_swap(*args, **kwargs)
gsl_vector_complex_swap = __block.gsl_vector_complex_swap
def gsl_vector_complex_swap_elements(*args, **kwargs):
return __block.gsl_vector_complex_swap_elements(*args, **kwargs)
gsl_vector_complex_swap_elements = __block.gsl_vector_complex_swap_elements
def gsl_vector_complex_isnull(*args, **kwargs):
return __block.gsl_vector_complex_isnull(*args, **kwargs)
gsl_vector_complex_isnull = __block.gsl_vector_complex_isnull
def gsl_matrix_complex_set_zero(*args, **kwargs):
return __block.gsl_matrix_complex_set_zero(*args, **kwargs)
gsl_matrix_complex_set_zero = __block.gsl_matrix_complex_set_zero
def gsl_matrix_complex_set_all(*args, **kwargs):
return __block.gsl_matrix_complex_set_all(*args, **kwargs)
gsl_matrix_complex_set_all = __block.gsl_matrix_complex_set_all
def gsl_matrix_complex_set_identity(*args, **kwargs):
return __block.gsl_matrix_complex_set_identity(*args, **kwargs)
gsl_matrix_complex_set_identity = __block.gsl_matrix_complex_set_identity
def gsl_matrix_complex_fread(*args, **kwargs):
return __block.gsl_matrix_complex_fread(*args, **kwargs)
gsl_matrix_complex_fread = __block.gsl_matrix_complex_fread
def gsl_matrix_complex_fwrite(*args, **kwargs):
return __block.gsl_matrix_complex_fwrite(*args, **kwargs)
gsl_matrix_complex_fwrite = __block.gsl_matrix_complex_fwrite
def gsl_matrix_complex_fscanf(*args, **kwargs):
return __block.gsl_matrix_complex_fscanf(*args, **kwargs)
gsl_matrix_complex_fscanf = __block.gsl_matrix_complex_fscanf
def gsl_matrix_complex_fprintf(*args, **kwargs):
return __block.gsl_matrix_complex_fprintf(*args, **kwargs)
gsl_matrix_complex_fprintf = __block.gsl_matrix_complex_fprintf
def gsl_matrix_complex_swap(*args, **kwargs):
return __block.gsl_matrix_complex_swap(*args, **kwargs)
gsl_matrix_complex_swap = __block.gsl_matrix_complex_swap
def gsl_matrix_complex_swap_rows(*args, **kwargs):
return __block.gsl_matrix_complex_swap_rows(*args, **kwargs)
gsl_matrix_complex_swap_rows = __block.gsl_matrix_complex_swap_rows
def gsl_matrix_complex_swap_columns(*args, **kwargs):
return __block.gsl_matrix_complex_swap_columns(*args, **kwargs)
gsl_matrix_complex_swap_columns = __block.gsl_matrix_complex_swap_columns
def gsl_matrix_complex_swap_rowcol(*args, **kwargs):
return __block.gsl_matrix_complex_swap_rowcol(*args, **kwargs)
gsl_matrix_complex_swap_rowcol = __block.gsl_matrix_complex_swap_rowcol
def gsl_matrix_complex_transpose(*args, **kwargs):
return __block.gsl_matrix_complex_transpose(*args, **kwargs)
gsl_matrix_complex_transpose = __block.gsl_matrix_complex_transpose
def gsl_matrix_complex_isnull(*args, **kwargs):
return __block.gsl_matrix_complex_isnull(*args, **kwargs)
gsl_matrix_complex_isnull = __block.gsl_matrix_complex_isnull
def gsl_matrix_complex_diagonal(*args, **kwargs):
return __block.gsl_matrix_complex_diagonal(*args, **kwargs)
gsl_matrix_complex_diagonal = __block.gsl_matrix_complex_diagonal
def gsl_matrix_complex_subdiagonal(*args, **kwargs):
return __block.gsl_matrix_complex_subdiagonal(*args, **kwargs)
gsl_matrix_complex_subdiagonal = __block.gsl_matrix_complex_subdiagonal
def gsl_matrix_complex_superdiagonal(*args, **kwargs):
return __block.gsl_matrix_complex_superdiagonal(*args, **kwargs)
gsl_matrix_complex_superdiagonal = __block.gsl_matrix_complex_superdiagonal
def gsl_vector_complex_float_set_zero(*args, **kwargs):
return __block.gsl_vector_complex_float_set_zero(*args, **kwargs)
gsl_vector_complex_float_set_zero = __block.gsl_vector_complex_float_set_zero
def gsl_vector_complex_float_set_all(*args, **kwargs):
return __block.gsl_vector_complex_float_set_all(*args, **kwargs)
gsl_vector_complex_float_set_all = __block.gsl_vector_complex_float_set_all
def gsl_vector_complex_float_set_basis(*args, **kwargs):
return __block.gsl_vector_complex_float_set_basis(*args, **kwargs)
gsl_vector_complex_float_set_basis = __block.gsl_vector_complex_float_set_basis
def gsl_vector_complex_float_fread(*args, **kwargs):
return __block.gsl_vector_complex_float_fread(*args, **kwargs)
gsl_vector_complex_float_fread = __block.gsl_vector_complex_float_fread
def gsl_vector_complex_float_fwrite(*args, **kwargs):
return __block.gsl_vector_complex_float_fwrite(*args, **kwargs)
gsl_vector_complex_float_fwrite = __block.gsl_vector_complex_float_fwrite
def gsl_vector_complex_float_fscanf(*args, **kwargs):
return __block.gsl_vector_complex_float_fscanf(*args, **kwargs)
gsl_vector_complex_float_fscanf = __block.gsl_vector_complex_float_fscanf
def gsl_vector_complex_float_fprintf(*args, **kwargs):
return __block.gsl_vector_complex_float_fprintf(*args, **kwargs)
gsl_vector_complex_float_fprintf = __block.gsl_vector_complex_float_fprintf
def gsl_vector_complex_float_reverse(*args, **kwargs):
return __block.gsl_vector_complex_float_reverse(*args, **kwargs)
gsl_vector_complex_float_reverse = __block.gsl_vector_complex_float_reverse
def gsl_vector_complex_float_swap(*args, **kwargs):
return __block.gsl_vector_complex_float_swap(*args, **kwargs)
gsl_vector_complex_float_swap = __block.gsl_vector_complex_float_swap
def gsl_vector_complex_float_swap_elements(*args, **kwargs):
return __block.gsl_vector_complex_float_swap_elements(*args, **kwargs)
gsl_vector_complex_float_swap_elements = __block.gsl_vector_complex_float_swap_elements
def gsl_vector_complex_float_isnull(*args, **kwargs):
return __block.gsl_vector_complex_float_isnull(*args, **kwargs)
gsl_vector_complex_float_isnull = __block.gsl_vector_complex_float_isnull
def gsl_matrix_complex_float_set_zero(*args, **kwargs):
return __block.gsl_matrix_complex_float_set_zero(*args, **kwargs)
gsl_matrix_complex_float_set_zero = __block.gsl_matrix_complex_float_set_zero
def gsl_matrix_complex_float_set_all(*args, **kwargs):
return __block.gsl_matrix_complex_float_set_all(*args, **kwargs)
gsl_matrix_complex_float_set_all = __block.gsl_matrix_complex_float_set_all
def gsl_matrix_complex_float_set_identity(*args, **kwargs):
return __block.gsl_matrix_complex_float_set_identity(*args, **kwargs)
gsl_matrix_complex_float_set_identity = __block.gsl_matrix_complex_float_set_identity
def gsl_matrix_complex_float_fread(*args, **kwargs):
return __block.gsl_matrix_complex_float_fread(*args, **kwargs)
gsl_matrix_complex_float_fread = __block.gsl_matrix_complex_float_fread
def gsl_matrix_complex_float_fwrite(*args, **kwargs):
return __block.gsl_matrix_complex_float_fwrite(*args, **kwargs)
gsl_matrix_complex_float_fwrite = __block.gsl_matrix_complex_float_fwrite
def gsl_matrix_complex_float_fscanf(*args, **kwargs):
return __block.gsl_matrix_complex_float_fscanf(*args, **kwargs)
gsl_matrix_complex_float_fscanf = __block.gsl_matrix_complex_float_fscanf
def gsl_matrix_complex_float_fprintf(*args, **kwargs):
return __block.gsl_matrix_complex_float_fprintf(*args, **kwargs)
gsl_matrix_complex_float_fprintf = __block.gsl_matrix_complex_float_fprintf
def gsl_matrix_complex_float_swap(*args, **kwargs):
return __block.gsl_matrix_complex_float_swap(*args, **kwargs)
gsl_matrix_complex_float_swap = __block.gsl_matrix_complex_float_swap
def gsl_matrix_complex_float_swap_rows(*args, **kwargs):
return __block.gsl_matrix_complex_float_swap_rows(*args, **kwargs)
gsl_matrix_complex_float_swap_rows = __block.gsl_matrix_complex_float_swap_rows
def gsl_matrix_complex_float_swap_columns(*args, **kwargs):
return __block.gsl_matrix_complex_float_swap_columns(*args, **kwargs)
gsl_matrix_complex_float_swap_columns = __block.gsl_matrix_complex_float_swap_columns
def gsl_matrix_complex_float_swap_rowcol(*args, **kwargs):
return __block.gsl_matrix_complex_float_swap_rowcol(*args, **kwargs)
gsl_matrix_complex_float_swap_rowcol = __block.gsl_matrix_complex_float_swap_rowcol
def gsl_matrix_complex_float_transpose(*args, **kwargs):
return __block.gsl_matrix_complex_float_transpose(*args, **kwargs)
gsl_matrix_complex_float_transpose = __block.gsl_matrix_complex_float_transpose
def gsl_matrix_complex_float_isnull(*args, **kwargs):
return __block.gsl_matrix_complex_float_isnull(*args, **kwargs)
gsl_matrix_complex_float_isnull = __block.gsl_matrix_complex_float_isnull
def gsl_matrix_complex_float_diagonal(*args, **kwargs):
return __block.gsl_matrix_complex_float_diagonal(*args, **kwargs)
gsl_matrix_complex_float_diagonal = __block.gsl_matrix_complex_float_diagonal
def gsl_matrix_complex_float_subdiagonal(*args, **kwargs):
return __block.gsl_matrix_complex_float_subdiagonal(*args, **kwargs)
gsl_matrix_complex_float_subdiagonal = __block.gsl_matrix_complex_float_subdiagonal
def gsl_matrix_complex_float_superdiagonal(*args, **kwargs):
return __block.gsl_matrix_complex_float_superdiagonal(*args, **kwargs)
gsl_matrix_complex_float_superdiagonal = __block.gsl_matrix_complex_float_superdiagonal
# This file is compatible with both classic and new-style classes.
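# Note: each wrapper def above is immediately shadowed by rebinding its name
# straight to the matching __block attribute, so calls appear to dispatch
# directly to the extension module and the preceding def bodies never run.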
| 41.161369
| 90
| 0.825542
| 20
| 0.000396
| 0
| 0
| 0
| 0
| 0
| 0
| 419
| 0.008296
|
b79b4b3441162b8ce9025428d639fcec36693cf3
| 42
|
py
|
Python
|
tests/__init__.py
|
chasefinch/amp-renderer
|
a226140d8a8a6f8c21c073e394b672cf75c8671e
|
[
"Apache-2.0"
] | 13
|
2020-08-19T18:37:01.000Z
|
2021-12-10T17:33:14.000Z
|
tests/__init__.py
|
chasefinch/amp-renderer
|
a226140d8a8a6f8c21c073e394b672cf75c8671e
|
[
"Apache-2.0"
] | 5
|
2020-08-24T18:31:12.000Z
|
2022-02-07T17:36:59.000Z
|
tests/__init__.py
|
chasefinch/amp-renderer
|
a226140d8a8a6f8c21c073e394b672cf75c8671e
|
[
"Apache-2.0"
] | null | null | null |
"""Tests for the AMP Renderer project."""
| 21
| 41
| 0.690476
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 41
| 0.97619
|
b79c02ce02e6d31db2455c7ba97e981d1dda98d5
| 3,564
|
py
|
Python
|
sudoku/sudoku.py
|
Ostap2003/backtracking-team-project
|
2a6fb2fc80dcdd286c76dbc3b546baf452098d98
|
[
"MIT"
] | null | null | null |
sudoku/sudoku.py
|
Ostap2003/backtracking-team-project
|
2a6fb2fc80dcdd286c76dbc3b546baf452098d98
|
[
"MIT"
] | null | null | null |
sudoku/sudoku.py
|
Ostap2003/backtracking-team-project
|
2a6fb2fc80dcdd286c76dbc3b546baf452098d98
|
[
"MIT"
] | null | null | null |
'''The module provides the Grid class for solving sudoku.
A grid may be given as a list of lists of integers or as a path to a file.
'''
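# A grid file is expected to hold nine lines of nine digits each, with 0
# marking an empty cell (e.g. '306508400' as a first row).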
class Grid:
'''Class for solving sudoku.
'''
    def __init__(self, path=None, grid=None) -> None:
        '''Initialize grid, columns, squares.
        '''
        self.grid = []
        if grid:
            assert isinstance(grid, list)
            self.grid = grid
        elif path:
            self.read_and_convert_to_grid(path)
        self.check_grid_accuracy()
self.columns = []
self.squares = []
self.expand_data()
def read_and_convert_to_grid(self, path):
        '''Read the puzzle at `path` into self.grid, one row per line.
        '''
with open(path, 'r') as file:
for line in file:
                line = line.rstrip('\n')
self.grid.append([int(item) for item in line])
def write_to_file(self):
'''Writes solved sudoku to file 'solved_sudoku.txt'.
'''
with open('solved_sudoku.txt', 'w') as file:
to_write = ''
for row in self.grid:
row = [str(i) for i in row]
to_write += ''.join(row) + '\n'
file.write(to_write[:-1])
def expand_data(self):
'''Fills lists self.columns and self.squares.
'''
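        # columns[i] is the i-th column of the grid (a transposed copy); squares
        # holds the nine 3x3 blocks in row-block-major order, block (i, j) -> i*3 + j.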
for i in range(9):
col = []
for j in range(9):
col.append(self.grid[j][i])
self.columns.append(col)
for i in range(3):
for j in range(3):
square = []
for k in range(3):
for x in range(3):
square.append(self.grid[i*3 + k][j*3 + x])
self.squares.append(square)
def check_grid_accuracy(self):
'''Checks if grid content is appropriate.
'''
for i in range(9):
for j in range(9):
assert 0 <= self.grid[i][j] <= 9
return True
def solve_sudoku(self, row, col):
        '''Recursive backtracking: fills empty cells (zeros) in row-major order.
        '''
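        # Digits already used in this cell's 3x3 square, row and column are
        # collected into `union`; the square index (row//3)*3 + col//3 is
        # written here as col//3 + (row//3)*3.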
union = set(self.squares[col//3 + (row//3)*3]).union(set(self.grid[row]))
union = union.union(self.columns[col])
if col == 8:
next_col = 0
next_row = row + 1
else:
next_col = col + 1
next_row = row
if self.grid[row][col] != 0:
if next_row == 9:
return True
return self.solve_sudoku(next_row, next_col)
for i in range(1, 10):
if i not in union:
self.grid[row][col] = i
self.columns[col][row] = i
self.squares[col//3 + (row//3)*3][(row % 3)*3 + (col % 3)] = i
if next_row == 9:
return True
if self.solve_sudoku(next_row, next_col):
return True
self.grid[row][col] = 0
self.columns[col][row] = 0
self.squares[col//3 + (row//3)*3][(row % 3)*3 + (col % 3)] = 0
return False
# grid = [ [3, 0, 6, 5, 0, 8, 4, 0, 0],
# [5, 2, 0, 0, 0, 0, 0, 0, 0],
# [0, 8, 7, 0, 0, 0, 0, 3, 1],
# [0, 0, 3, 0, 1, 0, 0, 8, 0],
# [9, 0, 0, 8, 6, 3, 0, 0, 5],
# [0, 5, 0, 0, 9, 0, 6, 0, 0],
# [1, 3, 0, 0, 0, 0, 2, 5, 0],
# [0, 0, 0, 0, 0, 0, 0, 7, 4],
# [0, 0, 5, 2, 0, 6, 3, 0, 0] ]
if __name__ == '__main__':
    sudoku = Grid('grid_2.txt')
    sudoku.solve_sudoku(0, 0)
    sudoku.write_to_file()
| 28.741935
| 81
| 0.45679
| 2,968
| 0.832772
| 0
| 0
| 0
| 0
| 0
| 0
| 890
| 0.249719
|
b79f4a64c362393fd37c99b135489a7797ae3252
| 1,421
|
py
|
Python
|
device_e2e/sync/test_sync_c2d.py
|
dt-boringtao/azure-iot-sdk-python
|
35a09679bdf4d7a727391b265a8f1fbb99a30c45
|
[
"MIT"
] | null | null | null |
device_e2e/sync/test_sync_c2d.py
|
dt-boringtao/azure-iot-sdk-python
|
35a09679bdf4d7a727391b265a8f1fbb99a30c45
|
[
"MIT"
] | null | null | null |
device_e2e/sync/test_sync_c2d.py
|
dt-boringtao/azure-iot-sdk-python
|
35a09679bdf4d7a727391b265a8f1fbb99a30c45
|
[
"MIT"
] | null | null | null |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
import pytest
import logging
import json
import threading
from utils import get_random_dict
logger = logging.getLogger(__name__)
logger.setLevel(level=logging.INFO)
# TODO: add tests for various application properties
# TODO: is there a way to call send_c2d so it arrives as an object rather than a JSON string?
@pytest.mark.describe("Client C2d")
class TestReceiveC2d(object):
@pytest.mark.it("Can receive C2D")
@pytest.mark.quicktest_suite
def test_sync_receive_c2d(self, client, service_helper, leak_tracker):
leak_tracker.set_initial_object_list()
message = json.dumps(get_random_dict())
received_message = None
received = threading.Event()
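        # The message callback fires on a client-internal receive thread; the
        # Event plus the nonlocal slot hand the message back to the test thread.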
def handle_on_message_received(message):
nonlocal received_message, received
logger.info("received {}".format(message))
received_message = message
received.set()
client.on_message_received = handle_on_message_received
service_helper.send_c2d(message, {})
received.wait(timeout=60)
assert received.is_set()
assert received_message.data.decode("utf-8") == message
received_message = None # so this isn't tagged as a leak
leak_tracker.check_for_leaks()
| 30.891304
| 93
| 0.713582
| 911
| 0.641098
| 0
| 0
| 947
| 0.666432
| 0
| 0
| 380
| 0.267417
|
b79f9124d587b0b999491249d4952350ec3b140e
| 6,588
|
py
|
Python
|
addons/mixer/blender_data/tests/test_bpy_blend_diff.py
|
trisadmeslek/V-Sekai-Blender-tools
|
0d8747387c58584b50c69c61ba50a881319114f8
|
[
"MIT"
] | null | null | null |
addons/mixer/blender_data/tests/test_bpy_blend_diff.py
|
trisadmeslek/V-Sekai-Blender-tools
|
0d8747387c58584b50c69c61ba50a881319114f8
|
[
"MIT"
] | null | null | null |
addons/mixer/blender_data/tests/test_bpy_blend_diff.py
|
trisadmeslek/V-Sekai-Blender-tools
|
0d8747387c58584b50c69c61ba50a881319114f8
|
[
"MIT"
] | null | null | null |
# GPLv3 License
#
# Copyright (C) 2020 Ubisoft
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import unittest
from bpy import data as D # noqa
from bpy import types as T # noqa
from mixer.blender_data.bpy_data_proxy import BpyDataProxy
from mixer.blender_data.diff import BpyBlendDiff
from mixer.blender_data.filter import test_properties
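# Key for sorting rename entries, which are (old_name_or_proxy, new_name)
# pairs: ordering on x[1] sorts them by the new name.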
def sort_renamed_item(x):
return x[1]
class TestDiff(unittest.TestCase):
def setUp(self):
for w in D.worlds:
D.worlds.remove(w)
self.proxy = BpyDataProxy()
def test_create(self):
# test_diff.TestDiff.test_create
self.proxy.load(test_properties)
new_worlds = ["W0", "W1"]
new_worlds.sort()
for w in new_worlds:
D.worlds.new(w)
diff = BpyBlendDiff()
diff.diff(self.proxy, test_properties)
for collection_name, delta in diff.collection_deltas:
self.assertEqual(0, len(delta.items_removed), f"removed count mismatch for {collection_name}")
self.assertEqual(0, len(delta.items_renamed), f"renamed count mismatch for {collection_name}")
if collection_name == "worlds":
self.assertEqual(len(new_worlds), len(delta.items_added), f"added count mismatch for {collection_name}")
found = [datablock.name for datablock, _ in delta.items_added]
found.sort()
                self.assertEqual(new_worlds, found, f"added items mismatch for {collection_name}")
else:
self.assertEqual(0, len(delta.items_added), f"added count mismatch for {collection_name}")
def test_remove(self):
        # test_diff.TestDiff.test_remove
new_worlds = ["W0", "W1", "W2"]
new_worlds.sort()
for w in new_worlds:
D.worlds.new(w)
self.proxy.load(test_properties)
removed = ["W0", "W1"]
removed.sort()
for w in removed:
D.worlds.remove(D.worlds[w])
diff = BpyBlendDiff()
diff.diff(self.proxy, test_properties)
for name, delta in diff.collection_deltas:
self.assertEqual(0, len(delta.items_added), f"added count mismatch for {name}")
self.assertEqual(0, len(delta.items_renamed), f"renamed count mismatch for {name}")
if name == "worlds":
self.assertEqual(len(removed), len(delta.items_removed), f"removed count mismatch for {name}")
items_removed = [proxy.data("name") for proxy in delta.items_removed]
items_removed.sort()
                self.assertEqual(removed, items_removed, f"removed items mismatch for {name}")
else:
self.assertEqual(0, len(delta.items_added), f"added count mismatch for {name}")
def test_rename(self):
        # test_diff.TestDiff.test_rename
new_worlds = ["W0", "W1", "W2"]
new_worlds.sort()
for w in new_worlds:
D.worlds.new(w)
self.proxy.load(test_properties)
renamed = [("W0", "W00"), ("W2", "W22")]
renamed.sort(key=sort_renamed_item)
for old_name, new_name in renamed:
D.worlds[old_name].name = new_name
diff = BpyBlendDiff()
diff.diff(self.proxy, test_properties)
for name, delta in diff.collection_deltas:
self.assertEqual(0, len(delta.items_added), f"added count mismatch for {name}")
self.assertEqual(0, len(delta.items_removed), f"removed count mismatch for {name}")
if name == "worlds":
self.assertEqual(len(renamed), len(delta.items_renamed), f"renamed count mismatch for {name}")
items_renamed = list(delta.items_renamed)
items_renamed.sort(key=sort_renamed_item)
items_renamed = [(proxy.data("name"), new_name) for proxy, new_name in items_renamed]
                self.assertEqual(renamed, items_renamed, f"renamed items mismatch for {name}")
else:
self.assertEqual(0, len(delta.items_added), f"added count mismatch for {name}")
def test_create_delete_rename(self):
        # test_diff.TestDiff.test_create_delete_rename
new_worlds = ["W0", "W1", "W2", "W4"]
new_worlds.sort()
for w in new_worlds:
D.worlds.new(w)
self.proxy.load(test_properties)
renamed = [("W0", "W00"), ("W2", "W22"), ("W4", "W44")]
renamed.sort(key=sort_renamed_item)
for old_name, new_name in renamed:
D.worlds[old_name].name = new_name
added = ["W0", "W5"]
added.sort()
for w in added:
D.worlds.new(w)
removed = ["W1", "W00"]
removed.sort()
for w in removed:
D.worlds.remove(D.worlds[w])
diff = BpyBlendDiff()
diff.diff(self.proxy, test_properties)
for name, delta in diff.collection_deltas:
if name == "worlds":
items_added = [datablock.name for datablock, _ in delta.items_added]
items_added.sort()
                self.assertEqual(items_added, ["W0", "W5"], f"added items mismatch for {name}")
items_renamed = delta.items_renamed
items_renamed.sort(key=sort_renamed_item)
items_renamed = [(proxy.data("name"), new_name) for proxy, new_name in items_renamed]
                self.assertEqual(items_renamed, [("W2", "W22"), ("W4", "W44")], f"renamed items mismatch for {name}")
items_removed = [proxy.data("name") for proxy in delta.items_removed]
items_removed.sort()
                self.assertEqual(items_removed, ["W0", "W1"], f"removed items mismatch for {name}")
else:
self.assertEqual(0, len(delta.items_renamed), f"renamed count mismatch for {name}")
self.assertEqual(0, len(delta.items_removed), f"removed count mismatch for {name}")
self.assertEqual(0, len(delta.items_added), f"added count mismatch for {name}")
| 41.961783
| 120
| 0.622495
| 5,607
| 0.851093
| 0
| 0
| 0
| 0
| 0
| 0
| 1,809
| 0.27459
|
b7a030be152f3c0e93089efb532b4fde2f3761dd
| 17,258
|
py
|
Python
|
dependencies/svgwrite/examples/ltattrie/tiling_part_5.py
|
charlesmchen/typefacet
|
8c6db26d0c599ece16f3704696811275120a4044
|
[
"Apache-2.0"
] | 21
|
2015-01-16T05:10:02.000Z
|
2021-06-11T20:48:15.000Z
|
dependencies/svgwrite/examples/ltattrie/tiling_part_5.py
|
charlesmchen/typefacet
|
8c6db26d0c599ece16f3704696811275120a4044
|
[
"Apache-2.0"
] | 1
|
2019-09-09T12:10:27.000Z
|
2020-05-22T10:12:14.000Z
|
dependencies/svgwrite/examples/ltattrie/tiling_part_5.py
|
charlesmchen/typefacet
|
8c6db26d0c599ece16f3704696811275120a4044
|
[
"Apache-2.0"
] | 2
|
2015-05-03T04:51:08.000Z
|
2018-08-24T08:28:53.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import math, sys
import svgwrite
#
# http://www.w3.org/TR/SVG11/struct.html#UseElement
#
# For more information on tessellation / tiling see http://en.wikipedia.org/wiki/Wallpaper_group
# The organization of these tilings is from the interesting book
# Designing Tessellations: The Secrets of Interlocking Patterns by Jinny Beyer.
PROGNAME = sys.argv[0][:-3] if sys.argv[0].endswith('.py') else sys.argv[0]
def create_svg(name):
svg_size_width = 900
svg_size_height = 1600
font_size = 20
square_size = 30
title1 = name + ': Part 5 tiling with multiple def, groups, use, translate and scale.'
sqrt3 = math.sqrt(3) # do this calc once instead of repeating the calc many times.
dwg = svgwrite.Drawing(name, (svg_size_width, svg_size_height), debug=True)
# ####################
# p3m1 - Mirror and Three rotations
# - Equilateral triangle mirrored, rotated
# All three sides are the same length, all three angles are 60 degrees.
# The height of the triangle h = sqrt(3)/2.0 * length of a side
# The centre of the triangle is sqrt(3)/6.0 * length of a side
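    # e.g. with a side of square_size = 30: height ~= 25.98, centroid offset ~= 8.66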
defs_g_trieq_size_x = square_size
defs_g_trieq_size_y = defs_g_trieq_size_x * sqrt3 / 2.0
defs_g_trieq_centre = sqrt3 / 6.0 * defs_g_trieq_size_x
# width of equilateral triangle at the centre
defs_g_trieq_centre_size_x = defs_g_trieq_size_x - defs_g_trieq_size_x * defs_g_trieq_centre / defs_g_trieq_size_y
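    # (similar triangles: the width at the centroid line is side * (1 - centre/height),
    # i.e. two thirds of the side, since centre/height = 1/3)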
# defs_g_trieq = dwg.defs.add(dwg.g(id='defs_g_trieq', clip_path='url(#cliptrieq)'))
defs_g_trieq = dwg.defs.add(dwg.g(id='defs_g_trieq'))
defs_g_trieq.add(dwg.polygon([(0, -defs_g_trieq_size_y + defs_g_trieq_centre), (defs_g_trieq_size_x / 2.0, defs_g_trieq_centre),
(-defs_g_trieq_size_x / 2.0, defs_g_trieq_centre)], stroke='none'))
defs_g_trieq.add(dwg.polygon([(-defs_g_trieq_size_x / 2.0, defs_g_trieq_centre), (-defs_g_trieq_centre_size_x / 2.0, 0),
(defs_g_trieq_centre_size_x / 2.0, 0), (0, defs_g_trieq_centre)], stroke='none', fill='yellow'))
# Create mirror of the equilateral triangle.
defs_g_trieq_m = dwg.defs.add(dwg.g(id='defs_g_trieq_m'))
defs_g_trieq_m.add(dwg.use(defs_g_trieq, insert=(0, 0)))
defs_g_trieq_m.scale(-1, -1)
# Create combined cell
defs_g_trieq_cc_size_x = 1.5 * defs_g_trieq_size_x
defs_g_trieq_cc_size_y = defs_g_trieq_size_y
defs_g_trieq_cc = dwg.defs.add(dwg.g(id='defs_g_trieq_cc'))
defs_g_trieq_cc.add(dwg.use(defs_g_trieq, insert=(-defs_g_trieq_size_x / 4.0, defs_g_trieq_size_y / 2.0 - defs_g_trieq_centre)))
defs_g_trieq_cc.add(dwg.use(defs_g_trieq_m, insert=(defs_g_trieq_size_x / 4.0, -(defs_g_trieq_size_y / 2.0 - defs_g_trieq_centre))))
# Create rotations of combined cell
defs_g_trieq_cc_120 = dwg.defs.add(dwg.g(id='defs_g_trieq_cc_120'))
defs_g_trieq_cc_120.add(dwg.use(defs_g_trieq_cc, insert=(0, 0), fill='mediumblue'))
defs_g_trieq_cc_120.rotate(120, center=(0, 0))
defs_g_trieq_cc_m120 = dwg.defs.add(dwg.g(id='defs_g_trieq_cc_m120'))
defs_g_trieq_cc_m120.add(dwg.use(defs_g_trieq_cc, insert=(0, 0), fill='navy'))
defs_g_trieq_cc_m120.rotate(-120, center=(0, 0))
# Create pattern from rotations of combined cell
defs_g_trieq_pattern_size_x = 2 * defs_g_trieq_size_x
defs_g_trieq_pattern_size_y = 2 * defs_g_trieq_size_y
defs_g_trieq_pattern = dwg.defs.add(dwg.g(id='defs_g_trieq_pattern'))
defs_g_trieq_pattern.add(dwg.use(defs_g_trieq_cc, insert=(-defs_g_trieq_size_x / 4.0, -defs_g_trieq_cc_size_y / 2.0)))
defs_g_trieq_pattern.add(dwg.use(defs_g_trieq_cc_120, insert=(defs_g_trieq_size_x / 2.0, 0)))
defs_g_trieq_pattern.add(dwg.use(defs_g_trieq_cc_m120, insert=(-defs_g_trieq_size_x / 4.0, defs_g_trieq_cc_size_y / 2.0)))
# ####################
# p31m - Three rotations and a mirror
# - A Kite shape, half hexagon, and half of a 60 degree diamond will all work for this
# symmetry. This one will use a kite.
# 30, 60, 90 angle triangle
# The length of the sides are 1:sqrt(3):2 2 is the hypotenuse
# invsqrt2 = 1/sqrt2
# invsqrt2_2 = invsqrt2 * invsqrt2 = 1/2 = .5 by definition
# sin and cos(45 degrees) is 1/sqrt2 = 0.707106781187
# cos(30 degrees) is sqrt3/2
# sin(30 degrees) is 1/2
# tan(30) = 1/sqrt(3)
# The height of equilateral triangle h = sqrt(3)/2.0 * length of a side
# The centre of equilateral triangle is sqrt(3)/6.0 * length of a side
defs_g_kite_size_x = square_size
defs_g_kite_size_y = defs_g_kite_size_x * sqrt3 / 2.0 + defs_g_kite_size_x * sqrt3 / 6.0
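    # kite height = triangle height + centroid offset = sqrt(3)/2*s + sqrt(3)/6*s
    # = 2*s/sqrt(3), roughly 34.64 for s = square_size = 30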
# Having a clip path seems to increase the visibility of the lines between the tiles.
# A clipping path may be necessary if the shapes go outside the triangle.
# defs_g_kite = dwg.defs.add(dwg.g(id='defs_g_kite', clip_path='url(#clipkite)'))
defs_g_kite = dwg.defs.add(dwg.g(id='defs_g_kite'))
defs_g_kite.add(dwg.polygon([(0, 0),
(defs_g_kite_size_x / 2.0, defs_g_kite_size_x / (sqrt3 * 2.0)),
(0, defs_g_kite_size_y),
(-defs_g_kite_size_x / 2.0, defs_g_kite_size_x / (sqrt3 * 2.0))], stroke='none'))
#defs_g_kite.add(dwg.polygon([(0, 0),
# (defs_g_kite_size_x / 4.0, (defs_g_kite_size_y + defs_g_kite_size_x / (sqrt3 * 2.0)) / 2.0),
# (-defs_g_kite_size_x / 2.0, defs_g_kite_size_x / (sqrt3 * 2.0))], stroke='none', fill='yellow'))
defs_g_kite.add(dwg.polygon([(0, 0),
(defs_g_kite_size_x / 2.0, defs_g_kite_size_x / (sqrt3 * 2.0)),
(0, defs_g_kite_size_y / 12.0),
(-defs_g_kite_size_x / 2.0, defs_g_kite_size_x / (sqrt3 * 2.0))], stroke='none',
fill='black'))
defs_g_kite.add(dwg.polygon([(0, defs_g_kite_size_y),
(defs_g_kite_size_x / 2.0, defs_g_kite_size_x / (sqrt3 * 2.0)),
(0, defs_g_kite_size_y * 8.0 / 12.0),
(-defs_g_kite_size_x / 2.0, defs_g_kite_size_x / (sqrt3 * 2.0))], stroke='none',
fill='green'))
# Create rotations of the kite.
defs_g_kite_120 = dwg.defs.add(dwg.g(id='defs_g_kite_120'))
defs_g_kite_120.add(dwg.use(defs_g_kite, insert=(0, 0)))
defs_g_kite_120.rotate(120, center=(0, 0))
defs_g_kite_m120 = dwg.defs.add(dwg.g(id='defs_g_kite_m120'))
defs_g_kite_m120.add(dwg.use(defs_g_kite, insert=(0, 0)))
defs_g_kite_m120.rotate(-120, center=(0, 0))
# Now use the cell, rotated cells to create the combined cell.
# The height of equilateral triangle h = sqrt(3) / 2.0 * length of a side
defs_g_kite_cc_size_x = 2 * defs_g_kite_size_x
defs_g_kite_cc_size_y = defs_g_kite_size_x * sqrt3 # 2*(sqrt(3)/2.0)
defs_g_kite_cc = dwg.defs.add(dwg.g(id='defs_g_kite_cc'))
defs_g_kite_cc.add(dwg.use(defs_g_kite, insert=(0, 0)))
defs_g_kite_cc.add(dwg.use(defs_g_kite_120, insert=(0, 0)))
defs_g_kite_cc.add(dwg.use(defs_g_kite_m120, insert=(0, 0)))
# Now use the combined cell to create a mirrored combined cell
defs_g_kite_mcc = dwg.defs.add(dwg.g(id='defs_g_kite_mcc'))
defs_g_kite_mcc.add(dwg.use(defs_g_kite_cc, insert=(0, 0)))
defs_g_kite_mcc.scale(-1, -1)
# Now use the combined cell, and mirrored combined cell to create a pattern
defs_g_kite_pattern_size_x = 1.5 * defs_g_kite_cc_size_x
defs_g_kite_pattern_size_y = defs_g_kite_cc_size_y
defs_g_kite_pattern = dwg.defs.add(dwg.g(id='defs_g_kite_pattern'))
defs_g_kite_pattern.add(dwg.use(defs_g_kite_cc, insert=(-defs_g_kite_cc_size_x / 4.0, -sqrt3 / 12.0 * defs_g_kite_cc_size_x)))
defs_g_kite_pattern.add(dwg.use(defs_g_kite_mcc, insert=(defs_g_kite_cc_size_x / 4.0, sqrt3 / 12.0 * defs_g_kite_cc_size_x)))
# ####################
    # p6m - Kaleidoscope: either of the two long sides of the primary triangle is mirrored. The
# resulting shape is rotated six times.
# 30, 60, 90 angle triangle
# The length of the sides are 1:sqrt(3):2 2 is the hypotenuse
# invsqrt2 = 1/sqrt2
# invsqrt2_2 = invsqrt2 * invsqrt2 = 1/2 = .5 by definition
# sin and cos(45 degrees) is 1/sqrt2 = 0.707106781187
# cos(30 degrees) is sqrt3/2
# sin(30 degrees) is 1/2
# tan(30) = 1/sqrt(3)
    # The height of equilateral triangle h = sqrt(3) / 2.0 * length of a side
    # The centre of equilateral triangle is sqrt(3) / 6.0 * length of a side
defs_g_kale_tri_size_x = square_size
defs_g_kale_tri_size_y = defs_g_kale_tri_size_x * 4.0 / sqrt3
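    # with defs_g_kale_tri_size_x = square_size = 30 this height is 4/sqrt(3) * 30 ~= 69.28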
# Having a clip path seems to increase the visibility of the lines between the tiles.
# A clipping path may be necessary if the shapes go outside the triangle.
# defs_g_kale_tri = dwg.defs.add(dwg.g(id='defs_g_kale_tri', clip_path='url(#clipkale)'))
defs_g_kale_tri = dwg.defs.add(dwg.g(id='defs_g_kale_tri'))
defs_g_kale_tri.add(dwg.polygon([(0, -defs_g_kale_tri_size_y), (0, 0), (-defs_g_kale_tri_size_x, defs_g_kale_tri_size_x / sqrt3
- defs_g_kale_tri_size_y)], stroke='none'))
defs_g_kale_tri.add(dwg.polygon([(-defs_g_kale_tri_size_x, defs_g_kale_tri_size_x / sqrt3 - defs_g_kale_tri_size_y), (0, 2.0
* defs_g_kale_tri_size_x / sqrt3 - defs_g_kale_tri_size_y), (0, 3.0 * defs_g_kale_tri_size_x / sqrt3
- defs_g_kale_tri_size_y)], stroke='none', fill='yellow'))
# Create mirror of the kale.
defs_g_kale_tri_m = dwg.defs.add(dwg.g(id='defs_g_kale_tri_m'))
defs_g_kale_tri_m.add(dwg.use(defs_g_kale_tri, insert=(0, 0)))
defs_g_kale_tri_m.scale(-1, 1)
# Now use the tri, rotated tri to create the combined cell.
defs_g_kale_cc_size_x = 2 * defs_g_kale_tri_size_x
defs_g_kale_cc_size_y = defs_g_kale_tri_size_y
defs_g_kale_cc = dwg.defs.add(dwg.g(id='defs_g_kale_cc'))
defs_g_kale_cc.add(dwg.use(defs_g_kale_tri, insert=(0, 0)))
defs_g_kale_cc.add(dwg.use(defs_g_kale_tri_m, insert=(0, 0)))
# Now rotate the combined cell.
defs_g_kale_cc_60 = dwg.defs.add(dwg.g(id='defs_g_kale_cc_60'))
defs_g_kale_cc_60.add(dwg.use(defs_g_kale_cc, insert=(0, 0)))
defs_g_kale_cc_60.rotate(60, center=(0, 0))
defs_g_kale_cc_120 = dwg.defs.add(dwg.g(id='defs_g_kale_cc_120'))
defs_g_kale_cc_120.add(dwg.use(defs_g_kale_cc, insert=(0, 0)))
defs_g_kale_cc_120.rotate(120, center=(0, 0))
defs_g_kale_cc_180 = dwg.defs.add(dwg.g(id='defs_g_kale_cc_180'))
defs_g_kale_cc_180.add(dwg.use(defs_g_kale_cc, insert=(0, 0)))
defs_g_kale_cc_180.rotate(180, center=(0, 0))
defs_g_kale_cc_m60 = dwg.defs.add(dwg.g(id='defs_g_kale_cc_m60'))
defs_g_kale_cc_m60.add(dwg.use(defs_g_kale_cc, insert=(0, 0)))
defs_g_kale_cc_m60.rotate(-60, center=(0, 0))
defs_g_kale_cc_m120 = dwg.defs.add(dwg.g(id='defs_g_kale_cc_m120'))
defs_g_kale_cc_m120.add(dwg.use(defs_g_kale_cc, insert=(0, 0)))
defs_g_kale_cc_m120.rotate(-120, center=(0, 0))
# Now use the cell and five rotated cells to create the pattern.
defs_g_kale_pattern_size_x = 2 * defs_g_kale_cc_size_x
defs_g_kale_pattern_size_y = 2 * defs_g_kale_cc_size_y
defs_g_kale_pattern = dwg.defs.add(dwg.g(id='defs_g_kale_pattern'))
defs_g_kale_pattern.add(dwg.use(defs_g_kale_cc, insert=(0, 0)))
defs_g_kale_pattern.add(dwg.use(defs_g_kale_cc_60, insert=(0, 0)))
defs_g_kale_pattern.add(dwg.use(defs_g_kale_cc_120, insert=(0, 0)))
defs_g_kale_pattern.add(dwg.use(defs_g_kale_cc_180, insert=(0, 0)))
defs_g_kale_pattern.add(dwg.use(defs_g_kale_cc_m60, insert=(0, 0)))
defs_g_kale_pattern.add(dwg.use(defs_g_kale_cc_m120, insert=(0, 0)))
# ########################
# Background will be dark but not black so the background does not overwhelm the colors.
dwg.add(dwg.rect(insert=(0, 0), size=('100%', '100%'), rx=None, ry=None, fill='grey'))
# Give the name of the example and a title.
y = font_size + 5
dwg.add(dwg.text(title1, insert=(0, y), font_family='serif', font_size=font_size, fill='white'))
y = y + font_size
# p3m1 - Mirror and three rotations
title2 = 'Mirror and three rotations, math name: p3m1'
dwg.add(dwg.text(title2, insert=(50, y), font_family='serif', font_size=font_size, fill='white'))
y = y + font_size + defs_g_trieq_size_x
cell_created = dwg.use(defs_g_trieq, insert=(50 + defs_g_trieq_size_x, y), fill='lightblue')
dwg.add(cell_created)
dwg.add(dwg.circle(center=(50 + defs_g_trieq_size_x, y), r=3, stroke='none', fill='purple', opacity='0.5'))
cc_created = dwg.use(defs_g_trieq_cc, insert=(150 + defs_g_trieq_cc_size_x, y), fill='lightblue')
dwg.add(cc_created)
dwg.add(dwg.circle(center=(150 + defs_g_trieq_cc_size_x, y), r=3, stroke='none', fill='purple', opacity='0.5'))
pattern_created = dwg.use(defs_g_trieq_pattern, insert=(250 + defs_g_trieq_cc_size_x, y), fill='lightblue')
dwg.add(pattern_created)
dwg.add(dwg.circle(center=(250 + defs_g_trieq_cc_size_x, y), r=3, stroke='none', fill='purple', opacity='0.5'))
y += defs_g_trieq_pattern_size_y
for i in range(8):
y += defs_g_trieq_pattern_size_y / 2.0
for j in range(6):
if i % 2:
x = 50 + j * 1.5 * defs_g_trieq_pattern_size_x
else:
x = 50 + 1.5 * defs_g_trieq_size_x + j * 1.5 * defs_g_trieq_pattern_size_x
pattern_created = dwg.use(defs_g_trieq_pattern, fill='lightblue')
pattern_created.translate(x, y)
dwg.add(pattern_created)
y += defs_g_trieq_pattern_size_y
#
# p31m sample cell, combined cell and tile
title2 = 'Kite rotated and mirrored, math name: p31m'
dwg.add(dwg.text(title2, insert=(50, y), font_family='serif', font_size=font_size, fill='white'))
y = y + font_size + defs_g_kite_size_y
cell_created = dwg.use(defs_g_kite, insert=(50 + defs_g_kite_size_x / 2.0, y), fill='navy')
dwg.add(cell_created)
dwg.add(dwg.circle(center=(50 + defs_g_kite_size_x / 2.0, y), r=3, stroke='none', fill='purple', opacity='0.5'))
cc_created = dwg.use(defs_g_kite_cc, insert=(150 + defs_g_kite_size_x / 2.0, y), fill='navy')
dwg.add(cc_created)
dwg.add(dwg.circle(center=(150 + defs_g_kite_size_x / 2.0, y), r=3, stroke='none', fill='purple', opacity='0.5'))
mcc_created = dwg.use(defs_g_kite_mcc, insert=(250 + defs_g_kite_cc_size_x / 2, y), fill='navy')
dwg.add(mcc_created)
dwg.add(dwg.circle(center=(250 + defs_g_kite_cc_size_x / 2, y), r=3, stroke='none', fill='purple', opacity='0.5'))
pattern_created = dwg.use(defs_g_kite_pattern, insert=(350 + defs_g_kite_cc_size_x, y), fill='navy')
dwg.add(pattern_created)
dwg.add(dwg.circle(center=(350 + defs_g_kite_cc_size_x, y), r=3, stroke='none', fill='purple', opacity='0.5'))
y += defs_g_kite_pattern_size_y
for i in range(6):
y += defs_g_kite_pattern_size_y
for j in range(8):
if i % 2:
x = 100 + (j + 0.5) * defs_g_kite_cc_size_x
else:
x = 100 + j * defs_g_kite_cc_size_x
pattern_created = dwg.use(defs_g_kite_pattern, fill='navy')
pattern_created.translate(x, y)
dwg.add(pattern_created)
y += defs_g_kite_pattern_size_y
# ##
# p6m kaleidoscope
title2 = 'Kaleidoscope 30, 60, 90 triangle mirrored and rotated, math name: p6m'
dwg.add(dwg.text(title2, insert=(50, y), font_family='serif', font_size=font_size, fill='white'))
y = y + font_size
y += defs_g_kale_tri_size_y
cell_created = dwg.use(defs_g_kale_tri, insert=(50 + defs_g_kale_tri_size_x, y), fill='navy')
dwg.add(cell_created)
dwg.add(dwg.circle(center=(50 + defs_g_kale_tri_size_x, y), r=3, stroke='none', fill='purple', opacity='0.5'))
cc_created = dwg.use(defs_g_kale_cc, insert=(150 + defs_g_kale_cc_size_x / 2.0, y), fill='navy')
dwg.add(cc_created)
dwg.add(dwg.circle(center=(150 + defs_g_kale_cc_size_x / 2.0, y), r=3, stroke='none', fill='purple', opacity='0.5'))
pattern_created = dwg.use(defs_g_kale_pattern, insert=(250 + defs_g_kale_pattern_size_x / 2.0, y), fill='navy')
dwg.add(pattern_created)
dwg.add(dwg.circle(center=(250 + defs_g_kale_pattern_size_x / 2.0, y), r=3, stroke='none', fill='purple', opacity='0.5'))
y += defs_g_kale_pattern_size_y / 2.0
for i in range(4):
y += defs_g_kale_pattern_size_y - defs_g_kale_pattern_size_x / (sqrt3 * 2)
for j in range(6):
if i % 2:
x = 100 + j * defs_g_kale_pattern_size_x
else:
x = 100 + defs_g_kale_cc_size_x + j * defs_g_kale_pattern_size_x
pattern_created = dwg.use(defs_g_kale_pattern, fill='navy')
pattern_created.translate(x, y)
dwg.add(pattern_created)
y += defs_g_kale_pattern_size_y
    # All items have been added, so save the svg to the file.
dwg.save()
if __name__ == '__main__':
create_svg(PROGNAME + '.svg')
# vim: expandtab shiftwidth=4 tabstop=8 softtabstop=4 textwidth=99
| 51.825826
| 137
| 0.670877
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 4,977
| 0.288388
|