| hexsha (stringlengths 40) | size (int64, 3 to 1.03M) | ext (stringclasses, 10 values) | lang (stringclasses, 1 value) | max_stars_repo_path (stringlengths 3 to 972) | max_stars_repo_name (stringlengths 6 to 130) | max_stars_repo_head_hexsha (stringlengths 40 to 78) | max_stars_repo_licenses (listlengths 1 to 10) | max_stars_count (int64, 1 to 191k, ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24, ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24, ⌀) | max_issues_repo_path (stringlengths 3 to 972) | max_issues_repo_name (stringlengths 6 to 130) | max_issues_repo_head_hexsha (stringlengths 40 to 78) | max_issues_repo_licenses (listlengths 1 to 10) | max_issues_count (int64, 1 to 116k, ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24, ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24, ⌀) | max_forks_repo_path (stringlengths 3 to 972) | max_forks_repo_name (stringlengths 6 to 130) | max_forks_repo_head_hexsha (stringlengths 40 to 78) | max_forks_repo_licenses (listlengths 1 to 10) | max_forks_count (int64, 1 to 105k, ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24, ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24, ⌀) | content (stringlengths 3 to 1.03M) | avg_line_length (float64, 1.13 to 941k) | max_line_length (int64, 2 to 941k) | alphanum_fraction (float64, 0 to 1) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
daf5488d1b5d701e455b28156dfe56ebf5465e9a | 19,391 | py | Python | electra/pretrain/pretrain_utils.py | entelecheia/electra-tf2 | 08845a1c4bff78bc7d50b8331170b9c925c0d919 | ["Apache-2.0"] | null | null | null | electra/pretrain/pretrain_utils.py | entelecheia/electra-tf2 | 08845a1c4bff78bc7d50b8331170b9c925c0d919 | ["Apache-2.0"] | null | null | null | electra/pretrain/pretrain_utils.py | entelecheia/electra-tf2 | 08845a1c4bff78bc7d50b8331170b9c925c0d919 | ["Apache-2.0"] | null | null | null |
# coding=utf-8
# Copyright 2020 The Google Research Authors.
# Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helpers for preparing pre-training data and supplying them to the model."""
import collections
import os
import numpy as np
import tensorflow as tf
from ..util import utils
from ..model import tokenization
def get_dataset(config, batch_size, num_cpu_threads=4, world_size=1, rank=0):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
name_to_features = {
"input_ids": tf.io.FixedLenFeature([config.max_seq_length], tf.int64),
"input_mask": tf.io.FixedLenFeature([config.max_seq_length], tf.int64),
"segment_ids": tf.io.FixedLenFeature([config.max_seq_length], tf.int64),
}
input_files = []
for input_pattern in config.pretrain_tfrecords.split(","):
input_files.extend(tf.io.gfile.glob(input_pattern))
d = tf.data.Dataset.from_tensor_slices(tf.constant(input_files))
d = d.shard(num_shards=world_size, index=rank)
d = d.repeat()
d = d.shuffle(buffer_size=len(input_files), seed=config.seed, reshuffle_each_iteration=False)
cycle_length = min(num_cpu_threads, len(input_files))
d = d.interleave(
tf.data.TFRecordDataset,
cycle_length=cycle_length,
deterministic=True)
d = d.shuffle(buffer_size=100, seed=config.seed, reshuffle_each_iteration=False)
d = d.map(lambda record: _decode_record(record, name_to_features))
d = d.batch(batch_size)
return d
def _decode_record(record, name_to_features):
"""Decodes a record to a TensorFlow example."""
example = tf.io.parse_single_example(record, name_to_features)
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.cast(t, tf.int32)
example[name] = t
return example
# model inputs - it's a bit nicer to use a namedtuple rather than keep the
# features as a dict
Inputs = collections.namedtuple(
"Inputs", ["input_ids", "input_mask", "segment_ids", "masked_lm_positions",
"masked_lm_ids", "masked_lm_weights"])
def features_to_inputs(features):
return Inputs(
input_ids=features["input_ids"],
input_mask=features["input_mask"],
segment_ids=features["segment_ids"],
masked_lm_positions=(features["masked_lm_positions"]
if "masked_lm_positions" in features else None),
masked_lm_ids=(features["masked_lm_ids"]
if "masked_lm_ids" in features else None),
masked_lm_weights=(features["masked_lm_weights"]
if "masked_lm_weights" in features else None),
)
def get_updated_inputs(inputs, **kwargs):
features = inputs._asdict()
for k, v in kwargs.items():
features[k] = v
return features_to_inputs(features)
def get_shape_list(tensor, expected_rank=None, name=None):
"""Returns a list of the shape of tensor, preferring static dimensions.
Args:
tensor: A tf.Tensor object to find the shape of.
expected_rank: (optional) int. The expected rank of `tensor`. If this is
specified and the `tensor` has a different rank, an exception will be
thrown.
name: Optional name of the tensor for the error message.
Returns:
A list of dimensions of the shape of tensor. All static dimensions will
be returned as python integers, and dynamic dimensions will be returned
as tf.Tensor scalars.
"""
if isinstance(tensor, np.ndarray) or isinstance(tensor, list):
shape = np.array(tensor).shape
if isinstance(expected_rank, int):
assert len(shape) == expected_rank
elif expected_rank is not None:
assert len(shape) in expected_rank
return shape
#
# if name is None:
# name = tensor.name
#
# if expected_rank is not None:
# assert_rank(tensor, expected_rank, name)
shape = tensor.shape.as_list()
non_static_indexes = []
for (index, dim) in enumerate(shape):
if dim is None:
non_static_indexes.append(index)
if not non_static_indexes:
return shape
dyn_shape = tf.shape(tensor)
for index in non_static_indexes:
shape[index] = dyn_shape[index]
return shape
def gather_positions(sequence, positions):
"""Gathers the vectors at the specific positions over a minibatch.
Args:
sequence: A [batch_size, seq_length] or
[batch_size, seq_length, depth] tensor of values
positions: A [batch_size, n_positions] tensor of indices
Returns: A [batch_size, n_positions] or
[batch_size, n_positions, depth] tensor of the values at the indices
"""
shape = get_shape_list(sequence, expected_rank=[2, 3])
depth_dimension = (len(shape) == 3)
if depth_dimension:
B, L, D = shape
else:
B, L = shape
D = 1
sequence = tf.expand_dims(sequence, -1)
position_shift = tf.expand_dims(L * tf.range(B), -1)
flat_positions = tf.reshape(positions + position_shift, [-1])
flat_sequence = tf.reshape(sequence, [B * L, D])
gathered = tf.gather(flat_sequence, flat_positions)
if depth_dimension:
return tf.reshape(gathered, [B, -1, D])
else:
return tf.reshape(gathered, [B, -1])
def scatter_update(sequence, updates, positions):
"""Scatter-update a sequence.
Args:
sequence: A [batch_size, seq_len] or [batch_size, seq_len, depth] tensor
updates: A tensor of size batch_size*seq_len(*depth)
positions: A [batch_size, n_positions] tensor
Returns: A tuple of two tensors. First is a [batch_size, seq_len] or
[batch_size, seq_len, depth] tensor of "sequence" with elements at
"positions" replaced by the values at "updates." Updates to index 0 are
ignored. If there are duplicated positions the update is only applied once.
Second is a [batch_size, seq_len] mask tensor of which inputs were updated.
"""
shape = get_shape_list(sequence, expected_rank=[2, 3])
depth_dimension = (len(shape) == 3)
if depth_dimension:
B, L, D = shape
else:
B, L = shape
D = 1
sequence = tf.expand_dims(sequence, -1)
N = get_shape_list(positions)[1]
shift = tf.expand_dims(L * tf.range(B), -1)
flat_positions = tf.reshape(positions + shift, [-1, 1])
flat_updates = tf.reshape(updates, [-1, D])
updates = tf.scatter_nd(flat_positions, flat_updates, [B * L, D])
updates = tf.reshape(updates, [B, L, D])
flat_updates_mask = tf.ones([B * N], tf.int32)
updates_mask = tf.scatter_nd(flat_positions, flat_updates_mask, [B * L])
updates_mask = tf.reshape(updates_mask, [B, L])
not_first_token = tf.concat([tf.zeros((B, 1), tf.int32),
tf.ones((B, L - 1), tf.int32)], -1)
updates_mask *= not_first_token
updates_mask_3d = tf.expand_dims(updates_mask, -1)
# account for duplicate positions
if sequence.dtype == tf.float32:
updates_mask_3d = tf.cast(updates_mask_3d, tf.float32)
updates /= tf.maximum(1.0, updates_mask_3d)
else:
assert sequence.dtype == tf.int32
updates = tf.math.floordiv(updates, tf.maximum(1, updates_mask_3d))
updates_mask = tf.minimum(updates_mask, 1)
updates_mask_3d = tf.minimum(updates_mask_3d, 1)
updated_sequence = (((1 - updates_mask_3d) * sequence) +
(updates_mask_3d * updates))
if not depth_dimension:
updated_sequence = tf.squeeze(updated_sequence, -1)
return updated_sequence, updates_mask
def _get_candidates_mask(inputs: Inputs, vocab,
disallow_from_mask=None):
"""Returns a mask tensor of positions in the input that can be masked out."""
ignore_ids = [vocab["[SEP]"], vocab["[CLS]"], vocab["[MASK]"]]
candidates_mask = tf.ones_like(inputs.input_ids, tf.bool)
for ignore_id in ignore_ids:
candidates_mask &= tf.not_equal(inputs.input_ids, ignore_id)
candidates_mask &= tf.cast(inputs.input_mask, tf.bool)
if disallow_from_mask is not None:
candidates_mask &= ~disallow_from_mask
return candidates_mask
def mask(config, inputs, mask_prob, proposal_distribution=1.0,
disallow_from_mask=None, already_masked=None):
"""Implementation of dynamic masking. The optional arguments aren't needed for
BERT/ELECTRA and are from early experiments in "strategically" masking out
tokens instead of uniformly at random.
Args:
config: configure_pretraining.PretrainingConfig
inputs: pretrain_data.Inputs containing input input_ids/input_mask
mask_prob: percent of tokens to mask
proposal_distribution: for non-uniform masking can be a [B, L] tensor
of scores for masking each position.
disallow_from_mask: a boolean tensor of [B, L] of positions that should
not be masked out
already_masked: a boolean tensor of [B, N] of already masked-out tokens
for multiple rounds of masking
Returns: a pretrain_data.Inputs with masking added
"""
# Get the batch size, sequence length, and max masked-out tokens
N = config.max_predictions_per_seq
B, L = get_shape_list(inputs.input_ids)
# Find indices where masking out a token is allowed
vocab = tokenization.ElectraTokenizer(
config.vocab_file, do_lower_case=config.do_lower_case).get_vocab()
candidates_mask = _get_candidates_mask(inputs, vocab, disallow_from_mask)
# Set the number of tokens to mask out per example
num_tokens = tf.cast(tf.reduce_sum(inputs.input_mask, -1), tf.float32)
num_to_predict = tf.maximum(1, tf.minimum(
N, tf.cast(tf.round(num_tokens * mask_prob), tf.int32)))
masked_lm_weights = tf.cast(tf.sequence_mask(num_to_predict, N), tf.float32)
if already_masked is not None:
masked_lm_weights *= (1 - already_masked)
# Get a probability of masking each position in the sequence
candidate_mask_float = tf.cast(candidates_mask, tf.float32)
sample_prob = (proposal_distribution * candidate_mask_float)
sample_prob /= tf.reduce_sum(sample_prob, axis=-1, keepdims=True)
# Sample the positions to mask out
sample_prob = tf.stop_gradient(sample_prob)
sample_logits = tf.math.log(sample_prob)
masked_lm_positions = tf.random.categorical(
sample_logits, N, dtype=tf.int32)
masked_lm_positions *= tf.cast(masked_lm_weights, tf.int32)
# Get the ids of the masked-out tokens
shift = tf.expand_dims(L * tf.range(B), -1)
flat_positions = tf.reshape(masked_lm_positions + shift, [-1, 1])
masked_lm_ids = tf.gather_nd(tf.reshape(inputs.input_ids, [-1]),
flat_positions)
masked_lm_ids = tf.reshape(masked_lm_ids, [B, -1])
masked_lm_ids *= tf.cast(masked_lm_weights, tf.int32)
# Update the input ids
replace_with_mask_positions = masked_lm_positions * tf.cast(
tf.less(tf.random.uniform([B, N]), 0.85), tf.int32)
inputs_ids, _ = scatter_update(
inputs.input_ids, tf.fill([B, N], vocab["[MASK]"]),
replace_with_mask_positions)
return get_updated_inputs(
inputs,
input_ids=tf.stop_gradient(inputs_ids),
masked_lm_positions=masked_lm_positions,
masked_lm_ids=masked_lm_ids,
masked_lm_weights=masked_lm_weights
)
def unmask(inputs: Inputs):
unmasked_input_ids, _ = scatter_update(
inputs.input_ids, inputs.masked_lm_ids, inputs.masked_lm_positions)
return get_updated_inputs(inputs, input_ids=unmasked_input_ids)
def sample_from_softmax(logits, disallow=None):
if disallow is not None:
logits -= 1000.0 * disallow
uniform_noise = tf.random.uniform(
get_shape_list(logits), minval=0, maxval=1)
gumbel_noise = tf.cast(-tf.math.log(-tf.math.log(uniform_noise + 1e-9) + 1e-9), logits.dtype)
return tf.one_hot(tf.argmax(tf.nn.softmax(logits + gumbel_noise), -1,
output_type=tf.int32), logits.shape[-1])
ENDC = "\033[0m"
COLORS = ["\033[" + str(n) + "m" for n in list(range(91, 97)) + [90]]
RED = COLORS[0]
BLUE = COLORS[3]
CYAN = COLORS[5]
GREEN = COLORS[1]
def print_tokens(inputs: Inputs, inv_vocab, updates_mask=None):
"""Pretty-print model inputs."""
pos_to_tokid = {}
for tokid, pos, weight in zip(
inputs.masked_lm_ids[0], inputs.masked_lm_positions[0],
inputs.masked_lm_weights[0]):
if weight == 0:
pass
else:
pos_to_tokid[pos] = tokid
text = ""
provided_update_mask = (updates_mask is not None)
if not provided_update_mask:
updates_mask = np.zeros_like(inputs.input_ids)
for pos, (tokid, um) in enumerate(
zip(inputs.input_ids[0], updates_mask[0])):
token = inv_vocab[tokid]
if token == "[PAD]":
break
if pos in pos_to_tokid:
token = RED + token + " (" + inv_vocab[pos_to_tokid[pos]] + ")" + ENDC
if provided_update_mask:
assert um == 1
else:
if provided_update_mask:
assert um == 0
text += token + " "
utils.log(utils.printable_text(text))
class PretrainingConfig(object):
"""Defines pre-training hyperparameters."""
def __init__(self, model_name, **kwargs):
self.model_name = model_name
self.seed = 42
self.debug = False # debug mode for quickly running things
self.do_train = True # pre-train ELECTRA
self.do_eval = False # evaluate generator/discriminator on unlabeled data
# self.phase2 = False
self.phase = 1
# amp
self.amp = True
self.xla = True
self.fp16_compression = False
# optimizer type
self.optimizer = 'adam'
self.gradient_accumulation_steps = 1
# lamb whitelisting for LN and biases
self.skip_adaptive = False
# loss functions
self.electra_objective = True # if False, use the BERT objective instead
self.gen_weight = 1.0 # masked language modeling / generator loss
self.disc_weight = 50.0 # discriminator loss
self.mask_prob = 0.15 # percent of input tokens to mask out / replace
# optimization
self.learning_rate = 5e-4
self.lr_decay_power = 0.5
self.weight_decay_rate = 0.01
self.num_warmup_steps = 10000
self.opt_beta_1 = 0.878
self.opt_beta_2 = 0.974
self.end_lr = 0.0
# training settings
self.log_freq = 10
self.skip_checkpoint = False
self.save_checkpoints_steps = 1000
self.num_train_steps = 1000000
self.num_eval_steps = 100
self.keep_checkpoint_max = 5 # maximum number of recent checkpoint files to keep; change to 0 or None to keep all checkpoints
self.restore_checkpoint = None
self.load_weights = False
# model settings
self.model_size = "base" # one of "small", "base", or "large"
# override the default transformer hparams for the provided model size; see
# modeling.BertConfig for the possible hparams and util.training_utils for
# the defaults
self.model_hparam_overrides = (
kwargs["model_hparam_overrides"]
if "model_hparam_overrides" in kwargs else {})
self.embedding_size = None # bert hidden size by default
self.vocab_size = 30522 # number of tokens in the vocabulary
self.do_lower_case = True # lowercase the input?
# generator settings
self.uniform_generator = False # generator is uniform at random
self.shared_embeddings = True # share generator/discriminator token embeddings?
# self.untied_generator = True # tie all generator/discriminator weights?
self.generator_layers = 1.0 # frac of discriminator layers for generator
self.generator_hidden_size = 0.25 # frac of discrim hidden size for gen
self.disallow_correct = False # force the generator to sample incorrect
# tokens (so 15% of tokens are always
# fake)
self.temperature = 1.0 # temperature for sampling from generator
# batch sizes
self.max_seq_length = 128
self.train_batch_size = 128
self.eval_batch_size = 128
self.results_dir = "results"
self.json_summary = None
self.wandb_group = f'electra-pretraining-p{self.phase}'
self.wandb_project = 'electra-pretraining'
self.update(kwargs)
# default locations of data files
self.pretrain_tfrecords = os.path.join(
"data", "pretrain_tfrecords/pretrain_data.tfrecord*")
self.vocab_file = os.path.join("vocab", "vocab.txt")
self.model_dir = os.path.join(self.results_dir, "models", model_name)
self.checkpoints_dir = os.path.join(self.model_dir, "checkpoints")
self.weights_dir = os.path.join(self.model_dir, "weights")
self.results_txt = os.path.join(self.results_dir, "unsup_results.txt")
self.results_pkl = os.path.join(self.results_dir, "unsup_results.pkl")
self.log_dir = os.path.join(self.model_dir, "logs")
self.max_predictions_per_seq = int((self.mask_prob + 0.005) *
self.max_seq_length)
# defaults for different-sized model
if self.model_size == "base":
self.embedding_size = 768
self.hidden_size = 768
self.num_hidden_layers = 12
if self.hidden_size % 64 != 0:
raise ValueError("Hidden size {} should be divisible by 64. Number of attention heads is hidden size {} / 64 ".format(self.hidden_size, self.hidden_size))
self.num_attention_heads = int(self.hidden_size / 64.)
elif self.model_size == "large":
self.embedding_size = 1024
self.hidden_size = 1024
self.num_hidden_layers = 24
if self.hidden_size % 64 != 0:
raise ValueError("Hidden size {} should be divisible by 64. Number of attention heads is hidden size {} / 64 ".format(self.hidden_size, self.hidden_size))
self.num_attention_heads = int(self.hidden_size / 64.)
else:
raise ValueError("--model_size : 'base' and 'large supported only.")
self.act_func = "gelu"
self.hidden_dropout_prob = 0.1
self.attention_probs_dropout_prob = 0.1
self.update(kwargs)
def update(self, kwargs):
for k, v in kwargs.items():
if v is not None:
self.__dict__[k] = v
| 39.094758 | 171 | 0.661235 |
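The `gather_positions` and `scatter_update` helpers above work by flattening the whole minibatch and shifting every row's indices by `row * seq_length`. The toy run below is a minimal sketch of that behaviour, assuming TensorFlow 2 is available and that the two functions defined in the file above are already in scope (for example after importing the module from the repository listed in this row).

import tensorflow as tf

# Toy batch: 2 sequences of length 5, 2 positions per sequence.
sequence = tf.constant([[10, 11, 12, 13, 14],
                        [20, 21, 22, 23, 24]], dtype=tf.int32)
positions = tf.constant([[1, 3],
                         [0, 4]], dtype=tf.int32)

# gather_positions returns the tokens at the requested indices:
# [[11, 13], [20, 24]]
print(gather_positions(sequence, positions).numpy())

# scatter_update writes new values at those indices and also returns a 0/1 mask
# of the touched slots; updates aimed at index 0 are ignored by design, so row 1
# keeps its first token: [[10, 99, 12, 99, 14], [20, 21, 22, 23, 99]]
updated, touched = scatter_update(sequence, tf.fill([2, 2], 99), positions)
print(updated.numpy())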
6045ee908213bf426287fa6349722c32ca6903e6 | 2,103 | py | Python | RecoEgamma/EgammaIsolationAlgos/python/egmElectronIsolationPUPPI_cff.py | Purva-Chaudhari/cmssw | 32e5cbfe54c4d809d60022586cf200b7c3020bcf | ["Apache-2.0"] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | RecoEgamma/EgammaIsolationAlgos/python/egmElectronIsolationPUPPI_cff.py | Purva-Chaudhari/cmssw | 32e5cbfe54c4d809d60022586cf200b7c3020bcf | ["Apache-2.0"] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | RecoEgamma/EgammaIsolationAlgos/python/egmElectronIsolationPUPPI_cff.py | Purva-Chaudhari/cmssw | 32e5cbfe54c4d809d60022586cf200b7c3020bcf | ["Apache-2.0"] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z |
import FWCore.ParameterSet.Config as cms
import PhysicsTools.IsolationAlgos.CITKPFIsolationSumProducerForPUPPI_cfi as _mod
IsoConeDefinitions = cms.VPSet(
cms.PSet( isolationAlgo = cms.string('ElectronPFIsolationWithConeVeto'),
coneSize = cms.double(0.3),
VetoConeSizeBarrel = cms.double(0.0),
VetoConeSizeEndcaps = cms.double(0.015),
isolateAgainst = cms.string('h+'),
miniAODVertexCodes = cms.vuint32(2,3) ),
cms.PSet( isolationAlgo = cms.string('ElectronPFIsolationWithConeVeto'),
coneSize = cms.double(0.3),
VetoConeSizeBarrel = cms.double(0.0),
VetoConeSizeEndcaps = cms.double(0.0),
isolateAgainst = cms.string('h0'),
miniAODVertexCodes = cms.vuint32(2,3) ),
cms.PSet( isolationAlgo = cms.string('ElectronPFIsolationWithConeVeto'),
coneSize = cms.double(0.3),
VetoConeSizeBarrel = cms.double(0.0),
VetoConeSizeEndcaps = cms.double(0.08),
isolateAgainst = cms.string('gamma'),
miniAODVertexCodes = cms.vuint32(2,3) )
)
egmElectronIsolationAODPUPPI = _mod.CITKPFIsolationSumProducerForPUPPI.clone(
srcToIsolate = "gedGsfElectrons",
srcForIsolationCone = '',
isolationConeDefinitions = IsoConeDefinitions
)
egmElectronIsolationMiniAODPUPPI = _mod.CITKPFIsolationSumProducerForPUPPI.clone(
srcToIsolate = "slimmedElectrons",
srcForIsolationCone = 'packedPFCandidates',
puppiValueMap = '',
isolationConeDefinitions = IsoConeDefinitions
)
egmElectronIsolationMiniAODPUPPINoLeptons = _mod.CITKPFIsolationSumProducerForPUPPI.clone(
srcToIsolate = "slimmedElectrons",
srcForIsolationCone = 'packedPFCandidates',
puppiValueMap = '',
usePUPPINoLepton = True,
isolationConeDefinitions = IsoConeDefinitions
)
| 46.733333 | 90 | 0.617689 |
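The producers above are all built with `clone()`, which copies a module while overriding selected parameters, and the same call can be reused downstream. The fragment below is a hypothetical sketch, assuming a CMSSW working area in which this file is importable under its standard package path; the electron collection name is a made-up example value.

from RecoEgamma.EgammaIsolationAlgos.egmElectronIsolationPUPPI_cff import (
    egmElectronIsolationMiniAODPUPPI,
)

# Re-clone the miniAOD producer, pointing it at a different (hypothetical) collection.
myElectronIsoPUPPI = egmElectronIsolationMiniAODPUPPI.clone(
    srcToIsolate = "myCustomSlimmedElectrons",
)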
2fa0f1b53d52bc141c9e6443c5143fedc061c573 | 1,092 | py | Python | CNS_Platform/CNS_AIl_AI_module.py | Jonghyun-Kim-73/ERC_Project | 271ec7e77002fcf43158492a6938563af8442e70 | ["Apache-2.0"] | 1 | 2021-03-02T10:31:24.000Z | 2021-03-02T10:31:24.000Z | CNS_Platform/CNS_AIl_AI_module.py | Jonghyun-Kim-73/ERC_Project | 271ec7e77002fcf43158492a6938563af8442e70 | ["Apache-2.0"] | null | null | null | CNS_Platform/CNS_AIl_AI_module.py | Jonghyun-Kim-73/ERC_Project | 271ec7e77002fcf43158492a6938563af8442e70 | ["Apache-2.0"] | 1 | 2021-03-02T10:31:11.000Z | 2021-03-02T10:31:11.000Z |
import numpy as np
class Mainnet:
def __init__(self):
self.EM_SV_net = self.AB_DIG_Net = self.make_net()  # make_net() returns a single model, so assign it to both attributes
def make_net(self):
import tensorflow as tf
from keras import backend as K
from keras.models import Model
from keras.layers import Input, Dense, Lambda, LSTM, RepeatVector
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
def sampling(args):
z_mean, z_log_sigma = args
epsilon = tf.keras.backend.random_normal(shape=(1, 8))
return z_mean + z_log_sigma * epsilon
x = Input(shape=(397,))
y = RepeatVector(10)(x)
h = LSTM(4)(y)
z_mean = Dense(8)(h)
z_log_sigma = Dense(8)(h)
z = Lambda(sampling, output_shape=(8,))([z_mean, z_log_sigma])
h_decoded = RepeatVector(10)(z)
x_decoded_mean = LSTM(4, return_sequences=True)(h_decoded)
x_decoded_mean = LSTM(26, return_sequences=False)(x_decoded_mean)
vae = Model(x, x_decoded_mean)
vae.load_weights('vae_lstm_weight.h5')
return vae
| 29.513514 | 73 | 0.612637 |
907e8393c80426e0973e74cc3b82505080fe2ca4 | 1,249 | py | Python | main.py | bsppbep/mesh_lora | cef36ee138e624b9fc6e14b2cf89594d6dc8bc9e | ["MIT"] | 5 | 2020-10-26T09:06:51.000Z | 2022-03-03T14:40:10.000Z | main.py | bsppbep/mesh_lora | cef36ee138e624b9fc6e14b2cf89594d6dc8bc9e | ["MIT"] | null | null | null | main.py | bsppbep/mesh_lora | cef36ee138e624b9fc6e14b2cf89594d6dc8bc9e | ["MIT"] | 1 | 2020-11-18T03:51:38.000Z | 2020-11-18T03:51:38.000Z |
#-*- coding: utf-8 -*-
"""Mesh LoRa.
A meshed LoRa network of communication beacons is being built here.
Each tag has a unique identifier in the network (between 1 and 254).
But we can also use one or more tags with the identifier 255. This tag
can only relay messages.
"""
import time
from mesh_lora import Messenger
from setup_logger import logger
# If you run this code on a module that does not have an
# RFM95 module, you can use a simulation of the RFM95 module.
simulation = False
if simulation:
from simulated_RFM95 import RFM95
else:
from RFM95 import RFM95
# defines RFM95
rfm95 = RFM95()
# start logger
logger.info('start')
# Initialize the messenger.
# The id (between 1 and 254) must be unique in the network.
# If you want the tag to act only as a relay, you can use id 255.
# The id 255 does not need to be unique in the network.
my_messenger = Messenger(rfm95, id_in_network=255)
logger.info('Messenger id : {}'.format(my_messenger.id_in_network))
# Start
my_messenger.start()
while True:
try:
if my_messenger.id_in_network == 1:
my_messenger.post('hello 2')
if my_messenger.id_in_network == 2:
my_messenger.post('hi 1')
time.sleep(4)
except:
break
| 24.98 | 70 | 0.707766 |
75c77aaf03d48bec1de4695db8ad09f3c0942ac6 | 1,154 | py | Python | fullcalendar/admin.py | jonge-democraten/mezzanine-swingtime | 1a68430a8470da038b50b96a33f26e5cb77145d7 | ["MIT"] | 1 | 2018-10-31T11:23:17.000Z | 2018-10-31T11:23:17.000Z | fullcalendar/admin.py | jonge-democraten/mezzanine-fullcalendar | 1a68430a8470da038b50b96a33f26e5cb77145d7 | ["MIT"] | null | null | null | fullcalendar/admin.py | jonge-democraten/mezzanine-fullcalendar | 1a68430a8470da038b50b96a33f26e5cb77145d7 | ["MIT"] | null | null | null |
from django.utils.translation import ugettext_lazy as _
from django.contrib import admin
from mezzanine.core.admin import StackedDynamicInlineAdmin, DisplayableAdmin
from fullcalendar.models import *
class EventCategoryAdmin(admin.ModelAdmin):
list_display = ('name',)
class OccurrenceInline(StackedDynamicInlineAdmin):
model = Occurrence
fields = ('start_time', 'end_time', 'description', 'location')
class EventAdmin(DisplayableAdmin):
list_display = ('publish_date', 'title', 'status')
search_fields = ('title', 'description', 'content')
fieldsets = (
(None, {
"fields": [
"title", "status", ("publish_date", "expiry_date"),
"event_category", "content"
]
}),
(_("Meta data"), {
"fields": [
"_meta_title", "slug",
("description", "gen_description"),
"keywords", "in_sitemap"
],
"classes": ("collapse-closed",)
}),
)
inlines = [OccurrenceInline]
admin.site.register(Event, EventAdmin)
admin.site.register(EventCategory, EventCategoryAdmin)
| 28.146341 | 76 | 0.610919 |
a80d0f550737e2bc0d3a4beaae9ee83f5d9349ac | 1,215 | py | Python | share.py | chenwenfang/MSSC | f8da9bfed9df26ae7240dbb3905eb2e88213f4c3 | ["Apache-2.0"] | null | null | null | share.py | chenwenfang/MSSC | f8da9bfed9df26ae7240dbb3905eb2e88213f4c3 | ["Apache-2.0"] | null | null | null | share.py | chenwenfang/MSSC | f8da9bfed9df26ae7240dbb3905eb2e88213f4c3 | ["Apache-2.0"] | null | null | null |
def get_neighborhood_weights_edgesnumber(ns,edge_dict):
"""
:param ns: this is the candidate community set
:param edge_dict: e.g. {node1: {node2: {'weight': value1}, node3: {'weight': value2}}}; node1 and node2 are connected with edge weight value1, node1 and node3 are connected with edge weight value2
:return: the neighborhood dict (whose key is the external node index and value is the number of its neighbors in ns); Win (Wout) is the total edge weight inside (outside) ns; number_in (number_out) is the total number of edges inside (leaving) ns.
"""
neighborhood_dict = {}
Win = 0
number_in = 0
Wout = 0
number_out = 0
for i in ns:
neii = edge_dict[i]
for j in neii:
if j in ns:
Win += float(neii[j]['weight'])
number_in += 1
else:
Wout += float(neii[j]['weight'])
number_out += 1
if (j not in neighborhood_dict):
neighborhood_dict[j] = 1
else:
neighborhood_dict[j] = neighborhood_dict[j] + 1
Win /= 2
number_in /= 2
return neighborhood_dict,Win, number_in, Wout, number_out
| 45 | 231 | 0.576955 |
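Because `get_neighborhood_weights_edgesnumber` only needs a plain nested dict, it can be exercised without any graph library. The toy graph below is a minimal sketch with made-up weights, assuming the function above is in scope; note that internal edges are counted from both endpoints and then halved.

edge_dict = {
    1: {2: {'weight': 1.0}, 3: {'weight': 2.0}},
    2: {1: {'weight': 1.0}, 3: {'weight': 1.0}, 4: {'weight': 0.5}},
    3: {1: {'weight': 2.0}, 2: {'weight': 1.0}},
    4: {2: {'weight': 0.5}},
}
ns = {1, 2, 3}  # candidate community
neighborhood, Win, number_in, Wout, number_out = \
    get_neighborhood_weights_edgesnumber(ns, edge_dict)
# neighborhood == {4: 1}          (node 4 touches the community through one edge)
# Win == 4.0, number_in == 3.0    (internal edges 1-2, 1-3, 2-3)
# Wout == 0.5, number_out == 1    (edge 2-4 crosses the boundary)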
bc836f0bcfe67c4c4aea170a87bdbf51e53fbdb1 | 1,359 | py | Python | openstack/tests/unit/network/v2/test_auto_allocated_topology.py | horion/openstacksdk | cbb0e12e1dc944847f2ba0e67bf35b9c7a67b3a3 | ["Apache-2.0"] | 99 | 2018-03-28T15:41:45.000Z | 2022-01-23T17:22:13.000Z | openstack/tests/unit/network/v2/test_auto_allocated_topology.py | horion/openstacksdk | cbb0e12e1dc944847f2ba0e67bf35b9c7a67b3a3 | ["Apache-2.0"] | 5 | 2018-05-25T16:54:23.000Z | 2021-11-21T02:27:16.000Z | openstack/tests/unit/network/v2/test_auto_allocated_topology.py | horion/openstacksdk | cbb0e12e1dc944847f2ba0e67bf35b9c7a67b3a3 | ["Apache-2.0"] | 104 | 2018-04-06T14:33:54.000Z | 2022-03-01T01:58:09.000Z |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.network.v2 import auto_allocated_topology
from openstack.tests.unit import base
EXAMPLE = {
'tenant_id': '1',
'dry_run': False,
}
class TestAutoAllocatedTopology(base.TestCase):
def test_basic(self):
topo = auto_allocated_topology.AutoAllocatedTopology
self.assertEqual('auto_allocated_topology', topo.resource_key)
self.assertEqual('/auto-allocated-topology', topo.base_path)
self.assertFalse(topo.allow_create)
self.assertTrue(topo.allow_fetch)
self.assertFalse(topo.allow_commit)
self.assertTrue(topo.allow_delete)
self.assertFalse(topo.allow_list)
def test_make_it(self):
topo = auto_allocated_topology.AutoAllocatedTopology(**EXAMPLE)
self.assertEqual(EXAMPLE['tenant_id'], topo.project_id)
| 35.763158 | 75 | 0.740986 |
0d1af156eea5f22d3eb84c402d27bc7664b83d93 | 32,739 | py | Python | jgscm/__init__.py | hail-is/jgscm | ee39501d4f7c0756abc8cab028b20b9db5507ac6 | ["MIT"] | null | null | null | jgscm/__init__.py | hail-is/jgscm | ee39501d4f7c0756abc8cab028b20b9db5507ac6 | ["MIT"] | 2 | 2019-04-06T16:29:34.000Z | 2020-04-20T20:31:31.000Z | jgscm/__init__.py | hail-is/jgscm | ee39501d4f7c0756abc8cab028b20b9db5507ac6 | ["MIT"] | 1 | 2020-02-10T15:31:35.000Z | 2020-02-10T15:31:35.000Z |
import base64
import errno
from itertools import islice
import os
import sys
import uuid
from google.cloud.exceptions import NotFound, Forbidden, BadRequest
from google.cloud.storage import Client as GSClient, Blob
import nbformat
from notebook.services.contents.checkpoints import Checkpoints, \
GenericCheckpointsMixin
try:
import notebook.transutils
# https://github.com/jupyter/notebook/issues/3056
except ImportError:
pass
from notebook.services.contents.manager import ContentsManager
from tornado import web
from tornado.escape import url_unescape
from traitlets import Any, Bool, Int, Unicode, default
from concurrent.futures import ThreadPoolExecutor, wait
if sys.version_info[0] == 2:
import socket
BrokenPipeError = socket.error
base64.encodebytes = base64.encodestring
base64.decodebytes = base64.decodestring
else:
unicode = str
class GoogleStorageCheckpoints(GenericCheckpointsMixin, Checkpoints):
checkpoint_dir = Unicode(
".ipynb_checkpoints",
config=True,
help="""The directory name in which to keep file checkpoints
This is a path relative to the file's own directory.
By default, it is .ipynb_checkpoints
""",
)
checkpoint_bucket = Unicode(
"", config=True, help="The bucket name where to keep file checkpoints."
" If empty, the current bucket is used."
)
def create_file_checkpoint(self, content, format, path):
"""Create a checkpoint of the current state of a file
Returns a checkpoint model for the new checkpoint.
"""
checkpoint_id = str(uuid.uuid4())
cp = self._get_checkpoint_path(checkpoint_id, path)
self.log.debug("creating checkpoint %s for %s as %s",
checkpoint_id, path, cp)
blob = self.parent._save_file(cp, content, format)
return {
"id": checkpoint_id,
"last_modified": blob.updated,
}
def create_notebook_checkpoint(self, nb, path):
"""Create a checkpoint of the current state of a file
Returns a checkpoint model for the new checkpoint.
"""
checkpoint_id = str(uuid.uuid4())
cp = self._get_checkpoint_path(checkpoint_id, path)
self.log.debug("creating checkpoint %s for %s as %s",
checkpoint_id, path, cp)
blob = self.parent._save_notebook(cp, nb)
return {
"id": checkpoint_id,
"last_modified": blob.updated,
}
def get_file_checkpoint(self, checkpoint_id, path):
"""Get the content of a checkpoint for a non-notebook file.
Returns a dict of the form:
{
"type": "file",
"content": <str>,
"format": {"text","base64"},
}
"""
self.log.info("restoring %s from checkpoint %s", path, checkpoint_id)
cp = self._get_checkpoint_path(checkpoint_id, path)
exists, blob = self.parent._fetch(cp)
if not exists:
raise web.HTTPError(404, u"No such checkpoint: %s for %s" % (
checkpoint_id, path))
content, fmt = self.parent._read_file(blob, None)
return {
"type": "file",
"content": content,
"format": fmt
}
def get_notebook_checkpoint(self, checkpoint_id, path):
"""Get the content of a checkpoint for a notebook.
Returns a dict of the form:
{
"type": "notebook",
"content": <output of nbformat.read>,
}
"""
self.log.info("restoring %s from checkpoint %s", path, checkpoint_id)
cp = self._get_checkpoint_path(checkpoint_id, path)
exists, blob = self.parent._fetch(cp)
if not exists:
raise web.HTTPError(404, u"No such checkpoint: %s for %s" % (
checkpoint_id, path))
nb = self.parent._read_notebook(blob)
return {
"type": "notebook",
"content": nb
}
def rename_checkpoint(self, checkpoint_id, old_path, new_path):
"""Rename a single checkpoint from old_path to new_path."""
old_cp = self._get_checkpoint_path(checkpoint_id, old_path)
new_cp = self._get_checkpoint_path(checkpoint_id, new_path)
self.parent.rename_file(old_cp, new_cp)
def delete_checkpoint(self, checkpoint_id, path):
"""delete a checkpoint for a file"""
cp = self._get_checkpoint_path(checkpoint_id, path)
self.parent.delete_file(cp)
def list_checkpoints(self, path):
"""Return a list of checkpoints for a given file"""
cp = self._get_checkpoint_path(None, path)
bucket_name, bucket_path = self.parent._parse_path(cp)
try:
bucket = self.parent._get_bucket(bucket_name)
it = bucket.list_blobs(prefix=bucket_path, delimiter="/",
max_results=self.parent.max_list_size)
checkpoints = [{
"id": os.path.splitext(file.path)[0][-36:],
"last_modified": file.updated,
} for file in islice(it, self.parent.max_list_size)]
except NotFound:
return []
checkpoints.sort(key=lambda c: c["last_modified"], reverse=True)
self.log.debug("list_checkpoints: %s: %s", path, checkpoints)
return checkpoints
def _get_checkpoint_path(self, checkpoint_id, path):
if path.startswith("/"):
path = path[1:]
bucket_name, bucket_path = self.parent._parse_path(path)
if self.checkpoint_bucket:
bucket_name = self.checkpoint_bucket
slash = bucket_path.rfind("/") + 1
name, ext = os.path.splitext(bucket_path[slash:])
if checkpoint_id is not None:
return "%s/%s%s/%s-%s%s" % (
bucket_name, bucket_path[:slash], self.checkpoint_dir, name,
checkpoint_id, ext)
return "%s/%s%s/%s" % (bucket_name, bucket_path[:slash],
self.checkpoint_dir, name)
class GoogleStorageContentManager(ContentsManager):
project = Unicode(
"", config=True,
help="The name of the project in Google Cloud to use. If you do not "
"set this parameter, google.cloud will pick the default project "
"from the execution context if it exists."
)
keyfile = Unicode(
"", config=True,
help="The path to the Google Cloud API JSON keyfile which is needed "
"for authorization. If you do not set this parameter, "
"google.cloud will be OK if the default project exists."
)
max_list_size = Int(128, config=True, help="list_blobs() limit")
cache_buckets = Bool(True, config=True,
help="Value indicating whether to cache the bucket "
"objects for faster operations.")
hide_dotted_blobs = Bool(True, config=True,
help="Consider blobs which names start with dot "
"as hidden.")
# redefine untitled_directory to change the default value
untitled_directory = Unicode(
"untitled-folder", config=True,
help="The base name used when creating untitled directories.")
default_path = Unicode(
"", config=True, help="The default path to open.")
post_save_hook = Any(None, config=True,
help="""Python callable or importstring thereof
to be called on the path of a file just saved.
This can be used to process the file on disk,
such as converting the notebook to a script or HTML via nbconvert.
It will be called as (all arguments passed by keyword)::
hook(os_path=path, model=model, contents_manager=instance)
- path: the GCS path to the file just written
- model: the model representing the file
- contents_manager: this ContentsManager instance
"""
)
def __init__(self, *args, **kwargs):
# Stub for the GSClient instance (set lazily by the client property).
self._client = None
super(GoogleStorageContentManager, self).__init__(*args, **kwargs)
def debug_args(fn):
def wrapped_fn(self, *args, **kwargs):
self.log.debug("call %s(%s%s%s)", fn.__name__,
", ".join(repr(a) for a in args),
", " if args and kwargs else "",
", ".join("%s=%r" % p for p in kwargs.items()))
result = fn(self, *args, **kwargs)
self.log.debug("result %s %s", fn.__name__, result)
return result
return wrapped_fn
@debug_args
def is_hidden(self, path):
if path == "":
return False
if path.startswith("/"):
path = path[1:]
bucket_name, bucket_path = self._parse_path(path)
try:
bucket = self._get_bucket(bucket_name)
except Forbidden:
return True
if bucket is None:
return True
if self.hide_dotted_blobs and \
self._get_blob_name(bucket_path).startswith("."):
return True
return False
@debug_args
def file_exists(self, path=""):
if path == "" or path.endswith("/"):
return False
if path.startswith("/"):
path = path[1:]
bucket_name, bucket_path = self._parse_path(path)
if not bucket_path:
return False
bucket = self._get_bucket(bucket_name)
if bucket is None or bucket_path == "":
return False
blob = bucket.blob(bucket_path)
return blob.exists() and not (
blob.name.endswith("/") and blob.size == 0)
@debug_args
def dir_exists(self, path):
if path.startswith("/"):
path = path[1:]
if path == "":
return True
if not path.endswith("/"):
path += "/"
bucket_name, blob_prefix_name = self._parse_path(path)
# Get the bucket, fail if the bucket cannot be found.
bucket = self._get_bucket(bucket_name)
if not bucket:
return False
# Only check that bucket exists.
if not blob_prefix_name:
return True
# Check that some blobs exist with the prefix as a path.
if list(bucket.list_blobs(prefix=blob_prefix_name, max_results=1)):
return True
return False
@debug_args
def get(self, path, content=True, type=None, format=None):
if isinstance(path, Blob):
obj = path
path = self._get_blob_path(obj)
elif path.startswith("/"):
path = path[1:]
if not path:
path = self.default_path
type = self._resolve_storagetype(path, type)
if type == "directory":
if path and not path.endswith("/"):
path += "/"
exists, members = self._fetch(path, content=content)
if not exists:
raise web.HTTPError(404, u"No such directory: %s" % path)
model = self._dir_model(path, members, content=content)
else:
exists, blob = self._fetch(path)
if not exists:
raise web.HTTPError(404, u"No such file: %s" % path)
if type == "notebook" or (type is None and path.endswith(".ipynb")):
model = self._notebook_model(blob, content=content)
else:
model = self._file_model(blob, content=content, format=format)
return model
@debug_args
def save(self, model, path):
if path.startswith("/"):
path = path[1:]
if "type" not in model:
raise web.HTTPError(400, u"No file type provided")
if "content" not in model and model["type"] != "directory":
raise web.HTTPError(400, u"No file content provided")
if "/" not in path and self.default_path:
path = "%s/%s" % (self.default_path, path)
bucket_name, bucket_path = self._parse_path(path)
if bucket_path == "" and model["type"] != "directory":
raise web.HTTPError(403, u"You may only create directories "
u"(buckets) at the root level.")
if bucket_path != "" and model["type"] == "directory" and \
bucket_path[-1] != "/":
path += "/"
self.log.debug("Saving %s", path)
self.run_pre_save_hook(model=model, path=path)
try:
if model["type"] == "notebook":
nb = nbformat.from_dict(model["content"])
self.check_and_sign(nb, path)
self._save_notebook(path, nb)
# One checkpoint should always exist for notebooks.
if not self.checkpoints.list_checkpoints(path):
self.create_checkpoint(path)
elif model["type"] == "file":
# Missing format will be handled internally by _save_file.
self._save_file(path, model["content"], model.get("format"))
elif model["type"] == "directory":
self._save_directory(path, model)
else:
raise web.HTTPError(
400, u"Unhandled contents type: %s" % model["type"])
except web.HTTPError:
raise
except Exception as e:
self.log.error(u"Error while saving file: %s %s", path, e,
exc_info=True)
raise web.HTTPError(
500, u"Unexpected error while saving file: %s %s" % (path, e))
validation_message = None
if model["type"] == "notebook":
self.validate_notebook_model(model)
validation_message = model.get("message", None)
model = self.get(path, content=False)
if validation_message:
model["message"] = validation_message
self.run_post_save_hook(model=model, os_path=path)
return model
@debug_args
def delete_file(self, path):
if path.startswith("/"):
path = path[1:]
bucket_name, bucket_path = self._parse_path(path)
bucket = self._get_bucket(bucket_name, throw=True)
if bucket_path == "":
bucket.delete()
del self._bucket_cache[bucket_name]
return
it = bucket.list_blobs(prefix=bucket_path, delimiter="/",
max_results=self.max_list_size)
files = list(islice(it, self.max_list_size))
folders = it.prefixes
bucket.delete_blobs(files)
for folder in folders:
self.delete_file(bucket_name + "/" + folder)
@debug_args
def rename_file(self, old_path, new_path):
if old_path.startswith("/"):
old_path = old_path[1:]
if new_path.startswith("/"):
new_path = new_path[1:]
old_bucket_name, old_bucket_path = self._parse_path(old_path)
old_bucket = self._get_bucket(old_bucket_name, throw=True)
new_bucket_name, new_bucket_path = self._parse_path(new_path)
new_bucket = self._get_bucket(new_bucket_name, throw=True)
old_blob = old_bucket.get_blob(old_bucket_path)
if old_bucket_name == new_bucket_name:
if old_blob is not None:
old_bucket.rename_blob(old_blob, new_bucket_path)
return
if not old_bucket_path.endswith("/"):
old_bucket_path += "/"
if not new_bucket_path.endswith("/"):
new_bucket_path += "/"
it = old_bucket.list_blobs(prefix=old_bucket_path, delimiter="/",
max_results=self.max_list_size)
old_blobs = list(islice(it, self.max_list_size))
folders = it.prefixes
for ob in old_blobs:
old_bucket.rename_blob(
ob, new_bucket_path + self._get_blob_name(ob))
for f in folders:
self.rename_file(
old_bucket_name + "/" + f,
new_bucket_name + "/" +
f.replace(old_bucket_path, new_bucket_path, 1))
return
if old_blob is not None:
old_bucket.copy_blob(old_blob, new_bucket, new_bucket_path)
old_bucket.delete_blob(old_blob)
return
if not old_bucket_path.endswith("/"):
old_bucket_path += "/"
if not new_bucket_path.endswith("/"):
new_bucket_path += "/"
it = old_bucket.list_blobs(prefix=old_bucket_path, delimiter="/",
max_results=self.max_list_size)
old_blobs = list(islice(it, self.max_list_size))
folders = it.prefixes
for ob in old_blobs:
old_bucket.copy_blob(ob, new_bucket, new_bucket_path +
self._get_blob_name(ob))
ob.delete()
for f in folders:
self.rename_file(
old_bucket_name + "/" + f,
new_bucket_name + "/" +
f.replace(old_bucket_path, new_bucket_path, 1))
@property
def client(self):
"""
:return: used instance of :class:`google.cloud.storage.Client`.
"""
if self._client is not None:
return self._client
if not self.project:
self._client = GSClient()
else:
self._client = GSClient.from_service_account_json(
self.keyfile, project=self.project)
return self._client
def run_post_save_hook(self, model, os_path):
"""Run the post-save hook if defined, and log errors"""
if self.post_save_hook:
try:
self.log.debug("Running post-save hook on %s", os_path)
self.post_save_hook(os_path=os_path,
model=model,
contents_manager=self)
except Exception:
self.log.error("Post-save hook failed on %s",
os_path, exc_info=True)
@default("checkpoints_class")
def _checkpoints_class_default(self):
return GoogleStorageCheckpoints
def _resolve_storagetype(self, path, storagetype):
"""Based on the arguments and status of GCS, return a valid type."""
if "/" not in path or path.endswith("/") or path == "":
if storagetype not in (None, "directory"):
raise web.HTTPError(
400, u"%s is not a directory" % path, reason="bad type")
return "directory"
if storagetype is None and path.endswith(".ipynb"):
return "notebook"
if storagetype is not None:
return storagetype
# If type cannot be inferred from the argument set, use
# the storage API to see if a blob or a prefix exists.
if self.file_exists(path):
return "file"
if self.dir_exists(path):
return "directory"
raise web.HTTPError(
404, u"%s does not exist" % path, reason="bad type")
def _get_bucket(self, name, throw=False):
"""
Get the bucket by its name. Uses cache by default.
:param name: bucket name.
:param throw: If True raises NotFound exception, otherwise, returns
None.
:return: instance of :class:`google.cloud.storage.Bucket` or None.
"""
if not self.cache_buckets:
try:
bucket_descriptor = self.client.bucket(name, user_project=self.client.project)
return self.client.get_bucket(bucket_descriptor)
except NotFound:
if throw:
raise
return None
try:
cache = self._bucket_cache
except AttributeError:
self._bucket_cache = cache = {}
try:
return cache[name]
except KeyError:
try:
bucket_descriptor = self.client.bucket(name, user_project=self.client.project)
bucket = self.client.get_bucket(bucket_descriptor)
except BrokenPipeError as e:
if e.errno in (None, errno.EPIPE):
return self._get_bucket(name, throw)
else:
raise
except (BadRequest, NotFound):
if throw:
raise
return None
cache[name] = bucket
return bucket
def _parse_path(self, path):
"""
Splits the path into bucket name and path inside the bucket.
:param path: string to split.
:return: tuple(bucket name, bucket path).
"""
if self.default_path and not path.startswith(f"{self.default_path}/"):
path = f"{self.default_path}/{path}"
bucket, _, blobname = path.partition("/")
return bucket, blobname
@staticmethod
def _get_blob_path(blob):
"""
Gets blob path.
:param blob: instance of :class:`google.cloud.storage.Blob`.
:return: path string.
"""
return blob.bucket.name + "/" + blob.name
@staticmethod
def _get_blob_name(blob):
"""
Gets blob name (last part of the path).
:param blob: instance of :class:`google.cloud.storage.Blob`.
:return: name string.
"""
if isinstance(blob, Blob):
return os.path.basename(blob.name)
assert isinstance(blob, (unicode, str))
if blob.endswith("/"):
blob = blob[:-1]
return os.path.basename(blob)
@staticmethod
def _get_dir_name(path):
"""
Extracts directory name like os.path.dirname.
:param path: GCS path string.
:return: directory name string.
"""
if path.endswith("/"):
path = path[:-1]
return path.rsplit("/", 1)[-1]
@debug_args
def _fetch(self, path, content=True):
"""
Retrieves the blob by it's path.
:param path: blob path or directory name.
:param content: If False, just check if path exists.
:return: tuple(exists Bool, :class:`google.cloud.storage.Blob` or
tuple(file [Blob], folders list)).
"""
if path == "":
try:
buckets = self.client.list_buckets()
return True, ([], [b.name + "/" for b in buckets])
except BrokenPipeError as e:
if e.errno in (None, errno.EPIPE):
return self._fetch(path, content)
else:
raise
try:
bucket_name, bucket_path = self._parse_path(path)
except ValueError:
return False, None
try:
bucket = self._get_bucket(bucket_name)
except Forbidden:
return True, None
if bucket is None:
return False, None
if bucket_path == "" and not content:
return True, None
if bucket_path == "" or bucket_path.endswith("/"):
if bucket_path != "":
try:
exists = bucket.blob(bucket_path).exists()
except BrokenPipeError as e:
if e.errno in (None, errno.EPIPE):
return self._fetch(path, content)
else:
raise
if exists and not content:
return True, None
# blob may not exist but at the same time be a part of a path
delimiter = '/' if content else None # https://github.com/hail-is/hail/issues/8586
max_list_size = self.max_list_size if content else 1
try:
it = bucket.list_blobs(prefix=bucket_path,
delimiter=delimiter,
max_results=max_list_size)
try:
files = list(islice(it, max_list_size))
except BrokenPipeError as e:
if e.errno in (None, errno.EPIPE):
return self._fetch(path, content)
else:
raise
except NotFound:
del self._bucket_cache[bucket_name]
return False, None
folders = it.prefixes
return (bool(files or folders or bucket_path == ""),
(files, folders) if content else None)
if not content:
return bucket.blob(bucket_path).exists(), None
try:
blob = bucket.get_blob(bucket_path)
except BrokenPipeError as e:
if e.errno in (None, errno.EPIPE):
return self._fetch(path, content)
else:
raise
return blob is not None, blob
def _base_model(self, blob):
"""Builds the common base of a contents model"""
last_modified = blob.updated
created = last_modified
model = {
"name": self._get_blob_name(blob),
"path": self._get_blob_path(blob),
"last_modified": last_modified,
"created": created,
"content": None,
"format": None,
"mimetype": blob.content_type,
"writable": True
}
return model
def _read_file(self, blob, format):
"""Reads a non-notebook file.
blob: instance of :class:`google.cloud.storage.Blob`.
format:
If "text", the contents will be decoded as UTF-8.
If "base64", the raw bytes contents will be encoded as base64.
If not specified, try to decode as UTF-8, and fall back to base64
"""
bcontent = blob.download_as_string()
if format is None or format == "text":
# Try to interpret as unicode if format is unknown or if unicode
# was explicitly requested.
try:
return bcontent.decode("utf8"), "text"
except UnicodeError:
if format == "text":
raise web.HTTPError(
400, "%s is not UTF-8 encoded" %
self._get_blob_path(blob),
reason="bad format",
)
return base64.encodebytes(bcontent).decode("ascii"), "base64"
def _file_model(self, blob, content=True, format=None):
"""Builds a model for a file
if content is requested, include the file contents.
format:
If "text", the contents will be decoded as UTF-8.
If "base64", the raw bytes contents will be encoded as base64.
If not specified, try to decode as UTF-8, and fall back to base64
"""
model = self._base_model(blob)
model["type"] = "file"
if content:
content, format = self._read_file(blob, format)
if model["mimetype"] == "text/plain":
default_mime = {
"text": "text/plain",
"base64": "application/octet-stream"
}[format]
model["mimetype"] = default_mime
model.update(
content=content,
format=format,
)
return model
def _read_notebook(self, blob):
"""
Reads a notebook file from GCS blob.
:param blob: :class:`google.cloud.storage.Blob` instance.
:return: :class:`nbformat.notebooknode.NotebookNode` instance.
"""
data = blob.download_as_string().decode("utf-8")
nb = nbformat.reads(data, as_version=4)
self.mark_trusted_cells(nb, self._get_blob_path(blob))
return nb
def _notebook_model(self, blob, content=True):
"""Builds a notebook model.
if content is requested, the notebook content will be populated
as a JSON structure (not double-serialized)
"""
model = self._base_model(blob)
model["type"] = "notebook"
if content:
nb = self._read_notebook(blob)
model["content"] = nb
model["mimetype"] = "application/x-ipynb+json"
model["format"] = "json"
self.validate_notebook_model(model)
return model
def _dir_model(self, path, members, content=True):
"""Builds a model for a directory
if content is requested, will include a listing of the directory
"""
model = {
"type": "directory",
"name": self._get_dir_name(path),
"path": path.rstrip('/'),
"last_modified": "",
"created": "",
"content": None,
"format": None,
"mimetype": "application/x-directory",
"writable": (members is not None or not self.is_hidden(path))
}
if content:
blobs, folders = members
if path != "":
tmpl = "%s/%%s" % self._parse_path(path)[0]
else:
tmpl = "%s"
_, this = self._parse_path(path)
with ThreadPoolExecutor(max_workers=64) as pool:
blob_futures = [
pool.submit(self.get, path=blob, content=False)
for blob in blobs
if self._get_blob_path(blob) != path
and self.should_list(self._get_blob_name(blob))]
folder_futures = [
pool.submit(self.get, path=tmpl % folder, content=False)
for folder in folders
if self.should_list(folder) and folder != this]
self.log.debug(f'running {len(blob_futures) + len(folder_futures)}'
f' 32-parallel')
done, not_done = wait(blob_futures + folder_futures)
assert not not_done
model["content"] = [x.result() for x in done]
model["format"] = "json"
return model
def _save_notebook(self, path, nb):
"""
Uploads notebook to GCS.
:param path: blob path.
:param nb: :class:`nbformat.notebooknode.NotebookNode` instance.
:return: created :class:`google.cloud.storage.Blob`.
"""
bucket_name, bucket_path = self._parse_path(path)
bucket = self._get_bucket(bucket_name, throw=True)
data = nbformat.writes(nb, version=nbformat.NO_CONVERT)
blob = bucket.blob(bucket_path)
blob.upload_from_string(data, "application/x-ipynb+json")
return blob
def _save_file(self, path, content, format):
"""Uploads content of a generic file to GCS.
:param: path blob path.
:param: content file contents string.
:param: format the description of the input format, can be either
"text" or "base64".
:return: created :class:`google.cloud.storage.Blob`.
"""
bucket_name, bucket_path = self._parse_path(path)
bucket = self._get_bucket(bucket_name, throw=True)
if format not in {"text", "base64"}:
raise web.HTTPError(
400,
u"Must specify format of file contents as \"text\" or "
u"\"base64\"",
)
try:
if format == "text":
bcontent = content.encode("utf8")
else:
b64_bytes = content.encode("ascii")
bcontent = base64.decodebytes(b64_bytes)
except Exception as e:
raise web.HTTPError(
400, u"Encoding error saving %s: %s" % (path, e)
)
blob = bucket.blob(bucket_path)
blob.upload_from_string(bcontent)
return blob
def _save_directory(self, path, model):
"""Creates a directory in GCS."""
exists, obj = self._fetch(path)
if exists:
if isinstance(obj, Blob):
raise web.HTTPError(400, u"Not a directory: %s" % path)
else:
self.log.debug("Directory %r already exists", path)
return
bucket_name, bucket_path = self._parse_path(path)
if bucket_path == "":
self.client.create_bucket(bucket_name)
else:
bucket = self._get_bucket(bucket_name, throw=True)
bucket.blob(bucket_path).upload_from_string(
b"", content_type="application/x-directory")
debug_args = staticmethod(debug_args)
| 38.426056 | 95 | 0.557683 |
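`GoogleStorageContentManager` is a Jupyter `ContentsManager`, so it is wired in through the notebook configuration file rather than called directly. The snippet below is a hypothetical `jupyter_notebook_config.py` sketch, assuming the module above is installed as the `jgscm` package; the project, keyfile, and bucket names are placeholder values, while the trait names come from the class definition above.

# jupyter_notebook_config.py: `c` is the config object Jupyter injects into this file.
c.NotebookApp.contents_manager_class = "jgscm.GoogleStorageContentManager"
c.GoogleStorageContentManager.project = "my-gcp-project"                 # placeholder
c.GoogleStorageContentManager.keyfile = "/path/to/service-account.json"  # placeholder
c.GoogleStorageContentManager.default_path = "my-default-bucket"         # bucket shown at the root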
36ac8ae2ed2821ead2905989553b79b6c0505118 | 3,781 | py | Python | python/GafferCortexUI/OpPathPreview.py | danieldresser-ie/gaffer | 78c22487156a5800fcca49a24f52451a8ac0c559 | ["BSD-3-Clause"] | 1 | 2016-07-31T09:55:09.000Z | 2016-07-31T09:55:09.000Z | python/GafferCortexUI/OpPathPreview.py | Kthulhu/gaffer | 8995d579d07231988abc92c3ac2788c15c8bc75c | ["BSD-3-Clause"] | null | null | null | python/GafferCortexUI/OpPathPreview.py | Kthulhu/gaffer | 8995d579d07231988abc92c3ac2788c15c8bc75c | ["BSD-3-Clause"] | 1 | 2020-02-15T16:15:54.000Z | 2020-02-15T16:15:54.000Z |
##########################################################################
#
# Copyright (c) 2012, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import IECore
import Gaffer
import GafferUI
import GafferCortex
import GafferCortexUI
class OpPathPreview( GafferUI.DeferredPathPreview ) :
def __init__( self, path ) :
self.__column = GafferUI.ListContainer( borderWidth = 8 )
GafferUI.DeferredPathPreview.__init__( self, self.__column, path )
with self.__column :
# we'll replace this with the op in _deferredUpdate()
GafferUI.Spacer( IECore.V2i( 1 ) )
button = GafferUI.Button( "Launch" )
self.__executeClickedConnection = button.clickedSignal().connect( Gaffer.WeakMethod( self.__executeClicked ) )
self._updateFromPath()
def isValid( self ) :
path = self.getPath()
if not isinstance( path, GafferCortex.ClassLoaderPath ) :
return False
if hasattr( path.classLoader(), "classType" ) :
if not issubclass( path.classLoader().classType(), IECore.Op ) :
return False
else :
if path.classLoader().searchPath() != IECore.ClassLoader.defaultOpLoader().searchPath() :
return False
return path.isLeaf()
def _load( self ) :
return self.getPath().load()()
def _deferredUpdate( self, op ) :
self.__node = Gaffer.ParameterisedHolderNode()
self.__node.setParameterised( op )
GafferCortexUI.ParameterPresets.autoLoad( self.__node )
self.__column[0] = GafferUI.NodeUI.create( self.__node )
def __executeClicked( self, button ) :
# Create a copy of our op
self.__node.setParameterisedValues()
op = self.__node.getParameterised()[0]
opCopy = self.getPath().load()()
IECore.ParameterAlgo.copyClasses(
op.parameters(),
opCopy.parameters(),
)
opCopy.parameters().setValue( op.parameters().getValue().copy() )
# Launch an OpDialogue, executing the copy in the background
dialogue = GafferCortexUI.OpDialogue(
opCopy,
executeInBackground = True,
executeImmediately = True
)
self.ancestor( GafferUI.Window ).addChildWindow( dialogue )
dialogue.setVisible( True )
GafferUI.PathPreviewWidget.registerType( "Op", OpPathPreview )
| 34.063063
| 113
| 0.705369
|
4e1835e8b2744d29b4406cdc5701d32379d71c77
| 319
|
py
|
Python
|
src/prefect/tasks/cubejs/__init__.py
|
suryatmodulus/prefect
|
e4ac9f6aa831140c7fba0397f3e5e0884b1b9e42
|
[
"Apache-2.0"
] | 3
|
2021-11-09T10:46:58.000Z
|
2022-03-11T04:22:35.000Z
|
src/prefect/tasks/cubejs/__init__.py
|
suryatmodulus/prefect
|
e4ac9f6aa831140c7fba0397f3e5e0884b1b9e42
|
[
"Apache-2.0"
] | 8
|
2021-10-11T16:42:59.000Z
|
2022-03-31T08:42:24.000Z
|
src/prefect/tasks/cubejs/__init__.py
|
suryatmodulus/prefect
|
e4ac9f6aa831140c7fba0397f3e5e0884b1b9e42
|
[
"Apache-2.0"
] | 1
|
2022-03-11T04:22:40.000Z
|
2022-03-11T04:22:40.000Z
|
"""
This is a collection of tasks to interact with a Cube.js or Cube Cloud environment.
"""
try:
from prefect.tasks.cubejs.cubejs_tasks import CubeJSQueryTask
except ImportError as err:
raise ImportError(
        'Using `prefect.tasks.cubejs` requires Prefect to be installed with the "cubejs" extra.'
) from err
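# Installation note (sketch, not part of the original file): the "cubejs"
# extra referenced in the error above is typically pulled in with
#   pip install "prefect[cubejs]"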
| 29
| 89
| 0.733542
|
68a460e9023c6da78d1ba6ed3e3b2597a663b497
| 381
|
py
|
Python
|
auth-api/migrations/versions/a0f0a77dc77f_merge_heads.py
|
karthik-aot/sbc-auth
|
f24028040fda67d4f10ae9b608b8832c15d2a8ad
|
[
"Apache-2.0"
] | 11
|
2019-09-26T06:58:25.000Z
|
2022-01-26T06:19:39.000Z
|
auth-api/migrations/versions/a0f0a77dc77f_merge_heads.py
|
karthik-aot/sbc-auth
|
f24028040fda67d4f10ae9b608b8832c15d2a8ad
|
[
"Apache-2.0"
] | 1,622
|
2019-05-07T21:08:38.000Z
|
2022-03-28T17:07:15.000Z
|
auth-api/migrations/versions/a0f0a77dc77f_merge_heads.py
|
karthik-aot/sbc-auth
|
f24028040fda67d4f10ae9b608b8832c15d2a8ad
|
[
"Apache-2.0"
] | 98
|
2019-03-01T21:36:15.000Z
|
2021-12-01T22:11:25.000Z
|
"""merge heads
Revision ID: a0f0a77dc77f
Revises: 3c89ccb6bb32, a37f90e6802d
Create Date: 2021-05-03 08:30:00.475241
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = 'a0f0a77dc77f'
down_revision = ('3c89ccb6bb32', 'a37f90e6802d')
branch_labels = None
depends_on = None
def upgrade():
pass
def downgrade():
pass
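# Usage note (sketch, not part of the original file): like any other revision,
# this merge migration is applied with the standard Alembic workflow, e.g.
#   alembic upgrade head
# (the surrounding project may wrap this in its own migration tooling).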
| 15.24
| 48
| 0.740157
|
f13346bec1052b9eb2d1ed7b8b14f69df38c842f
| 10,383
|
py
|
Python
|
kubernetes/test/test_extensions_v1beta1_api.py
|
L3T/python
|
b6e4ae81a2afb49f668a142eb7d1c6e2571ef478
|
[
"Apache-2.0"
] | 2
|
2020-06-21T08:03:18.000Z
|
2020-06-21T09:53:29.000Z
|
kubernetes/test/test_extensions_v1beta1_api.py
|
L3T/python
|
b6e4ae81a2afb49f668a142eb7d1c6e2571ef478
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/test/test_extensions_v1beta1_api.py
|
L3T/python
|
b6e4ae81a2afb49f668a142eb7d1c6e2571ef478
|
[
"Apache-2.0"
] | 1
|
2020-12-10T07:28:08.000Z
|
2020-12-10T07:28:08.000Z
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: release-1.16
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import kubernetes.client
from kubernetes.client.api.extensions_v1beta1_api import ExtensionsV1beta1Api # noqa: E501
from kubernetes.client.rest import ApiException
class TestExtensionsV1beta1Api(unittest.TestCase):
"""ExtensionsV1beta1Api unit test stubs"""
def setUp(self):
self.api = kubernetes.client.api.extensions_v1beta1_api.ExtensionsV1beta1Api() # noqa: E501
def tearDown(self):
pass
def test_create_namespaced_daemon_set(self):
"""Test case for create_namespaced_daemon_set
"""
pass
def test_create_namespaced_deployment(self):
"""Test case for create_namespaced_deployment
"""
pass
def test_create_namespaced_deployment_rollback(self):
"""Test case for create_namespaced_deployment_rollback
"""
pass
def test_create_namespaced_ingress(self):
"""Test case for create_namespaced_ingress
"""
pass
def test_create_namespaced_network_policy(self):
"""Test case for create_namespaced_network_policy
"""
pass
def test_create_namespaced_replica_set(self):
"""Test case for create_namespaced_replica_set
"""
pass
def test_create_pod_security_policy(self):
"""Test case for create_pod_security_policy
"""
pass
def test_delete_collection_namespaced_daemon_set(self):
"""Test case for delete_collection_namespaced_daemon_set
"""
pass
def test_delete_collection_namespaced_deployment(self):
"""Test case for delete_collection_namespaced_deployment
"""
pass
def test_delete_collection_namespaced_ingress(self):
"""Test case for delete_collection_namespaced_ingress
"""
pass
def test_delete_collection_namespaced_network_policy(self):
"""Test case for delete_collection_namespaced_network_policy
"""
pass
def test_delete_collection_namespaced_replica_set(self):
"""Test case for delete_collection_namespaced_replica_set
"""
pass
def test_delete_collection_pod_security_policy(self):
"""Test case for delete_collection_pod_security_policy
"""
pass
def test_delete_namespaced_daemon_set(self):
"""Test case for delete_namespaced_daemon_set
"""
pass
def test_delete_namespaced_deployment(self):
"""Test case for delete_namespaced_deployment
"""
pass
def test_delete_namespaced_ingress(self):
"""Test case for delete_namespaced_ingress
"""
pass
def test_delete_namespaced_network_policy(self):
"""Test case for delete_namespaced_network_policy
"""
pass
def test_delete_namespaced_replica_set(self):
"""Test case for delete_namespaced_replica_set
"""
pass
def test_delete_pod_security_policy(self):
"""Test case for delete_pod_security_policy
"""
pass
def test_get_api_resources(self):
"""Test case for get_api_resources
"""
pass
def test_list_daemon_set_for_all_namespaces(self):
"""Test case for list_daemon_set_for_all_namespaces
"""
pass
def test_list_deployment_for_all_namespaces(self):
"""Test case for list_deployment_for_all_namespaces
"""
pass
def test_list_ingress_for_all_namespaces(self):
"""Test case for list_ingress_for_all_namespaces
"""
pass
def test_list_namespaced_daemon_set(self):
"""Test case for list_namespaced_daemon_set
"""
pass
def test_list_namespaced_deployment(self):
"""Test case for list_namespaced_deployment
"""
pass
def test_list_namespaced_ingress(self):
"""Test case for list_namespaced_ingress
"""
pass
def test_list_namespaced_network_policy(self):
"""Test case for list_namespaced_network_policy
"""
pass
def test_list_namespaced_replica_set(self):
"""Test case for list_namespaced_replica_set
"""
pass
def test_list_network_policy_for_all_namespaces(self):
"""Test case for list_network_policy_for_all_namespaces
"""
pass
def test_list_pod_security_policy(self):
"""Test case for list_pod_security_policy
"""
pass
def test_list_replica_set_for_all_namespaces(self):
"""Test case for list_replica_set_for_all_namespaces
"""
pass
def test_patch_namespaced_daemon_set(self):
"""Test case for patch_namespaced_daemon_set
"""
pass
def test_patch_namespaced_daemon_set_status(self):
"""Test case for patch_namespaced_daemon_set_status
"""
pass
def test_patch_namespaced_deployment(self):
"""Test case for patch_namespaced_deployment
"""
pass
def test_patch_namespaced_deployment_scale(self):
"""Test case for patch_namespaced_deployment_scale
"""
pass
def test_patch_namespaced_deployment_status(self):
"""Test case for patch_namespaced_deployment_status
"""
pass
def test_patch_namespaced_ingress(self):
"""Test case for patch_namespaced_ingress
"""
pass
def test_patch_namespaced_ingress_status(self):
"""Test case for patch_namespaced_ingress_status
"""
pass
def test_patch_namespaced_network_policy(self):
"""Test case for patch_namespaced_network_policy
"""
pass
def test_patch_namespaced_replica_set(self):
"""Test case for patch_namespaced_replica_set
"""
pass
def test_patch_namespaced_replica_set_scale(self):
"""Test case for patch_namespaced_replica_set_scale
"""
pass
def test_patch_namespaced_replica_set_status(self):
"""Test case for patch_namespaced_replica_set_status
"""
pass
def test_patch_namespaced_replication_controller_dummy_scale(self):
"""Test case for patch_namespaced_replication_controller_dummy_scale
"""
pass
def test_patch_pod_security_policy(self):
"""Test case for patch_pod_security_policy
"""
pass
def test_read_namespaced_daemon_set(self):
"""Test case for read_namespaced_daemon_set
"""
pass
def test_read_namespaced_daemon_set_status(self):
"""Test case for read_namespaced_daemon_set_status
"""
pass
def test_read_namespaced_deployment(self):
"""Test case for read_namespaced_deployment
"""
pass
def test_read_namespaced_deployment_scale(self):
"""Test case for read_namespaced_deployment_scale
"""
pass
def test_read_namespaced_deployment_status(self):
"""Test case for read_namespaced_deployment_status
"""
pass
def test_read_namespaced_ingress(self):
"""Test case for read_namespaced_ingress
"""
pass
def test_read_namespaced_ingress_status(self):
"""Test case for read_namespaced_ingress_status
"""
pass
def test_read_namespaced_network_policy(self):
"""Test case for read_namespaced_network_policy
"""
pass
def test_read_namespaced_replica_set(self):
"""Test case for read_namespaced_replica_set
"""
pass
def test_read_namespaced_replica_set_scale(self):
"""Test case for read_namespaced_replica_set_scale
"""
pass
def test_read_namespaced_replica_set_status(self):
"""Test case for read_namespaced_replica_set_status
"""
pass
def test_read_namespaced_replication_controller_dummy_scale(self):
"""Test case for read_namespaced_replication_controller_dummy_scale
"""
pass
def test_read_pod_security_policy(self):
"""Test case for read_pod_security_policy
"""
pass
def test_replace_namespaced_daemon_set(self):
"""Test case for replace_namespaced_daemon_set
"""
pass
def test_replace_namespaced_daemon_set_status(self):
"""Test case for replace_namespaced_daemon_set_status
"""
pass
def test_replace_namespaced_deployment(self):
"""Test case for replace_namespaced_deployment
"""
pass
def test_replace_namespaced_deployment_scale(self):
"""Test case for replace_namespaced_deployment_scale
"""
pass
def test_replace_namespaced_deployment_status(self):
"""Test case for replace_namespaced_deployment_status
"""
pass
def test_replace_namespaced_ingress(self):
"""Test case for replace_namespaced_ingress
"""
pass
def test_replace_namespaced_ingress_status(self):
"""Test case for replace_namespaced_ingress_status
"""
pass
def test_replace_namespaced_network_policy(self):
"""Test case for replace_namespaced_network_policy
"""
pass
def test_replace_namespaced_replica_set(self):
"""Test case for replace_namespaced_replica_set
"""
pass
def test_replace_namespaced_replica_set_scale(self):
"""Test case for replace_namespaced_replica_set_scale
"""
pass
def test_replace_namespaced_replica_set_status(self):
"""Test case for replace_namespaced_replica_set_status
"""
pass
def test_replace_namespaced_replication_controller_dummy_scale(self):
"""Test case for replace_namespaced_replication_controller_dummy_scale
"""
pass
def test_replace_pod_security_policy(self):
"""Test case for replace_pod_security_policy
"""
pass
if __name__ == '__main__':
unittest.main()
| 22.870044
| 124
| 0.663874
|
0922cc75a62ba7bfe0746cb0a7b74ec4ab844c95
| 7,482
|
py
|
Python
|
stests/monitoring/on_consensus_finality_signature.py
|
Fraser999/stests
|
482a84e562223731e47e5e2b91d0a081f5c4aad6
|
[
"Apache-2.0"
] | null | null | null |
stests/monitoring/on_consensus_finality_signature.py
|
Fraser999/stests
|
482a84e562223731e47e5e2b91d0a081f5c4aad6
|
[
"Apache-2.0"
] | null | null | null |
stests/monitoring/on_consensus_finality_signature.py
|
Fraser999/stests
|
482a84e562223731e47e5e2b91d0a081f5c4aad6
|
[
"Apache-2.0"
] | null | null | null |
from datetime import datetime
import typing
import dramatiq
from stests import chain
from stests.core import cache
from stests.core import factory
from stests.core.logging import log_event
from stests.core.types.chain import BlockStatus
from stests.core.types.chain import Deploy
from stests.core.types.chain import DeployStatus
from stests.core.types.infra import Network
from stests.core.types.infra import Node
from stests.core.types.infra import NodeEventInfo
from stests.core.types.infra import NodeIdentifier
from stests.core.utils import encoder
from stests.events import EventType
# Queue to which messages will be dispatched.
_QUEUE = "monitoring.events.consensus.fault"
class _Context():
"""Contextual information passed along chain of execution.
"""
def __init__(self, info: NodeEventInfo):
self.block = None
self.block_hash = info.block_hash
self.deploy = None
self.deploy_hash = None
self.info = info
self.network_id = factory.create_network_id(info.network)
self.network = cache.infra.get_network(self.network_id)
self.node_id = factory.create_node_id(self.network_id, info.node_index)
self.node = cache.infra.get_node(self.node_id)
self.on_chain_block = None
self.on_chain_deploy = None
@property
def deploy_hashes(self):
"""Gets set of associated deploy hashes."""
try:
return self.on_chain_block['header']['deploy_hashes']
except (TypeError, KeyError):
return []
@property
def deploy_execution_ctx(self):
"""Returns workload generated execution context."""
return cache.orchestration.get_context(
self.deploy.network,
self.deploy.run_index,
self.deploy.run_type,
)
@property
def transfer_hashes(self):
"""Gets set of associated transfer hashes."""
try:
return self.on_chain_block['header']['transfer_hashes']
except (TypeError, KeyError):
return []
@dramatiq.actor(queue_name=_QUEUE)
def on_consensus_finality_signature(info: NodeEventInfo):
"""Event: raised whenever a consensus finality signature is emitted by a node.
:param info: Node event information.
"""
if not _is_block_processed(info):
_process_block(_Context(info))
def _is_block_processed(info: NodeEventInfo) -> bool:
"""Returns flag indicating whether finalised deploy event has already been processed.
"""
_, encached = cache.monitoring.set_block(info)
return not encached
def _is_deploy_processed(ctx: _Context) -> bool:
"""Returns flag indicating whether finalised deploy event has already been processed.
"""
_, encached = cache.monitoring.set_deploy(ctx.network.name, ctx.block_hash, ctx.deploy_hash)
return not encached
def _process_block(ctx: _Context):
"""Processes a finalised block.
"""
# Escape if block not found.
try:
ctx.on_chain_block = chain.get_block(ctx.network, ctx.node, ctx.block_hash)
except Exception as err:
log_event(EventType.CHAIN_QUERY_BLOCK_NOT_FOUND, None, ctx.block_hash)
return
# Escape if block empty.
if not ctx.deploy_hashes and not ctx.transfer_hashes:
log_event(EventType.CHAIN_ADDED_BLOCK_EMPTY, None, ctx.block_hash)
return
# Set stats.
ctx.block = factory.create_block_statistics_on_addition(
block_hash = ctx.block_hash,
block_hash_parent = ctx.on_chain_block['header']['parent_hash'],
chain_name = ctx.network.chain_name,
era_id = ctx.on_chain_block['header']['era_id'],
deploy_cost_total = None,
deploy_count = len(ctx.deploy_hashes) + len(ctx.transfer_hashes),
deploy_gas_price_avg = None,
height = ctx.on_chain_block['header']['height'],
is_switch_block = ctx.on_chain_block['header']['era_end'] is not None,
network = ctx.network.name,
proposer = ctx.on_chain_block['header']['proposer'],
size_bytes = None,
state_root_hash = ctx.on_chain_block['header']['state_root_hash'],
status = BlockStatus.FINALIZED.name,
timestamp = datetime.strptime(ctx.on_chain_block['header']['timestamp'], "%Y-%m-%dT%H:%M:%S.%fZ"),
)
# Emit event.
log_event(EventType.CHAIN_ADDED_BLOCK, f"{ctx.block_hash}", ctx.block)
# Process associated deploys + transfers.
_process_block_deploys(ctx)
def _process_block_deploys(ctx: _Context):
"""Processes a finalised block.
"""
for deploy_hash in ctx.deploy_hashes + ctx.transfer_hashes:
ctx.deploy_hash = deploy_hash
if not _is_deploy_processed(ctx):
_process_deploy(ctx)
def _process_deploy(ctx: _Context):
"""Processes a finalised deploy.
"""
# Set deploy - escape if not found.
try:
ctx.on_chain_deploy = chain.get_deploy(ctx.network, ctx.node, ctx.deploy_hash)
except Exception as err:
log_event(EventType.CHAIN_QUERY_DEPLOY_NOT_FOUND, None, ctx.deploy_hash)
return
# Emit event.
log_event(EventType.CHAIN_ADDED_DEPLOY, f"{ctx.block_hash}.{ctx.deploy_hash}", ctx.info)
# Escape if deploy cannot be correlated to a workflow.
ctx.deploy = cache.state.get_deploy_on_finalisation(ctx.network.name, ctx.deploy_hash)
if not ctx.deploy:
return
# Process correlated - i.e. deploys previously dispatched by a generator.
_process_deploy_correlated(ctx)
def _process_deploy_correlated(ctx: _Context):
"""Process a monitored deploy that was previously dispatched during a generator run.
"""
# Notify.
log_event(EventType.WFLOW_DEPLOY_CORRELATED, f"{ctx.block_hash}.{ctx.deploy_hash}", ctx.node, block_hash=ctx.block_hash, deploy_hash=ctx.deploy_hash)
# Update cache: deploy.
ctx.deploy.block_hash = ctx.block_hash
try:
ctx.deploy.deploy_cost = int(ctx.on_chain_deploy["execution_results"][0]["result"]["Success"]["cost"])
except KeyError:
try:
ctx.deploy.deploy_cost = int(ctx.on_chain_deploy["execution_results"][0]["result"]["cost"])
except KeyError:
ctx.deploy.deploy_cost = 0
ctx.deploy.era_id = ctx.block.era_id
ctx.deploy.finalization_duration = ctx.block.timestamp.timestamp() - ctx.deploy.dispatch_timestamp.timestamp()
ctx.deploy.finalization_node_index = ctx.node.index
ctx.deploy.finalization_timestamp = ctx.block.timestamp
ctx.deploy.state_root_hash = ctx.block.state_root_hash
ctx.deploy.status = DeployStatus.ADDED
cache.state.set_deploy(ctx.deploy)
# Update cache: account balance.
if ctx.deploy.deploy_cost > 0:
cache.state.decrement_account_balance_on_deploy_finalisation(ctx.deploy, ctx.deploy.deploy_cost)
# Enqueue message for processing by orchestrator.
_enqueue_correlated(ctx)
def _enqueue_correlated(ctx: _Context):
"""Enqueues a correlated deploy for further processing by orchestrator.
"""
dramatiq.get_broker().enqueue(dramatiq.Message(
queue_name="orchestration.engine.step",
actor_name="on_step_deploy_finalized",
args=([
encoder.encode(ctx.deploy_execution_ctx),
encoder.encode(ctx.node_id),
ctx.block_hash,
ctx.deploy_hash
]),
kwargs=dict(),
options=dict(),
))
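# ---------------------------------------------------------------------------
# Minimal sketch (not part of the original module) of the block timestamp
# parsing used in _process_block above; the timestamp value is illustrative.
#   from datetime import datetime
#   ts = datetime.strptime("2021-06-01T12:34:56.789Z", "%Y-%m-%dT%H:%M:%S.%fZ")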
| 33.702703
| 153
| 0.686982
|
9d29d77cb65d89efc7a7f0a789c6ffb5bf468c6f
| 10,726
|
py
|
Python
|
venv/lib/python2.7/site-packages/openpyxl/drawing/fill.py
|
Christian-Castro/castro_odoo8
|
8247fdb20aa39e043b6fa0c4d0af509462ab3e00
|
[
"Unlicense"
] | 7
|
2016-12-12T02:29:42.000Z
|
2020-05-12T21:21:21.000Z
|
venv/lib/python2.7/site-packages/openpyxl/drawing/fill.py
|
Christian-Castro/castro_odoo8
|
8247fdb20aa39e043b6fa0c4d0af509462ab3e00
|
[
"Unlicense"
] | 31
|
2017-01-05T06:07:28.000Z
|
2018-05-27T13:13:06.000Z
|
venv/lib/python2.7/site-packages/openpyxl/drawing/fill.py
|
Christian-Castro/castro_odoo8
|
8247fdb20aa39e043b6fa0c4d0af509462ab3e00
|
[
"Unlicense"
] | 3
|
2017-12-21T23:30:12.000Z
|
2019-01-03T20:51:52.000Z
|
from __future__ import absolute_import
# Copyright (c) 2010-2016 openpyxl
from openpyxl.descriptors.serialisable import Serialisable
from openpyxl.descriptors import (
Alias,
Bool,
Integer,
Set,
NoneSet,
Typed,
MinMax,
Sequence,
)
from openpyxl.descriptors.excel import Relation
from openpyxl.descriptors.nested import NestedNoneSet
from openpyxl.descriptors.sequence import NestedSequence
from openpyxl.xml.constants import DRAWING_NS
from .colors import ColorChoice
from openpyxl.descriptors.excel import ExtensionList as OfficeArtExtensionList
from .effect import *
"""
Fill elements from drawing main schema
"""
class PatternFillProperties(Serialisable):
tagname = "pattFill"
namespace = DRAWING_NS
prst = NoneSet(values=(['pct5', 'pct10', 'pct20', 'pct25', 'pct30', 'pct40',
'pct50', 'pct60', 'pct70', 'pct75', 'pct80', 'pct90', 'horz', 'vert',
'ltHorz', 'ltVert', 'dkHorz', 'dkVert', 'narHorz', 'narVert', 'dashHorz',
'dashVert', 'cross', 'dnDiag', 'upDiag', 'ltDnDiag', 'ltUpDiag',
'dkDnDiag', 'dkUpDiag', 'wdDnDiag', 'wdUpDiag', 'dashDnDiag',
'dashUpDiag', 'diagCross', 'smCheck', 'lgCheck', 'smGrid', 'lgGrid',
'dotGrid', 'smConfetti', 'lgConfetti', 'horzBrick', 'diagBrick',
'solidDmnd', 'openDmnd', 'dotDmnd', 'plaid', 'sphere', 'weave', 'divot',
'shingle', 'wave', 'trellis', 'zigZag']))
preset = Alias("prst")
fgClr = Typed(expected_type=ColorChoice, allow_none=True)
foreground = Alias("fgClr")
bgClr = Typed(expected_type=ColorChoice, allow_none=True)
background = Alias("bgClr")
__elements__ = ("fgClr", "bgClr")
def __init__(self,
prst=None,
fgClr=None,
bgClr=None,
):
self.prst = prst
self.fgClr = fgClr
self.bgClr = bgClr
class RelativeRect(Serialisable):
tagname = "rect"
namespace = DRAWING_NS
l = MinMax(min=0, max=100, allow_none=True)
left = Alias('l')
t = MinMax(min=0, max=100, allow_none=True)
top = Alias('t')
r = MinMax(min=0, max=100, allow_none=True)
right = Alias('r')
b = MinMax(min=0, max=100, allow_none=True)
bottom = Alias('b')
def __init__(self,
l=None,
t=None,
r=None,
b=None,
):
self.l = l
self.t = t
self.r = r
self.b = b
class StretchInfoProperties(Serialisable):
tagname = "stretch"
namespace = DRAWING_NS
fillRect = Typed(expected_type=RelativeRect, allow_none=True)
def __init__(self,
fillRect=RelativeRect(),
):
self.fillRect = fillRect
class GradientStop(Serialisable):
tagname = "gradStop"
pos = MinMax(min=0, max=100, allow_none=True)
# Color Choice Group
def __init__(self,
pos=None,
):
self.pos = pos
class GradientStopList(Serialisable):
tagname = "gradStopLst"
gs = Sequence(expected_type=GradientStop)
def __init__(self,
gs=None,
):
if gs is None:
gs = [GradientStop(), GradientStop()]
self.gs = gs
class LinearShadeProperties(Serialisable):
ang = Integer()
scaled = Bool(allow_none=True)
def __init__(self,
ang=None,
scaled=None,
):
self.ang = ang
self.scaled = scaled
class PathShadeProperties(Serialisable):
path = Set(values=(['shape', 'circle', 'rect']))
fillToRect = Typed(expected_type=RelativeRect, allow_none=True)
def __init__(self,
path=None,
fillToRect=None,
):
self.path = path
self.fillToRect = fillToRect
class GradientFillProperties(Serialisable):
tagname = "gradFill"
flip = NoneSet(values=(['x', 'y', 'xy']))
rotWithShape = Bool(allow_none=True)
gsLst = Typed(expected_type=GradientStopList, allow_none=True)
stop_list = Alias("gsLst")
lin = Typed(expected_type=LinearShadeProperties, allow_none=True)
linear = Alias("lin")
path = Typed(expected_type=PathShadeProperties, allow_none=True)
tileRect = Typed(expected_type=RelativeRect, allow_none=True)
__elements__ = ('gsLst', 'lin', 'path', 'tileRect')
def __init__(self,
flip=None,
rotWithShape=None,
gsLst=None,
lin=None,
path=None,
tileRect=None,
):
self.flip = flip
self.rotWithShape = rotWithShape
self.gsLst = gsLst
self.lin = lin
self.path = path
self.tileRect = tileRect
class Blip(Serialisable):
tagname = "blip"
namespace = DRAWING_NS
#Using attribute groupAG_Blob
cstate = NoneSet(values=(['email', 'screen', 'print', 'hqprint']))
embed = Relation() #rId
link = Relation() #hyperlink
noGrp = Bool(allow_none=True)
noSelect = Bool(allow_none=True)
noRot = Bool(allow_none=True)
noChangeAspect = Bool(allow_none=True)
noMove = Bool(allow_none=True)
noResize = Bool(allow_none=True)
noEditPoints = Bool(allow_none=True)
noAdjustHandles = Bool(allow_none=True)
noChangeArrowheads = Bool(allow_none=True)
noChangeShapeType = Bool(allow_none=True)
# some elements are choice
extLst = Typed(expected_type=OfficeArtExtensionList, allow_none=True)
alphaBiLevel = Typed(expected_type=AlphaBiLevelEffect, allow_none=True)
alphaCeiling = Typed(expected_type=AlphaCeilingEffect, allow_none=True)
alphaFloor = Typed(expected_type=AlphaFloorEffect, allow_none=True)
alphaInv = Typed(expected_type=AlphaInverseEffect, allow_none=True)
alphaMod = Typed(expected_type=AlphaModulateEffect, allow_none=True)
alphaModFix = Typed(expected_type=AlphaModulateFixedEffect, allow_none=True)
alphaRepl = Typed(expected_type=AlphaReplaceEffect, allow_none=True)
biLevel = Typed(expected_type=BiLevelEffect, allow_none=True)
blur = Typed(expected_type=BlurEffect, allow_none=True)
clrChange = Typed(expected_type=ColorChangeEffect, allow_none=True)
clrRepl = Typed(expected_type=ColorReplaceEffect, allow_none=True)
duotone = Typed(expected_type=DuotoneEffect, allow_none=True)
fillOverlay = Typed(expected_type=FillOverlayEffect, allow_none=True)
grayscl = Typed(expected_type=GrayscaleEffect, allow_none=True)
hsl = Typed(expected_type=HSLEffect, allow_none=True)
lum = Typed(expected_type=LuminanceEffect, allow_none=True)
tint = Typed(expected_type=TintEffect, allow_none=True)
__elements__ = ('alphaBiLevel', 'alphaCeiling', 'alphaFloor', 'alphaInv',
'alphaMod', 'alphaModFix', 'alphaRepl', 'biLevel', 'blur', 'clrChange',
'clrRepl', 'duotone', 'fillOverlay', 'grayscl', 'hsl', 'lum', 'tint')
def __init__(self,
cstate=None,
embed=None,
link=None,
noGrp=None,
noSelect=None,
noRot=None,
noChangeAspect=None,
noMove=None,
noResize=None,
noEditPoints=None,
noAdjustHandles=None,
noChangeArrowheads=None,
noChangeShapeType=None,
extLst=None,
alphaBiLevel=None,
alphaCeiling=None,
alphaFloor=None,
alphaInv=None,
alphaMod=None,
alphaModFix=None,
alphaRepl=None,
biLevel=None,
blur=None,
clrChange=None,
clrRepl=None,
duotone=None,
fillOverlay=None,
grayscl=None,
hsl=None,
lum=None,
tint=None,
):
self.cstate = cstate
self.embed = embed
self.link = link
self.noGrp = noGrp
self.noSelect = noSelect
self.noRot = noRot
self.noChangeAspect = noChangeAspect
self.noMove = noMove
self.noResize = noResize
self.noEditPoints = noEditPoints
self.noAdjustHandles = noAdjustHandles
self.noChangeArrowheads = noChangeArrowheads
self.noChangeShapeType = noChangeShapeType
self.extLst = extLst
self.alphaBiLevel = alphaBiLevel
self.alphaCeiling = alphaCeiling
self.alphaFloor = alphaFloor
self.alphaInv = alphaInv
self.alphaMod = alphaMod
self.alphaModFix = alphaModFix
self.alphaRepl = alphaRepl
self.biLevel = biLevel
self.blur = blur
self.clrChange = clrChange
self.clrRepl = clrRepl
self.duotone = duotone
self.fillOverlay = fillOverlay
self.grayscl = grayscl
self.hsl = hsl
self.lum = lum
self.tint = tint
class TileInfoProperties(Serialisable):
tx = Integer(allow_none=True)
ty = Integer(allow_none=True)
sx = Integer(allow_none=True)
sy = Integer(allow_none=True)
flip = NoneSet(values=(['x', 'y', 'xy']))
algn = Set(values=(['tl', 't', 'tr', 'l', 'ctr', 'r', 'bl', 'b', 'br']))
def __init__(self,
tx=None,
ty=None,
sx=None,
sy=None,
flip=None,
algn=None,
):
self.tx = tx
self.ty = ty
self.sx = sx
self.sy = sy
self.flip = flip
self.algn = algn
class BlipFillProperties(Serialisable):
tagname = "blipFill"
dpi = Integer(allow_none=True)
rotWithShape = Bool(allow_none=True)
blip = Typed(expected_type=Blip, allow_none=True)
srcRect = Typed(expected_type=RelativeRect, allow_none=True)
tile = Typed(expected_type=TileInfoProperties, allow_none=True)
stretch = Typed(expected_type=StretchInfoProperties, allow_none=True)
__elements__ = ("blip", "srcRect", "tile", "stretch")
def __init__(self,
dpi=None,
rotWithShape=None,
blip=None,
tile=None,
stretch=StretchInfoProperties(),
srcRect=None,
):
self.dpi = dpi
self.rotWithShape = rotWithShape
self.blip = blip
self.tile = tile
self.stretch = stretch
self.srcRect = srcRect
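# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module): constructing a
# pattern fill from the classes defined above; the preset name is one of the
# values accepted by PatternFillProperties.prst.
if __name__ == "__main__":  # pragma: no cover
    fill = PatternFillProperties(prst="pct50")
    # "preset" is an Alias for the underlying "prst" descriptor
    assert fill.preset == "pct50"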
| 30.733524
| 97
| 0.58829
|
f50f274b3acfb9c9a4ac34ad1d3d2ed913ad3196
| 2,135
|
py
|
Python
|
Scripts/Python/iris pero en python.py
|
ceceher/Analisis-multivariado
|
08f88ce71201ac4e290bfdcac89287e021662cb4
|
[
"MIT"
] | null | null | null |
Scripts/Python/iris pero en python.py
|
ceceher/Analisis-multivariado
|
08f88ce71201ac4e290bfdcac89287e021662cb4
|
[
"MIT"
] | null | null | null |
Scripts/Python/iris pero en python.py
|
ceceher/Analisis-multivariado
|
08f88ce71201ac4e290bfdcac89287e021662cb4
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from numpy.core.fromnumeric import mean
from sklearn.cluster import MeanShift
from sklearn.decomposition import PCA
from sklearn.cluster import MiniBatchKMeans
from sklearn import datasets
iris = datasets.load_iris()
X = iris.data[:, :2]
y = iris.target
# Let's make a plot
xmin, xmax = X[:, 0].min() - 0.5, X[:, 0].max() + 0.5
ymin, ymax = X[:, 1].min() - 0.5, X[:, 1].max() + 0.5
plt.figure(2, figsize = (8,9))
plt.clf()
plt.scatter(X[:, 0], X[:, 1], c = y, cmap = plt.cm.Set1,
edgecolors= 'k')
#plt.show()
if __name__ == "__main__":
iris = datasets.load_iris()
dataset = iris
#############################################
    # MeanShift on Iris
#############################################
meanshift = MeanShift().fit(X)
print(meanshift.labels_)
print(len(meanshift.labels_))
print("="*64)
print(meanshift.cluster_centers_)
dataset['meanshift'] = meanshift.labels_
print("="*64)
print(dataset)
print("="*64 + "\n Componentes principales \n" + "="*64)
pca = PCA(n_components = 2)
pca.fit(X)
pca_data = pca.transform(X)
print(pca_data)
print("="*64 + "\n MeanShift de PCA \n" + "="*64)
meanshift = MeanShift().fit(pca_data)
plt.scatter(pca_data[:, 0],
pca_data[:, 1],
c = meanshift.predict(pca_data))
plt.scatter(meanshift.cluster_centers_[:, 0],
meanshift.cluster_centers_[:, 1],
c = [ 'purple','yellow'],
s = 200)
plt.show()
    # I don't know why the plot shows 5 different colors but only marks 2 cluster centers (MeanShift chooses the number of clusters itself, so the point colors need not match the two hard-coded center colors)
#############################################
    # K-means on Iris
#############################################
print("="*64 + "\n K-means en Iris \n" + "="*64)
kmeans = MiniBatchKMeans(n_clusters=4, batch_size=8).fit(X)
print("Total de centros: ", len(kmeans.cluster_centers_))
print("="*64)
print(kmeans.predict(X))
dataset['group'] = kmeans.predict(X)
print(dataset)
| 26.036585
| 107
| 0.551756
|
bdb5a26660d8c923f26e60d108a3325e1b66d7b8
| 2,145
|
py
|
Python
|
torch/utils/data/datapipes/utils/common.py
|
WBobby/pytorch
|
655960460ccca936fa5c06df6bbafd25b5582115
|
[
"Intel"
] | 1
|
2019-11-20T08:10:31.000Z
|
2019-11-20T08:10:31.000Z
|
torch/utils/data/datapipes/utils/common.py
|
WBobby/pytorch
|
655960460ccca936fa5c06df6bbafd25b5582115
|
[
"Intel"
] | null | null | null |
torch/utils/data/datapipes/utils/common.py
|
WBobby/pytorch
|
655960460ccca936fa5c06df6bbafd25b5582115
|
[
"Intel"
] | 1
|
2022-01-19T10:55:49.000Z
|
2022-01-19T10:55:49.000Z
|
import os
import fnmatch
import warnings
from io import BufferedIOBase
from typing import Iterable, List, Union
def match_masks(name : str, masks : Union[str, List[str]]) -> bool:
# empty mask matches any input name
if not masks:
return True
if isinstance(masks, str):
return fnmatch.fnmatch(name, masks)
for mask in masks:
if fnmatch.fnmatch(name, mask):
return True
return False
def get_file_pathnames_from_root(
root: str,
masks: Union[str, List[str]],
recursive: bool = False,
abspath: bool = False) -> Iterable[str]:
    # emit a warning for the failing path and re-raise the error
def onerror(err : OSError):
warnings.warn(err.filename + " : " + err.strerror)
raise err
for path, dirs, files in os.walk(root, onerror=onerror):
if abspath:
path = os.path.abspath(path)
for f in files:
if match_masks(f, masks):
yield os.path.join(path, f)
if not recursive:
break
def get_file_binaries_from_pathnames(pathnames: Iterable, mode: str):
if not isinstance(pathnames, Iterable):
pathnames = [pathnames, ]
if mode in ('b', 't'):
mode = 'r' + mode
for pathname in pathnames:
if not isinstance(pathname, str):
raise TypeError("Expected string type for pathname, but got {}"
.format(type(pathname)))
yield (pathname, open(pathname, mode))
def validate_pathname_binary_tuple(data):
if not isinstance(data, tuple):
raise TypeError("pathname binary data should be tuple type, but got {}".format(type(data)))
if len(data) != 2:
raise TypeError("pathname binary tuple length should be 2, but got {}".format(str(len(data))))
if not isinstance(data[0], str):
raise TypeError("pathname binary tuple should have string type pathname, but got {}".format(type(data[0])))
if not isinstance(data[1], BufferedIOBase):
raise TypeError("pathname binary tuple should have BufferedIOBase based binary type, but got {}".format(type(data[1])))
| 32.5
| 127
| 0.632168
|
c38e8f9b6d3d2bf71a21acd494318fe58917604c
| 691
|
py
|
Python
|
sdk/python/pulumi_azure_native/netapp/v20201201/__init__.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/netapp/v20201201/__init__.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/netapp/v20201201/__init__.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from ... import _utilities
import typing
# Export this package's modules as members:
from ._enums import *
from .account import *
from .backup import *
from .backup_policy import *
from .get_account import *
from .get_backup import *
from .get_backup_policy import *
from .get_pool import *
from .get_snapshot import *
from .get_snapshot_policy import *
from .get_volume import *
from .pool import *
from .snapshot import *
from .snapshot_policy import *
from .volume import *
from ._inputs import *
from . import outputs
| 27.64
| 80
| 0.748191
|
08b1e95d3aedc5a3f02b6ea508ea1d2ee3174a67
| 6,579
|
py
|
Python
|
kydavra/GeneticAlgorithmSelector.py
|
ScienceKot/kydavra
|
cbed15c1e0337c250fa9cc3350135764467ef071
|
[
"MIT"
] | 33
|
2020-08-17T04:22:41.000Z
|
2022-03-05T18:49:16.000Z
|
kydavra/GeneticAlgorithmSelector.py
|
SigmoidAI/kydavra
|
e17f3855b94045953d0df6d13b9d9e0dffa9a8a3
|
[
"MIT"
] | null | null | null |
kydavra/GeneticAlgorithmSelector.py
|
SigmoidAI/kydavra
|
e17f3855b94045953d0df6d13b9d9e0dffa9a8a3
|
[
"MIT"
] | 9
|
2020-09-22T13:14:28.000Z
|
2022-01-31T06:44:31.000Z
|
'''
Created with love by Sigmoid
@Author - Păpăluță Vasile - vpapaluta06@gmail.com
'''
# Importing all needed libraries
import random
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
class GeneticAlgorithmSelector:
def __init__(self, nb_children : int = 4, nb_generation : int = 200, scoring_metric = accuracy_score, max : bool = True) -> None:
'''
Setting up the model
:param nb_children: integer, default = 4
The number of children created at every generation
:param nb_generation: integer, default = 200
The number of generations created to find the best feature combination
:param scoring_metric: function, default = sklearn.metrics.accuracy_score
The sklearn like scoring metric
        :param max: boolean, default = True
            Whether the value computed by self.scoring_metric should be maximized (True) or minimized (False)
'''
self.nb_children = nb_children
self.nb_generation = nb_generation
self.scoring_metric = scoring_metric
self.max = max
def inverse(self, value : int) -> int :
'''
        This function inverts the passed value
        :param value: integer, 0 or 1
            The value that should be inverted
        :return: integer
            Returns the inverse of value (if value == 0 ==> 1, else 0)
'''
if value == 0:
return 1
else:
return 0
def cross_over(self, population : list) -> list:
'''
        This function applies the crossing-over process on two array-like lists
        :param population: 2-d array like list
            The population with the 2 arrays that will be used to create new individuals in the population
        :return: 2-d array like list
            Returns the population with the parents and their children after crossing-over
'''
# Creating an empty list for the new generation
new_generation = []
        # Adding nb_children children to the new generation
for i in range(self.nb_children//2):
first = random.randrange(0, len(self.X_columns)-1)
second = random.randrange(0, len(self.X_columns)-1)
if first > second:
first, second = second, first
new_generation.append(population[0][0:first] + population[1][first: second] + population[0][second:])
new_generation.append(population[1][0:first] + population[0][first: second] + population[1][second:])
# Adding the new generation to the population
for gene in new_generation:
population.append(gene)
return population
def mutate(self, gene : list) -> list:
'''
This function generates a random mutation on a gene
:param gene: 1-d array like list
The list with zeros and ones that will be mutated
        :return: 1-d array like list
The gene list after mutation
'''
# Choosing a random place to generate a mutation
mutation_locus = random.randrange(0, len(self.X_columns)-1)
# Generating a mutation
gene[mutation_locus] = self.inverse(gene[mutation_locus])
return gene
def gene_to_cols(self, gene : list) -> list:
'''
        This function converts the zeros and ones list to a columns list
        :param gene: 1-d array like list
            The list with zeros and ones that must be transformed into a columns sequence
        :return: 1-d array like list
            The list with columns that will go to the model
'''
cols = []
for i in range(len(gene)):
if gene[i] == 1:
cols.append(self.X_columns[i])
return cols
def select(self, algo, dataframe : 'pd.DataFrame', y_column : str, test_size=0.2):
'''
This function selects the best columns
:param algo: sklearn algorithm class
An sklearn algorithm class
:param dataframe: pandas DataFrame
Data Frame on which the algorithm is applied
:param y_column: string
            The column name of the value that we want to predict
        :param test_size: float, default = 0.2
            The percent of the data set that will be used to find the trained algorithm's accuracy
        :return: list
            The list of columns selected by the algorithm
'''
# Getting the list with names of columns without the target one
self.X_columns = [col for col in dataframe.columns if col != y_column]
# Generating the empty population and a temporal one
population = []
temp = []
for i in range(len(self.X_columns)):
temp.append(random.choice([0, 1]))
population.append(temp)
temp = [self.inverse(x) for x in population[-1]]
population.append(temp)
        # Creating new generations and testing them, searching for the individual with the best metric
for gen in range(self.nb_generation):
# Creating an empty list with the metrics
acc = []
# Generating the first cross-over
population = self.cross_over(population)
# Generating some mutations
for i in range(2, len(population)):
population[i] = self.mutate(population[i])
# Measuring the accuracy of every individual in the population
for gene in population:
if set(gene) == set([0]):
continue
X = dataframe[self.gene_to_cols(gene)].values
y = dataframe[y_column].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=0)
algo.fit(X_train, y_train)
y_pred = algo.predict(X_test)
acc.append(self.scoring_metric(y_test, y_pred))
            # Selecting the 2 best members of the population based on whether the metric should be maximized or minimized
if self.max:
res = sorted(range(len(acc)), key=lambda sub: acc[sub])[-2:]
else:
res = sorted(range(len(acc)), key=lambda sub: acc[sub])[:2]
# Creating a new population with the 2 best members
population = [population[res[0]], population[res[1]]]
return self.gene_to_cols(population[0])
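# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module), assuming pandas and
# scikit-learn are installed; the classifier, column names and generation
# count are illustrative only.
if __name__ == "__main__":
    from sklearn.datasets import load_iris
    from sklearn.ensemble import RandomForestClassifier

    iris = load_iris(as_frame=True)
    df = iris.frame.rename(columns={"target": "species"})
    selector = GeneticAlgorithmSelector(nb_children=4, nb_generation=20)
    selected = selector.select(RandomForestClassifier(n_estimators=50), df, "species")
    print("Selected columns:", selected)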
| 44.755102
| 134
| 0.598875
|
bb0ac780c72b8e6be418e361ae629978f4591809
| 690
|
py
|
Python
|
roles/alpine/library/zpool_exists.py
|
wenerme/ansible-role-alpine
|
a0e8190173cd557f84539c53d26bf073433aa815
|
[
"MIT"
] | 3
|
2019-11-27T02:30:59.000Z
|
2020-11-25T01:33:26.000Z
|
roles/alpine/library/zpool_exists.py
|
wenerme/ansible-role-alpine
|
a0e8190173cd557f84539c53d26bf073433aa815
|
[
"MIT"
] | null | null | null |
roles/alpine/library/zpool_exists.py
|
wenerme/ansible-role-alpine
|
a0e8190173cd557f84539c53d26bf073433aa815
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import re
import subprocess
import tempfile
# import module snippets
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_bytes, to_native
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(type='str'),
),
mutually_exclusive=[['insertbefore', 'insertafter']],
add_file_common_args=True,
supports_check_mode=True,
)
params = module.params
name = params['name']
if name is None:
module.fail_json(msg='name is required')
proc = subprocess.call(["zpool", name, 'status'])
if __name__ == '__main__':
main()
| 23
| 61
| 0.647826
|
d39bf52e1ccaadd2939ef4ff81b9a85364ea05e1
| 3,181
|
py
|
Python
|
sdks/apigw-manager/src/apigw_manager/apigw/utils.py
|
IMBlues/bkpaas-python-sdk
|
a87bee3d26f0ddeac124c7a4679cd3eff4abb8fc
|
[
"MIT"
] | 17
|
2021-08-03T03:15:35.000Z
|
2022-03-18T06:10:04.000Z
|
sdks/apigw-manager/src/apigw_manager/apigw/utils.py
|
IMBlues/bkpaas-python-sdk
|
a87bee3d26f0ddeac124c7a4679cd3eff4abb8fc
|
[
"MIT"
] | 7
|
2021-08-03T07:10:12.000Z
|
2022-03-23T04:47:22.000Z
|
sdks/apigw-manager/src/apigw_manager/apigw/utils.py
|
IMBlues/bkpaas-python-sdk
|
a87bee3d26f0ddeac124c7a4679cd3eff4abb8fc
|
[
"MIT"
] | 9
|
2021-08-03T03:20:36.000Z
|
2022-03-08T13:47:50.000Z
|
# -*- coding: utf-8 -*-
"""
* TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-蓝鲸 PaaS 平台(BlueKing-PaaS) available.
* Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
* Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at http://opensource.org/licenses/MIT
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
"""
import os
import zipfile
import yaml
from django.conf import settings
from apigw_manager.core import configuration
def get_configuration(**kwargs):
"""Generate management configuration according to the settings"""
settings_mappings = [
("BK_APIGW_NAME", "api_name"),
("BK_APP_CODE", "bk_app_code"),
("BK_APP_SECRET", "bk_app_secret"),
]
for attr, key in settings_mappings:
if key not in kwargs and hasattr(settings, attr):
kwargs[key] = getattr(settings, attr)
host = kwargs.pop("host", "")
if not host:
apigw_name = kwargs.get("apigw_name") or getattr(settings, "BK_APIGW_API_NAME", "bk-apigateway")
if hasattr(settings, "BK_API_URL_TMPL"):
host = "%s/prod/" % settings.BK_API_URL_TMPL.format(api_name=apigw_name).rstrip("/")
elif hasattr(settings, "BK_API_STAGE_URL_TMPL"):
host = settings.BK_API_STAGE_URL_TMPL.format(
api_name=apigw_name,
stage_name="prod",
)
# stage has been added to host, here stage is set to an empty string
return configuration.Configuration(
host=host.rstrip("/"),
stage="",
**kwargs,
)
def yaml_load(content):
"""Load YAML"""
return yaml.load(content, Loader=yaml.FullLoader)
def parse_value_list(*values):
"""Parse value list"""
data = {}
for i in values:
key, sep, value = i.partition(":")
if not sep:
data[key] = None
else:
data[key] = yaml_load(value)
return data
class ZipArchiveFile:
@classmethod
def archive(cls, path, output):
"""归档文件
其中的文件名,设置为基于 path 的相对路径
"""
archived_files = cls._get_archived_files(path)
with zipfile.ZipFile(output, "w") as zip_:
for file_path, name in archived_files.items():
zip_.write(file_path, name)
return output
@classmethod
def _get_archived_files(cls, path):
"""获取待归档文件,及去掉基准目录后的文件名"""
if os.path.isfile(path):
return {path: os.path.basename(path)}
path = path if path.endswith("/") else path + "/"
path_to_name = {}
for root, dirs, files in os.walk(path):
for name in files:
file_path = os.path.join(root, name)
path_to_name[file_path] = file_path[len(path) :]
return path_to_name
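# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module): zip a directory (or
# a single file) with ZipArchiveFile; the paths below are illustrative only.
#   ZipArchiveFile.archive("docs/apigateway", "definition.zip")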
| 31.49505
| 118
| 0.638164
|
df9b6a4b7e73cc092740319fecb9da572dc7f3d9
| 788
|
py
|
Python
|
PWCNet/correlation_package_pytorch1_2/setup.py
|
withlqs/DAIN
|
9abab9eabb86bbbd781bc51e2073945248eccc7c
|
[
"MIT"
] | null | null | null |
PWCNet/correlation_package_pytorch1_2/setup.py
|
withlqs/DAIN
|
9abab9eabb86bbbd781bc51e2073945248eccc7c
|
[
"MIT"
] | null | null | null |
PWCNet/correlation_package_pytorch1_2/setup.py
|
withlqs/DAIN
|
9abab9eabb86bbbd781bc51e2073945248eccc7c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import os
import torch
from setuptools import setup, find_packages
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
cxx_args = ['-std=c++14']
nvcc_args = [
'-gencode', 'arch=compute_50,code=sm_50',
'-gencode', 'arch=compute_52,code=sm_52',
'-gencode', 'arch=compute_60,code=sm_60',
'-gencode', 'arch=compute_75,code=sm_75',
# '-gencode', 'arch=compute_70,code=sm_70',
'-gencode', 'arch=compute_86,code=sm_86'
]
setup(
name='correlation_cuda',
ext_modules=[
CUDAExtension('correlation_cuda', [
'correlation_cuda.cc',
'correlation_cuda_kernel.cu'
], extra_compile_args={'cxx': cxx_args, 'nvcc': nvcc_args})
],
cmdclass={
'build_ext': BuildExtension
})
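# Build note (sketch, not part of the original file): the extension is
# typically compiled and installed from this directory with a CUDA toolchain
# matching the arch flags above, e.g.
#   python setup.py install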
| 26.266667
| 67
| 0.656091
|
d286b1f8fbb07bffdd23639bcef992bc445a6819
| 4,471
|
py
|
Python
|
profiles_api/views.py
|
palakjadwani/profiles-rest-api
|
cfad7e944396ed941a509bf65fce26beb15116f3
|
[
"MIT"
] | null | null | null |
profiles_api/views.py
|
palakjadwani/profiles-rest-api
|
cfad7e944396ed941a509bf65fce26beb15116f3
|
[
"MIT"
] | 6
|
2020-05-06T09:17:16.000Z
|
2022-02-10T09:17:14.000Z
|
profiles_api/views.py
|
palakjadwani/profiles-rest-api
|
cfad7e944396ed941a509bf65fce26beb15116f3
|
[
"MIT"
] | null | null | null |
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from rest_framework import viewsets
from rest_framework.authentication import TokenAuthentication
from rest_framework import filters
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.settings import api_settings
from rest_framework.permissions import IsAuthenticated
from profiles_api import serializers
from profiles_api import models
from profiles_api import permissions
class HelloApiView(APIView):
"""Test API view"""
serializer_class = serializers.HelloSerializer
def get(self, request, format=None):
"""Return a list of APIView features"""
an_apiview = [
'Uses HTTP methods as function (get, post, patch, put, delete)',
'Is similar to a traditional Django View',
'Gives you the most control over your application logic',
'Is mapped manually to URLs',
]
return Response({'message' : 'Hello!', 'an_apiview' : an_apiview})
def post(self, request):
"""Create a hello message with our name"""
serializer = self.serializer_class(data=request.data)
if serializer.is_valid():
name = serializer.validated_data.get('name')
message = f'Hello {name}'
return Response({'message':message})
else:
return Response(
serializer.errors,
status = status.HTTP_400_BAD_REQUEST
)
def put(self, request, pk=None):
"""Handle updating an object"""
return Response({'method': 'PUT'})
def patch(self, request, pk=None):
"""Handle a partial update of an object"""
return Response({'method': 'PATCH'})
def delete(self, request, pk=None):
"""Delete an object"""
return Response({'method': 'DELETE'})
class HelloViewSet(viewsets.ViewSet):
"""test API ViewSet"""
serializer_class = serializers.HelloSerializer
def list(self, request):
"""Return a hello message"""
a_viewset = [
            'Uses actions (list, create, retrieve, update, partial_update)',
            'Automatically maps to URLs using Routers',
            'Provides more functionality with less code',
]
return Response({'message':'Hello!', 'a_viewset': a_viewset})
def create(self, request):
"""Create a new hello maessage"""
serializer = self.serializer_class(data=request.data)
if serializer.is_valid():
name = serializer.validated_data.get('name')
message = f'Hello {name}!'
return Response({'message':message})
else:
return Response(
serializer.errors,
status = status.HTTP_400_BAD_REQUEST
)
def retrieve(self, request, pk=None):
"""handle getting an object by it's ID"""
return Response({'http_method':'GET'})
def update(self, request, pk=None):
"""Handle updating an object"""
return Response({'http_method':'PUT'})
def partial_update(self, request, pk=None):
"""Handle updating part of an object"""
return Response({'http_method':'PATCH'})
def destroy(self, request, pk=None):
"""handle removing an object"""
return Response({'http_method':'DELETE'})
class UserProfileViewSet(viewsets.ModelViewSet):
"""Handle creating and updating profiles"""
serializer_class = serializers.UserProfileSerializer
queryset = models.UserProfile.objects.all()
authentication_classes = (TokenAuthentication,)
permission_classes = (permissions.UpdateOwnProfile,)
filter_backends = (filters.SearchFilter,)
search_fields = ('name', 'email',)
class UserLoginApiView(ObtainAuthToken):
"""Handle creating user authentication tokens"""
renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES
class UserProfileFeedViewSet(viewsets.ModelViewSet):
"""Handles creating, reading and updating profile feed items"""
authentication_classes = (TokenAuthentication,)
serializer_class = serializers.ProfileFeedItemSerializer
queryset = models.ProfileFeedItem.objects.all()
permission_classes = (permissions.UpdateOwnStatus, IsAuthenticated)
def perform_create(self, serializer):
"""Sets the user profile to the logged in user"""
serializer.save(user_profile=self.request.user)
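# ---------------------------------------------------------------------------
# Minimal wiring sketch (not part of the original file): how these views and
# viewsets could be registered in a urls.py with a DRF router; the URL
# prefixes and basenames are illustrative only.
#   from django.urls import include, path
#   from rest_framework.routers import DefaultRouter
#
#   router = DefaultRouter()
#   router.register('hello-viewset', HelloViewSet, basename='hello-viewset')
#   router.register('profile', UserProfileViewSet)
#   router.register('feed', UserProfileFeedViewSet)
#
#   urlpatterns = [
#       path('hello-view/', HelloApiView.as_view()),
#       path('login/', UserLoginApiView.as_view()),
#       path('', include(router.urls)),
#   ]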
| 35.204724
| 76
| 0.666518
|
66b99341ec2f922aa165c950dd0301ebd3458d4a
| 84
|
py
|
Python
|
chesstimer/timer.py
|
theseana/pesteh
|
1125dc1055e3b8466c3c539c4afc2149d663dd46
|
[
"MIT"
] | 1
|
2022-01-16T00:33:57.000Z
|
2022-01-16T00:33:57.000Z
|
chesstimer/timer.py
|
theseana/pesteh
|
1125dc1055e3b8466c3c539c4afc2149d663dd46
|
[
"MIT"
] | null | null | null |
chesstimer/timer.py
|
theseana/pesteh
|
1125dc1055e3b8466c3c539c4afc2149d663dd46
|
[
"MIT"
] | null | null | null |
timer = 1052
m = int(timer/60)
s = timer % 60
print('{:02d}:{:02d}'.format(m, s))
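# Worked example (sketch): timer = 1052 -> 1052 // 60 = 17 minutes and
# 1052 % 60 = 32 seconds, so the script prints "17:32".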
| 12
| 35
| 0.559524
|
3cf2e3021819a13c8f91b0612e0f2053fad3d04d
| 16,022
|
py
|
Python
|
借来的plugins/OPQ-SetuBot/module/database.py
|
njjjay/IOTQQPlugins_selfuse
|
23bda39647c14256e6366bf49d72bb71ba68cbd7
|
[
"MIT"
] | 19
|
2020-06-16T03:36:24.000Z
|
2022-01-30T07:46:29.000Z
|
借来的plugins/OPQ-SetuBot/module/database.py
|
njjjay/IOTQQPlugins_selfuse
|
23bda39647c14256e6366bf49d72bb71ba68cbd7
|
[
"MIT"
] | 1
|
2020-08-01T18:20:10.000Z
|
2020-08-03T10:42:04.000Z
|
借来的plugins/OPQ-SetuBot/module/database.py
|
njjjay/IOTQQPlugins_selfuse
|
23bda39647c14256e6366bf49d72bb71ba68cbd7
|
[
"MIT"
] | 5
|
2020-08-12T02:02:20.000Z
|
2021-06-09T08:38:33.000Z
|
from tinydb import TinyDB, Query, where
from tinydb.storages import MemoryStorage
from tinydb.operations import add
from loguru import logger
from module.send import action
from module import config
import os
import time
import re
import random
from retrying import retry
groupConfig = TinyDB('./config/groupConfig.json')
friendConfig = TinyDB('./config/friendConfig.json')
setuTagConfig = TinyDB('./config/setuTagConfig.json')
lotteryData = TinyDB('./config/lotteryData.json')
setuDB = TinyDB('./config/setu.json')
tmpDB = TinyDB(storage=MemoryStorage)
# Q = Query()
# todo: create a base class for database operations, and have the small classes in every module inherit from it
#
# def matches_regex(values, pattern):
# # return any(re.match(pattern, value) for value in values)
# for v in values:
# print(v)
class BasicOperation:
@staticmethod
def change_dict(dicta, lista, change, ret=''):
x = dicta[lista[0]]
ret += (str(lista[0]) + ' ')
if len(lista) == 1:
rt_befeore = dicta.copy()
dicta[lista[0]] = change
return '{}: {}\n↓↓↓↓\n{}: {}'.format(ret, rt_befeore[lista[0]], ret, dicta[lista[0]])
lista.pop(0)
return BasicOperation.change_dict(x, lista, change, ret)
@staticmethod
    def auth(qqg: int, qq: int):  # superadmin: 1, group owner: 2, admin: 3
if qq == config.superadmin:
return 1
elif res := groupConfig.get(where('GroupId') == qqg):
if qq == res['GroupOwner']:
return 2
            elif qq in res['admins'] or qq in res['managers']:  # admin or manager
return 3
else:
return 0
else:
return 0
@staticmethod
def updateGroupData(groupid: int, data: dict):
groupConfig.update(data, where('GroupId') == groupid)
@staticmethod
def getGroupConf(groupid: int):
return groupConfig.get(where('GroupId') == groupid)
@staticmethod
def getUserconf(userid: int):
if conf := friendConfig.get(where('QQ') == userid):
return conf
else:
return {
'setuinfo': {
'title': True,
'pid': False,
'purl': True,
'page': True,
'author': True,
'uurl': True,
'uid': False,
'url_original': True,
# '': True,
# '': True,
# '': True
},
'original': False,
'setuLevel': 1,
'refreshSent': 600,
'at': False,
'at_warning': False, # @
'returnTags': True,
                'msg_inputError': '必须是正整数数字哦~',  # not an int
                'msg_notFind': '你的xp好奇怪啊',  # no results
                'msg_tooMuch': '爪巴',  # more than the maximum
                'msg_lessThan0': '¿¿¿',  # less than 0
'msg_setuClosed': 'setu已关闭~',
'msg_r18Closed': '未开启r18~',
'msg_insufficient': '关于{tag}的图片只获取到{num}张'
}
class LocalSetu:
@staticmethod
def conversionLevel(level_int):
conversionDict = {0: 'normal',
1: 'sexy',
2: 'porn',
3: 'all'}
return conversionDict[level_int]
@classmethod
    def addSetu(cls, data: dict, level: int, groupid: int):  # single insert; batching is driven by the caller, per group
typE = cls.conversionLevel(level)
data['time'] = int(time.time())
        if res := setuDB.get((where('artwork') == data['artwork']) & (where('page') == data['page'])):  # the record already exists
data['type'] = res['type']
            for k, v in data['type'].items():  # iterate over 'type': {'normal': [], 'sexy': [], 'porn': []}
                if k != typE:  # the group id appears under a level other than the one being set
if groupid in v:
data['type'][k].remove(groupid)
else:
data['type'][k].append(groupid)
                data['type'][k] = list(set(data['type'][k]))  # dedupe, just in case
setuDB.update(data, (where('artwork') == data['artwork']) & (where('page') == data['page']))
logger.info(
'pid:{} page:{} group:{}-->{}'.format(data['artwork'], data['page'], res['type'], data['type']))
else:
data['type'][typE].append(groupid)
setuDB.insert(data)
logger.info('pid:{} page:{} group:{}'.format(data['artwork'], data['page'], data['type']))
        # return 'group {}: {} added successfully, image level {}'.format(groupid, data['original'], typE)
@staticmethod
def delSetu(artworkid, groupid, page: int = None):
if page == None:
if res := setuDB.search((where('artwork') == artworkid) &
(
(where('type')['normal'].any([groupid])) |
(where('type')['sexy'].any([groupid])) |
(where('type')['porn'].any([groupid]))
            )):  # the record exists
for data in res:
for k, v in data['type'].items():
if groupid in v:
data['type'][k].remove(groupid)
setuDB.update(data, (where('artwork') == artworkid) & (where('page') == data['page']))
return True
else:
return False
else:
if res := setuDB.get((where('artwork') == artworkid) &
(where('page') == page) &
(
(where('type')['normal'].any([groupid])) |
(where('type')['sexy'].any([groupid])) |
(where('type')['porn'].any([groupid]))
            )):  # the record exists
for k, v in res['type'].items():
if groupid in v:
res['type'][k].remove(groupid)
setuDB.update(res, (where('artwork') == artworkid) & (where('page') == page))
return True
else:
return False
@staticmethod
def updateSetu(artworkid, page, data):
setuDB.update(data, (where('artwork') == artworkid & where('page') == page))
@classmethod
def _serchtags(cls, taglist: list, expr=None):
# print(taglist)
if not taglist:
return expr
if expr:
expr = expr & (where('tags').any((where('name').matches(taglist[0], re.I | re.M))))
else:
expr = where('tags').any((where('name').matches(taglist[0], re.I | re.M)))
taglist.pop(0)
return cls._serchtags(taglist, expr)
@classmethod
    def getSetu(cls, groupid: int, level: int, num: int, tags: list):  # keyed by lv: {lv: [{'sexy': [0, 123]}, ]}
        tags = tags.copy()  # copy: the recursion below would otherwise also remove items from the caller's list
        level = cls.conversionLevel(level)  # convert the numeric level to its name
for i in range(len(tags)):
tags[i] = '.*{}'.format(tags[i])
        if level != 'all':  # a specific setu level was requested
allTagList = ['normal', 'sexy', 'porn']
allTagList.remove(level)
if tags:
data = setuDB.search(
(~where('type')[allTagList[0]].any([groupid])) &
(~where('type')[allTagList[1]].any([groupid])) &
(where('type')[level].any([0, groupid])) &
cls._serchtags(tags)
)
else:
data = setuDB.search(
(~where('type')[allTagList[0]].any([groupid])) &
(~where('type')[allTagList[1]].any([groupid])) &
(where('type')[level].any([0, groupid]))
)
        else:  # search across all levels
if tags:
data = setuDB.search(
(
(where('type')['normal'].any([0, groupid])) |
(where('type')['sexy'].any([0, groupid])) |
(where('type')['porn'].any([0, groupid]))
)
& (cls._serchtags(tags))
)
else:
data = setuDB.search(
(
(where('type')['normal'].any([0, groupid])) |
(where('type')['sexy'].any([0, groupid])) |
(where('type')['porn'].any([0, groupid]))
)
)
if len(data) <= num:
return data
return random.sample(data, num)
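# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module): fetch at most two
# level-1 ("sexy") images cached for a group; the group id and tag are
# illustrative only.
#   results = LocalSetu.getSetu(groupid=123456, level=1, num=2, tags=['scenery'])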
class Cmd(BasicOperation):
pass
#
# @staticmethod
# def getGroupData(qqg):
# return groupConfig.get(Q['GroupId'] == qqg)
class Setu(BasicOperation):
@staticmethod
def ifSent(ID, url, refreshTime):
filename = os.path.basename(url)
        if data := tmpDB.table('sentlist').search((where('id') == ID) & (where('filename') == filename)):  # there is data
            if time.time() - data[0]['time'] <= refreshTime:  # already sent within the refresh window
                logger.info('id:{}, {} was already sent'.format(ID, filename))
return True
else:
tmpDB.table('sentlist').update({'time': time.time()},
(where('id') == ID) & (where('filename') == filename))
return False
        else:  # no record yet
tmpDB.table('sentlist').insert({'id': ID, 'time': time.time(), 'filename': filename})
return False
@staticmethod
def freq(groupid, num, refreshTime, freqCount):
        if data_tmp := tmpDB.table('freq').get(where('group') == groupid):  # a record exists
            if refreshTime != 0 and (time.time() - data_tmp['time'] >= refreshTime):  # refresh the window
tmpDB.table('freq').update({'time': time.time(), 'freq': 0}, where('group') == groupid)
return False
            elif freqCount != 0 and num + data_tmp['freq'] > freqCount:  # over the limit and the limit is non-zero
logger.info('群:{}大于频率限制:{}次/{}s'.format(groupid, freqCount, refreshTime))
return freqCount, data_tmp['time']
            # record the call
tmpDB.table('freq').update(add('freq', num), where('group') == groupid)
        else:  # no record yet
logger.info('群:{}第一次调用'.format(groupid))
tmpDB.table('freq').insert({'group': groupid, 'time': time.time(), 'freq': num})
return False
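    # Worked example of the limiter above, assuming refreshTime=60 and freqCount=10:
    # a group may request up to 10 images per rolling 60 s window. freq() records the
    # calls and returns a falsy value while the group is under the limit, and returns
    # (freqCount, window_start_time) once the limit would be exceeded; refreshTime=0
    # disables window resets and freqCount=0 disables the cap entirely.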
@staticmethod
def getGroupConf(groupid: int, msgType: str):
# data = {}
if res := groupConfig.get(where('GroupId') == groupid):
for k, v in res.items():
# print(k, v)
if type(v) == dict:
try:
res[k] = v[msgType]
except:
pass
# data[str(k)] = v
# print(res)
return res
class Lottery:
@staticmethod
def getUserInfo(qq: int, conf: dict, pool: str):
if res := lotteryData.table(pool).get(where('qq') == qq):
return res
else:
data = {
'qq': qq,
                'allCount': 0,  # total number of draws
                'farFiveStarFloors': conf[pool]['fiveStarFloorsCount'],  # draws left until the 5-star pity
                'farFourStarFloors': conf[pool]['fourStarFloorsCount'],  # draws left until the 4-star pity
                'FiveStarFloorsCount': 0,  # count of 5-star pity pulls
                'FourStarFloorsCount': 0,  # count of 4-star pity pulls
                'FiveStarCount': 0,  # number of 5-star results obtained
                'FourStarCount': 0,  # number of 4-star results obtained
                'certainlyFiveStarUp': False,  # next 5-star is guaranteed to be the rate-up
                'certainlyFourStarUp': False,  # next 4-star is guaranteed to be the rate-up
}
lotteryData.table(pool).insert(data)
return data
@staticmethod
def updateUserinfo(qq, pool, data):
lotteryData.table(pool).update(data, where('qq') == qq)
class Event:
@staticmethod
def changeGroupAdmin(group: int, admins: list, flag: bool):
pass
@staticmethod
def changeGroupManager(group: int, managers: list, flag: bool):
pass
@staticmethod
def updateAdminAndManager(groupid: int, admins: list, managers: list):
groupConfig.update({'admins': admins, 'managers': managers}, where('GroupId') == groupid)
# @staticmethod
# def
class Getdata:
@staticmethod
def defaultdata(data):
        data['managers'] = []  # all managers (users allowed to configure the bot)
        # -----------------------------------------------------
        data['setuLevel'] = {'group': 1, 'temp': 3}  # default level  0: normal  1: sexy  2: porn  3: all
data['setuinfo'] = {
'title': True,
'pid': False,
'purl': True,
'page': True,
'author': True,
'uurl': True,
'uid': False,
'url_original': True,
# '': True
}
        data['returnTags'] = True  # show tags
        data['original'] = {'group': False, 'temp': False}  # whether to send the original image
        data['setu'] = {'group': True, 'temp': True}  # setu feature switch
        data['r18'] = {'group': False, 'temp': True}  # whether r18 is enabled
        data['freq'] = 10  # rate limit (calls)
        data['refreshTime'] = 60  # refresh interval (s)
        data['refreshSent'] = 900  # refresh interval for the sent list (s)
        data['maxnum'] = {'group': 3, 'temp': 10}  # max images per request
        data['msgCount'] = {'text': 0, 'pic': 0, 'voice': 0}  # message counters
        data['revoke'] = {'group': 20, 'temp': 0}  # message recall delay (0 = do not recall)
data['at'] = False # @
data['at_warning'] = False # @
        data['msg_inputError'] = '必须是正整数数字哦~'  # not a positive integer
        data['msg_notFind'] = '你的xp好奇怪啊'  # no results
        data['msg_tooMuch'] = '爪巴'  # exceeds the maximum
        data['msg_lessThan0'] = '¿¿¿'  # less than 0
data['msg_setuClosed'] = 'setu已关闭~'
data['msg_r18Closed'] = '未开启r18~'
data['msg_insufficient'] = '关于{tag}的图片只获取到{num}张'
data['msg_frequency'] = '本群每{time}s能调用{num}次,已经调用{num_call}次,离刷新还有{r_time}s'
# data['msg_'] = ''
# return data
@classmethod
def updateData(cls, data, groupid):
if groupConfig.search(where('GroupId') == groupid):
logger.info('群:{}已存在,更新数据~'.format(groupid))
groupConfig.update(data, where('GroupId') == groupid)
else:
cls.defaultdata(data)
logger.info('群:{}不存在,插入数据~'.format(groupid))
groupConfig.insert(data)
@retry(stop_max_attempt_number=3, wait_random_max=2000)
def updateAllGroupData(self):
logger.info('开始更新所有群数据~')
data = action.getGroupList()
allgroups_get = [x['GroupId'] for x in data]
for group in data:
            del group['GroupNotice']  # drop the key we do not need
admins = action.getGroupAdminList(group['GroupId'])
admins_QQid = [i['MemberUin'] for i in admins]
            group['admins'] = admins_QQid  # admin list
self.updateData(group, group['GroupId'])
allgroups_db = [i['GroupId'] for i in groupConfig.all()]
        if extraGroup := list(set(allgroups_db).difference(set(allgroups_get))):  # groups that remain only in the database
logger.info('数据库中多余群:{}'.format(extraGroup))
for groupid_del in extraGroup:
groupConfig.remove(where('GroupId') == groupid_del)
logger.info('已删除群:{}数据'.format(groupid_del))
logger.success('更新群信息成功~')
return
@classmethod
@retry(stop_max_attempt_number=3, wait_random_max=2000)
def updateGroupData(cls, groupid: int):
logger.info('开始刷新群:{}的数据'.format(groupid))
data = action.getGroupList()
for group in data:
if group['GroupId'] == groupid:
                del group['GroupNotice']  # drop the key we do not need
admins = action.getGroupAdminList(groupid)
admins_QQid = [i['MemberUin'] for i in admins]
group['admins'] = admins_QQid
logger.info('群:{}的admins:{}'.format(groupid, admins_QQid))
cls.updateData(group, group['GroupId'])
return
logger.warning('群:{}不存在~'.format(groupid))
| 38.700483
| 114
| 0.48708
|
7950078596d2c065a217c94205bfc57e7ee8e2e1
| 10,310
|
py
|
Python
|
tfs/collection.py
|
st-walker/tfs
|
7a229f4fecbf04d544c5116d79a281e4365ccd1d
|
[
"MIT"
] | 5
|
2019-02-18T14:38:59.000Z
|
2021-12-14T15:33:50.000Z
|
tfs/collection.py
|
st-walker/tfs
|
7a229f4fecbf04d544c5116d79a281e4365ccd1d
|
[
"MIT"
] | 54
|
2019-02-19T14:44:36.000Z
|
2022-02-16T15:07:53.000Z
|
tfs/collection.py
|
st-walker/tfs
|
7a229f4fecbf04d544c5116d79a281e4365ccd1d
|
[
"MIT"
] | 4
|
2019-10-17T08:58:57.000Z
|
2022-02-15T15:55:18.000Z
|
"""
Collection
----------------------
Advanced **TFS** files reading and writing functionality.
"""
import pathlib
from pandas import DataFrame
from tfs.frame import TfsDataFrame
from tfs.reader import read_tfs
from tfs.writer import write_tfs
class _MetaTfsCollection(type):
"""
Metaclass for TfsCollection. It takes the class attributes declared as
`Tfs(...)` and replaces it for a property getter and setter. Check
TfsCollection docs.
"""
def __new__(mcs, cls_name, bases, dct: dict):
new_dict = dict(dct)
new_dict["_two_plane_names"] = []
# for name in dct:
for key, value in dct.items():
try:
args = value.args
kwargs = value.kwargs
except AttributeError:
continue
new_props = _define_property(args, kwargs)
try:
prop_x, prop_y = new_props
new_dict.pop(key)
new_dict["_two_plane_names"].append(key)
new_dict[key + "_x"] = prop_x
new_dict[key + "_y"] = prop_y
except TypeError:
new_dict[key] = new_props
return super().__new__(mcs, cls_name, bases, new_dict)
class TfsCollection(metaclass=_MetaTfsCollection):
"""
Abstract class to lazily load and write **TFS** files.
Classes inheriting from this abstract class will be able to define **TFS** files
as readable or writable, and read or write them just as attribute access or
assignments. All attributes will be read and written as ``TfsDataFrame`` objects.
Example:
If **./example** is a directory that contains two **TFS** files **beta_phase_x.tfs**
and **beta_phase_y.tfs** with `BETX` and `BETY` columns respectively:
.. sourcecode:: python
        class ExampleCollection(TfsCollection):
# All TFS attributes must be marked with the Tfs(...) class, and generated attribute
# names will be appended with _x / _y depending on files found in "./example"
beta = Tfs("beta_phase_{}.tfs") # A TFS attribute
other_value = 7 # A traditional attribute.
            def get_filename(self, template: str, plane: str) -> str:
return template.format(plane)
example = ExampleCollection("./example")
# Get the BETX / BETY column from "beta_phase_x.tfs":
beta_x_column = example.beta_x.BETX # / example.beta_x.BETY
# Get the BETY column from "beta_phase_y.tfs":
beta_y_column = example.beta_y.BETY
# The planes can also be accessed as items (both examples below work):
beta_y_column = example.beta["y"].BETY
beta_y_column = example.beta["Y"].BETY
# This will write an empty DataFrame to "beta_phase_y.tfs":
example.allow_write = True
example.beta["y"] = DataFrame()
If the file to be loaded is not defined for two planes then the attribute can be declared as:
``coupling = Tfs("getcouple.tfs", two_planes=False)`` and then accessed as
``f1001w_column = example.coupling.F1001W``.
No file will be loaded until the corresponding attribute is accessed and the loaded
``TfsDataFrame`` will be buffered, thus the user should expect an ``IOError`` if the requested
file is not in the provided directory (only the first time, but is better to always take it
into account!).
When a ``TfsDataFrame`` is assigned to one attribute, it will be set as the buffer value. If the
``self.allow_write`` attribute is set to ``True``, an assignment on one of the attributes will
trigger the corresponding file write.
"""
def __init__(self, directory: pathlib.Path, allow_write: bool = None):
self.directory = pathlib.Path(directory) if isinstance(directory, str) else directory
self.allow_write = False if allow_write is None else allow_write
self.maybe_call = _MaybeCall(self)
self._buffer = {}
def get_filename(self, *args, **kwargs):
"""
Return the filename to be loaded or written.
This function will get as parameters any parameter given to the Tfs(...) attributes. It must
return the filename to be written according to those parameters. If ``two_planes=False`` is
not present in the Tfs(...) definition, it will also be given the keyword argument
``plane="x"`` or ``plane="y"``.
"""
raise NotImplementedError("This is an abstract method, it should be implemented in subclasses.")
def write_to(self, *args, **kwargs):
"""
Returns the filename and `TfsDataFrame` to be written on assignments.
If this function is overwritten, it will replace ``get_filename(...)`` in file writes to
find out the filename of the file to be written. It also gets the value assigned as first
parameter. It must return a tuple (filename, tfs_data_frame).
"""
raise NotImplementedError("This is an abstract method, it should be implemented in subclasses.")
def clear(self):
"""
Clear the file buffer.
Any subsequent attribute access will try to load the corresponding file again.
"""
self._buffer = {}
def read_tfs(self, filename: str) -> TfsDataFrame:
"""
Reads the **TFS** file from ``self.directory`` with the given filename.
This function can be overwritten to use something instead of ``tfs-pandas`` to load the
files.
Arguments:
filename (str): The name of the file to load.
Returns:
A ``TfsDataFrame`` built from reading the requested file.
"""
tfs_data_df = read_tfs(self.directory / filename)
if "NAME" in tfs_data_df:
tfs_data_df = tfs_data_df.set_index("NAME", drop=False)
return tfs_data_df
def __getattr__(self, attr: str) -> object:
if attr in self._two_plane_names:
return TfsCollection._TwoPlanes(self, attr)
raise AttributeError(f"{self.__class__.__name__} object has no attribute {attr}")
def _load_tfs(self, filename: str):
try:
return self._buffer[filename]
except KeyError:
tfs_data = self.read_tfs(filename)
if "NAME" in tfs_data:
tfs_data = tfs_data.set_index("NAME", drop=False)
self._buffer[filename] = tfs_data
return self._buffer[filename]
def _write_tfs(self, filename: str, data_frame: DataFrame):
if self.allow_write:
write_tfs(self.directory / filename, data_frame)
self._buffer[filename] = data_frame
class _TwoPlanes(object):
def __init__(self, parent, attr):
self.parent = parent
self.attr = attr
def __getitem__(self, plane: str):
return getattr(self.parent, self.attr + "_" + plane.lower())
def __setitem__(self, plane: str, value):
setattr(self.parent, self.attr + "_" + plane.lower(), value)
class Tfs:
"""Class to mark attributes as **TFS** attributes.
Any parameter given to this class will be passed to the ``get_filename()`` and ``write_to()``
methods, together with the plane if ``two_planes=False`` is not present.
"""
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
# Private methods to define the properties ##################################
def _define_property(args, kwargs):
if "two_planes" not in kwargs:
return _define_property_two_planes(args, kwargs)
elif kwargs["two_planes"]:
kwargs.pop("two_planes")
return _define_property_two_planes(args, kwargs)
else:
kwargs.pop("two_planes")
def getter_funct(self):
return _getter(self, *args, **kwargs)
def setter_funct(self, tfs_data):
return _setter(self, tfs_data, *args, **kwargs)
return property(fget=getter_funct, fset=setter_funct)
def _define_property_two_planes(args, kwargs) -> tuple:
x_kwargs = dict(kwargs)
y_kwargs = dict(kwargs)
x_kwargs["plane"] = "x"
y_kwargs["plane"] = "y"
def x_getter_funct(self):
return _getter(self, *args, **x_kwargs)
def x_setter_funct(self, tfs_data):
return _setter(self, tfs_data, *args, **x_kwargs)
def y_getter_funct(self):
return _getter(self, *args, **y_kwargs)
def y_setter_funct(self, tfs_data):
return _setter(self, tfs_data, *args, **y_kwargs)
property_x = property(fget=x_getter_funct, fset=x_setter_funct)
property_y = property(fget=y_getter_funct, fset=y_setter_funct)
return property_x, property_y
def _getter(self, *args, **kwargs):
filename = self.get_filename(*args, **kwargs)
return self._load_tfs(filename)
def _setter(self, value, *args, **kwargs):
try:
filename, data_frame = self.write_to(value, *args, **kwargs)
self._write_tfs(filename, data_frame)
except NotImplementedError:
filename = self.get_filename(*args, **kwargs)
self._write_tfs(filename, value)
class _MaybeCall:
"""
Handles the maybe_call feature of the TfsCollection.
This class defines the `maybe_call` attribute in the instances of `TfsCollection`. To avoid
repetitive try / except blocks, this class allows you to do:
``meas.maybe_call.beta["x"](some_funct, args, kwargs)``.
If the requested file is available, the call is equivalent to: ``some_funct(args, kwargs)``, if
not then no function is called and the program continues.
"""
def __init__(self, parent):
self.parent = parent
def __getattr__(self, attr):
return _MaybeCall.MaybeCallAttr(self.parent, attr)
class MaybeCallAttr:
def __init__(self, parent, attr):
self.parent = parent
self.attr = attr
def __getitem__(self, item):
return _MaybeCall.MaybeCallAttr(self.parent, self.attr + "_" + item)
def __call__(self, function_call, *args, **kwargs):
try:
tfs_file = getattr(self.parent, self.attr)
except IOError:
return lambda funct: None # Empty function
return function_call(tfs_file, *args, **kwargs)
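# A minimal concrete collection, given as a sketch: the "beta_{}.tfs" template and
# the "./optics" directory are assumptions, any directory holding beta_x.tfs and
# beta_y.tfs files would behave the same way.
class ExampleBetaCollection(TfsCollection):
    beta = Tfs("beta_{}.tfs")
    def get_filename(self, template: str, plane: str) -> str:
        return template.format(plane)
if __name__ == "__main__":
    # Reads ./optics/beta_x.tfs lazily on first access and buffers the frame.
    example = ExampleBetaCollection(pathlib.Path("./optics"))
    print(example.beta["x"])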
| 36.048951
| 104
| 0.63967
|
c895e9888b8318f5541cb699e275cdf33e70e2bf
| 3,379
|
py
|
Python
|
tests/test_datasets.py
|
duranbe/doccano-transformer
|
1d19de73bf3b0cebadb31508e3a1864b4e1a6414
|
[
"MIT"
] | null | null | null |
tests/test_datasets.py
|
duranbe/doccano-transformer
|
1d19de73bf3b0cebadb31508e3a1864b4e1a6414
|
[
"MIT"
] | null | null | null |
tests/test_datasets.py
|
duranbe/doccano-transformer
|
1d19de73bf3b0cebadb31508e3a1864b4e1a6414
|
[
"MIT"
] | null | null | null |
import json
from collections import defaultdict
from pathlib import Path
from unittest import TestCase
from doccano_transformer.datasets import NERDataset
class TestNERDataset(TestCase):
@classmethod
def setUp(self):
current_path = Path(__file__).parent
self.shared_datadir = current_path / 'data' / 'labeling'
def test_from_labeling_text_label_jsonl_to_conll2003(self):
src_path = self.shared_datadir / 'labeling_text_label.jsonl'
filename = 'labeling_text_label.conll2003'
users = defaultdict(list)
d = NERDataset.from_jsonl(filepath=src_path)
for x in d.to_conll2003(str.split):
users[x['user']].append(x['data'])
for user, data in users.items():
with open(self.shared_datadir / (filename + f'.user{user}')) as f:
expected = f.read()
self.assertEqual(''.join(data), expected)
def test_from_labeling_jsonl_to_conll2003(self):
src_path = self.shared_datadir / 'labeling.jsonl'
filename = 'labeling.conll2003'
users = defaultdict(list)
d = NERDataset.from_jsonl(filepath=src_path)
for x in d.to_conll2003(str.split):
users[x['user']].append(x['data'])
for user, data in users.items():
with open(self.shared_datadir / (filename + f'.user{user}')) as f:
expected = f.read()
self.assertEqual(''.join(data), expected)
def test_from_labeling_text_label_jsonl_to_spacy(self):
src_path = self.shared_datadir / 'labeling_text_label.jsonl'
filename = 'labeling_text_label.spacy'
users = defaultdict(list)
d = NERDataset.from_jsonl(filepath=src_path)
for x in d.to_spacy(str.split):
users[x['user']].append(x['data'])
for user, data in users.items():
with open(self.shared_datadir / (filename + f'.user{user}')) as f:
expected = json.load(f)
# print(data)
self.assertEqual(data, expected)
def test_from_labeling_jsonl_to_spacy(self):
src_path = self.shared_datadir / 'labeling.jsonl'
filename = 'labeling.spacy'
users = defaultdict(list)
d = NERDataset.from_jsonl(filepath=src_path)
for x in d.to_spacy(str.split):
users[x['user']].append(x['data'])
for user, data in users.items():
with open(self.shared_datadir / (filename + f'.user{user}')) as f:
expected = json.load(f)
self.assertEqual(data, expected)
def test_from_labeling_spacy_to_jsonl(self):
src_path = self.shared_datadir / 'labeling.spacy.user1'
filename = self.shared_datadir / 'labeling.user1.jsonl'
d = NERDataset.from_spacy(filepath=src_path)
data = next(d.to_jsonl())
with open(filename) as f:
expected = json.load(f)
self.assertEqual(data, dict(expected))
def test_from_multi_labeling_spacy_to_jsonl(self):
src_path = self.shared_datadir / 'labeling.spacy.user2'
filename = self.shared_datadir / 'labeling.user2.jsonl'
        d = NERDataset.from_spacy(filepath=src_path, user_id=2)
        data = d.to_jsonl()
        with open(filename) as f:
            expected = map(json.loads, f)
            for d, e in zip(data, expected):
self.assertEqual(d, dict(e))
| 39.752941
| 78
| 0.632436
|
9da11d6a5cfe0a6d9b039592994da66dc5b32b31
| 5,325
|
py
|
Python
|
labconfig.py
|
labscript-suite-bitbucket-archive/spielman-labscript_utils_mainline--forked-from--labscript_suite-labscript_utils
|
62811f8d31b40d79307029d7283c01d71213307c
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null |
labconfig.py
|
labscript-suite-bitbucket-archive/spielman-labscript_utils_mainline--forked-from--labscript_suite-labscript_utils
|
62811f8d31b40d79307029d7283c01d71213307c
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null |
labconfig.py
|
labscript-suite-bitbucket-archive/spielman-labscript_utils_mainline--forked-from--labscript_suite-labscript_utils
|
62811f8d31b40d79307029d7283c01d71213307c
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null |
#####################################################################
# #
# labconfig.py #
# #
# Copyright 2013, Monash University #
# #
# This file is part of the labscript suite (see #
# http://labscriptsuite.org) and is licensed under the Simplified #
# BSD License. See the license.txt file in the root of the project #
# for the full license. #
# #
#####################################################################
import sys
import os
import socket
import subprocess
try:
import configparser
except ImportError:
# Python 2
import ConfigParser as configparser
from labscript_utils import labscript_suite_install_dir
# Look for a 'labconfig' folder in the labscript install directory:
if labscript_suite_install_dir is not None:
config_prefix = os.path.join(labscript_suite_install_dir, 'labconfig')
else:
# No labscript install directory found? Revert to system defaults
if os.name == 'nt':
config_prefix = os.path.abspath(r'C:\labconfig')
else:
config_prefix = os.path.join(os.getenv('HOME'),'labconfig')
if not os.path.exists(config_prefix):
config_prefix='/etc/labconfig/'
if not os.path.exists(config_prefix):
message = (r"Couldn't find labconfig folder. Please ensure it exists. " +
r"If the labscript suite is installed, labconfig must be <labscript_suite_install_dir>/labconfig/. " +
r"If the labscript suite is not installed, then C:\labconfig\ is checked on Windows, " +
r" and $HOME/labconfig/ then /etc/labconfig/ checked on unix.")
raise IOError(message)
config_prefix = os.path.abspath(config_prefix)
if sys.platform == 'darwin':
hostname = subprocess.check_output(['scutil', '--get', 'LocalHostName']).decode('utf8').strip()
else:
hostname = socket.gethostname()
default_config_path = os.path.join(config_prefix,'%s.ini'%hostname)
class LabConfig(configparser.SafeConfigParser):
NoOptionError = configparser.NoOptionError
NoSectionError = configparser.NoSectionError
def __init__(self,config_path=default_config_path,required_params={},defaults={}):
if isinstance(config_path,list):
self.config_path = config_path[0]
else:
self.config_path = config_path
self.file_format = ""
for section, options in required_params.items():
self.file_format += "[%s]\n"%section
for option in options:
self.file_format += "%s = <value>\n"%option
# If the folder doesn't exist, create it
if not os.path.exists(os.path.dirname(self.config_path)):
os.mkdir(os.path.dirname(self.config_path))
# If the file doesn't exist, create it
if not os.path.exists(self.config_path):
with open(self.config_path,'a+') as f:
f.write(self.file_format)
# Load the config file
configparser.SafeConfigParser.__init__(self,defaults)
self.read(config_path) #read all files in the config path if it is a list (self.config_path only contains one string)
try:
for section, options in required_params.items():
for option in options:
self.get(section,option)
except configparser.NoOptionError as e:
            raise Exception('The experiment configuration file located at %s does not have the required keys. Make sure the config file contains the following structure:\n%s'%(config_path, self.file_format))
# Overwrite the add_section method to only attempt to add a section if it doesn't
# exist. We don't ever care whether a section exists or not, only that it does exist
# when we try and save an attribute into it.
def add_section(self,section):
# Create the group if it doesn't exist
if not section.lower() == 'default' and not self.has_section(section):
configparser.SafeConfigParser.add_section(self, section)
# Overwrite the set method so that it adds the section if it doesn't exist,
# and immediately saves the data to the file (to avoid data loss on program crash)
def set(self, section, option, value):
self.add_section(section)
configparser.SafeConfigParser.set(self,section,option,value)
self.save()
# Overwrite the remove section function so that it immediately saves the change to disk
def remove_section(self,section):
configparser.SafeConfigParser.remove_section(self,section)
self.save()
# Overwrite the remove option function so that it immediately saves the change to disk
def remove_option(self,section,option):
configparser.SafeConfigParser.remove_option(self,section,option)
self.save()
# Provide a convenience method to save the contents of the ConfigParser to disk
def save(self):
with open(self.config_path, 'w+') as f:
self.write(f)
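# A minimal usage sketch, assuming the labconfig folder located above exists; the
# 'paths' section and 'experiment_shot_storage' option names are example values only:
if __name__ == '__main__':
    config = LabConfig()
    config.set('paths', 'experiment_shot_storage', r'C:\Experiments')  # written to disk immediately
    print(config.get('paths', 'experiment_shot_storage'))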
| 44.375
| 208
| 0.614648
|
efff774df8b7ee79428772a0bc7b7b7e7b6b162a
| 111
|
py
|
Python
|
PythonFiles/MCQGame2.py
|
IamVaibhavsar/Python_Files
|
283d73929a3e11955c71499407c4f8bff56e4273
|
[
"MIT"
] | null | null | null |
PythonFiles/MCQGame2.py
|
IamVaibhavsar/Python_Files
|
283d73929a3e11955c71499407c4f8bff56e4273
|
[
"MIT"
] | null | null | null |
PythonFiles/MCQGame2.py
|
IamVaibhavsar/Python_Files
|
283d73929a3e11955c71499407c4f8bff56e4273
|
[
"MIT"
] | 1
|
2019-07-26T15:25:21.000Z
|
2019-07-26T15:25:21.000Z
|
class Question:
    def __init__(self, prompt, answer):
        self.prompt = prompt
        self.answer = answer
| 27.75
| 39
| 0.648649
|
427c79481b49b26fccbefbec4fd11d29e61aa903
| 10,700
|
py
|
Python
|
docs/conf.py
|
zmunk/scrapy
|
015b71d89fd641bfd926608a2490a963cf4fb40d
|
[
"BSD-3-Clause"
] | 2
|
2021-05-18T02:27:56.000Z
|
2021-05-18T11:11:12.000Z
|
docs/conf.py
|
zmunk/scrapy
|
015b71d89fd641bfd926608a2490a963cf4fb40d
|
[
"BSD-3-Clause"
] | 4
|
2020-06-06T03:16:43.000Z
|
2020-07-27T16:43:50.000Z
|
docs/conf.py
|
zmunk/scrapy
|
015b71d89fd641bfd926608a2490a963cf4fb40d
|
[
"BSD-3-Clause"
] | null | null | null |
# Scrapy documentation build configuration file, created by
# sphinx-quickstart on Mon Nov 24 12:02:52 2008.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
from datetime import datetime
from os import path
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.append(path.join(path.dirname(__file__), "_ext"))
sys.path.insert(0, path.dirname(path.dirname(__file__)))
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'hoverxref.extension',
'notfound.extension',
'scrapydocs',
'sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Scrapy'
copyright = '2008–{}, Scrapy developers'.format(datetime.now().year)
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
try:
import scrapy
version = '.'.join(map(str, scrapy.version_info[:2]))
release = scrapy.__version__
except ImportError:
version = ''
release = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
exclude_patterns = ['build']
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['.build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# List of Sphinx warnings that will not be raised
suppress_warnings = ['epub.unknown_project_files']
# Options for HTML output
# -----------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# Add path to the RTD explicitly to robustify builds (otherwise might
# fail in a clean Debian build env)
import sphinx_rtd_theme
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
# html_style = 'scrapydoc.css'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, the reST sources are included in the HTML build as _sources/<name>.
html_copy_source = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Scrapydoc'
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
latex_documents = [
('index', 'Scrapy.tex', 'Scrapy Documentation',
'Scrapy developers', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
# Options for the linkcheck builder
# ---------------------------------
# A list of regular expressions that match URIs that should not be checked when
# doing a linkcheck build.
linkcheck_ignore = [
'http://localhost:\d+', 'http://hg.scrapy.org',
'http://directory.google.com/'
]
# Options for the Coverage extension
# ----------------------------------
coverage_ignore_pyobjects = [
# Contract’s add_pre_hook and add_post_hook are not documented because
# they should be transparent to contract developers, for whom pre_hook and
# post_hook should be the actual concern.
r'\bContract\.add_(pre|post)_hook$',
# ContractsManager is an internal class, developers are not expected to
# interact with it directly in any way.
r'\bContractsManager\b$',
# For default contracts we only want to document their general purpose in
# their __init__ method, the methods they reimplement to achieve that purpose
# should be irrelevant to developers using those contracts.
r'\w+Contract\.(adjust_request_args|(pre|post)_process)$',
# Methods of downloader middlewares are not documented, only the classes
# themselves, since downloader middlewares are controlled through Scrapy
# settings.
r'^scrapy\.downloadermiddlewares\.\w*?\.(\w*?Middleware|DownloaderStats)\.',
# Base classes of downloader middlewares are implementation details that
# are not meant for users.
r'^scrapy\.downloadermiddlewares\.\w*?\.Base\w*?Middleware',
# Private exception used by the command-line interface implementation.
r'^scrapy\.exceptions\.UsageError',
# Methods of BaseItemExporter subclasses are only documented in
# BaseItemExporter.
r'^scrapy\.exporters\.(?!BaseItemExporter\b)\w*?\.',
# Extension behavior is only modified through settings. Methods of
# extension classes, as well as helper functions, are implementation
# details that are not documented.
r'^scrapy\.extensions\.[a-z]\w*?\.[A-Z]\w*?\.', # methods
r'^scrapy\.extensions\.[a-z]\w*?\.[a-z]', # helper functions
# Never documented before, and deprecated now.
r'^scrapy\.item\.DictItem$',
r'^scrapy\.linkextractors\.FilteringLinkExtractor$',
# Implementation detail of LxmlLinkExtractor
r'^scrapy\.linkextractors\.lxmlhtml\.LxmlParserLinkExtractor',
]
# Options for the InterSphinx extension
# -------------------------------------
intersphinx_mapping = {
'attrs': ('https://www.attrs.org/en/stable/', None),
'coverage': ('https://coverage.readthedocs.io/en/stable', None),
'cssselect': ('https://cssselect.readthedocs.io/en/latest', None),
'itemloaders': ('https://itemloaders.readthedocs.io/en/latest/', None),
'pytest': ('https://docs.pytest.org/en/latest', None),
'python': ('https://docs.python.org/3', None),
'sphinx': ('https://www.sphinx-doc.org/en/master', None),
'tox': ('https://tox.readthedocs.io/en/latest', None),
'twisted': ('https://twistedmatrix.com/documents/current', None),
'twistedapi': ('https://twistedmatrix.com/documents/current/api', None),
}
# Options for sphinx-hoverxref options
# ------------------------------------
hoverxref_auto_ref = True
hoverxref_role_types = {
"class": "tooltip",
"confval": "tooltip",
"hoverxref": "tooltip",
"mod": "tooltip",
"ref": "tooltip",
}
hoverxref_roles = ['command', 'reqmeta', 'setting', 'signal']
def setup(app):
app.connect('autodoc-skip-member', maybe_skip_member)
def maybe_skip_member(app, what, name, obj, skip, options):
if not skip:
# autodocs was generating a text "alias of" for the following members
# https://github.com/sphinx-doc/sphinx/issues/4422
return name in {'default_item_class', 'default_selector_class'}
return skip
| 33.333333
| 82
| 0.710467
|
7c698d83895af2aaee69a2efa69f75aafaf715ae
| 4,771
|
py
|
Python
|
scripts/django_tests_settings.py
|
PeterJCLaw/django-stubs
|
29ac1c3017f764f58bb341e43d11f399076c631a
|
[
"MIT"
] | null | null | null |
scripts/django_tests_settings.py
|
PeterJCLaw/django-stubs
|
29ac1c3017f764f58bb341e43d11f399076c631a
|
[
"MIT"
] | 1
|
2021-06-15T16:24:10.000Z
|
2021-06-15T16:24:10.000Z
|
scripts/django_tests_settings.py
|
p7g/django-stubs
|
fe63fd1d2b7dd47cfc111a7753e45370e9497a2e
|
[
"MIT"
] | null | null | null |
SECRET_KEY = '1'
SITE_ID = 1
INSTALLED_APPS = [
'django.contrib.contenttypes',
'django.contrib.auth',
'django.contrib.sites',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.admin.apps.SimpleAdminConfig',
'django.contrib.staticfiles',
]
test_modules = [
'absolute_url_overrides',
'admin_autodiscover',
'admin_changelist',
'admin_checks',
'admin_custom_urls',
'admin_default_site',
'admin_docs',
'admin_filters',
'admin_inlines',
'admin_ordering',
'admin_registration',
'admin_scripts',
'admin_utils',
'admin_views',
'admin_widgets',
'aggregation',
'aggregation_regress',
'annotations',
'app_loading',
'apps',
'auth_tests',
'backends',
'base',
'bash_completion',
'basic',
'builtin_server',
'bulk_create',
'cache',
'check_framework',
'choices',
'conditional_processing',
'constraints',
'contenttypes_tests',
'context_processors',
'csrf_tests',
'custom_columns',
'custom_lookups',
'custom_managers',
'custom_methods',
'custom_migration_operations',
'custom_pk',
'datatypes',
'dates',
'datetimes',
'db_functions',
'db_typecasts',
'db_utils',
'dbshell',
'decorators',
'defer',
'defer_regress',
'delete',
'delete_regress',
'deprecation',
'dispatch',
'distinct_on_fields',
'empty',
'expressions',
'expressions_case',
'expressions_window',
'extra_regress',
'field_deconstruction',
'field_defaults',
'field_subclassing',
'file_storage',
'file_uploads',
'files',
'filtered_relation',
'fixtures',
'fixtures_model_package',
'fixtures_regress',
'flatpages_tests',
'force_insert_update',
'foreign_object',
'forms_tests',
'from_db_value',
'generic_inline_admin',
'generic_relations',
'generic_relations_regress',
'generic_views',
'get_earliest_or_latest',
'get_object_or_404',
'get_or_create',
'gis_tests',
'handlers',
'httpwrappers',
'humanize_tests',
'i18n',
'import_error_package',
'indexes',
'inline_formsets',
'inspectdb',
'introspection',
'invalid_models_tests',
'known_related_objects',
'logging_tests',
'lookup',
'm2m_and_m2o',
'm2m_intermediary',
'm2m_multiple',
'm2m_recursive',
'm2m_regress',
'm2m_signals',
'm2m_through',
'm2m_through_regress',
'm2o_recursive',
'mail',
'managers_regress',
'many_to_many',
'many_to_one',
'many_to_one_null',
'max_lengths',
'messages_tests',
'middleware',
'middleware_exceptions',
'migrate_signals',
'migration_test_data_persistence',
'migrations',
'migrations2',
'model_fields',
'model_forms',
'model_formsets',
'model_formsets_regress',
'model_indexes',
'model_inheritance',
'model_inheritance_regress',
'model_meta',
'model_options',
'model_package',
'model_regress',
'modeladmin',
'multiple_database',
'mutually_referential',
'nested_foreign_keys',
'no_models',
'null_fk',
'null_fk_ordering',
'null_queries',
'one_to_one',
'or_lookups',
'order_with_respect_to',
'ordering',
'pagination',
'postgres_tests',
'prefetch_related',
'project_template',
'properties',
'proxy_model_inheritance',
'proxy_models',
'queries',
'queryset_pickle',
'raw_query',
'redirects_tests',
'requests',
'reserved_names',
'resolve_url',
'responses',
'reverse_lookup',
'save_delete_hooks',
'schema',
'select_for_update',
'select_related',
'select_related_onetoone',
'select_related_regress',
'serializers',
'servers',
'sessions_tests',
'settings_tests',
'shell',
'shortcuts',
'signals',
'signed_cookies_tests',
'signing',
'sitemaps_tests',
'sites_framework',
'sites_tests',
'staticfiles_tests',
'str',
'string_lookup',
'swappable_models',
'syndication_tests',
'template_backends',
'template_loader',
'template_tests',
'test_client',
'test_client_regress',
'test_exceptions',
'test_runner',
'test_runner_apps',
'test_utils',
'timezones',
'transaction_hooks',
'transactions',
'unmanaged_models',
'update',
'update_only_fields',
'urlpatterns',
'urlpatterns_reverse',
'user_commands',
'utils_tests',
'validation',
'validators',
'version',
'view_tests',
'wsgi',
]
invalid_apps = {
'import_error_package',
}
for app in invalid_apps:
test_modules.remove(app)
INSTALLED_APPS += test_modules
| 20.743478
| 50
| 0.62817
|
36ba1659bbf981dfb6fb0298922e53a87e01b93e
| 16,592
|
py
|
Python
|
tensorflow/python/data/kernel_tests/group_by_window_test.py
|
EricRemmerswaal/tensorflow
|
141ff27877579c81a213fa113bd1b474c1749aca
|
[
"Apache-2.0"
] | 190,993
|
2015-11-09T13:17:30.000Z
|
2022-03-31T23:05:27.000Z
|
tensorflow/python/data/kernel_tests/group_by_window_test.py
|
EricRemmerswaal/tensorflow
|
141ff27877579c81a213fa113bd1b474c1749aca
|
[
"Apache-2.0"
] | 48,461
|
2015-11-09T14:21:11.000Z
|
2022-03-31T23:17:33.000Z
|
tensorflow/python/data/kernel_tests/group_by_window_test.py
|
EricRemmerswaal/tensorflow
|
141ff27877579c81a213fa113bd1b474c1749aca
|
[
"Apache-2.0"
] | 104,981
|
2015-11-09T13:40:17.000Z
|
2022-03-31T19:51:54.000Z
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.experimental.group_by_window()`."""
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.kernel_tests import checkpoint_test_base
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import combinations
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
# NOTE(mrry): These tests are based on the tests in bucket_ops_test.py.
# Currently, they use a constant batch size, though should be made to use a
# different batch size per key.
class GroupByWindowTest(test_base.DatasetTestBase, parameterized.TestCase):
def _dynamicPad(self, bucket, window, window_size):
# TODO(mrry): To match `tf.contrib.training.bucket()`, implement a
# generic form of padded_batch that pads every component
# dynamically and does not rely on static shape information about
# the arguments.
return dataset_ops.Dataset.zip(
(dataset_ops.Dataset.from_tensors(bucket),
window.padded_batch(
32, (tensor_shape.TensorShape([]), tensor_shape.TensorShape(
[None]), tensor_shape.TensorShape([3])))))
@combinations.generate(test_base.default_test_combinations())
def testSingleBucket(self):
def _map_fn(v):
return (v, array_ops.fill([v],
v), array_ops.fill([3],
string_ops.as_string(v)))
input_dataset = dataset_ops.Dataset.from_tensor_slices(
math_ops.range(32)).map(_map_fn)
bucketed_dataset = input_dataset.group_by_window(
key_func=lambda x, y, z: 0,
reduce_func=lambda k, bucket: self._dynamicPad(k, bucket, 32),
window_size=32)
get_next = self.getNext(bucketed_dataset)
which_bucket, bucketed_values = self.evaluate(get_next())
self.assertEqual(0, which_bucket)
expected_scalar_int = np.arange(32, dtype=np.int64)
expected_unk_int64 = np.zeros((32, 31)).astype(np.int64)
for i in range(32):
expected_unk_int64[i, :i] = i
expected_vec3_str = np.vstack(3 * [np.arange(32).astype(bytes)]).T
self.assertAllEqual(expected_scalar_int, bucketed_values[0])
self.assertAllEqual(expected_unk_int64, bucketed_values[1])
self.assertAllEqual(expected_vec3_str, bucketed_values[2])
@combinations.generate(test_base.default_test_combinations())
def testEvenOddBuckets(self):
def _map_fn(v):
return (v, array_ops.fill([v],
v), array_ops.fill([3],
string_ops.as_string(v)))
input_dataset = dataset_ops.Dataset.from_tensor_slices(
math_ops.range(64)).map(_map_fn)
bucketed_dataset = input_dataset.group_by_window(
key_func=lambda x, y, z: math_ops.cast(x % 2, dtypes.int64),
reduce_func=lambda k, bucket: self._dynamicPad(k, bucket, 32),
window_size=32)
get_next = self.getNext(bucketed_dataset)
# Get two minibatches (one containing even values, one containing odds)
which_bucket_even, bucketed_values_even = self.evaluate(get_next())
which_bucket_odd, bucketed_values_odd = self.evaluate(get_next())
# Count number of bucket_tensors.
self.assertEqual(3, len(bucketed_values_even))
self.assertEqual(3, len(bucketed_values_odd))
# Ensure bucket 0 was used for all minibatch entries.
self.assertAllEqual(0, which_bucket_even)
self.assertAllEqual(1, which_bucket_odd)
# Test the first bucket outputted, the events starting at 0
expected_scalar_int = np.arange(0, 32 * 2, 2, dtype=np.int64)
expected_unk_int64 = np.zeros((32, 31 * 2)).astype(np.int64)
for i in range(0, 32):
expected_unk_int64[i, :2 * i] = 2 * i
expected_vec3_str = np.vstack(3 *
[np.arange(0, 32 * 2, 2).astype(bytes)]).T
self.assertAllEqual(expected_scalar_int, bucketed_values_even[0])
self.assertAllEqual(expected_unk_int64, bucketed_values_even[1])
self.assertAllEqual(expected_vec3_str, bucketed_values_even[2])
# Test the second bucket outputted, the odds starting at 1
expected_scalar_int = np.arange(1, 32 * 2 + 1, 2, dtype=np.int64)
expected_unk_int64 = np.zeros((32, 31 * 2 + 1)).astype(np.int64)
for i in range(0, 32):
expected_unk_int64[i, :2 * i + 1] = 2 * i + 1
expected_vec3_str = np.vstack(
3 * [np.arange(1, 32 * 2 + 1, 2).astype(bytes)]).T
self.assertAllEqual(expected_scalar_int, bucketed_values_odd[0])
self.assertAllEqual(expected_unk_int64, bucketed_values_odd[1])
self.assertAllEqual(expected_vec3_str, bucketed_values_odd[2])
@combinations.generate(test_base.default_test_combinations())
def testEvenOddBucketsFilterOutAllOdd(self):
def _map_fn(v):
return {
"x": v,
"y": array_ops.fill([v], v),
"z": array_ops.fill([3], string_ops.as_string(v))
}
def _dynamic_pad_fn(bucket, window, _):
return dataset_ops.Dataset.zip(
(dataset_ops.Dataset.from_tensors(bucket),
window.padded_batch(
32, {
"x": tensor_shape.TensorShape([]),
"y": tensor_shape.TensorShape([None]),
"z": tensor_shape.TensorShape([3])
})))
input_dataset = dataset_ops.Dataset.from_tensor_slices(math_ops.range(
128)).map(_map_fn).filter(lambda d: math_ops.equal(d["x"] % 2, 0))
bucketed_dataset = input_dataset.group_by_window(
key_func=lambda d: math_ops.cast(d["x"] % 2, dtypes.int64),
reduce_func=lambda k, bucket: _dynamic_pad_fn(k, bucket, 32),
window_size=32)
get_next = self.getNext(bucketed_dataset)
# Get two minibatches ([0, 2, ...] and [64, 66, ...])
which_bucket0, bucketed_values_even0 = self.evaluate(get_next())
which_bucket1, bucketed_values_even1 = self.evaluate(get_next())
# Ensure that bucket 1 was completely filtered out
self.assertAllEqual(0, which_bucket0)
self.assertAllEqual(0, which_bucket1)
self.assertAllEqual(
np.arange(0, 64, 2, dtype=np.int64), bucketed_values_even0["x"])
self.assertAllEqual(
np.arange(64, 128, 2, dtype=np.int64), bucketed_values_even1["x"])
@combinations.generate(test_base.default_test_combinations())
def testDynamicWindowSize(self):
components = np.arange(100).astype(np.int64)
# Key fn: even/odd
# Reduce fn: batches of 5
# Window size fn: even=5, odd=10
def window_size_func(key):
window_sizes = constant_op.constant([5, 10], dtype=dtypes.int64)
return window_sizes[key]
dataset = dataset_ops.Dataset.from_tensor_slices(components)
dataset = dataset.group_by_window(
key_func=lambda x: x % 2,
reduce_func=lambda _, xs: xs.batch(20),
window_size=None,
window_size_func=window_size_func)
get_next = self.getNext(dataset)
with self.assertRaises(errors.OutOfRangeError):
batches = 0
while True:
result = self.evaluate(get_next())
is_even = all(x % 2 == 0 for x in result)
is_odd = all(x % 2 == 1 for x in result)
self.assertTrue(is_even or is_odd)
expected_batch_size = 5 if is_even else 10
self.assertEqual(expected_batch_size, result.shape[0])
batches += 1
self.assertEqual(batches, 15)
@combinations.generate(test_base.default_test_combinations())
def testSimple(self):
components = np.random.randint(100, size=(200,)).astype(np.int64)
dataset = dataset_ops.Dataset.from_tensor_slices(components).map(
lambda x: x * x)
dataset = dataset.group_by_window(
key_func=lambda x: x % 2,
reduce_func=lambda _, xs: xs.batch(4),
window_size=4)
get_next = self.getNext(dataset)
counts = []
with self.assertRaises(errors.OutOfRangeError):
while True:
result = self.evaluate(get_next())
self.assertTrue(
            all(x % 2 == 0 for x in result) or all(x % 2 == 1 for x in result))
counts.append(result.shape[0])
self.assertEqual(len(components), sum(counts))
num_full_batches = len([c for c in counts if c == 4])
self.assertGreaterEqual(num_full_batches, 24)
self.assertTrue(all(c == 4 for c in counts[:num_full_batches]))
@combinations.generate(test_base.default_test_combinations())
def testImmediateOutput(self):
components = np.array([0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 0, 0, 2, 2, 0, 0],
dtype=np.int64)
dataset = dataset_ops.Dataset.from_tensor_slices(components)
dataset = dataset.repeat(-1)
dataset = dataset.group_by_window(
key_func=lambda x: x % 3,
reduce_func=lambda _, xs: xs.batch(4),
window_size=4)
get_next = self.getNext(dataset)
# The input is infinite, so this test demonstrates that:
# 1. We produce output without having to consume the entire input,
# 2. Different buckets can produce output at different rates, and
# 3. For deterministic input, the output is deterministic.
for _ in range(3):
self.assertAllEqual([0, 0, 0, 0], self.evaluate(get_next()))
self.assertAllEqual([1, 1, 1, 1], self.evaluate(get_next()))
self.assertAllEqual([2, 2, 2, 2], self.evaluate(get_next()))
self.assertAllEqual([0, 0, 0, 0], self.evaluate(get_next()))
@combinations.generate(test_base.default_test_combinations())
def testSmallGroups(self):
components = np.array([0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0], dtype=np.int64)
dataset = dataset_ops.Dataset.from_tensor_slices(components)
dataset = dataset.group_by_window(
key_func=lambda x: x % 2,
reduce_func=lambda _, xs: xs.batch(4),
window_size=4)
get_next = self.getNext(dataset)
self.assertAllEqual([0, 0, 0, 0], self.evaluate(get_next()))
self.assertAllEqual([1, 1, 1, 1], self.evaluate(get_next()))
# The small outputs at the end are deterministically produced in key
# order.
self.assertAllEqual([0, 0, 0], self.evaluate(get_next()))
self.assertAllEqual([1], self.evaluate(get_next()))
@combinations.generate(test_base.default_test_combinations())
def testEmpty(self):
dataset = dataset_ops.Dataset.range(4).group_by_window(
key_func=lambda _: 0, reduce_func=lambda _, xs: xs, window_size=0)
get_next = self.getNext(dataset)
with self.assertRaisesRegex(
errors.InvalidArgumentError,
"Window size must be greater than zero, but got 0."):
print(self.evaluate(get_next()))
@combinations.generate(test_base.default_test_combinations())
def testReduceFuncError(self):
components = np.random.randint(100, size=(200,)).astype(np.int64)
def reduce_func(_, xs):
# Introduce an incorrect padded shape that cannot (currently) be
# detected at graph construction time.
return xs.padded_batch(
4,
padded_shapes=(tensor_shape.TensorShape([]),
constant_op.constant([5], dtype=dtypes.int64) * -1))
dataset = dataset_ops.Dataset.from_tensor_slices(components)
dataset = dataset.map(lambda x: (x, ops.convert_to_tensor([x * x])))
dataset = dataset.group_by_window(
key_func=lambda x, _: x % 2, reduce_func=reduce_func, window_size=32)
get_next = self.getNext(dataset)
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(get_next())
@combinations.generate(test_base.default_test_combinations())
def testConsumeWindowDatasetMoreThanOnce(self):
components = np.random.randint(50, size=(200,)).astype(np.int64)
def reduce_func(key, window):
# Apply two different kinds of padding to the input: tight
# padding, and quantized (to a multiple of 10) padding.
return dataset_ops.Dataset.zip((
window.padded_batch(
4, padded_shapes=tensor_shape.TensorShape([None])),
window.padded_batch(
4, padded_shapes=ops.convert_to_tensor([(key + 1) * 10])),
))
dataset = dataset_ops.Dataset.from_tensor_slices(components)
dataset = dataset.map(
lambda x: array_ops.fill([math_ops.cast(x, dtypes.int32)], x))
# pylint: disable=g-long-lambda
dataset = dataset.group_by_window(
key_func=lambda x: math_ops.cast(
array_ops.shape(x)[0] // 10, dtypes.int64),
reduce_func=reduce_func,
window_size=4)
get_next = self.getNext(dataset)
counts = []
with self.assertRaises(errors.OutOfRangeError):
while True:
tight_result, multiple_of_10_result = self.evaluate(get_next())
self.assertEqual(0, multiple_of_10_result.shape[1] % 10)
self.assertAllEqual(tight_result,
multiple_of_10_result[:, :tight_result.shape[1]])
counts.append(tight_result.shape[0])
self.assertEqual(len(components), sum(counts))
@combinations.generate(test_base.default_test_combinations())
def testShortCircuit(self):
dataset = dataset_ops.Dataset.range(10).group_by_window(
key_func=lambda x: x,
reduce_func=lambda _, window: window.batch(1),
window_size=1)
self.assertDatasetProduces(
dataset, expected_output=[[i] for i in range(10)])
@combinations.generate(test_base.default_test_combinations())
def testGroupByWindowWithAutotune(self):
dataset = dataset_ops.Dataset.range(1000).group_by_window(
key_func=lambda x: x // 10,
reduce_func=lambda key, window: dataset_ops.Dataset.from_tensors(key),
window_size=4)
dataset = dataset.map(lambda x: x + 1, num_parallel_calls=-1)
get_next = self.getNext(dataset)
self.evaluate(get_next())
@combinations.generate(test_base.default_test_combinations())
def testGroupByWindowCardinality(self):
dataset = dataset_ops.Dataset.range(1).repeat().group_by_window(
key_func=lambda x: x % 2,
reduce_func=lambda key, window: dataset_ops.Dataset.from_tensors(key),
window_size=4)
self.assertEqual(self.evaluate(dataset.cardinality()), dataset_ops.INFINITE)
@combinations.generate(test_base.default_test_combinations())
def testName(self):
dataset = dataset_ops.Dataset.from_tensors(np.int64(42)).group_by_window(
key_func=lambda x: x,
reduce_func=lambda key, window: window.batch(4),
window_size=4,
name="group_by_window")
self.assertDatasetProduces(dataset, [[42]])
class GroupByWindowCheckpointTest(checkpoint_test_base.CheckpointTestBase,
parameterized.TestCase):
def _build_dataset(self, components):
dataset = dataset_ops.Dataset.from_tensor_slices(components).repeat(-1)
dataset = dataset.group_by_window(
key_func=lambda x: x % 3,
reduce_func=lambda _, xs: xs.batch(4),
window_size=4)
return dataset
@combinations.generate(test_base.default_test_combinations())
def test(self):
components = np.array([0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 0, 0, 2, 2, 0, 0],
dtype=np.int64)
self.verify_unused_iterator(
lambda: self._build_dataset(components),
num_outputs=12,
verify_exhausted=False)
self.verify_multiple_breaks(
lambda: self._build_dataset(components),
num_outputs=12,
verify_exhausted=False)
self.verify_reset_restored_iterator(
lambda: self._build_dataset(components),
num_outputs=12,
verify_exhausted=False)
if __name__ == "__main__":
test.main()
| 41.069307
| 80
| 0.679785
|
cff9b2b3f1159bc656b4d3ba4f21d01d2274f087
| 1,064
|
py
|
Python
|
account/migrations/0002_userinfo.py
|
ZiYin-ss/python-blog2
|
78f30bb969fc0d2f0ddb7c1221bb2210387f2c09
|
[
"MIT"
] | null | null | null |
account/migrations/0002_userinfo.py
|
ZiYin-ss/python-blog2
|
78f30bb969fc0d2f0ddb7c1221bb2210387f2c09
|
[
"MIT"
] | null | null | null |
account/migrations/0002_userinfo.py
|
ZiYin-ss/python-blog2
|
78f30bb969fc0d2f0ddb7c1221bb2210387f2c09
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.7 on 2021-10-21 18:09
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('account', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='UserInfo',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('school', models.CharField(blank=True, max_length=100)),
('company', models.CharField(blank=True, max_length=100)),
('profession', models.CharField(blank=True, max_length=100)),
('address', models.CharField(blank=True, max_length=100)),
('aboutme', models.TextField(blank=True)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| 36.689655
| 121
| 0.62406
|
565cb246133194bb38b5a48e36746987ff513d69
| 873
|
py
|
Python
|
home/views.py
|
ViniciusBessa/django_hortifruti
|
d98ce422c32eecfc3c776207a562b8a880595088
|
[
"MIT"
] | 1
|
2021-12-02T01:04:41.000Z
|
2021-12-02T01:04:41.000Z
|
home/views.py
|
ViniciusBessa/django_hortifruti
|
d98ce422c32eecfc3c776207a562b8a880595088
|
[
"MIT"
] | null | null | null |
home/views.py
|
ViniciusBessa/django_hortifruti
|
d98ce422c32eecfc3c776207a562b8a880595088
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from django.views import View
from core.models import Produto, dados_comuns
class HomeView(View):
"""
    View that renders the project's home page
    Attribute template_name: The template this view should render
    Attribute context: A dictionary used in the template
"""
template_name = 'home.html'
context = {}
def get(self, request, *args, **kwargs):
self.context.update(dados_comuns(request.user))
produtos_mais_vendidos = Produto.mais_vendidos()
produtos_categorias = Produto.receber_produtos(self.context.get('categorias'), 4, 2)
self.context.update({
'produtos_categorias': produtos_categorias,
'produtos_mais_vendidos': produtos_mais_vendidos,
})
return render(request, self.template_name, self.context)
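# Wiring sketch for this view; the URL name and the project's urls.py layout below
# are assumptions and may differ in the actual project:
#
#   from django.urls import path
#   from home.views import HomeView
#
#   urlpatterns = [
#       path('', HomeView.as_view(), name='home'),
#   ]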
| 30.103448
| 92
| 0.702176
|
b1b20daa07905e92fd8a5f9057bd73a4cd02ce81
| 838
|
py
|
Python
|
170522_visualizing_one_million_cells/cluster.py
|
Sangram-Rout/scanpy_usage
|
38030605c74c755167302caa431cf3ff62de646f
|
[
"BSD-3-Clause"
] | 52
|
2017-07-25T20:30:02.000Z
|
2022-03-18T07:22:45.000Z
|
170522_visualizing_one_million_cells/cluster.py
|
Sangram-Rout/scanpy_usage
|
38030605c74c755167302caa431cf3ff62de646f
|
[
"BSD-3-Clause"
] | 16
|
2017-08-11T12:26:10.000Z
|
2020-12-10T23:12:00.000Z
|
170522_visualizing_one_million_cells/cluster.py
|
Sangram-Rout/scanpy_usage
|
38030605c74c755167302caa431cf3ff62de646f
|
[
"BSD-3-Clause"
] | 49
|
2017-09-10T22:39:31.000Z
|
2022-03-06T08:04:32.000Z
|
import matplotlib
matplotlib.use('Agg') # plotting backend compatible with screen
import sys
import scanpy.api as sc
sc.settings.verbosity = 2 # show logging output
sc.settings.autosave = True # save figures, do not show them
sc.settings.set_figure_params(dpi=300) # set sufficiently high resolution for saving
filename = sys.argv[1] # read filename from command line
def basic_analysis(filename):
adata = sc.read_10x_h5(filename)
sc.pp.recipe_zheng17(adata)
sc.pp.neighbors(adata)
sc.tl.louvain(adata)
sc.tl.paga(adata)
sc.tl.umap(adata)
sc.tl.rank_genes_groups(adata, 'louvain')
adata.write('./write/result.h5ad')
# plotting
sc.pl.paga(adata)
sc.pl.umap(adata, color='louvain')
sc.pl.rank_genes_groups(adata, save='.pdf')
if __name__ == "__main__":
basic_analysis(filename)
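# Example invocation (the path is illustrative; the script expects a 10x Genomics
# .h5 matrix as its only command-line argument):
#   python cluster.py ./data/1M_neurons_filtered_gene_bc_matrices_h5.h5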
| 28.896552
| 85
| 0.71957
|
b9d889ee1044fa5b5ee94d8c8cd78a594fdf590b
| 8,954
|
py
|
Python
|
tools/build.py
|
NeuroSystemsLLC/brainflow
|
1178bc767eb55819148e8be25592fa24a91868db
|
[
"MIT"
] | null | null | null |
tools/build.py
|
NeuroSystemsLLC/brainflow
|
1178bc767eb55819148e8be25592fa24a91868db
|
[
"MIT"
] | null | null | null |
tools/build.py
|
NeuroSystemsLLC/brainflow
|
1178bc767eb55819148e8be25592fa24a91868db
|
[
"MIT"
] | null | null | null |
import argparse
import platform
import subprocess
import os
from shutil import rmtree
def run_command(cmd, cwd=None):
print('Running command: %s' % (' '.join(cmd)))
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=cwd)
while True:
p.stdout.flush()
line = p.stdout.read(1)
if line:
print(line.decode('utf-8', 'ignore'), end='')
else:
            if p.poll() is not None:
break
if p.returncode != 0:
raise ValueError('Process finished with error code %d' % p.returncode)
class Generator:
def __init__(self, priority):
self.priority = priority
def get_generator(self):
raise NotImplementedError
def get_arch(self):
raise NotImplementedError
def __lt__(self, other):
return self.priority < other.priority
class VS2019(Generator):
def __init__(self):
super(VS2019, self).__init__(10)
def get_generator(self):
return 'Visual Studio 16 2019'
def get_arch(self):
return 'x64'
class VS2017(Generator):
def __init__(self):
super(VS2017, self).__init__(5)
def get_generator(self):
return 'Visual Studio 15 2017 Win64'
def get_arch(self):
return None
def get_win_generators():
result = list()
try:
output = subprocess.check_output(['C:\\Program Files (x86)\\Microsoft Visual Studio\\Installer\\vswhere.exe', '-property', 'displayName'])
output = output.decode('utf-8', 'ignore')
print(output)
if '2019' in output:
result.append(VS2019())
if '2017' in output:
result.append(VS2017())
except BaseException as e:
print('No Visual Studio Installations Found')
return sorted(result, reverse=True)
def check_deps():
try:
cmake = subprocess.check_output(['cmake', '--version'])
except BaseException as e:
print('You need to install CMake first. Run: python -m pip install cmake')
raise e
def prepare_args():
parser = argparse.ArgumentParser()
cur_folder = os.path.dirname(os.path.abspath(__file__))
bluetooth_default = False
if platform.system() == 'Windows':
bluetooth_default = True
parser.add_argument('--oymotion', dest='oymotion', action='store_true')
parser.add_argument('--no-oymotion', dest='oymotion', action='store_false')
parser.set_defaults(oymotion=True)
parser.add_argument('--msvc-runtime', type=str, choices=['static', 'dynamic'], help='how to link MSVC runtime', required=False, default='static')
generators = get_win_generators()
if not generators:
parser.add_argument('--generator', type=str, help='generator for CMake', required=True)
parser.add_argument('--arch', type=str, help='arch for CMake', required=False)
else:
generator = generators[0]
parser.add_argument('--generator', type=str, help='generator for CMake', required=False, default=generator.get_generator())
if generator.get_arch() is not None:
parser.add_argument('--arch', type=str, choices=['x64', 'Win32', 'ARM', 'ARM64'], help='arch for CMake', required=False, default=generator.get_arch())
else:
parser.add_argument('--arch', type=str, choices=['x64', 'Win32', 'ARM', 'ARM64'], help='arch for CMake', required=False)
parser.add_argument('--cmake-system-version', type=str, help='system version for win', required=False, default='8.1')
elif platform.system() == 'Darwin':
macos_ver = platform.mac_ver()[0]
versions = [int(x) for x in macos_ver.split('.')]
if versions[0] >= 11:
parser.add_argument('--cmake-osx-architectures', type=str, help='archs for osx', required=False, default='"arm64;x86_64"')
else:
parser.add_argument('--cmake-osx-architectures', type=str, help='archs for osx', required=False)
parser.add_argument('--cmake-osx-deployment-target', type=str, help='min supported version of osx', required=False, default='10.13')
parser.add_argument('--use-libftdi', action='store_true')
try:
output = subprocess.check_output(['ninja', '--version'])
print(output)
parser.add_argument('--generator', type=str, help='CMake generator', required=False, default='Ninja')
except BaseException:
parser.add_argument('--generator', type=str, help='CMake generator', required=False, default='')
            print('Ninja is the recommended generator for macOS but was not found')
else:
parser.add_argument('--generator', type=str, help='CMake generator', required=False)
parser.add_argument('--use-libftdi', action='store_true')
parser.add_argument('--build-dir', type=str, help='build folder', required=False, default=os.path.join(cur_folder, '..', 'build'))
parser.add_argument('--cmake-install-prefix', type=str, help='installation folder, full path', required=False, default=os.path.join(cur_folder, '..', 'installed'))
parser.add_argument('--use-openmp', action='store_true')
parser.add_argument('--build-bluetooth', action='store_true')
parser.add_argument('--warnings-as-errors', action='store_true')
parser.add_argument('--debug', action='store_true')
parser.add_argument('--clear-build-dir', action='store_true')
parser.add_argument('--num-jobs', type=int, help='num jobs to run in parallel', required=False, default=4)
parser.add_argument('--bluetooth', dest='bluetooth', action='store_true')
parser.add_argument('--no-bluetooth', dest='bluetooth', action='store_false')
parser.set_defaults(bluetooth=bluetooth_default)
args = parser.parse_args()
return args
def config(args):
if args.clear_build_dir:
if os.path.exists(args.build_dir):
rmtree(args.build_dir)
if os.path.exists(args.cmake_install_prefix):
rmtree(args.cmake_install_prefix)
if not os.path.exists(args.build_dir):
os.makedirs(args.build_dir)
cur_folder = os.path.dirname(os.path.abspath(__file__))
brainflow_root_folder = os.path.join(cur_folder, '..')
cmd_config = list()
cmd_config.append('cmake')
cmd_config.append('-DCMAKE_INSTALL_PREFIX=%s' % args.cmake_install_prefix)
if hasattr(args, 'cmake_system_version') and args.cmake_system_version:
cmd_config.append('-DCMAKE_SYSTEM_VERSION=%s' % args.cmake_system_version)
if hasattr(args, 'use_libftdi') and args.use_libftdi:
cmd_config.append('-DUSE_LIBFTDI=ON')
if args.warnings_as_errors:
cmd_config.append('-DWARNINGS_AS_ERRORS=ON')
if args.use_openmp:
cmd_config.append('-DUSE_OPENMP=ON')
if hasattr(args, 'oymotion') and args.oymotion:
cmd_config.append('-DBUILD_OYMOTION_SDK=ON')
if hasattr(args, 'build_bluetooth') and args.build_bluetooth:
cmd_config.append('-DBUILD_BLUETOOTH=ON')
    if hasattr(args, 'cmake_osx_architectures') and args.cmake_osx_architectures:
        cmd_config.append('-DCMAKE_OSX_ARCHITECTURES=%s' % args.cmake_osx_architectures)
if hasattr(args, 'cmake_osx_deployment_target') and args.cmake_osx_deployment_target:
cmd_config.append('-DCMAKE_OSX_DEPLOYMENT_TARGET=%s' % args.cmake_osx_deployment_target)
if hasattr(args, 'generator') and args.generator:
cmd_config.extend(['-G', args.generator])
if hasattr(args, 'arch') and args.arch:
cmd_config.extend(['-A', args.arch])
if hasattr(args, 'msvc_runtime'):
cmd_config.append('-DMSVC_RUNTIME=%s' % (args.msvc_runtime))
if platform.system() != 'Windows':
if hasattr(args, 'debug') and args.debug:
cmd_config.append('-DCMAKE_BUILD_TYPE=Debug')
else:
cmd_config.append('-DCMAKE_BUILD_TYPE=Release')
if hasattr(args, 'bluetooth') and args.bluetooth:
cmd_config.append('-DBUILD_BLUETOOTH=ON')
cmd_config.append(brainflow_root_folder)
run_command(cmd_config, args.build_dir)
def build(args):
if platform.system() == 'Windows':
config = 'Release'
if args.debug:
config = 'Debug'
cmd_build = ['cmake', '--build', '.', '--target', 'install', '--config', config, '-j', str(args.num_jobs), '--parallel', str(args.num_jobs)]
run_command(cmd_build, cwd=args.build_dir)
else:
if hasattr(args, 'generator') and args.generator and args.generator.lower() == 'ninja':
run_command(['ninja', '-j', str(args.num_jobs)], cwd=args.build_dir)
run_command(['ninja', 'install'], cwd=args.build_dir)
else:
run_command(['make', '-j', str(args.num_jobs)], cwd=args.build_dir)
run_command(['make', 'install'], cwd=args.build_dir)
def main():
check_deps()
args = prepare_args()
config(args)
build(args)
if __name__ == '__main__':
main()
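# Example invocations (all flags are defined in prepare_args above; the values
# are illustrative only):
#   python tools/build.py --use-openmp --num-jobs 8
#   python tools/build.py --debug --clear-build-dir
#   python tools/build.py --generator "Visual Studio 16 2019" --arch x64 --msvc-runtime dynamic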
| 42.436019
| 167
| 0.655573
|
647914e929c2f6455e5e69370806d9172d4475be
| 4,435
|
py
|
Python
|
micro_cata/.ipynb_checkpoints/param_gamma-checkpoint.py
|
amrodrig72/micro_cata
|
7f216b0847e479e40af52801a5390d6f930085a4
|
[
"MIT"
] | null | null | null |
micro_cata/.ipynb_checkpoints/param_gamma-checkpoint.py
|
amrodrig72/micro_cata
|
7f216b0847e479e40af52801a5390d6f930085a4
|
[
"MIT"
] | null | null | null |
micro_cata/.ipynb_checkpoints/param_gamma-checkpoint.py
|
amrodrig72/micro_cata
|
7f216b0847e479e40af52801a5390d6f930085a4
|
[
"MIT"
] | null | null | null |
#Expected file: '../data/gardner_mt_catastrophe_only_tubulin.csv'
def param_gamma (file):
#Import packages
import pandas as pd; import numpy as np; import bebi103
import math; import scipy; import scipy.stats as st; import numba; import tqdm; import warnings
import iqplot; import bokeh.io; import bokeh.plotting; import colorcet; import holoviews as hv
bokeh.io.output_notebook()
hv.extension("bokeh")
#import data
data3 = pd.read_csv(file, comment= "#")
#reorganize data frame to increasing concentration
df = data3[['7 uM', '9 uM', '10 uM', '12 uM', '14 uM']]
#Rename columns so that they start at 0 and end at 4 for looping
df = df.rename(columns={"7 uM": 0, "9 uM": 1, "10 uM": 2, "12 uM": 3, "14 uM": 4, })
#Melt dataframe
df = pd.melt(df, value_name='Time[s]')
#Rename column varible to Concentration
df.rename(columns = {'variable':'Concentration'}, inplace = True)
#Drop NaN rows
df = df.dropna()
#Reset our index
df = df.reset_index(drop = True)
#Define function to get log likelihood for the gamma distribution
def log_like_gamma(params, t):
"""Log likelihood for a Gamma distribution."""
alpha, beta = params
if alpha <= 0 or beta <= 0:
return -np.inf
return st.gamma.logpdf(t, alpha, loc=0, scale=1/beta).sum()
    ##Now we can make the function to find the parameter estimates for the gamma distribution model
def gamma_mle(t):
# Initial guess
t_bar = np.mean(t)
beta_guess = t_bar / np.var(t)
alpha_guess = t_bar * beta_guess
with warnings.catch_warnings():
warnings.simplefilter("ignore")
# We need to crank up the tolerance for this one
res = scipy.optimize.minimize(
lambda params, t: -log_like_gamma(params, t),
(alpha_guess, beta_guess),
args=(t,),
method="powell",
tol=1e-7,
)
if res.success:
return res.x
else:
raise RuntimeError('Convergence failed with message', res.message)
# Bootstrapping functions
rg = np.random.default_rng(10000)
def draw_bs_sample(data):
"""Draw a bootstrap sample from a 1D data set."""
return rg.choice(data, size=len(data))
def draw_bs_reps_mle(mle_fun, data, args=(), size=1000, progress_bar=False):
"""Draw nonparametric bootstrap replicates of maximum likelihood estimator.
Returns
-------
output : numpy array
Bootstrap replicates of MLEs.
"""
if progress_bar:
iterator = tqdm.tqdm(range(size))
else:
iterator = range(size)
return np.array([mle_fun(draw_bs_sample(data), *args) for _ in iterator])
    #Create an empty array to hold the Alpha and Beta parameter estimates for the different concentrations of tubulin
results = np.empty((len(df["Concentration"].unique()), 2))
for Concentration, g in df.groupby("Concentration"):
results[Concentration] = gamma_mle(g["Time[s]"].values)
df_mle = pd.DataFrame(
data=results,
columns=["Alpha", "Beta"],
)
    #Now we can plot our parameter estimates and their confidence intervals for the five different concentrations of
##tubulin added
colors = colorcet.b_glasbey_category10
reps = {}
for Concentration, g in tqdm.tqdm(df.groupby("Concentration")):
# Extract time points
t = g["Time[s]"].values
# Generate bootstrap replicates
reps[Concentration] = draw_bs_reps_mle(gamma_mle, t, size=5000, progress_bar = False,)
p = bokeh.plotting.figure(
x_axis_label="Alpha",
y_axis_label="Beta",
frame_height=400,
frame_width=400,
)
for Concentration, bs_reps in reps.items():
# Extract contour lines in Alpha-Beta plane.
x_line, y_line = bebi103.viz.contour_lines_from_samples(
            x=bs_reps[:, -2], y=bs_reps[:, -1], levels=[0.95]
)
# Plot the contour lines with fill
for x, y in zip(x_line, y_line):
p.line(x, y, line_width=2, color=colors[Concentration], legend_label=f'Concentration {Concentration}')
p.patch(x, y, fill_color=colors[Concentration], alpha=0.3)
p.legend.location = "top_left"
return bokeh.io.show(p)
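# Example call (file path taken from the comment at the top of this file):
#   param_gamma('../data/gardner_mt_catastrophe_only_tubulin.csv')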
| 34.648438
| 121
| 0.623224
|
bdf2fb866102f04138515d031595c2f26b9911e9
| 4,394
|
py
|
Python
|
tatk/nlu/bert/model.py
|
yqzhangthu/tatk
|
4d27e89604a33f19f1c7b8fe5dc92d4ba6c6f10a
|
[
"Apache-2.0"
] | null | null | null |
tatk/nlu/bert/model.py
|
yqzhangthu/tatk
|
4d27e89604a33f19f1c7b8fe5dc92d4ba6c6f10a
|
[
"Apache-2.0"
] | null | null | null |
tatk/nlu/bert/model.py
|
yqzhangthu/tatk
|
4d27e89604a33f19f1c7b8fe5dc92d4ba6c6f10a
|
[
"Apache-2.0"
] | null | null | null |
import torch
import torch.nn as nn
from pytorch_pretrained_bert import BertModel
class BertNLU(nn.Module):
def __init__(self, model_config, intent_dim, tag_dim, DEVICE, intent_weight=None):
super(BertNLU, self).__init__()
self.DEVICE = DEVICE
self.bert = BertModel.from_pretrained(model_config['pre-trained'])
for p in self.parameters():
p.requires_grad = False
self.intent_dim = intent_dim
self.tag_dim = tag_dim
self.dropout = nn.Dropout(model_config['dropout'])
self.intent_classifier = nn.Linear(self.bert.config.hidden_size, self.intent_dim)
self.tag_classifier = nn.Linear(self.bert.config.hidden_size, self.tag_dim)
nn.init.xavier_uniform_(self.intent_classifier.weight)
nn.init.xavier_uniform_(self.tag_classifier.weight)
self.tag_loss = torch.nn.CrossEntropyLoss()
self.intent_loss = torch.nn.BCEWithLogitsLoss(pos_weight=intent_weight)
if model_config['optimizer'] == 'Adam':
self.optim = torch.optim.Adam(filter(lambda p: p.requires_grad, self.parameters()), lr=model_config['lr'])
else:
self.optim = torch.optim.SGD(filter(lambda p: p.requires_grad, self.parameters()), lr=model_config['lr'])
def forward(self, word_seq_tensor, word_mask_tensor):
self.bert.eval()
word_seq_tensor = word_seq_tensor.to(self.DEVICE)
word_mask_tensor = word_mask_tensor.to(self.DEVICE)
with torch.no_grad():
encoder_layers, pooled_output = self.bert(input_ids=word_seq_tensor,
attention_mask=word_mask_tensor,
output_all_encoded_layers=False)
# encoder_layers = [batch_size, sequence_length, hidden_size]
# pooled_output = [batch_size, hidden_size]
encoder_layers = self.dropout(encoder_layers)
pooled_output = self.dropout(pooled_output)
tag_logits = self.tag_classifier(encoder_layers)
intent_logits = self.intent_classifier(pooled_output)
# tag_logits = [batch_size, sequence_length, tag_dim]
# intent_logits = [batch_size, intent_dim]
return intent_logits, tag_logits
def train_batch(self, word_seq_tensor, tag_seq_tensor, intent_tensor, word_mask_tensor, tag_mask_tensor):
self.optim.zero_grad()
word_seq_tensor = word_seq_tensor.to(self.DEVICE)
tag_seq_tensor = tag_seq_tensor.to(self.DEVICE)
intent_tensor = intent_tensor.to(self.DEVICE, torch.float)
word_mask_tensor = word_mask_tensor.to(self.DEVICE)
tag_mask_tensor = tag_mask_tensor.to(self.DEVICE)
intent_logits, tag_logits = self.forward(word_seq_tensor, word_mask_tensor)
active_tag_loss = tag_mask_tensor.view(-1) == 1
active_tag_logits = tag_logits.view(-1, self.tag_dim)[active_tag_loss]
active_tag_labels = tag_seq_tensor.view(-1)[active_tag_loss]
intent_loss = self.intent_loss(intent_logits, intent_tensor)/intent_tensor.size(0)
tag_loss = self.tag_loss(active_tag_logits, active_tag_labels)/intent_tensor.size(0)
loss = intent_loss + tag_loss
loss.backward()
self.optim.step()
return intent_loss.item(), tag_loss.item(), loss.item()
def eval_batch(self, word_seq_tensor, tag_seq_tensor, intent_tensor, word_mask_tensor, tag_mask_tensor):
with torch.no_grad():
word_seq_tensor = word_seq_tensor.to(self.DEVICE)
tag_seq_tensor = tag_seq_tensor.to(self.DEVICE)
intent_tensor = intent_tensor.to(self.DEVICE, torch.float)
word_mask_tensor = word_mask_tensor.to(self.DEVICE)
tag_mask_tensor = tag_mask_tensor.to(self.DEVICE)
intent_logits, tag_logits = self.forward(word_seq_tensor, word_mask_tensor)
active_tag_loss = tag_mask_tensor.view(-1) == 1
active_tag_logits = tag_logits.view(-1, self.tag_dim)[active_tag_loss]
active_tag_labels = tag_seq_tensor.view(-1)[active_tag_loss]
intent_loss = self.intent_loss(intent_logits, intent_tensor)/intent_tensor.size(0)
tag_loss = self.tag_loss(active_tag_logits, active_tag_labels)/intent_tensor.size(0)
loss = intent_loss + tag_loss
return intent_loss.item(), tag_loss.item(), loss.item()
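# Minimal usage sketch (config keys follow those read above; the concrete values
# are hypothetical and not part of the original file):
#   model_config = {'pre-trained': 'bert-base-uncased', 'dropout': 0.1,
#                   'optimizer': 'Adam', 'lr': 1e-3}
#   model = BertNLU(model_config, intent_dim=10, tag_dim=20, DEVICE=torch.device('cpu'))
#   intent_logits, tag_logits = model(word_seq_tensor, word_mask_tensor)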
| 52.939759
| 118
| 0.686391
|
7e810db2476ab0a46b7ec51bb35a6c9f0d241d61
| 3,768
|
py
|
Python
|
Machine-Learning-with-Python- From-LM-to-DL/Unit 2. Non Linear Classification, Linear Regression, Colaborative Filtering/mnist/part1/test_environment6.py
|
andresdelarosa1887/Public-Projects
|
db8d8e0c0f5f0f7326346462fcdfe21ce8142a12
|
[
"Unlicense"
] | 1
|
2020-09-29T17:29:34.000Z
|
2020-09-29T17:29:34.000Z
|
Machine-Learning-with-Python- From-LM-to-DL/Unit 2. Non Linear Classification, Linear Regression, Colaborative Filtering/mnist/part1/test_environment6.py
|
andresdelarosa1887/Public-Projects
|
db8d8e0c0f5f0f7326346462fcdfe21ce8142a12
|
[
"Unlicense"
] | null | null | null |
Machine-Learning-with-Python- From-LM-to-DL/Unit 2. Non Linear Classification, Linear Regression, Colaborative Filtering/mnist/part1/test_environment6.py
|
andresdelarosa1887/Public-Projects
|
db8d8e0c0f5f0f7326346462fcdfe21ce8142a12
|
[
"Unlicense"
] | null | null | null |
import sys
import numpy as np
import matplotlib.pyplot as plt
sys.path.append("..")
from utils import *
from linear_regression import *
from svm import *
from softmax import *
from features import *
from kernel import *
##Given the following parameters
X = np.array([
[1, 2, 3],
[2, 4, 6],
[3, 6, 9],
[4, 8, 12],
])
n_components = 2
##Understanding Principal Components Analysis. These two functions were given.
def center_data(X):
feature_means = X.mean(axis=0)
return (X - feature_means), feature_means
def principal_components(centered_data):
scatter_matrix = np.dot(centered_data.transpose(), centered_data)
eigen_values, eigen_vectors = np.linalg.eig(scatter_matrix)
# Re-order eigenvectors by eigenvalue magnitude:
idx = eigen_values.argsort()[::-1]
eigen_values = eigen_values[idx]
eigen_vectors = eigen_vectors[:, idx]
return eigen_vectors
def project_onto_PC(X, pcs, n_components, feature_means):
x_centered, feature_means = center_data(X)
n,m = X.shape
X_pca = np.dot(x_centered, pcs)
return X_pca[0:n, 0:n_components]
##Trying the MNIST regression using the given components
train_x, train_y, test_x, test_y = get_MNIST_data()
def center_data(X):
feature_means = X.mean(axis=0)
return (X - feature_means)
def principal_components(centered_data):
scatter_matrix = np.dot(centered_data.transpose(), centered_data)
eigen_values, eigen_vectors = np.linalg.eig(scatter_matrix)
# Re-order eigenvectors by eigenvalue magnitude:
idx = eigen_values.argsort()[::-1]
eigen_values = eigen_values[idx]
eigen_vectors = eigen_vectors[:, idx]
return eigen_vectors
#principal_components(center_data(train_x))
def project_onto_PC(X, n_components):
data_centered = center_data(X)
pcs= principal_components(data_centered)
x_centered = center_data(X)
n,m = X.shape
X_pca = np.dot(x_centered, pcs)
return X_pca[0:n, 0:n_components]
test_x_pca = project_onto_PC(test_x, n_components)
train_x_pca= project_onto_PC(train_x, n_components)
def run_softmax_on_MNIST_using_PCA(temp_parameter=1):
n_components = 18
train_x, train_y, test_x, test_y = get_MNIST_data()
train_x_pca = project_onto_PC(train_x, n_components)
test_x_pca = project_onto_PC(test_x, n_components)
theta, cost_function_history = softmax_regression(train_x_pca, train_y,
temp_parameter, alpha=0.3,
lambda_factor=1.0e-4, k=10,
num_iterations=150)
plot_cost_function_over_time(cost_function_history)
test_error = compute_test_error(test_x_pca,
test_y, theta,
temp_parameter)
return test_error
print('softmax PCA using test_error=', run_softmax_on_MNIST_using_PCA(temp_parameter=1))
##Dimensionality Reduction using Principal Component Analysis-- Other Approach
train_x_centered, feature_means = center_data(X)
pcs = principal_components(train_x_centered)
##Mean of each feature
feature_means= np.mean(X, axis=0)
##Center of the data
data_center= X - feature_means
##Covariance matrix of the centered data (covariance of each feature column with every other column)
cov_matrix = np.cov(data_center, rowvar=False)
eigenval, eigenvec = np.linalg.eig(cov_matrix)
significance = [np.abs(i)/np.sum(eigenval) for i in eigenval]
#Plotting the Cumulative Summation of the Explained Variance
plt.figure()
plt.plot(np.cumsum(significance))
plt.xlabel('Number of Components')
plt.ylabel('Variance (%)') #for each component
plt.title('Explained Variance')
plt.show()
| 30.387097
| 115
| 0.701168
|
fd096cb33e7c6df37e6df5db7bbc0435218879ae
| 12,372
|
py
|
Python
|
boqn/dropwave_test_runner.py
|
RaulAstudillo06/BOQN
|
c5b2bb9e547e2489f856ebf86c749fb24eba1022
|
[
"MIT"
] | null | null | null |
boqn/dropwave_test_runner.py
|
RaulAstudillo06/BOQN
|
c5b2bb9e547e2489f856ebf86c749fb24eba1022
|
[
"MIT"
] | null | null | null |
boqn/dropwave_test_runner.py
|
RaulAstudillo06/BOQN
|
c5b2bb9e547e2489f856ebf86c749fb24eba1022
|
[
"MIT"
] | null | null | null |
import os
import sys
import time
from copy import copy
import numpy as np
import torch
torch.set_default_dtype(torch.float64)
import botorch
from botorch.settings import debug
debug._set_state(True)
# Get script directory
script_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
project_path = script_dir[:-5]
# Problem setup
from dropwave import Dropwave
dropwave = Dropwave()
input_dim = 2
test_problem = 'dropwave'
results_folder = project_path + '/experiments_results/' + test_problem + '/'
# Define network structure
from dag import DAG
n_nodes = 2
dag_as_list = [[]]
dag_as_list.append([0])
dag= DAG(dag_as_list)
active_input_indices = [[0, 1], []]
main_input_indices = copy(active_input_indices)
# EI-QN especifics
from botorch.acquisition.objective import GenericMCObjective
from network_gp import NetworkGP
from botorch.acquisition.monte_carlo import qExpectedImprovement
from botorch.sampling.samplers import SobolQMCNormalSampler
from posterior_mean import PosteriorMean
g_mapping = lambda Y: Y[..., -1]
g = GenericMCObjective(g_mapping)
def output_for_EIQN(simulator_output):
return simulator_output
MC_SAMPLES = 512
BATCH_SIZE = 1
# EI especifics
from botorch.models import FixedNoiseGP
from gpytorch.mlls import ExactMarginalLogLikelihood
from botorch.acquisition import ExpectedImprovement
from botorch.acquisition import PosteriorMean as GPPosteriorMean
from botorch import fit_gpytorch_model
from botorch.models.transforms import Standardize
def output_for_EI(simulator_output):
return simulator_output[...,[-1]]
# KG especifics
from botorch.acquisition import qKnowledgeGradient
def optimize_KG_and_get_suggested_point(acq_func):
"""Optimizes the KG acquisition function, and returns a new candidate."""
candidate, _ = custom_optimize_acqf(
acq_function=acq_func,
bounds=bounds,
q=BATCH_SIZE,
num_restarts=10*input_dim,
raw_samples=100*input_dim,
#options={'disp': True, 'iprint': 101},
)
new_x = candidate.detach()
return new_x
# Random especifics
def update_random_observations(best_Random):
"""Simulates a random policy by taking a the current list of best values observed randomly,
drawing a new random point, observing its value, and updating the list.
"""
x = torch.rand([1, input_dim])
simulator_output = dropwave.evaluate(x)
fx = output_for_EI(simulator_output)
next_Random_best = fx.max().item()
best_Random.append(max(best_Random[-1], next_Random_best))
return best_Random
# GP model training
def initialize_model(X, Y, Yvar=None):
# define model
model = FixedNoiseGP(X, Y, torch.ones(Y.shape) * 1e-6, outcome_transform=Standardize(m=Y.shape[-1], batch_shape=torch.Size([])))
mll = ExactMarginalLogLikelihood(model.likelihood, model)
return mll, model
# Initial data generation
def generate_initial_X(n, seed=None):
# generate training data
if seed is not None:
old_state = torch.random.get_rng_state()
torch.manual_seed(seed)
X = torch.rand([n, input_dim])
torch.random.set_rng_state(old_state)
else:
X = torch.rand([n, input_dim])
return X
# Acquisition function optimization
from botorch.optim import optimize_acqf
from custom_optimizer import custom_optimize_acqf
bounds = torch.tensor([[0. for i in range(input_dim)], [1. for i in range(input_dim)]])
def optimize_acqf_and_get_suggested_point(acq_func, posterior_mean):
"""Optimizes the acquisition function, and returns a new candidate."""
baseline_candidate, _ = optimize_acqf(
acq_function=posterior_mean,
bounds=bounds,
q=BATCH_SIZE,
num_restarts=10*input_dim,
raw_samples=100*input_dim,
)
baseline_candidate = baseline_candidate.detach().view(torch.Size([1, BATCH_SIZE, input_dim]))
candidate, acq_value = custom_optimize_acqf(
acq_function=acq_func,
bounds=bounds,
q=BATCH_SIZE,
num_restarts=10*input_dim,
raw_samples=100*input_dim,
baseline_initial_conditions=baseline_candidate,
#options={'disp': True, 'iprint': 101},
)
baseline_acq_value = acq_func.forward(baseline_candidate)[0].detach()
print('Test begins')
print(acq_value)
print(baseline_acq_value)
print('Test ends')
if baseline_acq_value > acq_value:
print('Baseline candidate was best found.')
new_x = baseline_candidate
elif baseline_acq_value == acq_value:
p = np.random.rand(1)
if p > 0.5:
new_x = baseline_candidate
else:
new_x = candidate
else:
new_x = candidate
new_x = new_x.detach().view([BATCH_SIZE, input_dim])
return new_x
# Run BO loop times
N_BATCH = 100
if not os.path.exists(results_folder):
os.makedirs(results_folder)
if not os.path.exists(results_folder + 'X/'):
os.makedirs(results_folder + 'X/')
if not os.path.exists(results_folder + 'Y/'):
os.makedirs(results_folder + 'Y/')
if not os.path.exists(results_folder + 'running_times/'):
os.makedirs(results_folder + 'running_times/')
run_EIQN = False
run_EI = False
run_KG = True
run_Random = False
if len(sys.argv) == 3:
first_trial = int(sys.argv[1])
last_trial = int(sys.argv[2])
elif len(sys.argv) == 2:
first_trial = int(sys.argv[1])
last_trial = int(sys.argv[1])
if len(sys.argv) > 1:
for trial in range(first_trial, last_trial + 1):
# call helper functions to generate initial training data and initialize model
X = generate_initial_X(n=2*(input_dim+1), seed=trial)
simulator_output_at_X = dropwave.evaluate(X)
if run_EIQN:
best_observed_EIQN = []
running_times_EIQN = []
X_EIQN = X.clone()
fX_EIQN = simulator_output_at_X
best_value_EIQN = g_mapping(fX_EIQN).max().item()
best_observed_EIQN.append(best_value_EIQN)
if run_EI:
best_observed_EI = []
running_times_EI = []
X_EI = X.clone()
fX_EI = output_for_EI(simulator_output_at_X)
mll_EI, model_EI = initialize_model(X_EI, fX_EI)
best_value_EI = fX_EI.max().item()
best_observed_EI.append(best_value_EI)
if run_KG:
best_observed_KG = []
running_times_KG = []
X_KG = X.clone()
fX_KG = output_for_EI(simulator_output_at_X)
mll_KG, model_KG = initialize_model(X_KG, fX_KG)
best_value_KG = fX_KG.max().item()
best_observed_KG.append(best_value_KG)
if run_Random:
best_observed_Random = []
running_times_Random = []
best_observed_Random.append(output_for_EI(simulator_output_at_X).max().item())
# run N_BATCH rounds of BayesOpt after the initial random batch
for iteration in range(1, N_BATCH + 1):
print('Experiment: ' + test_problem)
print('Replication id: ' + str(trial))
print('Iteration: ' + str(iteration))
if run_EIQN:
t0 = time.time()
model_EIQN = NetworkGP(dag, X_EIQN, fX_EIQN, active_input_indices=active_input_indices, main_input_indices=main_input_indices)
qmc_sampler = SobolQMCNormalSampler(num_samples=MC_SAMPLES)
EIQN = qExpectedImprovement(
model=model_EIQN,
best_f=best_value_EIQN,
sampler=qmc_sampler,
objective=g,
)
posterior_mean_EIQN = PosteriorMean(
model=model_EIQN,
sampler=qmc_sampler,
objective=g,
)
new_x_EIQN = optimize_acqf_and_get_suggested_point(EIQN, posterior_mean_EIQN)
print('Candidate suggested by the EIQN policy: ' + str(new_x_EIQN))
t1 = time.time()
running_times_EIQN.append(t1 - t0)
new_fx_EIQN = dropwave.evaluate(new_x_EIQN)
X_EIQN = torch.cat([X_EIQN, new_x_EIQN], 0)
fX_EIQN = torch.cat([fX_EIQN, new_fx_EIQN], 0)
best_value_EIQN = g_mapping(fX_EIQN).max().item()
best_observed_EIQN.append(best_value_EIQN)
print('Best value so far found the EIQN policy: ' + str(best_value_EIQN) )
np.savetxt(results_folder + test_problem + '_EIQN_' + str(trial) + '.txt', np.atleast_1d(best_observed_EIQN))
np.savetxt(results_folder + 'running_times/' + test_problem + '_rt_EIQN_' + str(trial) + '.txt', np.atleast_1d(running_times_EIQN))
np.savetxt(results_folder + 'X/' + test_problem + '_X_EIQN_' + str(trial) + '.txt', X_EIQN.numpy())
np.savetxt(results_folder + 'Y/' + test_problem + '_Y_EIQN_' + str(trial) + '.txt', fX_EIQN.numpy())
if run_EI:
t0 = time.time()
fit_gpytorch_model(mll_EI)
EI = ExpectedImprovement(model=model_EI, best_f=best_value_EI)
posterior_mean_EI = GPPosteriorMean(model=model_EI)
new_x_EI = optimize_acqf_and_get_suggested_point(EI, posterior_mean_EI)
mll_EI, model_EI = initialize_model(X_EI, fX_EI)
t1 = time.time()
running_times_EI.append(t1 - t0)
new_fx_EI = output_for_EI(dropwave.evaluate(new_x_EI))
X_EI = torch.cat([X_EI, new_x_EI], 0)
fX_EI = torch.cat([fX_EI, new_fx_EI], 0)
best_value_EI = fX_EI.max().item()
best_observed_EI.append(best_value_EI)
print('Best value so far found the EI policy: ' + str(best_value_EI) )
np.savetxt(results_folder + test_problem + '_EI_' + str(trial) + '.txt', np.atleast_1d(best_observed_EI))
np.savetxt(results_folder + 'running_times/' + test_problem + '_rt_EI_' + str(trial) + '.txt', np.atleast_1d(running_times_EI))
np.savetxt(results_folder + 'X/' + test_problem + '_X_EI_' + str(trial) + '.txt', X_EI.numpy())
np.savetxt(results_folder + 'Y/' + test_problem + '_Y_EI_' + str(trial) + '.txt', fX_EI.numpy())
if run_KG:
t0 = time.time()
fit_gpytorch_model(mll_KG)
KG = qKnowledgeGradient(model=model_KG, num_fantasies=8)
new_x_KG = optimize_KG_and_get_suggested_point(KG)
mll_KG, model_KG = initialize_model(X_KG, fX_KG)
t1 = time.time()
running_times_KG.append(t1 - t0)
new_fx_KG = output_for_EI(dropwave.evaluate(new_x_KG))
X_KG = torch.cat([X_KG, new_x_KG], 0)
fX_KG = torch.cat([fX_KG, new_fx_KG], 0)
best_value_KG = fX_KG.max().item()
best_observed_KG.append(best_value_KG)
print('Best value so far found the KG policy: ' + str(best_value_KG) )
np.savetxt(results_folder + test_problem + '_KG_' + str(trial) + '.txt', np.atleast_1d(best_observed_KG))
np.savetxt(results_folder + 'running_times/' + test_problem + '_rt_KG_' + str(trial) + '.txt', np.atleast_1d(running_times_KG))
np.savetxt(results_folder + 'X/' + test_problem + '_X_KG_' + str(trial) + '.txt', X_KG.numpy())
np.savetxt(results_folder + 'Y/' + test_problem + '_Y_KG_' + str(trial) + '.txt', fX_KG.numpy())
if run_Random:
best_observed_Random = update_random_observations(best_observed_Random)
print('Best value so far found the Random policy: ' + str(best_observed_Random[-1]))
np.savetxt(results_folder + test_problem + '_Random_' + str(trial) + '.txt', np.atleast_1d(best_observed_Random))
print('')
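# Example usage (trial ids are illustrative): run replications 1 through 5 with
#   python dropwave_test_runner.py 1 5
# or a single replication with
#   python dropwave_test_runner.py 3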
| 38.303406
| 147
| 0.620272
|
4c8c2371406849ab59119bb6c6541ede81cf303c
| 7,673
|
py
|
Python
|
onshape.py
|
PartyParrot9000/APITools
|
34b3faf1e96102e27d162973497e135296971c69
|
[
"MIT"
] | null | null | null |
onshape.py
|
PartyParrot9000/APITools
|
34b3faf1e96102e27d162973497e135296971c69
|
[
"MIT"
] | null | null | null |
onshape.py
|
PartyParrot9000/APITools
|
34b3faf1e96102e27d162973497e135296971c69
|
[
"MIT"
] | null | null | null |
'''
onshape
======
Provides access to the Onshape REST API
'''
import os
import random
import string
import json
import hmac
import hashlib
import base64
import urllib.request
import urllib.parse
import urllib.error
import datetime
import requests
from urllib.parse import urlparse
from urllib.parse import parse_qs
class Onshape():
'''
Provides access to the Onshape REST API.
Attributes:
- stack (str): Base URL
    - creds (str, default='./creds.json'): Credentials location
- logging (bool, default=True): Turn logging on or off
'''
def __init__(self, stack, creds='./creds.json', logging=True):
'''
Instantiates an instance of the Onshape class. Reads credentials from a JSON file
of this format:
{
"http://cad.onshape.com": {
"access_key": "YOUR KEY HERE",
"secret_key": "YOUR KEY HERE"
},
etc... add new object for each stack to test on
}
The creds.json file should be stored in the root project folder; optionally,
you can specify the location of a different file.
Args:
- stack (str): Base URL
        - creds (str, default='./creds.json'): Credentials location
'''
if not os.path.isfile(creds):
raise IOError('%s is not a file' % creds)
with open(creds) as f:
try:
stacks = json.load(f)
if stack in stacks:
self._url = stack
self._access_key = stacks[stack]['access_key'].encode(
'utf-8')
self._secret_key = stacks[stack]['secret_key'].encode(
'utf-8')
self._logging = logging
else:
raise ValueError('specified stack not in file')
except TypeError:
raise ValueError('%s is not valid json' % creds)
if self._logging:
print('onshape instance created: url = %s, access key = %s' % (
self._url, self._access_key))
def _make_nonce(self):
'''
Generate a unique ID for the request, 25 chars in length
Returns:
- str: Cryptographic nonce
'''
chars = string.digits + string.ascii_letters
nonce = ''.join(random.choice(chars) for i in range(25))
if self._logging:
print('nonce created: %s' % nonce)
return nonce
def _make_auth(self, method, date, nonce, path, query={}, ctype='application/json'):
'''
Create the request signature to authenticate
Args:
- method (str): HTTP method
- date (str): HTTP date header string
- nonce (str): Cryptographic nonce
- path (str): URL pathname
- query (dict, default={}): URL query string in key-value pairs
- ctype (str, default='application/json'): HTTP Content-Type
'''
query = urllib.parse.urlencode(query)
hmac_str = (method + '\n' + nonce + '\n' + date + '\n' + ctype + '\n' + path +
'\n' + query + '\n').lower().encode('utf-8')
signature = base64.b64encode(
hmac.new(self._secret_key, hmac_str, digestmod=hashlib.sha256).digest())
auth = 'On ' + self._access_key.decode('utf-8') + \
':HmacSHA256:' + signature.decode('utf-8')
if self._logging:
print({
'query': query,
'hmac_str': hmac_str,
'signature': signature,
'auth': auth
})
return auth
def _make_headers(self, method, path, query={}, headers={}):
'''
Creates a headers object to sign the request
Args:
- method (str): HTTP method
- path (str): Request path, e.g. /api/documents. No query string
- query (dict, default={}): Query string in key-value format
- headers (dict, default={}): Other headers to pass in
Returns:
- dict: Dictionary containing all headers
'''
date = datetime.datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT')
nonce = self._make_nonce()
ctype = headers.get(
'Content-Type') if headers.get('Content-Type') else 'application/json'
auth = self._make_auth(method, date, nonce, path,
query=query, ctype=ctype)
req_headers = {
'Content-Type': 'application/json',
'Date': date,
'On-Nonce': nonce,
'Authorization': auth,
'User-Agent': 'Onshape Python Sample App',
'Accept': 'application/json'
}
# add in user-defined headers
for h in headers:
req_headers[h] = headers[h]
return req_headers
def request(self, method, path, query={}, headers={}, body={}, base_url=None, timeout=None, check_status=True):
'''
Issues a request to Onshape
Args:
- method (str): HTTP method
- path (str): Path e.g. /api/documents/:id
- query (dict, default={}): Query params in key-value pairs
- headers (dict, default={}): Key-value pairs of headers
- body (dict, default={}): Body for POST request
- base_url (str, default=None): Host, including scheme and port (if different from creds file)
- timeout (float, default=None): Timeout to use with requests.request().
- check_status (bool, default=True): Raise exception if response status code is unsuccessful.
Returns:
- requests.Response: Object containing the response from Onshape
'''
req_headers = self._make_headers(method, path, query, headers)
if base_url is None:
base_url = self._url
url = base_url + path + '?' + urllib.parse.urlencode(query)
if self._logging:
print(body)
print(req_headers)
print('request url: ' + url)
is_binary = False
if 'Accept' in headers and headers['Accept'] == 'application/vnd.onshape.v1+octet-stream':
is_binary = True
# only parse as json string if we have to
body = json.dumps(body) if type(body) == dict else body
res = requests.request(
method, url, headers=req_headers, data=body, allow_redirects=False, stream=True,
timeout=timeout)
if res.status_code == 307:
location = urlparse(res.headers["Location"])
querystring = parse_qs(location.query)
if self._logging:
print('request redirected to: ' + location.geturl())
new_query = {}
new_base_url = location.scheme + '://' + location.netloc
for key in querystring:
# won't work for repeated query params
new_query[key] = querystring[key][0]
return self.request(method, location.path, query=new_query, headers=headers, base_url=new_base_url)
elif not 200 <= res.status_code <= 206:
if self._logging:
print('request failed, details: ' + res.text)
else:
if self._logging:
if is_binary:
print('request succeeded')
else:
print('request succeeded, details: ' + res.text)
if check_status:
res.raise_for_status()
return res
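# Minimal usage sketch (the stack URL matches the creds.json example in the class
# docstring, and /api/documents is the sample path mentioned above):
#   client = Onshape(stack='http://cad.onshape.com', creds='./creds.json', logging=False)
#   res = client.request('get', '/api/documents')
#   print(res.json())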
| 33.50655
| 115
| 0.549589
|
5399301142bba7f00de32fea340036aa796a3a52
| 10,198
|
py
|
Python
|
examples/moocdl/moocdl.py
|
Myfanily123456/DecryptLogin
|
bf11dde3430b3b64ed59d3487803baac48685d4d
|
[
"MIT"
] | 1
|
2022-02-23T11:58:57.000Z
|
2022-02-23T11:58:57.000Z
|
examples/moocdl/moocdl.py
|
Myfanily123456/DecryptLogin
|
bf11dde3430b3b64ed59d3487803baac48685d4d
|
[
"MIT"
] | null | null | null |
examples/moocdl/moocdl.py
|
Myfanily123456/DecryptLogin
|
bf11dde3430b3b64ed59d3487803baac48685d4d
|
[
"MIT"
] | null | null | null |
'''
Function:
    MOOC downloader
Author:
    Charles
WeChat official account:
    Charles的皮卡丘
import os
import re
import time
import click
import random
import shutil
import argparse
import subprocess
from tqdm import tqdm
from DecryptLogin import login
from urllib.parse import urlencode
'''Parse command-line arguments'''
def parseArgs():
    parser = argparse.ArgumentParser(description='MOOC downloader')
    parser.add_argument('--url', dest='url', help='Course link, e.g.: https://www.icourse163.org/course/SJTU-1003381021', type=str, required=True)
args = parser.parse_args()
return args
'''MOOC downloader'''
class MOOCDL():
def __init__(self, username='s_sharing@126.com', password='123456', **kwargs):
self.infos_return, self.session = self.login(username, password)
    '''Run'''
def run(self, url):
        # Get information from the course home page
url = url.replace('learn/', 'course/')
response = self.session.get(url)
term_id = re.findall(r'termId : "(\d+)"', response.text)[0]
course_name = ' - '.join(re.findall(r'name:"(.+)"', response.text))
course_name = self.filterBadCharacter(course_name)
course_id = re.findall(r'https?://www.icourse163.org/(course|learn)/\w+-(\d+)', url)[0]
        print(f'Information obtained from the course home page:\n\t[Course name]: {course_name}, [Course ID]: {course_id}, [TID]: {term_id}')
        # Build the resource list
resource_list = []
data = {
'tid': term_id,
'mob-token': self.infos_return['results']['mob-token'],
}
response = self.session.post('https://www.icourse163.org/mob/course/courseLearn/v1', data=data)
course_info = response.json()
file_types = [1, 3, 4]
for chapter_num, chapter in enumerate(course_info.get('results', {}).get('termDto', {}).get('chapters', [])):
for lesson_num, lesson in enumerate(chapter.get('lessons', [])) if chapter.get('lessons') is not None else []:
for unit_num, unit in enumerate(lesson.get('units', [])):
if unit['contentType'] not in file_types: continue
savedir = course_name
self.checkdir(savedir)
for item in [self.filterBadCharacter(chapter['name']), self.filterBadCharacter(lesson['name']), self.filterBadCharacter(unit['name'])]:
savedir = os.path.join(savedir, item)
self.checkdir(savedir)
if unit['contentType'] == file_types[0]:
savename = self.filterBadCharacter(unit['name']) + '.mp4'
resource_list.append({
'savedir': savedir,
'savename': savename,
'type': 'video',
'contentId': unit['contentId'],
'id': unit['id'],
})
elif unit['contentType'] == file_types[1]:
savename = self.filterBadCharacter(unit['name']) + '.pdf'
resource_list.append({
'savedir': savedir,
'savename': savename,
'type': 'pdf',
'contentId': unit['contentId'],
'id': unit['id'],
})
elif unit['contentType'] == file_types[2]:
if unit.get('jsonContent'):
json_content = eval(unit['jsonContent'])
savename = self.filterBadCharacter(json_content['fileName'])
resource_list.append({
'savedir': savedir,
'savename': savename,
'type': 'rich_text',
'jsonContent': json_content,
})
        print(f'Resource list fetched successfully, {len(resource_list)} items in total')
        # Download each resource
pbar = tqdm(resource_list)
for resource in pbar:
pbar.set_description(f'downloading {resource["savename"]}')
            # -- Download video
if resource['type'] == 'video':
data = {
'bizType': '1',
'mob-token': self.infos_return['results']['mob-token'],
'bizId': resource['id'],
'contentType': '1',
}
while True:
response = self.session.post('https://www.icourse163.org/mob/j/v1/mobileResourceRpcBean.getResourceToken.rpc', data=data)
if response.json()['results'] is not None: break
time.sleep(0.5 + random.random())
signature = response.json()['results']['videoSignDto']['signature']
data = {
'enVersion': '1',
'clientType': '2',
'mob-token': self.infos_return['results']['mob-token'],
'signature': signature,
'videoId': resource['contentId'],
}
response = self.session.post('https://vod.study.163.com/mob/api/v1/vod/videoByNative', data=data)
                # ---- Download the video file
videos = response.json()['results']['videoInfo']['videos']
resolutions, video_url = [3, 2, 1], None
for resolution in resolutions:
for video in videos:
if video['quality'] == resolution:
video_url = video["videoUrl"]
break
if video_url is not None: break
if '.m3u8' in video_url:
self.m3u8download({
'download_url': video_url,
'savedir': resource['savedir'],
'savename': resource['savename'],
})
else:
self.defaultdownload({
'download_url': video_url,
'savedir': resource['savedir'],
'savename': resource['savename'],
})
                # ---- Download subtitles
srt_info = response.json()['results']['videoInfo']['srtCaptions']
if srt_info:
for srt_item in srt_info:
srt_name = os.path.splitext(resource['savename'])[0] + '_' + srt_item['languageCode'] + '.srt'
srt_url = srt_item['url']
response = self.session.get(srt_url)
fp = open(os.path.join(resource['savedir'], srt_name), 'wb')
fp.write(response.content)
fp.close()
            # -- Download PDF
elif resource['type'] == 'pdf':
data = {
't': '3',
'cid': resource['contentId'],
'unitId': resource['id'],
'mob-token': self.infos_return['results']['mob-token'],
}
response = self.session.post('http://www.icourse163.org/mob/course/learn/v1', data=data)
pdf_url = response.json()['results']['learnInfo']['textOrigUrl']
self.defaultdownload({
'download_url': pdf_url,
'savedir': resource['savedir'],
'savename': resource['savename'],
})
            # -- Download rich-text attachment
elif resource['type'] == 'rich_text':
download_url = 'http://www.icourse163.org/mob/course/attachment.htm?' + urlencode(resource['jsonContent'])
self.defaultdownload({
'download_url': download_url,
'savedir': resource['savedir'],
'savename': resource['savename'],
})
    '''Log in'''
def login(self, username, password):
lg = login.Login()
infos_return, session = lg.icourse163(username, password)
return infos_return, session
    '''Check whether the directory exists (create it if not)'''
def checkdir(self, dirpath):
if not os.path.exists(dirpath):
os.mkdir(dirpath)
return False
return True
    '''Strip characters that may cause problems'''
def filterBadCharacter(self, string):
need_removed_strs = ['<em>', '</em>', '<', '>', '\\', '/', '?', ':', '"', ':', '|', '?', '*']
for item in need_removed_strs:
string = string.replace(item, '')
try:
rule = re.compile(u'[\U00010000-\U0010ffff]')
except:
rule = re.compile(u'[\uD800-\uDBFF][\uDC00-\uDFFF]')
string = rule.sub('', string)
return string.strip().encode('utf-8', 'ignore').decode('utf-8')
    '''Default downloader'''
def defaultdownload(self, info):
try:
is_success = False
with self.session.get(info['download_url'], stream=True, verify=False) as response:
if response.status_code == 200:
total_size, chunk_size = int(response.headers['content-length']), 1024
label = '[FileSize]: %0.2fMB' % (total_size / 1024 / 1024)
with click.progressbar(length=total_size, label=label) as progressbar:
with open(os.path.join(info['savedir'], info['savename']), 'wb') as fp:
for chunk in response.iter_content(chunk_size=chunk_size):
if chunk:
fp.write(chunk)
progressbar.update(len(chunk))
is_success = True
except:
is_success = False
return is_success
    '''Download an m3u8 stream'''
def m3u8download(self, info):
savepath = os.path.join(info['savedir'], info['savename'])
ext = os.path.splitext(info['savename'])[-1]
download_url = info['download_url']
p = subprocess.Popen(f'ffmpeg -i "{download_url}" tmp.{ext}')
while True:
if subprocess.Popen.poll(p) is not None:
shutil.move(f'tmp.{ext}', savepath)
return True
'''run'''
if __name__ == '__main__':
args = parseArgs()
client = MOOCDL()
client.run(args.url)
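# Example invocation (course URL taken from the --url help text above):
#   python moocdl.py --url https://www.icourse163.org/course/SJTU-1003381021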
| 45.123894
| 155
| 0.488429
|
00ea98fc4c7a2248daeea39702a1737a091a26c4
| 706
|
py
|
Python
|
tests/mock.py
|
ushiboy/pi-cmd
|
1d9cba8f1e82005078e0d737efaa6d628a55f420
|
[
"MIT"
] | 1
|
2021-06-28T02:15:55.000Z
|
2021-06-28T02:15:55.000Z
|
tests/mock.py
|
ushiboy/picmd
|
1d9cba8f1e82005078e0d737efaa6d628a55f420
|
[
"MIT"
] | 1
|
2020-05-20T14:38:06.000Z
|
2020-05-20T14:44:31.000Z
|
tests/mock.py
|
ushiboy/picmd
|
1d9cba8f1e82005078e0d737efaa6d628a55f420
|
[
"MIT"
] | null | null | null |
class MockSerial:
@property
def in_waiting(self):
if self._cursor < len(self._buffer_datas):
return len(self._buffer_datas[self._cursor])
return 0
def __init__(self, buffer_datas=None):
if buffer_datas is None:
buffer_datas = []
self._buffer_datas = buffer_datas
self._cursor = 0
self.written_data = b''
def read(self, size):
# ignore size
if self._cursor < len(self._buffer_datas):
r = self._buffer_datas[self._cursor]
self._cursor += 1
return r
return b''
def write(self, data):
self.written_data += data
def close(self):
pass
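# Small self-check (not part of the original mock): feed two buffered reads and
# verify that the mock drains them in order and records written bytes.
if __name__ == '__main__':
    ser = MockSerial(buffer_datas=[b'\x01\x02', b'\x03'])
    assert ser.in_waiting == 2
    assert ser.read(2) == b'\x01\x02'
    assert ser.read(1) == b'\x03'
    assert ser.read(1) == b''
    ser.write(b'ok')
    assert ser.written_data == b'ok'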
| 24.344828
| 56
| 0.570822
|
aba3d494758ab59bb84eb39eff03c15da0eafeae
| 767
|
py
|
Python
|
461.Hamming-Distance.py
|
mickey0524/leetcode
|
6bedeb6ff29b02a97178cca464c5fd639951801f
|
[
"MIT"
] | 18
|
2018-07-14T12:45:37.000Z
|
2022-03-26T14:51:04.000Z
|
461.Hamming-Distance.py
|
mickey0524/leetcode
|
6bedeb6ff29b02a97178cca464c5fd639951801f
|
[
"MIT"
] | null | null | null |
461.Hamming-Distance.py
|
mickey0524/leetcode
|
6bedeb6ff29b02a97178cca464c5fd639951801f
|
[
"MIT"
] | 3
|
2019-05-29T04:09:22.000Z
|
2021-06-07T23:37:46.000Z
|
# https://leetcode.com/problems/hamming-distance/
#
# algorithms
# Easy (70.58%)
# Total Accepted: 253,549
# Total Submissions: 359,219
class Solution(object):
def hammingDistance(self, x, y):
"""
:type x: int
:type y: int
:rtype: int
"""
ans = 0
for _ in xrange(32):
x_bit, y_bit = x & 1, y & 1
if x_bit != y_bit:
ans += 1
x >>= 1
y >>= 1
return ans
class Solution1(object):
def hammingDistance(self, x, y):
"""
:type x: int
:type y: int
:rtype: int
"""
xor = x ^ y
res = 0
while xor:
res += 1
xor &= (xor - 1)
return res
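# Worked example: x = 1 (0b0001), y = 4 (0b0100) -> x ^ y = 0b0101 has two set
# bits, so the Hamming distance is 2. Solution1 counts the set bits with
# Kernighan's trick: xor &= (xor - 1) clears the lowest set bit each iteration.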
| 17.431818
| 49
| 0.431551
|
d0081ba553d230f1a10a1fad0d54c902a4264a35
| 2,246
|
py
|
Python
|
flow/benchmarks/baselines/us_merge012.py
|
cuijiaxun/MITC
|
a226308424237a69b5e938baf72949de9b1b4bf2
|
[
"MIT"
] | 1
|
2021-06-17T03:25:13.000Z
|
2021-06-17T03:25:13.000Z
|
flow/benchmarks/baselines/us_merge012.py
|
cuijiaxun/MITC
|
a226308424237a69b5e938baf72949de9b1b4bf2
|
[
"MIT"
] | null | null | null |
flow/benchmarks/baselines/us_merge012.py
|
cuijiaxun/MITC
|
a226308424237a69b5e938baf72949de9b1b4bf2
|
[
"MIT"
] | 1
|
2021-03-18T16:20:57.000Z
|
2021-03-18T16:20:57.000Z
|
"""Evaluates the baseline performance of merge without RL control.
Baseline is no AVs.
"""
import numpy as np
from flow.core.experiment import Experiment
from flow.core.params import InitialConfig
from flow.core.params import TrafficLightParams
from flow.benchmarks.us_merge_baseline import flow_params
def merge_baseline(num_runs):
"""Run script for all merge baselines.
Parameters
----------
num_runs : int
number of rollouts the performance of the environment is evaluated
over
flow_params : dict
the flow meta-parameters describing the structure of a benchmark.
Must be one of the merge flow_params
Returns
-------
    float
        the average of results['mean_returns'] over the evaluated runs
"""
exp_tag = flow_params['exp_tag']
sim_params = flow_params['sim']
vehicles = flow_params['veh']
env_params = flow_params['env']
net_params = flow_params['net']
initial_config = flow_params.get('initial', InitialConfig())
traffic_lights = flow_params.get('tls', TrafficLightParams())
# set the evaluation flag to True
env_params.evaluate = True
# import the scenario class
module = __import__('flow.scenarios', fromlist=[flow_params['scenario']])
scenario_class = getattr(module, flow_params['scenario'])
# create the scenario object
scenario = scenario_class(
name=exp_tag,
vehicles=vehicles,
net_params=net_params,
initial_config=initial_config,
traffic_lights=traffic_lights
)
# import the environment class
module = __import__('flow.envs', fromlist=[flow_params['env_name']])
env_class = getattr(module, flow_params['env_name'])
# create the environment object
env = env_class(env_params, sim_params, scenario)
exp = Experiment(env)
results = exp.run(num_runs, env_params.horizon, convert_to_csv=True)
avg_speed = np.mean(results['mean_returns'])
return avg_speed
if __name__ == '__main__':
runs = 30 # number of simulations to average over
res = merge_baseline(num_runs=runs)
print('---------')
    # TODO: merge_baseline() averages results['mean_returns'], so this is a mean return, not a speed.
    print('The average return across {} runs is {}'.format(runs, res))
| 29.946667
| 86
| 0.685663
|
a6a2bb24074a52a95a5c5a759aed70bedb5affbb
| 48
|
py
|
Python
|
sanic-utils/sanic_utils/__init__.py
|
chivandikwa/sanic-utils
|
cbce8b7eb860d9c47b77d48c77ec6ecd7b13aa13
|
[
"MIT"
] | null | null | null |
sanic-utils/sanic_utils/__init__.py
|
chivandikwa/sanic-utils
|
cbce8b7eb860d9c47b77d48c77ec6ecd7b13aa13
|
[
"MIT"
] | null | null | null |
sanic-utils/sanic_utils/__init__.py
|
chivandikwa/sanic-utils
|
cbce8b7eb860d9c47b77d48c77ec6ecd7b13aa13
|
[
"MIT"
] | null | null | null |
from sanic_utils import from_json # noqa: F401
| 24
| 47
| 0.791667
|
e18b7bfacb8f282c34efe5ea99032c07d4c3a6f1
| 1,312
|
py
|
Python
|
setup.py
|
clld/ids
|
ae52459d3b9284e5da8d755b66e2757dbb3875df
|
[
"Apache-2.0"
] | 4
|
2016-08-26T17:53:33.000Z
|
2021-11-23T15:05:14.000Z
|
setup.py
|
clld/ids
|
ae52459d3b9284e5da8d755b66e2757dbb3875df
|
[
"Apache-2.0"
] | 13
|
2015-01-29T13:17:10.000Z
|
2021-11-23T10:49:02.000Z
|
setup.py
|
clld/ids
|
ae52459d3b9284e5da8d755b66e2757dbb3875df
|
[
"Apache-2.0"
] | 2
|
2015-12-06T22:03:27.000Z
|
2021-11-22T12:32:27.000Z
|
from setuptools import setup, find_packages
setup(
name='ids',
version='0.0',
description='ids',
long_description='',
classifiers=[
"Programming Language :: Python",
"Framework :: Pyramid",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
],
author='',
author_email='',
url='',
keywords='web pyramid pylons',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=[
'clld>=9.0.0',
'clldmpg>=4.2.0',
'clldutils>=3.9.0',
'clld-glottologfamily-plugin>=4.0.0',
'csvw>=1.11.0',
'pyglottolog>=3.6.0',
'pyconcepticon>=2.8.0',
'pycldf>=1.23.0',
'sqlalchemy>=1.4.23',
'waitress>=1.4.4',
],
extras_require={
'dev': [
'flake8',
'tox',
],
'test': [
'psycopg2>=2.8.6',
'pytest>=6.2.5',
'pytest-clld>=1.0.3',
'pytest-mock>=3.6.1',
'pytest-cov>=2.12.1',
'coverage>=5.5',
'selenium>=3.141.0',
'zope.component>=5.0.1',
],
},
test_suite="ids",
entry_points="""\
[paste.app_factory]
main = ids:main
""",
)
| 23.428571
| 63
| 0.476372
|
d309498cdbc0049546db403b718365f43d98bc5c
| 10,945
|
py
|
Python
|
python/phonenumbers/geocoder.py
|
rodgar-nvkz/python-phonenumbers
|
4c7c4892211dbc9bc328bc3356b03853eaf993dc
|
[
"Apache-2.0"
] | 2,424
|
2015-01-05T05:34:45.000Z
|
2022-03-28T22:37:53.000Z
|
python/phonenumbers/geocoder.py
|
rodgar-nvkz/python-phonenumbers
|
4c7c4892211dbc9bc328bc3356b03853eaf993dc
|
[
"Apache-2.0"
] | 166
|
2015-01-30T23:59:18.000Z
|
2022-03-14T21:08:42.000Z
|
python/phonenumbers/geocoder.py
|
rodgar-nvkz/python-phonenumbers
|
4c7c4892211dbc9bc328bc3356b03853eaf993dc
|
[
"Apache-2.0"
] | 345
|
2015-01-02T00:33:27.000Z
|
2022-03-26T13:06:57.000Z
|
"""Phone number geocoding functionality
>>> import phonenumbers
>>> from phonenumbers.geocoder import description_for_number
>>> from phonenumbers.util import u
>>> gb_number = phonenumbers.parse("+442083612345", "GB")
>>> de_number = phonenumbers.parse("0891234567", "DE")
>>> ch_number = phonenumbers.parse("0431234567", "CH")
>>> str(description_for_number(gb_number, "en"))
'London'
>>> str(description_for_number(gb_number, "fr")) # fall back to English
'London'
>>> str(description_for_number(gb_number, "en", region="GB"))
'London'
>>> str(description_for_number(gb_number, "en", region="US")) # fall back to country
'United Kingdom'
>>> str(description_for_number(de_number, "en"))
'Munich'
>>> u('M\u00fcnchen') == description_for_number(de_number, "de")
True
>>> u('Z\u00fcrich') == description_for_number(ch_number, "de")
True
>>> str(description_for_number(ch_number, "en"))
'Zurich'
>>> str(description_for_number(ch_number, "fr"))
'Zurich'
>>> str(description_for_number(ch_number, "it"))
'Zurigo'
"""
# Based very loosely on original Java code:
# java/src/com/google/i18n/phonenumbers/geocoding/PhoneNumberOfflineGeocoder.java
# Copyright (C) 2009-2011 The Libphonenumber Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .util import prnt, unicod, u, U_EMPTY_STRING
from .phonenumberutil import region_code_for_number, PhoneNumberType
from .phonenumberutil import country_mobile_token, national_significant_number, number_type
from .phonenumberutil import region_code_for_country_code, region_codes_for_country_code
from .phonenumberutil import is_valid_number_for_region, parse, NumberParseException
from .phonenumberutil import is_number_type_geographical
from .prefix import _prefix_description_for_number
try:
from .geodata import GEOCODE_DATA, GEOCODE_LONGEST_PREFIX
from .geodata.locale import LOCALE_DATA
except ImportError: # pragma no cover
# Before the generated code exists, the geodata/ directory is empty.
# The generation process imports this module, creating a circular
# dependency. The hack below works around this.
import os
import sys
if (os.path.basename(sys.argv[0]) == "buildmetadatafromxml.py" or
os.path.basename(sys.argv[0]) == "buildprefixdata.py"):
prnt("Failed to import generated data (but OK as during autogeneration)", file=sys.stderr)
GEOCODE_DATA = {'1': {'en': u('United States')}}
GEOCODE_LONGEST_PREFIX = 1
LOCALE_DATA = {'US': {'en': u('United States')}}
else:
raise
__all__ = ['country_name_for_number', 'description_for_valid_number', 'description_for_number']
def country_name_for_number(numobj, lang, script=None, region=None):
"""Returns the customary display name in the given language for the given
territory the given PhoneNumber object is from. If it could be from many
territories, nothing is returned.
Arguments:
numobj -- The PhoneNumber object for which we want to get a text description.
lang -- A 2-letter lowercase ISO 639-1 language code for the language in
which the description should be returned (e.g. "en")
script -- A 4-letter titlecase (first letter uppercase, rest lowercase)
ISO script code as defined in ISO 15924, separated by an
underscore (e.g. "Hant")
region -- A 2-letter uppercase ISO 3166-1 country code (e.g. "GB")
The script and region parameters are currently ignored.
Returns a text description in the given language code, for the given phone
number's region, or an empty string if no description is available."""
region_codes = region_codes_for_country_code(numobj.country_code)
if len(region_codes) == 1:
return _region_display_name(region_codes[0], lang, script, region)
else:
region_where_number_is_valid = u("ZZ")
for region_code in region_codes:
if is_valid_number_for_region(numobj, region_code):
# If the number has already been found valid for one region,
# then we don't know which region it belongs to so we return
# nothing.
if region_where_number_is_valid != u("ZZ"):
return U_EMPTY_STRING
region_where_number_is_valid = region_code
return _region_display_name(region_where_number_is_valid, lang, script, region)
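# Illustrative note (not part of the upstream doctests): country calling code 1
# is shared by the US, Canada and several Caribbean territories, so for a "+1"
# number the loop above only returns a country name when the number validates
# in exactly one of those regions; if it validates in several, the empty string
# is returned rather than guessing.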
def _region_display_name(region_code, lang, script=None, region=None):
if region_code in LOCALE_DATA:
# The Locale data has a set of names for this region, in various languages.
name = LOCALE_DATA[region_code].get(lang, "")
if name.startswith('*'):
# If the location name is "*<other_lang>", this indicates that the
# name is held elsewhere, specifically in the [other_lang] entry
other_lang = name[1:]
name = LOCALE_DATA[region_code].get(other_lang, "")
return unicod(name)
return U_EMPTY_STRING
def description_for_valid_number(numobj, lang, script=None, region=None):
"""Return a text description of a PhoneNumber object, in the language
provided.
The description might consist of the name of the country where the phone
number is from and/or the name of the geographical area the phone number
is from if more detailed information is available.
If the phone number is from the same region as the user, only a
lower-level description will be returned, if one exists. Otherwise, the
phone number's region will be returned, with optionally some more detailed
information.
For example, for a user from the region "US" (United States), we would
show "Mountain View, CA" for a particular number, omitting the United
States from the description. For a user from the United Kingdom (region
"GB"), for the same number we may show "Mountain View, CA, United States"
or even just "United States".
This function assumes the validity of the number passed in has already
been checked, and that the number is suitable for geocoding. We consider
fixed-line and mobile numbers possible candidates for geocoding.
Arguments:
numobj -- A valid PhoneNumber object for which we want to get a text
description.
lang -- A 2-letter lowercase ISO 639-1 language code for the language in
which the description should be returned (e.g. "en")
script -- A 4-letter titlecase (first letter uppercase, rest lowercase)
ISO script code as defined in ISO 15924, separated by an
underscore (e.g. "Hant")
region -- The region code for a given user. This region will be omitted
from the description if the phone number comes from this
region. It should be a two-letter upper-case CLDR region
code.
Returns a text description in the given language code, for the given phone
number, or an empty string if the number could come from multiple countries,
or the country code is in fact invalid."""
number_region = region_code_for_number(numobj)
if region is None or region == number_region:
mobile_token = country_mobile_token(numobj.country_code)
national_number = national_significant_number(numobj)
if mobile_token != U_EMPTY_STRING and national_number.startswith(mobile_token):
# In some countries, eg. Argentina, mobile numbers have a mobile token
# before the national destination code, this should be removed before
# geocoding.
national_number = national_number[len(mobile_token):]
region = region_code_for_country_code(numobj.country_code)
try:
copied_numobj = parse(national_number, region)
except NumberParseException:
# If this happens, just re-use what we had.
copied_numobj = numobj
area_description = _prefix_description_for_number(GEOCODE_DATA, GEOCODE_LONGEST_PREFIX,
copied_numobj, lang, script, region)
else:
area_description = _prefix_description_for_number(GEOCODE_DATA, GEOCODE_LONGEST_PREFIX,
numobj, lang, script, region)
if area_description != "":
return area_description
else:
# Fall back to the description of the number's region
return country_name_for_number(numobj, lang, script, region)
else:
# Otherwise, we just show the region(country) name for now.
return _region_display_name(number_region, lang, script, region)
# TODO: Concatenate the lower-level and country-name information in an
# appropriate way for each language.
def description_for_number(numobj, lang, script=None, region=None):
"""Return a text description of a PhoneNumber object for the given language.
The description might consist of the name of the country where the phone
number is from and/or the name of the geographical area the phone number
is from. This function explicitly checks the validity of the number passed in
Arguments:
numobj -- The PhoneNumber object for which we want to get a text description.
lang -- A 2-letter lowercase ISO 639-1 language code for the language in
which the description should be returned (e.g. "en")
script -- A 4-letter titlecase (first letter uppercase, rest lowercase)
ISO script code as defined in ISO 15924, separated by an
underscore (e.g. "Hant")
region -- The region code for a given user. This region will be omitted
from the description if the phone number comes from this
region. It should be a two-letter upper-case CLDR region
code.
Returns a text description in the given language code, for the given phone
number, or an empty string if no description is available."""
ntype = number_type(numobj)
if ntype == PhoneNumberType.UNKNOWN:
return ""
elif not is_number_type_geographical(ntype, numobj.country_code):
return country_name_for_number(numobj, lang, script, region)
return description_for_valid_number(numobj, lang, script, region)
if __name__ == '__main__': # pragma no cover
import doctest
doctest.testmod()
| 48.429204
| 99
| 0.696848
|
ba1b045a2f8bddbee6061c434ea2b4c9f5c2f60c
| 952
|
py
|
Python
|
readthedocs/payments/utils.py
|
agarwalrounak/readthedocs.org
|
4911600c230809bd6fb3585d1903121db2928ad6
|
[
"MIT"
] | 10
|
2019-05-21T03:00:40.000Z
|
2022-03-12T11:24:39.000Z
|
readthedocs/payments/utils.py
|
agarwalrounak/readthedocs.org
|
4911600c230809bd6fb3585d1903121db2928ad6
|
[
"MIT"
] | 12
|
2019-12-05T04:47:01.000Z
|
2022-01-09T00:56:58.000Z
|
readthedocs/payments/utils.py
|
agarwalrounak/readthedocs.org
|
4911600c230809bd6fb3585d1903121db2928ad6
|
[
"MIT"
] | 5
|
2019-07-08T23:45:10.000Z
|
2021-02-26T07:29:49.000Z
|
# -*- coding: utf-8 -*-
"""
Payment utility functions.
These are mostly one-off functions. Define the bulk of Stripe operations on
:py:class:`readthedocs.payments.forms.StripeResourceMixin`.
"""
import stripe
from django.conf import settings
stripe.api_key = getattr(settings, 'STRIPE_SECRET', None)
def delete_customer(customer_id):
"""Delete customer from Stripe, cancelling subscriptions."""
try:
customer = stripe.Customer.retrieve(customer_id)
return customer.delete()
except stripe.error.InvalidRequestError:
pass
def cancel_subscription(customer_id, subscription_id):
"""Cancel Stripe subscription, if it exists."""
try:
customer = stripe.Customer.retrieve(customer_id)
if hasattr(customer, 'subscriptions'):
subscription = customer.subscriptions.retrieve(subscription_id)
return subscription.delete()
except stripe.error.StripeError:
pass
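# Illustrative usage sketch (not part of the original module); the ids below are
# hypothetical placeholders, and both helpers are deliberately best-effort:
# delete_customer() swallows InvalidRequestError and cancel_subscription()
# swallows any StripeError, so callers can treat them as fire-and-forget cleanup.
#
#     delete_customer('cus_XXXXXXXXXXXX')
#     cancel_subscription('cus_XXXXXXXXXXXX', 'sub_XXXXXXXXXXXX')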
| 27.2
| 75
| 0.712185
|
4e983584f89e0e6a328c365c38be256664a4d7ed
| 116
|
py
|
Python
|
web/templatetags/to_i.py
|
otoyo/satisfactory-mobile
|
60d3fba20031bfbc795d4ff831370c27834c1d21
|
[
"MIT"
] | null | null | null |
web/templatetags/to_i.py
|
otoyo/satisfactory-mobile
|
60d3fba20031bfbc795d4ff831370c27834c1d21
|
[
"MIT"
] | null | null | null |
web/templatetags/to_i.py
|
otoyo/satisfactory-mobile
|
60d3fba20031bfbc795d4ff831370c27834c1d21
|
[
"MIT"
] | null | null | null |
from django import template
register = template.Library()
@register.filter
def to_i(value):
return int(value)
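# Illustrative usage sketch (not part of the original file): after loading the
# library in a Django template with `{% load to_i %}`, the filter coerces a
# numeric string to an int, e.g. `{{ "42"|to_i }}` renders as 42; non-numeric
# strings would raise ValueError, so callers are expected to pass numbers.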
| 14.5
| 29
| 0.75
|
a4407848923c8c6ff2de703135b7b7bea3e85333
| 3,689
|
py
|
Python
|
library/cartridge_get_disabled_instances.py
|
tarantool/ansible-cartridge
|
14d86752d582f43f0d8efb27dbaa1175ff6e8ac2
|
[
"BSD-2-Clause"
] | 17
|
2019-09-02T15:31:56.000Z
|
2022-03-29T18:49:59.000Z
|
library/cartridge_get_disabled_instances.py
|
tarantool/ansible-cartridge
|
14d86752d582f43f0d8efb27dbaa1175ff6e8ac2
|
[
"BSD-2-Clause"
] | 171
|
2019-10-24T15:34:34.000Z
|
2022-03-29T09:18:46.000Z
|
library/cartridge_get_disabled_instances.py
|
tarantool/ansible-cartridge
|
14d86752d582f43f0d8efb27dbaa1175ff6e8ac2
|
[
"BSD-2-Clause"
] | 14
|
2019-12-23T08:27:06.000Z
|
2021-07-06T15:53:49.000Z
|
#!/usr/bin/env python
from ansible.module_utils.helpers import Helpers as helpers
argument_spec = {
'module_hostvars': {'required': True, 'type': 'dict'},
'play_hosts': {'required': True, 'type': 'list'},
'ignore_split_brain': {'required': False, 'type': 'bool', 'default': False},
}
def get_disabled_instances_from_instance_config(instance_vars):
return instance_vars['instance_info']['disabled_instances']
def get_topology_checksum_from_instance_config(instance_vars):
return instance_vars['instance_info']['topology_checksum']
def config_mismatched(module_hostvars, instance_name, other_hosts):
current_checksum = get_topology_checksum_from_instance_config(module_hostvars[instance_name])
for other_name in other_hosts:
other_checksum = get_topology_checksum_from_instance_config(module_hostvars[other_name])
if current_checksum != other_checksum:
return True
return False
def count_cluster_disabled_instances(module_hostvars, play_hosts, ignore_split_brain=False):
config_mismatch_count = 0
healthy_count = 0
votes_to_disable = {}
play_hosts = list(filter(
        # disabled_instances is None on the stateboard and on instances that are not started
lambda name: all([
get_disabled_instances_from_instance_config(module_hostvars[name]) is not None,
get_topology_checksum_from_instance_config(module_hostvars[name]) is not None,
]),
play_hosts,
))
for instance_name in play_hosts:
disabled_instances = get_disabled_instances_from_instance_config(module_hostvars[instance_name])
not_disabled_names = list(filter(lambda other_name: other_name not in disabled_instances, play_hosts))
if config_mismatched(module_hostvars, instance_name, not_disabled_names):
config_mismatch_count += 1
continue
healthy_count += 1
for disabled_instance in disabled_instances:
votes_to_disable[disabled_instance] = votes_to_disable.get(disabled_instance, 0) + 1
if healthy_count == 0 and config_mismatch_count > 0:
        return None, 'All instances in cluster have different topology configs'
final_disabled_instances = []
split_brain_detected = False
for name, score in votes_to_disable.items():
if score >= float(healthy_count) / 2:
final_disabled_instances.append(name)
if score != healthy_count:
split_brain_detected = True
if split_brain_detected:
msg = "It seems that you have split brain in your cluster."
if ignore_split_brain:
helpers.warn(msg)
else:
msg += " Set 'cartridge_ignore_split_brain' flag to ignore this error."
return None, msg
return sorted(final_disabled_instances), None
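# Worked example of the voting rule above (illustrative, not from the module's
# tests): with three healthy instances whose topology checksums agree,
# healthy_count == 3. An instance that 2 of them list as disabled gets a score
# of 2 >= 3/2 and is added to final_disabled_instances, but since 2 != 3 the
# instances disagree and split_brain_detected is set; only a unanimous score
# equal to healthy_count avoids the split-brain warning/error.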
def count_inventory_disabled_instances(module_hostvars, play_hosts):
return sorted(filter(
lambda name: helpers.is_disabled(module_hostvars[name]),
play_hosts,
))
def count_disabled_instances(params):
module_hostvars = params['module_hostvars']
play_hosts = params['play_hosts']
ignore_split_brain = params['ignore_split_brain']
inventory_disabled_instances = count_inventory_disabled_instances(module_hostvars, play_hosts)
cluster_disabled_instances, err = count_cluster_disabled_instances(module_hostvars, play_hosts, ignore_split_brain)
if err:
return helpers.ModuleRes(failed=True, msg=err)
return helpers.ModuleRes(changed=False, inventory=inventory_disabled_instances, cluster=cluster_disabled_instances)
if __name__ == '__main__':
helpers.execute_module(argument_spec, count_disabled_instances)
| 35.815534
| 119
| 0.73299
|
fa8bfdd43fa8170c8fefb81e61066ab0f0503372
| 143
|
py
|
Python
|
main.py
|
dclavijo45/mvc-flask-template
|
4904047ad4cd0679ce634b0a92ec5fcdd6152f15
|
[
"MIT"
] | null | null | null |
main.py
|
dclavijo45/mvc-flask-template
|
4904047ad4cd0679ce634b0a92ec5fcdd6152f15
|
[
"MIT"
] | null | null | null |
main.py
|
dclavijo45/mvc-flask-template
|
4904047ad4cd0679ce634b0a92ec5fcdd6152f15
|
[
"MIT"
] | null | null | null |
from config import PORT, HOST, DEBUG
from __init__ import app
if __name__ == "__main__":
app.run(host=HOST, port=PORT, debug=bool(DEBUG))
| 23.833333
| 52
| 0.727273
|
6e8f29999283e6ae9da9895523043178413a0b0c
| 4,550
|
py
|
Python
|
parse_display.py
|
leiflundgren/mx-trace-print
|
6523b63ef7d196fb761ee17bf6576174c0d9ec40
|
[
"Apache-2.0"
] | null | null | null |
parse_display.py
|
leiflundgren/mx-trace-print
|
6523b63ef7d196fb761ee17bf6576174c0d9ec40
|
[
"Apache-2.0"
] | 2
|
2019-01-21T12:54:04.000Z
|
2019-01-28T16:51:55.000Z
|
parse_display.py
|
leiflundgren/mx-trace-print
|
6523b63ef7d196fb761ee17bf6576174c0d9ec40
|
[
"Apache-2.0"
] | null | null | null |
import io
import tools
class ParseDisplayOutput:
class Individual:
def __init__(self, dict) -> None:
self.dict = dict
def __str__(self) -> str:
return "{id}: {name} {state}".format(id=self.id, name=self.unit_name, state=self.state)
def get(self, attrName) -> str:
val = self.dict.get(attrName)
return ( None if val is None else val.strip() )
@property
def is_header(self) -> bool:
            # dict has no find(); a membership test is what was intended here
            return 'Version' in self.dict
@property
def id(self) -> str:
return self.get('Trace ind')
@property
def state(self) -> str:
return self.get('State')
@property
def stored(self) -> str:
return self.get('Stored')
@property
def size(self) -> str:
return self.get('Size per lim')
@property
def trace_type(self) -> str:
return self.get('Type')
@property
def rotating(self) -> str:
return self.get('Rotating')
@property
def textlevel(self) -> str:
return self.get('Textlevel')
@property
def lim(self) -> str:
return self.get('Lim no')
@property
def unit_no(self) -> str:
return self.get('Unit no')
@property
def unit_name(self) -> str:
return self.get('Unit name')
@property
def time_mark(self) -> str:
return self.get('Time mark')
@property
def by_user(self) -> str:
return self.get('by user')
# @property
# def (self) -> str:
# return self.dict[''].strip()
def __init__(self, source) -> None:
self.source = tools.read(source)
if isinstance(self.source, str):
self.source = self.source.splitlines()
parts = [] #List[str]
self.individuals = [] # List['ParseDisplayOutput.Individual']
in_header = True
for line in self.source:
line = line.strip()
## skip header, until a line starts with Version
if in_header:
if line.startswith('Version'):
in_header = False
else:
continue
if line.startswith('Version'):
mpos = line.index(', Market:')
self.version = line[8:mpos].strip()
self.market = line[mpos+9:].strip()
continue
if line.startswith('First'):
last = line.find('Last:')-1
while last > 0 and line[last] == ' ':
last=last-1
if last>0 and line[last] != ',':
line = line[:last+1] + ',' + line[last+1:]
if len(line) > 0 :
parts.extend(map(str.strip, line.split(',')))
else:
individual = self.parse_individual(parts)
if individual is not None:
self.individuals.append(individual)
parts = []
def __str__(self) -> str:
return "\n".join( [str(i) for i in self.individuals ] )
@property
def is_valid(self) -> bool:
return not self.individuals is None
@property
def first_trace(self) -> str:
return self.individuals[0].get('First')
@property
def last_trace(self) -> str:
return self.individuals[0].get('Last')
def get_individual(self, id) -> 'Individual':
if isinstance(id, int):
return self.individuals[id] if id < len(self.individuals) else None
for ind in self.individuals[1:]: # Avoid header
if ind.id == id or ind.unit_name == id:
return ind
return None
### convenience method that returns the id of Individual matching unitname, or None
def get_id(self, unitname) -> str:
ind = self.get_individual(unitname)
return ind.id if not ind is None else None
## Trace ind: 3, State: setup , Stored: 0, Size per lim: 5000, Type : unit-trace , Rotating: on , Textlevel: all, Lim no : 1, Unit no: 0206, Unit name: CMP , Time mark: 2018-12-13 16:46:11 (CET), by user: mxone_admin
@staticmethod
def parse_individual(parts) -> 'Individual':
d = dict(map(str.strip, itm.split(':', 1)) for itm in parts)
return ParseDisplayOutput.Individual(d) if len(d) > 0 else None
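# Illustrative, minimal usage sketch (not part of the original file), assuming
# the sample `display` line shown above has already been split on ',':
if __name__ == '__main__':
    sample_parts = ['Trace ind: 3', 'State: setup ', 'Unit name: CMP ']
    individual = ParseDisplayOutput.parse_individual(sample_parts)
    print(individual)  # -> "3: CMP setup"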
| 33.455882
| 246
| 0.520659
|
f053e128fd90e83ac18d9161549734473560349a
| 6,954
|
py
|
Python
|
MP3/mp3.py
|
AndrewQuinn2020/EECS-332-MPs
|
ee164e98bd6b1b05296e4abec69a8b5d5de2581b
|
[
"MIT"
] | null | null | null |
MP3/mp3.py
|
AndrewQuinn2020/EECS-332-MPs
|
ee164e98bd6b1b05296e4abec69a8b5d5de2581b
|
[
"MIT"
] | null | null | null |
MP3/mp3.py
|
AndrewQuinn2020/EECS-332-MPs
|
ee164e98bd6b1b05296e4abec69a8b5d5de2581b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
# Anything not directly related to processing here
import sys
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
from mp3_helper import *
from PIL import Image
np.set_printoptions(threshold=sys.maxsize)
np.set_printoptions(linewidth=1000)
def img2dict(data):
"""Construct a histogram of the count of every value found in the
2D array data, in a dictionary, along with the minimum and maximum
greyscale value it found in the data."""
hist_data = {}
for i in range(0, data.shape[0]):
for j in range(0, data.shape[1]):
if data[i, j] not in hist_data.keys():
hist_data[data[i, j]] = 1
else:
hist_data[data[i, j]] += 1
return hist_data
def hist2matrix(hist):
"""Returns a |hist|-by-2 matrix, with keys on top and values on bottom."""
m = np.zeros((len(hist), 2)).astype(int)
# print(m.shape)
c = 0
for key in sorted(hist.keys()):
m[c, 0] = key
m[c, 1] = hist[key]
c += 1
return m
def matrix2cmd(matrix_in):
"""Given an n*2 matrix of keys in column 0 and integer values in column
1, returns a new n*2 matrix of the same keys, but the values have been
summed up."""
matrix_out = np.zeros_like(matrix_in)
c = 0
for i in range(0, matrix_in.shape[0]):
c += matrix_in[i, 1]
# print(c)
matrix_out[i, 0] = matrix_in[i, 0]
matrix_out[i, 1] = c
return matrix_out
def cmd2plottable(cmd_in):
"""Given the output of matrix2cmd, constructs a 256*2 matrix for plotting
the cumulative distribution function."""
matrix_out = np.zeros((256, 2))
cumval = 0
nextval = 0
for i in range(0, 256):
matrix_out[i, 0] = i
matrix_out[i, 1] = cumval
if matrix_out[i, 0] == cmd_in[nextval, 0]:
matrix_out[i, 1] = cmd_in[nextval, 1]
cumval = matrix_out[i, 1]
if nextval < cmd_in.shape[0] - 1:
nextval += 1
return matrix_out
def cmd2dict(cmd):
"""Returns a dictionary of what to replace each value by."""
pixel_count = cmd[cmd.shape[0] - 1, cmd.shape[1] - 1]
scaling_dict = dict()
for i in range(0, cmd.shape[0]):
scaling_dict[cmd[i, 0]] = round(
((cmd[i, 1] - cmd[0, 1]) / (pixel_count - cmd[0, 1])) * 255
)
return scaling_dict
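# Worked example of the equalization mapping above (illustrative, not part of
# the assignment): for a 4-pixel image with greyscale counts {10: 1, 20: 3},
# matrix2cmd() yields [[10, 1], [20, 4]], so pixel_count = 4 and cmd[0, 1] = 1,
# and cmd2dict() maps 10 -> round((1 - 1) / (4 - 1) * 255) = 0 and
# 20 -> round((4 - 1) / (4 - 1) * 255) = 255, stretching the values to the
# full 0..255 range.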
if __name__ == "__main__":
hello()
for image in test_images:
img_data = load_gs(image)
hist_data = img2dict(img_data)
hist = hist2matrix(hist_data)
hist_cmd = matrix2cmd(hist)
plottable_cmd = cmd2plottable(hist_cmd)
hist_eq_dict = cmd2dict(hist_cmd)
results_data = np.zeros_like(img_data).astype(int)
for i in range(0, results_data.shape[0]):
for j in range(0, results_data.shape[1]):
results_data[i, j] = hist_eq_dict[img_data[i, j]]
results_hist = img2dict(results_data)
results_hist_matrix = hist2matrix(results_hist)
results_hist_cmd = matrix2cmd(results_hist_matrix)
plottable_results_cmd = cmd2plottable(results_hist_cmd)
test_results_path = save_gs(
results_data, Path(image).stem, dir=test_results_dir
)
print("Processed image saved to: {}".format(test_results_path))
# Let's plot some histograms.
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.set_title("Pixels per color value: `{}`".format(Path(image).stem + ".bmp"))
ax1.set_xlabel("Pixel color value ($v \in \{0, 1, \dots, 255\}$)")
ax1.set_ylabel("# / pixels per grayscale value")
ln1 = ax1.bar(
hist_data.keys(), hist_data.values(), alpha=0.6, color="r", label="Original"
)
ln2 = ax1.bar(
results_hist.keys(),
results_hist.values(),
alpha=0.6,
color="b",
label="EQ'd",
)
ax2 = ax1.twinx()
ln3 = ax2.bar(
plottable_cmd[:, 0],
plottable_cmd[:, 1],
alpha=0.1,
color="g",
label="cdf (Original)",
)
ln4 = ax2.bar(
plottable_results_cmd[:, 0],
plottable_results_cmd[:, 1],
alpha=0.1,
color="purple",
label="cdf (EQ'd)",
)
plt.legend(
[ln1, ln2, ln3, ln4],
[ln1.get_label(), ln2.get_label(), ln3.get_label(), ln4.get_label()],
)
plt.tight_layout()
plt.savefig(os.path.join(test_results_dir, Path(image).stem + "_hist.svg"))
plt.savefig(os.path.join(test_results_dir, Path(image).stem + "_hist.jpg"))
plt.close()
for image in images:
img_data = load_gs(image)
hist_data = img2dict(img_data)
hist = hist2matrix(hist_data)
hist_cmd = matrix2cmd(hist)
plottable_cmd = cmd2plottable(hist_cmd)
hist_eq_dict = cmd2dict(hist_cmd)
results_data = np.zeros_like(img_data).astype(int)
for i in range(0, results_data.shape[0]):
for j in range(0, results_data.shape[1]):
results_data[i, j] = hist_eq_dict[img_data[i, j]]
results_hist = img2dict(results_data)
results_hist_matrix = hist2matrix(results_hist)
results_hist_cmd = matrix2cmd(results_hist_matrix)
plottable_results_cmd = cmd2plottable(results_hist_cmd)
results_path = save_gs(results_data, Path(image).stem)
print("Processed image saved: {}".format(results_path))
# Let's plot some histograms.
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.set_title("Pixels per color value: `{}`".format(Path(image).stem + ".bmp"))
ax1.set_xlabel("Pixel color value ($v \in \{0, 1, \dots, 255\}$)")
ax1.set_ylabel("Number of pixels")
ln1 = ax1.bar(
hist_data.keys(), hist_data.values(), alpha=0.6, color="r", label="Original"
)
ln2 = ax1.bar(
results_hist.keys(),
results_hist.values(),
alpha=0.6,
color="b",
label="EQ'd",
)
ax2 = ax1.twinx()
ln3 = ax2.bar(
plottable_cmd[:, 0],
plottable_cmd[:, 1],
alpha=0.1,
color="g",
label="cdf (Original)",
)
ln4 = ax2.bar(
plottable_results_cmd[:, 0],
plottable_results_cmd[:, 1],
alpha=0.1,
color="purple",
label="cdf (EQ'd)",
)
plt.legend(
[ln1, ln2, ln3, ln4],
[ln1.get_label(), ln2.get_label(), ln3.get_label(), ln4.get_label()],
)
plt.tight_layout()
plt.savefig(os.path.join(results_dir, Path(image).stem + "_hist.svg"))
plt.savefig(os.path.join(results_dir, Path(image).stem + "_hist.jpg"))
plt.close()
| 30.234783
| 88
| 0.568881
|
45019d83b315ffa8eeae4c46657baa6ed9afbded
| 923
|
py
|
Python
|
allennlp/modules/token_embedders/pretrained_transformer_embedder.py
|
donna-legal/allennlp
|
fd1e3cfaed07ec3ba03b922d12eee47f8be16837
|
[
"Apache-2.0"
] | 2
|
2019-12-03T20:04:56.000Z
|
2021-03-29T10:38:06.000Z
|
allennlp/modules/token_embedders/pretrained_transformer_embedder.py
|
donna-legal/allennlp
|
fd1e3cfaed07ec3ba03b922d12eee47f8be16837
|
[
"Apache-2.0"
] | null | null | null |
allennlp/modules/token_embedders/pretrained_transformer_embedder.py
|
donna-legal/allennlp
|
fd1e3cfaed07ec3ba03b922d12eee47f8be16837
|
[
"Apache-2.0"
] | 2
|
2019-12-04T16:55:13.000Z
|
2019-12-06T18:47:15.000Z
|
from overrides import overrides
from transformers.modeling_auto import AutoModel
import torch
from allennlp.modules.token_embedders.token_embedder import TokenEmbedder
@TokenEmbedder.register("pretrained_transformer")
class PretrainedTransformerEmbedder(TokenEmbedder):
"""
Uses a pretrained model from ``transformers`` as a ``TokenEmbedder``.
"""
def __init__(self, model_name: str) -> None:
super().__init__()
self.transformer_model = AutoModel.from_pretrained(model_name)
# I'm not sure if this works for all models; open an issue on github if you find a case
# where it doesn't work.
self.output_dim = self.transformer_model.config.hidden_size
@overrides
def get_output_dim(self):
return self.output_dim
def forward(self, token_ids: torch.LongTensor) -> torch.Tensor: # type: ignore
return self.transformer_model(token_ids)[0]
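# Illustrative usage sketch (not part of the original module); the model name
# and token ids below are examples only, and in practice the ids come from the
# matching huggingface tokenizer / AllenNLP token indexer:
#
#     embedder = PretrainedTransformerEmbedder(model_name="bert-base-uncased")
#     token_ids = torch.LongTensor([[101, 7592, 2088, 102]])   # (batch, seq_len)
#     embeddings = embedder(token_ids)  # (batch, seq_len, embedder.get_output_dim())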
| 32.964286
| 95
| 0.728061
|
5a610adcf6fb461d2175f7ca917d41c401c4b549
| 27,401
|
py
|
Python
|
whitebot.py
|
kittykatz9/whitebot
|
bed4b41800249286d95832fa15c49364831d34b5
|
[
"MIT"
] | null | null | null |
whitebot.py
|
kittykatz9/whitebot
|
bed4b41800249286d95832fa15c49364831d34b5
|
[
"MIT"
] | null | null | null |
whitebot.py
|
kittykatz9/whitebot
|
bed4b41800249286d95832fa15c49364831d34b5
|
[
"MIT"
] | 1
|
2018-10-09T18:58:49.000Z
|
2018-10-09T18:58:49.000Z
|
import discord
import datetime
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
from discord.ext import commands
import json
pending = []
# https://discordapp.com/oauth2/authorize?client_id=498889371030781952&scope=bot&permissions=268512342
# https://discordapp.com/oauth2/authorize?client_id=498964999918583820&scope=bot&permissions=268512306
now = datetime.datetime.now()
with open("stats.json") as f:
stats = json.load(f)
with open("config.json") as f:
config = json.load(f)
with open("config.json") as f:
config = json.load(f)
TOKEN = config['LOGIN'][0]['TOKEN']
PREFIX = config['LOGIN'][0]['PREFIX']
print(PREFIX)
client = commands.Bot(command_prefix=PREFIX)
def save_stats_backup(stats):
with open("stats_backup.json", "w") as f:
f.write(json.dumps(stats))
def save_stats(stats):
with open("stats.json", "w") as f:
f.write(json.dumps(stats))
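# Assumed shape of stats.json, inferred from the accesses in this file (an
# illustrative sketch, not the authoritative schema): top-level counters plus
# per-day applicant data nested as stats['Data Applied'][0][year][0][month][0][day]:
#
#     {"Statistics": [{"Applicants": 0, "Users Accepted": 0, "Users Denied": 0}],
#      "Applied": [{}], "Accepted": [{}], "Denied": [{}],
#      "Data Applied": [{"2018": [{"1": [{"1": 0, ...}], ...}], ...}],
#      "Data Accepted": [...], "Data Denied": [...]}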
@client.event
async def on_ready():
print("Bot is Ready")
@client.event
async def on_message(message):
await client.process_commands(message)
@client.event
async def on_member_remove(member):
if member.mention in pending:
pending.remove(member.mention)
@client.event
async def on_member_join(member):
role = discord.utils.get(member.server.roles, name="pending")
await client.add_roles(member, role)
pending.append(member.mention)
@client.event
async def on_member_kick(member):
role = discord.utils.get(member.server.roles,
name="pending")
if member.mention in pending:
time = now.strftime("%Y-%m-%d %H:%M")
if role in member.roles:
y = str(now.year)
m = str(now.month)
d = str(now.day)
await client.kick(ctx.message.mentions[0])
stats['Statistics'][0]['Applicants'] += 1
stats['Statistics'][0]['Users Denied'] += 1
stats['Applied'][0][str(member)] = time
stats['Denied'][0][str(member)] = time
stats['Data Applied'][0][y][0][m][0][d] += 1
stats['Data Denied'][0][y][0][m][0][d] += 1
save_stats(stats)
await client.say("User " + member + " denied")
pending.remove(member)
@client.event
async def on_member_ban(member):
role = discord.utils.get(member.server.roles,
name="pending")
if member.mention in pending:
time = now.strftime("%Y-%m-%d %H:%M")
if role in member.roles:
y = str(now.year)
m = str(now.month)
d = str(now.day)
await client.kick(ctx.message.mentions[0])
stats['Statistics'][0]['Applicants'] += 1
stats['Statistics'][0]['Users Denied'] += 1
stats['Applied'][0][str(member)] = time
stats['Denied'][0][str(member)] = time
stats['Data Applied'][0][y][0][m][0][d] += 1
stats['Data Denied'][0][y][0][m][0][d] += 1
save_stats(stats)
await client.say("User " + member + " denied")
pending.remove(member)
@client.event
async def on_member_update(before, member):
role = discord.utils.get(member.server.roles,
name="pending")
if role in before.roles and role not in member.roles:
if member.mention in pending:
time = now.strftime("%Y-%m-%d %H:%M")
y = str(now.year)
m = str(now.month)
d = str(now.day)
await client.remove_roles(member, role)
stats['Statistics'][0]['Applicants'] += 1
stats['Statistics'][0]['Users Accepted'] += 1
stats['Applied'][0][str(member)] = time
stats['Accepted'][0][str(member)] = time
stats['Data Applied'][0][y][0][m][0][d] += 1
stats['Data Accepted'][0][y][0][m][0][d] += 1
save_stats(stats)
await client.say("User", member, "accepted")
pending.remove(member.mention)
@client.command(pass_context=True)
async def accept(ctx):
if ctx.message.author.server_permissions.administrator:
try:
if ctx.message.mentions[0] in ctx.message.server.members:
print("valid")
channel = discord.utils.get(ctx.message.server.channels,
id="498971465144860672")
if ctx.message.channel == channel:
role = discord.utils.get(ctx.message.server.roles,
name="pending")
member = ctx.message.mentions[0]
time = now.strftime("%Y-%m-%d %H:%M")
if role in member.roles:
y = str(now.year)
m = str(now.month)
d = str(now.day)
                        pending.remove(member.mention)
await client.remove_roles(member, role)
stats['Statistics'][0]['Applicants'] += 1
stats['Statistics'][0]['Users Accepted'] += 1
stats['Applied'][0][str(member)] = time
stats['Accepted'][0][str(member)] = time
stats['Data Applied'][0][y][0][m][0][d] += 1
stats['Data Accepted'][0][y][0][m][0][d] += 1
save_stats(stats)
await client.say("User " + member + " accepted")
else:
await client.say("User Not pending...")
except IndexError:
await client.say("You must mention someone...")
else:
await client.say("You can't use this... Not admin...")
await client.delete_message(ctx.message)
@client.command(pass_context=True)
async def deny(ctx):
if ctx.message.author.server_permissions.administrator:
try:
if ctx.message.mentions[0] in ctx.message.server.members:
print("valid")
save_stats_backup(stats)
role = discord.utils.get(ctx.message.server.roles,
name="pending")
channel = discord.utils.get(ctx.message.server.channels,
id="498971465144860672")
if ctx.message.channel == channel:
member = ctx.message.mentions[0]
time = now.strftime("%Y-%m-%d %H:%M")
if role in member.roles:
y = str(now.year)
m = str(now.month)
d = str(now.day)
                        pending.remove(member.mention)
await client.kick(ctx.message.mentions[0])
stats['Statistics'][0]['Applicants'] += 1
stats['Statistics'][0]['Users Denied'] += 1
stats['Applied'][0][str(member)] = time
stats['Denied'][0][str(member)] = time
stats['Data Applied'][0][y][0][m][0][d] += 1
stats['Data Denied'][0][y][0][m][0][d] += 1
save_stats(stats)
await client.say("User " + member + " denied")
else:
await client.say("User already denied him")
except IndexError:
await client.say("You must mention someone...")
else:
await client.say("You can't use this... Not admin...")
await client.delete_message(ctx.message)
# @client.command(pass_context=True)
# async def ping(ctx):
# now = datetime.datetime.utcnow()
# diff = str(ctx.message.timestamp - now)
# final = []
# for i in diff:
# if i == ":" or i == "0":
# continue
# else:
# final.append(i)
# final = ''.join(str(x) for x in final)
# embed = discord.Embed(title="Ping: " '{:.2f}ms'.format(float(final)*100))
# await client.say(embed=embed)
@client.command(pass_context=True)
async def showstats(ctx):
if ctx.message.author.server_permissions.administrator:
accepted = int(stats['Statistics'][0]['Users Accepted'])
denied = int(stats['Statistics'][0]['Users Denied'])
applied = int(stats['Statistics'][0]['Applicants'])
try:
acceptedP = (accepted/applied) * 100
deniedP = (denied/applied) * 100
except ZeroDivisionError:
acceptedP = 0
deniedP = 0
embed = discord.Embed(title="Statistics",
colour=discord.Colour(0xf0d434))
embed.add_field(name="Users Applied (total)",
value=applied)
embed.add_field(name="Users Accepted (total/percentage)",
value="{} / {:.2f}%".format(accepted, acceptedP))
embed.add_field(name="Users Denied (total/percentage)",
value="{} / {:.2f}%".format(denied, deniedP))
await client.say(embed=embed)
else:
await client.say("You can't use this... Not admin...")
await client.delete_message(ctx.message)
@client.command(pass_context=True)
async def showaccepted(ctx):
embed = discord.Embed(title="Statistics",
colour=discord.Colour(0xf0d434))
embed.set_author(name="Users Accepted",
icon_url="https://cdn.discordapp.com/embed/avatars/0.png")
for i in stats['Accepted'][0]:
embed.add_field(name=i, value=stats['Accepted'][0][i])
await client.say(embed=embed)
await client.delete_message(ctx.message)
@client.command(pass_context=True)
async def showdenied(ctx):
embed = discord.Embed(title="Users Denied",
colour=discord.Colour(0xf0d434))
embed.set_author(name="Users Denied",
icon_url="https://cdn.discordapp.com/embed/avatars/0.png")
for i in stats['Denied'][0]:
embed.add_field(name=i, value=stats['Denied'][0][i])
await client.say(embed=embed)
await client.delete_message(ctx.message)
@client.command(pass_context=True)
async def showapplied(ctx):
embed = discord.Embed(title="Users Applied",
colour=discord.Colour(0xf0d434))
embed.set_author(name="Users Applied",
icon_url="https://cdn.discordapp.com/embed/avatars/0.png")
for i in stats['Applied'][0]:
embed.add_field(name=i, value=stats['Applied'][0][i])
await client.say(embed=embed)
await client.delete_message(ctx.message)
@client.command(pass_context=True)
async def showgraph(ctx, *args):
"""
This command has 4 possible arguments:
There are 3 types of graphs:
Plot graphs
Pie graphs
Bar graph
To make a plot graph:
.showgraph [year] [month1] [month2] <plot>
All of these arguments are optional but give different outputs.
Example:
.showgraph 2018 (Shows a plot graph of the whole year)
.showgraph 2018 1 2 (Shows a plot graph of Jan and Feb)
.showgraph 2018 1 (Shows a plot graph for Jan)
.showgraph 2018 1 plot (Same as before)
.showgraph now (Shows a plot graph for the current month)
To make a Pie graph:
.showgraph pie
    Pie graphs are calculated from the overall statistics, so they take no other options.
To make a Bar graph:
.showgraph [year] [month1] [month2] <bar>
Bar graphs only work with more than one month, so you have to always specify the months
unless you want to get a chart for the whole year.
Example:
.showgraph 2018 1 5 bar (Shows a bar graph for the months Jan through May)
.showgraph 2018 bar (Shows a bar graph for the whole year)
Any questions or improvements? Talk to Kitty ^^ xoxo
"""
m = 0
m2 = 0
try:
arg1 = args[0]
yr = arg1
arg2 = args[1]
m = arg2
arg3 = args[2]
m2 = arg3
arg4 = args[3]
except:
pass
if len(args) > 4:
return await client.say("Too many arguments...")
elif len(args) == 4:
try:
if int(arg1) > 2020 or int(arg1) < 2018 or int(arg2) > 12 or int(arg2) < 1 or int(arg3) > 12 or int(arg3) < 1:
return await client.say("Expected year between 2018 and 2020. Read up on instructions.")
else:
if str(arg4).lower() == 'plot':
graphtype = 'plot'
elif str(arg4).lower() == 'bar':
graphtype = 'bar'
elif str(arg4).lower() == 'pie':
graphtype = 'pie'
except TypeError:
print(arg1, arg2, arg3, arg4)
print(type(arg1), type(arg2), type(arg3), type(arg4))
return await client.say("Expected a number. Read up on instructions")
elif len(args) == 3:
try:
if int(arg1) > 2020 or int(arg1) < 2018 or int(arg2) > 12 or int(arg2) < 1:
return await client.say("Expected year between 2018 and 2020 and a month between 1 and 12\n. Read up on instructions.")
if str(arg3).lower() == 'plot':
graphtype = 'plot'
m2 = 0
elif str(arg3).lower() == 'bar':
graphtype = 'bar'
m2 = 0
elif str(arg3).lower() == 'pie':
graphtype = 'pie'
m2 = 0
else:
if int(arg3) > 12 or int(arg3) < 1:
return await client.say("Expected a month between 1 and 12 Read up on instructions.")
graphtype = 'plot'
m2 = 0
except TypeError:
return await client.say("Expected a month or a graph type. Read up on instructions")
except UnboundLocalError:
graphtype = 'plot'
m2 = 0
elif len(args) == 2:
try:
if int(arg1) > 2020 or int(arg1) < 2018:
return await client.say("Expected year between 2018 and 2020\n. Read up on instructions.")
if str(arg2).lower() == 'plot':
graphtype = 'plot'
elif str(arg2).lower() == 'bar':
graphtype = 'bar'
elif str(arg2).lower() == 'pie':
graphtype = 'pie'
else:
if arg3:
if int(arg3) > 12 or int(arg3) < 1:
return await client.say("Expected a year between 2018 and 2020. Read up on instructions.")
else:
graphtype = 'plot'
except TypeError:
return await client.say("Expected a year between 2018 and 2020 or a graph type. Read up on instructions")
except UnboundLocalError:
graphtype = 'plot'
except ValueError:
if str(arg2).lower() == 'plot':
graphtype = 'plot'
elif str(arg2).lower() == 'bar':
graphtype = 'bar'
elif str(arg2).lower() == 'pie':
graphtype = 'pie'
else:
graphtype = 'plot'
elif len(args) == 1:
try:
if int(arg1) > 2020 or int(arg1) < 2018:
return await client.say("Expected a year between 2018 and 2020. Read instructions")
graphtype = 'plot'
m = 1
m2 = 12
realmonth = 'January'
realmonth2 = 'December'
except TypeError:
if str(arg1).lower() == 'plot':
                graphtype = 'plot'
elif str(arg1).lower() == 'bar':
graphtype = 'bar'
elif str(arg1).lower() == 'pie':
graphtype = 'pie'
elif str(arg1).lower() == 'now':
yr = now.year
m = now.month
realmonth = now.strftime("%B")
m2 = 0
else:
return await client.say("Expected a year between 2018 and 2020 or a graph type. Read up on instructions.")
except ValueError:
if str(arg1).lower() == 'plot':
graphtype = 'plot'
elif str(arg1).lower() == 'bar':
graphtype = 'bar'
elif str(arg1).lower() == 'pie':
graphtype = 'pie'
else:
graphtype = 'plot'
else:
return await client.say("Type '{}help showgraph' if you are in doubt...".format(PREFIX))
if m == "1":
realmonth = "January"
if m2 == "1":
realmonth = "January"
if m == "2":
realmonth = "February"
if m2 == "2":
realmonth2 = "February"
if m == "3":
realmonth = "March"
if m2 == "3":
realmonth2 = "March"
if m == "4":
realmonth = "April"
if m2 == "4":
realmonth2 = "April"
if m == "5":
realmonth = "May"
if m2 == "5":
realmonth2 = "May"
if m == "6":
realmonth = "June"
if m2 == "6":
realmonth2 = "June"
if m == "7":
realmonth = "July"
if m2 == "7":
realmonth2 = "July"
if m == "8":
realmonth = "August"
if m2 == "8":
realmonth2 = "August"
if m == "9":
realmonth = "September"
if m2 == "9":
realmonth2 = "September"
if m == "10":
realmonth = "October"
if m2 == "10":
realmonth2 = "October"
if m == "11":
realmonth = "November"
if m2 == "11":
realmonth2 = "November"
if m == "12":
realmonth = "December"
if m2 == "12":
realmonth2 = "December"
channel = ctx.message.channel
bg_color = '#36393E'
fg_color = 'white'
if graphtype == "plot":
if m2 == 0:
x = []
y = []
print("m: ", m)
print(m2)
for i in stats['Data Applied'][0][str(yr)][0][str(m)][0]:
x.append(int(i))
y.append(stats['Data Applied'][0][str(yr)][0][str(m)][0][i])
ax = plt.figure().gca()
ax.plot(x, y)
# ax.yaxis.set_major_locator(MaxNLocator(integer=True))
# ax.xaxis.set_major_locator(MaxNLocator(integer=True))
ax.spines['bottom'].set_color('white')
ax.spines['top'].set_color('white')
ax.spines['right'].set_color('white')
ax.spines['left'].set_color('white')
ax.tick_params(axis='x', colors='white')
ax.tick_params(axis='y', colors='white')
ax.yaxis.label.set_color('white')
ax.xaxis.label.set_color('white')
ax.title.set_color('white')
ax.set_facecolor('#36393E')
plt.xlabel('Days of the Month', color='white')
plt.ylabel('Number of Applicants', color='white')
plt.title('Applicant Data for {}, {}'.format(realmonth, yr), color='white')
plt.plot(x, y, color='whitesmoke')
plt.savefig("plot.png", bbox_inches='tight', facecolor='#36393E')
with open("plot.png", "rb") as f:
await client.send_file(channel, f)
plt.clf()
plt.cla()
else:
if int(m) > int(m2):
return await client.say("Wrong order of months")
else:
x = []
y = []
for j in range(int(m), int(m2)+1):
for i in stats['Data Applied'][0][str(yr)][0][str(j)][0]:
y.append(stats['Data Applied'][0][str(yr)][0][str(j)][0][i])
x.append(int(j))
ax = plt.figure().gca()
ax.plot(x, y)
# ax.yaxis.set_major_locator(MaxNLocator(integer=True))
# ax.xaxis.set_major_locator(MaxNLocator(integer=True))
ax.spines['bottom'].set_color('white')
ax.spines['top'].set_color('white')
ax.spines['right'].set_color('white')
ax.spines['left'].set_color('white')
ax.tick_params(axis='x', colors='white')
ax.tick_params(axis='y', colors='white')
ax.yaxis.label.set_color('white')
ax.xaxis.label.set_color('white')
ax.title.set_color('white')
ax.set_facecolor('#36393E')
plt.xlabel('Months', color='whitesmoke')
plt.ylabel('Number of Applicants', color='white')
plt.title('Applicant Data for {}, between {} and {}'.format(
yr, realmonth, realmonth2), color='white')
plt.plot(x, y, color='whitesmoke')
plt.savefig("plot.png", bbox_inches='tight', facecolor='#36393E')
with open("plot.png", "rb") as f:
await client.send_file(channel, f)
plt.clf()
plt.cla()
if graphtype == "pie":
x = [stats['Statistics'][0]['Users Accepted']]
y = [stats['Statistics'][0]['Users Denied']]
slices = [x, y]
activities = ["Accepted", "Denied"]
cols = ['c', 'm']
plt.title('Total Applicant Data', color='white')
patches, texts, autotexts = plt.pie(slices, labels=activities, colors=cols,
startangle=90,
shadow=True,
explode=(0, 0),
autopct='%1.1f%%')
for text in texts:
text.set_color('white')
for autotext in autotexts:
autotext.set_color('black')
plt.savefig("pie.png", bbox_inches='tight', facecolor='#36393E')
with open("pie.png", "rb") as f:
await client.send_file(channel, f)
plt.clf()
plt.cla()
if graphtype == "bar":
print("M2: ", m2)
if m2 != 0:
accepted = []
denied = []
dates = []
applied = []
for j in range(int(m), int(m2)+1):
for i in stats['Data Accepted'][0][str(yr)][0][str(j)][0]:
accepted.append(stats['Data Accepted'][0][str(yr)][0][str(j)][0][i])
dates.append(int(j))
for j in range(int(m), int(m2)+1):
for i in stats['Data Denied'][0][str(yr)][0][str(j)][0]:
denied.append(stats['Data Denied'][0][str(yr)][0][str(j)][0][i])
for j in range(int(m), int(m2)+1):
for i in stats['Data Applied'][0][str(yr)][0][str(j)][0]:
applied.append(stats['Data Applied'][0][str(yr)][0][str(j)][0][i])
width = 0.35
print(dates)
ax = plt.figure().gca()
ax.yaxis.set_major_locator(MaxNLocator(integer=True))
ax.spines['bottom'].set_color('white')
ax.spines['top'].set_color('white')
ax.spines['right'].set_color('white')
ax.spines['left'].set_color('white')
ax.tick_params(axis='x', colors='white')
ax.tick_params(axis='y', colors='white')
ax.yaxis.label.set_color('white')
ax.xaxis.label.set_color('white')
ax.title.set_color('white')
plt.xticks(dates, dates)
p1 = plt.bar(dates, accepted, width)
p2 = plt.bar(dates, denied, width, bottom=denied)
ax.set_facecolor('#36393E')
plt.ylabel('Users', color='white')
plt.title('Users Accepted/Denied\nPer months: {} - {}'.format(realmonth,
realmonth2), color='white')
plt.legend((p1[0], p2[0]), ('Accepted', 'Denied '))
plt.savefig("bar.png", bbox_inches='tight', facecolor='#36393E')
with open("bar.png", "rb") as f:
await client.send_file(channel, f)
plt.clf()
plt.cla()
else:
await client.say("For bar graph, you must specify 2 or more months")
@client.command(pass_context=True)
async def clear(ctx, number, age=None):
number = int(number)
counter = 0
if number > 100 and age is None:
secondcounter = number
while secondcounter > 100:
mgs = []
number = int(number)
async for x in client.logs_from(ctx.message.channel, limit=number):
mgs.append(x)
await client.delete_messages(mgs)
if secondcounter > 0:
mgs = []
number = int(number)
async for x in client.logs_from(ctx.message.channel, limit=number):
mgs.append(x)
await client.delete_messages(mgs)
else:
print("done cleaning of {} messages".format(number))
elif number < 100 and age is None:
mgs = []
number = int(number)
async for x in client.logs_from(ctx.message.channel, limit=number):
mgs.append(x)
await client.delete_messages(mgs)
else:
async for x in client.logs_from(ctx.message.channel, limit=number):
if counter < number:
await client.delete_message(x)
counter += 1
@client.command(pass_context=True)
async def resetstats(ctx):
save_stats_backup(stats)
for k in range(2018, 2021):
for j in range(1, 8, 2):
for i in range(1, 32):
stats['Data Applied'][0][str(k)][0][str(j)][0][str(i)] = 0
stats['Data Accepted'][0][str(k)][0][str(j)][0][str(i)] = 0
stats['Data Denied'][0][str(k)][0][str(j)][0][str(i)] = 0
for k in range(2018, 2021):
for j in range(4, 7, 2):
for i in range(1, 31):
stats['Data Applied'][0][str(k)][0][str(j)][0][str(i)] = 0
stats['Data Accepted'][0][str(k)][0][str(j)][0][str(i)] = 0
stats['Data Denied'][0][str(k)][0][str(j)][0][str(i)] = 0
for k in range(2018, 2021):
for j in range(8, 13, 2):
for i in range(1, 32):
stats['Data Applied'][0][str(k)][0][str(j)][0][str(i)] = 0
stats['Data Accepted'][0][str(k)][0][str(j)][0][str(i)] = 0
stats['Data Denied'][0][str(k)][0][str(j)][0][str(i)] = 0
for k in range(2018, 2021):
for j in range(9, 12, 2):
for i in range(1, 32):
stats['Data Applied'][0][str(k)][0][str(j)][0][str(i)] = 0
stats['Data Accepted'][0][str(k)][0][str(j)][0][str(i)] = 0
stats['Data Denied'][0][str(k)][0][str(j)][0][str(i)] = 0
for k in range(2018, 2021):
for i in range(1, 32):
stats['Data Applied'][0][str(k)][0]["2"][0][str(i)] = 0
stats['Data Accepted'][0][str(k)][0]["2"][0][str(i)] = 0
stats['Data Denied'][0][str(k)][0]["2"][0][str(i)] = 0
for element in list(stats['Applied'][0]):
del stats['Applied'][0][element]
for element in list(stats['Denied'][0]):
del stats['Denied'][0][element]
for element in list(stats['Accepted'][0]):
del stats['Accepted'][0][element]
stats['Statistics'][0]['Applicants'] = 0
stats['Statistics'][0]['Users Accepted'] = 0
stats['Statistics'][0]['Users Denied'] = 0
save_stats(stats)
client.run(TOKEN)
| 38.811615
| 135
| 0.513412
|
07c3af3b65c62f585f64625260dc2611c57e297e
| 1,396
|
py
|
Python
|
trace/google/cloud/trace_v1/types.py
|
q-logic/google-cloud-python
|
a65065c89c059bc564bbdd79288a48970907c399
|
[
"Apache-2.0"
] | null | null | null |
trace/google/cloud/trace_v1/types.py
|
q-logic/google-cloud-python
|
a65065c89c059bc564bbdd79288a48970907c399
|
[
"Apache-2.0"
] | 40
|
2019-07-16T10:04:48.000Z
|
2020-01-20T09:04:59.000Z
|
trace/google/cloud/trace_v1/types.py
|
q-logic/google-cloud-python
|
a65065c89c059bc564bbdd79288a48970907c399
|
[
"Apache-2.0"
] | 2
|
2019-07-18T00:05:31.000Z
|
2019-11-27T14:17:22.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import sys
from google.api_core.protobuf_helpers import get_messages
from google.cloud.trace_v1.proto import trace_pb2
from google.protobuf import empty_pb2
from google.protobuf import timestamp_pb2
_shared_modules = [
empty_pb2,
timestamp_pb2,
]
_local_modules = [
trace_pb2,
]
names = []
for module in _shared_modules: # pragma: NO COVER
for name, message in get_messages(module).items():
setattr(sys.modules[__name__], name, message)
names.append(name)
for module in _local_modules:
for name, message in get_messages(module).items():
message.__module__ = "google.cloud.trace_v1.types"
setattr(sys.modules[__name__], name, message)
names.append(name)
__all__ = tuple(sorted(names))
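# Illustrative note (not part of the original module): after the aliasing above,
# the protobuf messages from trace_pb2 are importable from this module, e.g.
#
#     from google.cloud.trace_v1 import types
#     trace = types.Trace()   # assuming trace_pb2 defines a Trace message, as the v1 proto does
#
# which is the pattern the rest of the generated v1 surface relies on.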
| 27.372549
| 74
| 0.738539
|
7e1fed207803633be00841d46bc4dc68e3aec535
| 3,422
|
py
|
Python
|
setup.py
|
FJLendinez/django-ninja-knox
|
cf4790e52d39b0da678c43eef3fe80876fec3912
|
[
"MIT"
] | null | null | null |
setup.py
|
FJLendinez/django-ninja-knox
|
cf4790e52d39b0da678c43eef3fe80876fec3912
|
[
"MIT"
] | null | null | null |
setup.py
|
FJLendinez/django-ninja-knox
|
cf4790e52d39b0da678c43eef3fe80876fec3912
|
[
"MIT"
] | null | null | null |
# Always prefer setuptools over distutils
# To use a consistent encoding
from codecs import open
from os import path
from setuptools import find_packages, setup
here = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='django-ninja-knox',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='0.1.2',
description='Authentication for django-ninja',
long_description=long_description,
long_description_content_type='text/markdown',
# The project's main homepage.
url='https://github.com/FJLendinez/django-ninja-knox/',
# Author details
author='Francisco Javier Lendínez Tirado',
author_email='fjlendinez@gmail.com',
# Choose your license
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 5 - Production/Stable',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Internet :: WWW/HTTP :: Session',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
# What does your project relate to?
keywords='django-ninja authentication login',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(
exclude=['contrib', 'docs', 'tests*', 'knox_project']),
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['django', 'django-ninja', 'cryptography'],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
extras_require={
'dev': [],
'test': [],
},
python_requires='>=3.7',
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
)
| 36.795699
| 94
| 0.679427
|
5c43288fec9a21a4d7a81f459187f8f61c547a6b
| 331
|
py
|
Python
|
activity/migrations/0009_remove_service_worship.py
|
welz-atm/RCCG-dominionSanctuary
|
46d9ad65370f783756152e731343626e39b1c686
|
[
"MIT"
] | null | null | null |
activity/migrations/0009_remove_service_worship.py
|
welz-atm/RCCG-dominionSanctuary
|
46d9ad65370f783756152e731343626e39b1c686
|
[
"MIT"
] | null | null | null |
activity/migrations/0009_remove_service_worship.py
|
welz-atm/RCCG-dominionSanctuary
|
46d9ad65370f783756152e731343626e39b1c686
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.5 on 2021-09-21 15:05
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('activity', '0008_auto_20210921_1345'),
]
operations = [
migrations.RemoveField(
model_name='service',
name='worship',
),
]
| 18.388889
| 48
| 0.595166
|
817a9de7524c97bcbb3cc00457bd2e02688a0130
| 23,768
|
py
|
Python
|
tests/sampledata.py
|
pentest-a2p2v/pentest-a2p2v-core
|
d8dfee3312c656551f85587ed04770795090879d
|
[
"Apache-2.0"
] | 56
|
2021-06-09T23:40:08.000Z
|
2022-02-10T14:39:57.000Z
|
tests/sampledata.py
|
pentest-a2p2v/pentest-a2p2v-core
|
d8dfee3312c656551f85587ed04770795090879d
|
[
"Apache-2.0"
] | 3
|
2021-05-18T12:23:49.000Z
|
2022-03-03T16:27:57.000Z
|
tests/sampledata.py
|
pentest-a2p2v/pentest-a2p2v-core
|
d8dfee3312c656551f85587ed04770795090879d
|
[
"Apache-2.0"
] | 4
|
2021-06-16T01:38:24.000Z
|
2022-03-28T05:55:11.000Z
|
#!/usr/bin/env python3
#
# Copyright (C) 2018-2021 Toshiba Corporation and Peraton Labs, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import configparser
from a2p2v.types import Capability
class NetworkGraph(dict):
    def __init__(self, nodes=None):
        # Use None instead of a mutable default so instances do not share one dict.
        self.nodes = nodes if nodes is not None else {}
@classmethod
def create_sample_graph(cls):
network_graph = NetworkGraph()
# Populate the dictionary
network_graph['SW1'] = {
'OPC-2': {'route': False},
'USER2-2': {'route': False},
'PLC-1': {'route': False},
}
network_graph['SW2'] = {
'HMI-2': {'route': False},
'OPC-1': {'route': False},
'USER2-1': {'route': False},
}
network_graph['SW3'] = {
'GW-2': {'route': False},
'USER1-1': {'route': False},
'HMI-1': {'route': False},
}
network_graph['HMI'] = {
'HMI-1': {'route': False},
'HMI-2': {'route': False},
}
network_graph['HMI-1'] = {
'HMI': {'route': False},
'SW3': {'route': False},
}
network_graph['HMI-2'] = {
'HMI': {'route': False},
'SW2': {'route': False},
}
network_graph['USER1'] = {'USER1-1': {'route': False}}
network_graph['USER1-1'] = {
'USER1': {'route': False},
'SW3': {'route': False},
}
network_graph['USER2'] = {
'USER2-1': {'route': False},
'USER2-2': {'route': False},
}
network_graph['USER2-1'] = {
'USER2': {'route': False},
'SW2': {'route': False},
}
network_graph['USER2-2'] = {
'USER2': {'route': False},
'SW1': {'route': False},
}
network_graph['GW'] = {
'GW-1': {'route': False},
'GW-2': {'route': False},
}
network_graph['GW-1'] = {
'GW': {'route': False},
'EX_CLOUD': {'route': False},
}
network_graph['GW-2'] = {
'GW': {'route': False},
'SW3': {'route': False},
}
network_graph['EX_CLOUD'] = {
'ATTACKER-1': {'route': False},
'GW-1': {'route': False},
}
network_graph['ATTACKER'] = {'ATTACKER-1': {'route': False}}
network_graph['ATTACKER-1'] = {
'ATTACKER': {'route': False},
'EX_CLOUD': {'route': False},
}
network_graph['OPC'] = {
'OPC-1': {'route': False},
'OPC-2': {'route': False},
}
network_graph['OPC-1'] = {
'OPC': {'route': False},
'SW2': {'route': False},
}
network_graph['OPC-2'] = {
'OPC': {'route': False},
'SW1': {'route': False},
}
network_graph['PLC'] = {'PLC-1': {'route': False}}
network_graph['PLC-1'] = {
'PLC': {'route': False},
'SW1': {'route': False},
}
# Populate the nodes dictionary
network_graph.nodes['SW1'] = {
'node_id': 'SW1',
'node_passive': True,
'nodeType': 'switch',
}
network_graph.nodes['SW2'] = {
'node_id': 'SW2',
'node_passive': True,
'nodeType': 'switch',
}
network_graph.nodes['SW3'] = {
'node_id': 'SW3',
'node_passive': True,
'nodeType': 'switch',
}
network_graph.nodes['HMI'] = {
'node_id': 'HMI',
'node_passive': False,
'nodeType': 'host',
}
network_graph.nodes['HMI-1'] = {
'node_id': '192.168.30.101',
'node_passive': True,
'nodeType': 'interface',
}
network_graph.nodes['HMI-2'] = {
'node_id': '192.168.20.101',
'node_passive': True,
'nodeType': 'interface',
}
network_graph.nodes['USER1'] = {
'node_id': 'USER1',
'node_passive': False,
'nodeType': 'host',
}
network_graph.nodes['USER1-1'] = {
'node_id': '192.168.30.102',
'node_passive': True,
'nodeType': 'interface',
}
network_graph.nodes['USER2'] = {
'node_id': 'USER2',
'node_passive': False,
'nodeType': 'host',
}
network_graph.nodes['USER2-1'] = {
'node_id': '192.168.20.103',
'node_passive': True,
'nodeType': 'interface',
}
network_graph.nodes['USER2-2'] = {
'node_id': '192.168.10.103',
'node_passive': True,
'nodeType': 'interface',
}
network_graph.nodes['GW'] = {
'node_id': 'GW',
'node_passive': False,
'nodeType': 'host',
}
network_graph.nodes['GW-1'] = {
'node_id': '172.16.1.1',
'node_passive': True,
'nodeType': 'interface',
}
network_graph.nodes['GW-2'] = {
'node_id': '192.168.30.1',
'node_passive': True,
'nodeType': 'interface',
}
network_graph.nodes['EX_CLOUD'] = {
'node_id': 'EX_CLOUD',
'node_passive': True,
'nodeType': 'switch',
}
network_graph.nodes['ATTACKER'] = {
'node_id': 'ATTACKER',
'node_passive': False,
'nodeType': 'host',
}
network_graph.nodes['ATTACKER-1'] = {
'node_id': '192.168.30.200',
'node_passive': True,
'nodeType': 'interface',
}
network_graph.nodes['OPC'] = {
'node_id': 'OPC',
'node_passive': False,
'nodeType': 'host',
}
network_graph.nodes['OPC-1'] = {
'node_id': '192.168.20.104',
'node_passive': True,
'nodeType': 'interface',
}
network_graph.nodes['OPC-2'] = {
'node_id': '192.168.10.104',
'node_passive': True,
'nodeType': 'interface',
}
network_graph.nodes['PLC'] = {
'node_id': 'PLC',
'node_passive': False,
'nodeType': 'host',
}
network_graph.nodes['PLC-1'] = {
'node_id': '192.168.10.250',
'node_passive': True,
'nodeType': 'interface',
}
return network_graph
def get_hosts_db():
hosts_db = {
'HMI': {
'interfaces': {('192.168.30.101', ''), ('192.168.20.101', '')},
'capabilities': [
'smb',
'exploit/windows/smb/ms10_061_spoolss',
'exploit/windows/smb/ms17_010_psexec',
'rdp',
],
},
'USER1': {
'interfaces': {('192.168.30.102', '')},
'capabilities': [
'rdp',
'smb',
'exploit/windows/smb/ms17_010_psexec',
],
},
'USER2': {
'interfaces': {('192.168.10.103', ''), ('192.168.20.103', '')},
'capabilities': [
'rdp',
'smb',
'exploit/windows/smb/ms17_010_psexec',
],
},
'GW': {
'interfaces': {('192.168.30.1', ''), ('172.16.1.1', '')},
'capabilities': ['ssh'],
},
'ATTACKER': {
'interfaces': {('192.168.30.200', '')},
'capabilities': [],
},
'OPC': {
'interfaces': {('192.168.20.104', ''), ('192.168.10.104', '')},
'capabilities': [
'rdp',
'smb',
'exploit/windows/smb/ms17_010_psexec',
],
},
'PLC': {
'interfaces': {('192.168.10.250', '')},
'capabilities': [
'modbus',
'modbus.write_register',
'modbus.write_register.change_temp',
],
},
}
return hosts_db
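# A minimal usage sketch, not part of the original module (the helper name is illustrative
# only): each interface entry above is a two-element tuple whose first item is the IP
# address, so a reverse lookup from IP to host can be written directly against hosts_db.
def find_host_by_ip(hosts_db, ip):
    """Return the name of the host owning the given IP address, or None."""
    for host_name, info in hosts_db.items():
        if any(addr == ip for addr, _ in info['interfaces']):
            return host_name
    return None
# Example: find_host_by_ip(get_hosts_db(), '192.168.10.250') returns 'PLC'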
# Capabilities dictionary
def get_capabilities():
capabilities = {
'remote_code_execution': Capability(
capability_id="remote_code_execution",
title="remote_code_execution",
capability_class="exploit",
subclass="",
severity="high",
solution="",
rank="",
cves="",
preconditions=[],
actions=[],
postconditions=[
{
'type': 'state',
'key': 'current_access',
'value': 'metasploit',
},
{
'type': 'state',
'key': 'current_host',
'value': '$target_host',
},
{'type': 'state', 'key': 'current_role', 'value': 'admin'},
],
cvss=10,
),
'privilege_escalation': Capability(
capability_id="privilege_escalation",
title="privilege_escalation",
capability_class="exploit",
subclass="",
severity="high",
solution="",
rank="",
cves="",
preconditions=[
{
'host': '$target_host',
'type': 'state',
'key': 'current_access',
'value': 'shell',
}
],
actions=[],
postconditions=[
{'type': 'state', 'key': 'current_role', 'value': 'admin'}
],
cvss=10,
),
'smb': Capability(
capability_id="smb",
title="smb",
capability_class="service",
subclass="smb",
severity="low",
solution="Consider disabling or restricting access to file sharing on hosts",
rank="normal",
cves="",
preconditions=[
{
'host': '$current_host',
'type': 'state',
'key': 'current_access',
'value': 'shell',
},
{
'host': '$target_host',
'type': 'credential',
'key': 'role',
'value': 'user',
},
{
'host': '$target_host',
'type': 'service',
'key': 'port',
'value': 'tcp/445',
},
],
actions=[
{
'type': 'shell_command',
'key': 'SMB Client',
'value': 'smbclient -U $username -P $password //$target_host/C$',
}
],
postconditions=[],
cvss=10,
),
'rdp': Capability(
capability_id="rdp",
title="rdp",
capability_class="service",
subclass="rdp",
severity="low",
solution="Ensure secure passwords are used",
rank="normal",
cves="",
preconditions=[
{
'host': '$current_host',
'type': 'state',
'key': 'current_access',
'value': 'shell',
},
{
'host': '$target_host',
'type': 'credential',
'key': 'role',
'value': 'user',
},
{
'host': '$target_host',
'type': 'service',
'key': 'port',
'value': 'tcp/3389',
},
],
actions=[
{
'type': 'shell_command',
'key': 'Remote Desktop',
'value': 'rdesktop -u $username -p $password $target_host',
}
],
postconditions=[
{'type': 'state', 'key': 'current_access', 'value': 'desktop'},
{
'type': 'state',
'key': 'current_host',
'value': '$target_host',
},
{'type': 'state', 'key': 'current_role', 'value': '$role'},
],
cvss=10,
),
'ssh': Capability(
capability_id="ssh",
title="ssh",
capability_class="service",
subclass="ssh",
severity="none",
solution="Ensure secure passwords are used",
rank="normal",
cves="",
preconditions=[
{
'host': '$current_host',
'type': 'state',
'key': 'current_access',
'value': 'shell',
},
{
'host': '$target_host',
'type': 'credential',
'key': 'role',
'value': 'user',
},
{
'host': '$target_host',
'type': 'service',
'key': 'port',
'value': 'tcp/22',
},
],
actions=[
{
'type': 'metasploit',
'key': 'parameters',
'value': 'module=auxiliary/scanner/ssh/ssh_login rhosts=$target_host username=$username password=$password',
}
],
postconditions=[
{'type': 'state', 'key': 'current_access', 'value': 'shell'},
{
'type': 'state',
'key': 'current_host',
'value': '$target_host',
},
{'type': 'state', 'key': 'current_role', 'value': '$role'},
],
cvss=10,
),
'modbus': Capability(
capability_id="modbus",
title="modbus",
capability_class="service",
subclass="modbus",
severity="high",
solution="Restrict access to this sensitive, insecure service.",
rank="excellent",
cves="",
preconditions=[
{
'host': '$current_host',
'type': 'state',
'key': 'current_access',
'value': 'shell',
},
{
'host': '$target_host',
'type': 'service',
'key': 'port',
'value': 'tcp/502',
},
],
actions=[
{
'type': 'metasploit',
'key': 'parameters',
'value': 'module=auxiliary/scanner/scada/modbusclient rhost=$target_host action=$action data_address=$data_address data_registers=$data_registers unit_number=$unit_number',
}
],
postconditions=[],
cvss=10,
),
'modbus.write_register': Capability(
capability_id="modbus.write_register",
title="modbus",
capability_class="service",
subclass="modbus.write_register",
severity="high",
solution="Restrict access to this sensitive, insecure service.",
rank="excellent",
cves="",
preconditions=[
{
'host': '$target_host',
'type': 'capability',
'key': 'capability_id',
'value': 'modbus',
}
],
actions=[
{
'type': 'capability',
'key': 'parameters',
'value': 'action=WRITE_REGISTERS',
}
],
postconditions=[],
cvss=10,
),
'modbus.write_register.change_temp': Capability(
capability_id="modbus.write_register.change_temp",
title="modbus",
capability_class="service",
subclass="modbus.write_register.change_temp",
severity="high",
solution="Restrict access to this sensitive, insecure service.",
rank="excellent",
cves="",
preconditions=[
{
'host': '$target_host',
'type': 'capability',
'key': 'capability_id',
'value': 'modbus.write_register',
}
],
actions=[
{
'type': 'capability',
'key': 'parameters',
'value': 'data_address=2 data_registers=30 unit_number=255',
}
],
postconditions=[],
cvss=10,
),
'exploit/windows/smb/ms17_010_psexec': Capability(
capability_id="exploit/windows/smb/ms17_010_psexec",
title="MS17-010 EternalRomance/EternalSynergy/EternalChampion SMB Remote Windows Code Execution",
capability_class="exploit",
subclass="remote_code_execution",
severity="high",
solution="Apply Microsoft patches",
rank="normal",
cves="CVE-2017-0143,CVE-2017-0146,CVE-2017-0147",
preconditions=[],
actions=[
{
'type': 'metasploit',
'key': 'remote_code_execution',
'value': 'module=exploit/windows/smb/ms17_010_psexec rhosts=$target_host lport=4444',
}
],
postconditions=[
{
'type': 'state',
'key': 'current_access',
'value': 'metasploit',
},
{
'type': 'state',
'key': 'current_host',
'value': '$target_host',
},
{'type': 'state', 'key': 'current_role', 'value': 'admin'},
],
cvss=10,
),
'exploit/windows/smb/ms10_061_spoolss': Capability(
capability_id="exploit/windows/smb/ms10_061_spoolss",
title="MS10-061 Microsoft Print Spooler Service Impersonation Vulnerability",
capability_class="exploit",
subclass="remote_code_execution",
severity="high",
solution="Microsoft has released a set of patches for Windows XP, 2003, Vista, 2008, 7, and 2008 R2.",
rank="excellent",
cves="CVE-2010-2729",
preconditions=[],
actions=[
{
'type': 'metasploit',
'key': 'remote_code_execution',
'value': 'module=exploit/windows/smb/ms10_061_spoolss rhosts=$target_host',
}
],
postconditions=[
{
'type': 'state',
'key': 'current_access',
'value': 'metasploit',
},
{
'type': 'state',
'key': 'current_host',
'value': '$target_host',
},
{'type': 'state', 'key': 'current_role', 'value': 'admin'},
],
cvss=10,
),
}
return capabilities
def create_sample_config():
# Create a configuration object
config = configparser.ConfigParser()
# Create the configuration sections
for section in [
'INPUT',
'PLANNING',
'INITIAL CONDITIONS',
'GOALS',
'SENSITIVE',
'METASPLOIT',
]:
config.add_section(section)
# Populate the INPUT section
config.set('INPUT', 'default_datafile', 'lab_config/simple.nessus')
config.set('INPUT', 'default_netfile', 'lab_config/networkGraph.xml')
# Populate the PLANNING section
config.set(
'PLANNING',
'score_weights',
'{"NUM_EXPLOITS":-0.5, "LENGTH":-0.5, "NUM_SERVICES":0.2, "SEVERITY":-0.2}',
)
# Populate the INITIAL CONDITIONS section
config.set(
'INITIAL CONDITIONS',
'initial_condition1',
'{"host":"attacker", "type":"state", "key":"initial_host", "value":"attacker"}',
)
config.set(
'INITIAL CONDITIONS',
'initial_condition2',
'{"host":"attacker", "type":"state", "key":"current_role", "value":"admin"}',
)
config.set(
'INITIAL CONDITIONS',
'initial_condition3',
'{"host":"attacker", "type":"state", "key":"current_access", "value":"metasploit"}',
)
config.set(
'INITIAL CONDITIONS',
'initial_condition4',
'{"host":"gw", "type":"credential", "username":"username", "password":"password", "role":"user"}',
)
config.set(
'INITIAL CONDITIONS',
'initial_condition5',
'{"host":"192.168.0.115", "type":"credential", "username":"testuser", "password":"welcome1", "role":"user"}',
)
# Populate the GOALS section
config.set(
'GOALS',
'change_temperature',
'{"type":"state", "key":"current_status", "value":"change_temp"}',
)
# Populate the SENSITIVE section
config.set(
'SENSITIVE',
'sensitive1',
'{"type":"state", "key":"current_host", "value":"OPC"}',
)
config.set(
'SENSITIVE',
'sensitive2',
'{"type":"state", "key":"current_status", "value":"change_temp"}',
)
# Populate the METASPLOIT section
config.set('METASPLOIT', 'host', '127.0.0.1')
config.set('METASPLOIT', 'port', '55552')
config.set('METASPLOIT', 'user', 'msf')
config.set('METASPLOIT', 'password', 'welcome1')
config.set('METASPLOIT', 'payload', 'example_payload')
return config
initial_conditions = {
'initial_condition1': '{"host":"attacker", "type":"state", "key":"initial_host", "value":"attacker"}',
'initial_condition2': '{"host":"attacker", "type":"state", "key":"current_role", "value":"admin"}',
'initial_condition3': '{"host":"attacker", "type":"state", "key":"current_access", "value":"metasploit"}',
'initial_condition4': '{"host":"gw", "type":"credential", "username":"username", "password":"password", "role":"user"}',
'initial_condition5': '{"host":"192.168.0.115", "type":"credential", "username":"testuser", "password":"welcome1", "role":"user"}',
}
goal_conditions = [
{
'id': 'goal_condition1',
"type": "state",
"key": "current_status",
"value": "change_temp",
},
]
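# A minimal sketch, not part of the original module, of how the JSON-encoded conditions
# above can be parsed and how the $-style placeholders used in the capability actions
# might be filled in with the standard library; the substituted values are illustrative.
import json
from string import Template

parsed = json.loads(initial_conditions['initial_condition4'])
# parsed == {'host': 'gw', 'type': 'credential', 'username': 'username',
#            'password': 'password', 'role': 'user'}

smb_command = 'smbclient -U $username -P $password //$target_host/C$'
# safe_substitute() fills the named placeholders and leaves the literal trailing '$'
# of the C$ share untouched (substitute() would raise on it).
filled = Template(smb_command).safe_substitute(
    username='testuser', password='welcome1', target_host='192.168.20.104')
# filled == 'smbclient -U testuser -P welcome1 //192.168.20.104/C$'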
sample_network_graph = NetworkGraph.create_sample_graph()
sample_hosts_db = get_hosts_db()
sample_config = create_sample_config()
if __name__ == '__main__':
    # get_capabilities() builds the dictionary; it is not defined at module level above
    capabilities = get_capabilities()
    for capability_name in capabilities.keys():
        print(f'capability: {capability_name}')
| 32.337415
| 192
| 0.436595
|
00cd971e21c2a17e3ba268837961686a4247125a
| 2,964
|
py
|
Python
|
sdk/textanalytics/azure-ai-textanalytics/samples/sample_get_detailed_diagnostics_information.py
|
iscai-msft/azure-sdk-for-python
|
83715b95c41e519d5be7f1180195e2fba136fc0f
|
[
"MIT"
] | 8
|
2021-01-13T23:44:08.000Z
|
2021-03-17T10:13:36.000Z
|
sdk/textanalytics/azure-ai-textanalytics/samples/sample_get_detailed_diagnostics_information.py
|
iscai-msft/azure-sdk-for-python
|
83715b95c41e519d5be7f1180195e2fba136fc0f
|
[
"MIT"
] | 226
|
2019-07-24T07:57:21.000Z
|
2019-10-15T01:07:24.000Z
|
sdk/textanalytics/azure-ai-textanalytics/samples/sample_get_detailed_diagnostics_information.py
|
iscai-msft/azure-sdk-for-python
|
83715b95c41e519d5be7f1180195e2fba136fc0f
|
[
"MIT"
] | 2
|
2020-05-21T22:51:22.000Z
|
2020-05-26T20:53:01.000Z
|
# coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
FILE: sample_get_detailed_diagnostics_information.py
DESCRIPTION:
This sample demonstrates how to retrieve batch statistics, the
model version used, and the raw response in JSON format returned from the service.
USAGE:
python sample_get_detailed_diagnostics_information.py
Set the environment variables with your own values before running the sample:
1) AZURE_TEXT_ANALYTICS_ENDPOINT - the endpoint to your Cognitive Services resource.
2) AZURE_TEXT_ANALYTICS_KEY - your Text Analytics subscription key
"""
import os
import logging
import json
_LOGGER = logging.getLogger(__name__)
class GetDetailedDiagnosticsInformationSample(object):
def get_detailed_diagnostics_information(self):
from azure.core.credentials import AzureKeyCredential
from azure.ai.textanalytics import TextAnalyticsClient
endpoint = os.environ["AZURE_TEXT_ANALYTICS_ENDPOINT"]
key = os.environ["AZURE_TEXT_ANALYTICS_KEY"]
# This client will log detailed information about its HTTP sessions, at DEBUG level
text_analytics_client = TextAnalyticsClient(endpoint=endpoint, credential=AzureKeyCredential(key), logging_enable=True)
documents = [
"I had the best day of my life.",
"This was a waste of my time. The speaker put me to sleep.",
"No tengo dinero ni nada que dar...",
"L'hôtel n'était pas très confortable. L'éclairage était trop sombre."
]
json_responses = []
def callback(resp):
_LOGGER.debug("document_count: {}".format(resp.statistics["document_count"]))
_LOGGER.debug("valid_document_count: {}".format(resp.statistics["valid_document_count"]))
_LOGGER.debug("erroneous_document_count: {}".format(resp.statistics["erroneous_document_count"]))
_LOGGER.debug("transaction_count: {}".format(resp.statistics["transaction_count"]))
_LOGGER.debug("model_version: {}".format(resp.model_version))
json_response = json.dumps(resp.raw_response)
json_responses.append(json_response)
result = text_analytics_client.extract_key_phrases(
documents,
show_stats=True,
model_version="latest",
raw_response_hook=callback
)
for doc in result:
_LOGGER.warning("Doc with id {} has these warnings: {}".format(doc.id, doc.warnings))
_LOGGER.debug("json response: {}".format(json_responses[0]))
if __name__ == '__main__':
sample = GetDetailedDiagnosticsInformationSample()
sample.get_detailed_diagnostics_information()
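# Note, not part of the original sample: the callback above logs at DEBUG level and no
# handler is configured here, so to actually see the statistics and raw JSON on the
# console you would first need a minimal logging setup, e.g.:
#
#     import logging
#     logging.basicConfig(level=logging.DEBUG, format="%(name)s %(levelname)s %(message)s")
#     GetDetailedDiagnosticsInformationSample().get_detailed_diagnostics_information()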
| 39.52
| 127
| 0.670715
|
2c48a32a3dd63aa84092ff1e2c953991fc7e076d
| 689
|
py
|
Python
|
src/shared/fileloading.py
|
sr-lab/pyrrho
|
8e05101d6bea97872bf8386ff1aa4a86e259ffd1
|
[
"MIT"
] | null | null | null |
src/shared/fileloading.py
|
sr-lab/pyrrho
|
8e05101d6bea97872bf8386ff1aa4a86e259ffd1
|
[
"MIT"
] | null | null | null |
src/shared/fileloading.py
|
sr-lab/pyrrho
|
8e05101d6bea97872bf8386ff1aa4a86e259ffd1
|
[
"MIT"
] | null | null | null |
def load_file_lines (file):
""" Loads a file as a list of lines.
Args:
file (str): The path of the file.
Returns:
list of str: A list of lines in the file.
"""
data = []
with open(file, mode='r') as target:
for line in target:
data.append(line.rstrip('\n'))
return data
def load_float_file (file):
""" Loads a data file of newline-delimited floating-point values.
Args:
file (str): The path of the file.
Returns:
list of float: The data from the file.
"""
data = []
with open(file, 'r') as target:
for entry in target:
data.append(float(entry))
return data
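# A self-contained usage sketch, not part of the original module: it writes a small
# temporary data file so that both loaders can be demonstrated, then removes it.
if __name__ == '__main__':
    import os
    import tempfile
    tmp = tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False)
    tmp.write('0.25\n0.5\n0.75\n')
    tmp.close()
    print(load_file_lines(tmp.name))   # ['0.25', '0.5', '0.75']
    print(load_float_file(tmp.name))   # [0.25, 0.5, 0.75]
    os.unlink(tmp.name)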
| 23.758621
| 69
| 0.56894
|
2c16a3f84338d3792e22c9851882e63f18e39c34
| 4,137
|
py
|
Python
|
recsys19_hybridsvd/polara-master/polara/recommender/external/lightfm/lightfmwrapper.py
|
chenzheng128/SoRec
|
deb045e3b9ce575671d08e15ec5a011b49e4d45f
|
[
"Apache-2.0"
] | 1
|
2020-08-15T09:57:34.000Z
|
2020-08-15T09:57:34.000Z
|
recsys19_hybridsvd/polara-master/polara/recommender/external/lightfm/lightfmwrapper.py
|
chenzheng128/SoRec
|
deb045e3b9ce575671d08e15ec5a011b49e4d45f
|
[
"Apache-2.0"
] | null | null | null |
recsys19_hybridsvd/polara-master/polara/recommender/external/lightfm/lightfmwrapper.py
|
chenzheng128/SoRec
|
deb045e3b9ce575671d08e15ec5a011b49e4d45f
|
[
"Apache-2.0"
] | 1
|
2021-01-12T07:32:47.000Z
|
2021-01-12T07:32:47.000Z
|
# python 2/3 interoperability
from __future__ import print_function
import numpy as np
from numpy.lib.stride_tricks import as_strided
from lightfm import LightFM
from polara.recommender.models import RecommenderModel
from polara.lib.similarity import stack_features
from polara.tools.timing import track_time
class LightFMWrapper(RecommenderModel):
def __init__(self, *args, item_features=None, user_features=None, **kwargs):
super(LightFMWrapper, self).__init__(*args, **kwargs)
self.method='LightFM'
self.rank = 10
self.fit_method = 'fit'
self.item_features = item_features
self.item_feature_labels = None
self.item_alpha = 0.0
self.item_identity = True
self._item_features_csr = None
self.user_features = user_features
self.user_feature_labels = None
self.user_alpha = 0.0
self.user_identity = True
self._user_features_csr = None
self.loss = 'warp'
self.learning_schedule = 'adagrad'
self.learning_rate = 0.05
self.max_sampled = 10
self.seed = 0
self._model = None
def build(self):
self._model = LightFM(no_components=self.rank,
item_alpha=self.item_alpha,
user_alpha=self.user_alpha,
loss=self.loss,
learning_rate=self.learning_rate,
learning_schedule=self.learning_schedule,
max_sampled=self.max_sampled,
random_state=self.seed)
fit = getattr(self._model, self.fit_method)
matrix = self.get_training_matrix()
if self.item_features is not None:
item_features = self.item_features.reindex(self.data.index.itemid.old.values, fill_value=[])
self._item_features_csr, self.item_feature_labels = stack_features(item_features,
add_identity=self.item_identity,
normalize=True,
dtype='f4')
if self.user_features is not None:
user_features = self.user_features.reindex(self.data.index.userid.training.old.values, fill_value=[])
self._user_features_csr, self.user_feature_labels = stack_features(user_features,
add_identity=self.user_identity,
normalize=True,
dtype='f4')
with track_time(self.training_time, verbose=self.verbose, model=self.method):
fit(matrix, item_features=self._item_features_csr, user_features=self._user_features_csr)
def slice_recommendations(self, test_data, shape, start, stop, test_users=None):
if self.data.warm_start:
raise NotImplementedError
slice_data = self._slice_test_data(test_data, start, stop)
all_items = self.data.index.itemid.new.values
n_users = stop - start
n_items = len(all_items)
# use stride tricks to avoid unnecessary copies of repeated indices
# have to conform with LightFM's dtype to avoid additional copies
itemsize = np.dtype('i4').itemsize
useridx = as_strided(test_users[start:stop].astype('i4', copy=False),
(n_users, n_items), (itemsize, 0))
itemidx = as_strided(all_items.astype('i4', copy=False),
(n_users, n_items), (0, itemsize))
scores = self._model.predict(useridx.ravel(), itemidx.ravel(),
user_features=self._user_features_csr,
item_features=self._item_features_csr
).reshape(n_users, n_items)
return scores, slice_data
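# A standalone sketch, not part of the original class, of the stride trick used in
# slice_recommendations() above: both index matrices are zero-copy views over the same
# 1-D arrays, so no n_users x n_items index copies are materialised before predict().
if __name__ == '__main__':
    users = np.array([7, 9], dtype='i4')
    items = np.array([0, 1, 2], dtype='i4')
    itemsize = users.dtype.itemsize
    useridx = as_strided(users, (2, 3), (itemsize, 0))   # [[7 7 7], [9 9 9]]
    itemidx = as_strided(items, (2, 3), (0, itemsize))   # [[0 1 2], [0 1 2]]
    print(useridx.ravel())   # [7 7 7 9 9 9]
    print(itemidx.ravel())   # [0 1 2 0 1 2]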
| 45.966667
| 113
| 0.564902
|
6b88e3f604f15e2a12f5cb4f9dbbfc2bff039b14
| 30,920
|
py
|
Python
|
include/HydrusConstants.py
|
sakharovaan/hydrus
|
443c8932947d0c786435bcd53911536e0bdce7e8
|
[
"WTFPL"
] | 3
|
2019-06-19T09:29:50.000Z
|
2019-12-18T14:17:21.000Z
|
include/HydrusConstants.py
|
sakharovaan/hydrus-unleashed
|
443c8932947d0c786435bcd53911536e0bdce7e8
|
[
"WTFPL"
] | 4
|
2019-02-03T14:43:05.000Z
|
2019-06-26T20:59:13.000Z
|
include/HydrusConstants.py
|
sakharovaan/hydrus
|
443c8932947d0c786435bcd53911536e0bdce7e8
|
[
"WTFPL"
] | null | null | null |
import os
import sys
# dirs
if getattr( sys, 'frozen', False ):
RUNNING_FROM_FROZEN_BUILD = True
# we are in a pyinstaller frozen app
BASE_DIR = getattr( sys, '_MEIPASS', None )
if BASE_DIR is None:
raise Exception( 'It seems this hydrus is running from a frozen bundle, but there was no _MEIPASS variable under sys to define the bundle directory.' )
else:
RUNNING_FROM_FROZEN_BUILD = False
BASE_DIR = sys.path[0]
if BASE_DIR == '':
BASE_DIR = os.getcwd()
PLATFORM_WINDOWS = False
PLATFORM_OSX = False
PLATFORM_LINUX = False
if sys.platform == 'win32': PLATFORM_WINDOWS = True
elif sys.platform == 'darwin': PLATFORM_OSX = True
elif sys.platform == 'linux': PLATFORM_LINUX = True
RUNNING_FROM_SOURCE = sys.argv[0].endswith( '.py' ) or sys.argv[0].endswith( '.pyw' )
RUNNING_FROM_OSX_APP = os.path.exists( os.path.join( BASE_DIR, 'running_from_app' ) )
BIN_DIR = os.path.join( BASE_DIR, 'bin' )
HELP_DIR = os.path.join( BASE_DIR, 'help' )
INCLUDE_DIR = os.path.join( BASE_DIR, 'include' )
STATIC_DIR = os.path.join( BASE_DIR, 'static' )
DEFAULT_DB_DIR = os.path.join( BASE_DIR, 'db' )
if PLATFORM_OSX:
USERPATH_DB_DIR = os.path.join( os.path.expanduser( '~' ), 'Library', 'Hydrus' )
else:
USERPATH_DB_DIR = os.path.join( os.path.expanduser( '~' ), 'Hydrus' )
LICENSE_PATH = os.path.join( BASE_DIR, 'license.txt' )
#
import traceback
import yaml
options = {}
# Misc
NETWORK_VERSION = 18
SOFTWARE_VERSION = 357
CLIENT_API_VERSION = 8
SERVER_THUMBNAIL_DIMENSIONS = ( 200, 200 )
HYDRUS_KEY_LENGTH = 32
UPDATE_DURATION = 100000
READ_BLOCK_SIZE = 256 * 1024
lifetimes = [ ( 'one month', 31 * 86400 ), ( 'three months', 3 * 31 * 86400 ), ( 'six months', 6 * 31 * 86400 ), ( 'one year', 12 * 31 * 86400 ), ( 'two years', 24 * 31 * 86400 ), ( 'five years', 60 * 31 * 86400 ), ( 'does not expire', None ) ]
# Enums
BANDWIDTH_TYPE_DATA = 0
BANDWIDTH_TYPE_REQUESTS = 1
bandwidth_type_string_lookup = {}
bandwidth_type_string_lookup[ BANDWIDTH_TYPE_DATA ] = 'data'
bandwidth_type_string_lookup[ BANDWIDTH_TYPE_REQUESTS ] = 'requests'
CONTENT_MERGE_ACTION_COPY = 0
CONTENT_MERGE_ACTION_MOVE = 1
CONTENT_MERGE_ACTION_TWO_WAY_MERGE = 2
content_merge_string_lookup = {}
content_merge_string_lookup[ CONTENT_MERGE_ACTION_COPY ] = 'copy from worse to better'
content_merge_string_lookup[ CONTENT_MERGE_ACTION_MOVE ] = 'move from worse to better'
content_merge_string_lookup[ CONTENT_MERGE_ACTION_TWO_WAY_MERGE ] = 'copy in both directions'
CONTENT_STATUS_CURRENT = 0
CONTENT_STATUS_PENDING = 1
CONTENT_STATUS_DELETED = 2
CONTENT_STATUS_PETITIONED = 3
content_status_string_lookup = {}
content_status_string_lookup[ CONTENT_STATUS_CURRENT ] = 'current'
content_status_string_lookup[ CONTENT_STATUS_PENDING ] = 'pending'
content_status_string_lookup[ CONTENT_STATUS_DELETED ] = 'deleted'
content_status_string_lookup[ CONTENT_STATUS_PETITIONED ] = 'petitioned'
CONTENT_TYPE_MAPPINGS = 0
CONTENT_TYPE_TAG_SIBLINGS = 1
CONTENT_TYPE_TAG_PARENTS = 2
CONTENT_TYPE_FILES = 3
CONTENT_TYPE_RATINGS = 4
CONTENT_TYPE_MAPPING = 5
CONTENT_TYPE_DIRECTORIES = 6
CONTENT_TYPE_URLS = 7
CONTENT_TYPE_VETO = 8
CONTENT_TYPE_ACCOUNTS = 9
CONTENT_TYPE_OPTIONS = 10
CONTENT_TYPE_SERVICES = 11
CONTENT_TYPE_UNKNOWN = 12
CONTENT_TYPE_ACCOUNT_TYPES = 13
CONTENT_TYPE_VARIABLE = 14
CONTENT_TYPE_HASH = 15
CONTENT_TYPE_TIMESTAMP = 16
CONTENT_TYPE_TITLE = 17
CONTENT_TYPE_NOTES = 18
CONTENT_TYPE_FILE_VIEWING_STATS = 19
content_type_string_lookup = {}
content_type_string_lookup[ CONTENT_TYPE_MAPPINGS ] = 'mappings'
content_type_string_lookup[ CONTENT_TYPE_TAG_SIBLINGS ] = 'tag siblings'
content_type_string_lookup[ CONTENT_TYPE_TAG_PARENTS ] = 'tag parents'
content_type_string_lookup[ CONTENT_TYPE_FILES ] = 'files'
content_type_string_lookup[ CONTENT_TYPE_RATINGS ] = 'ratings'
content_type_string_lookup[ CONTENT_TYPE_MAPPING ] = 'mapping'
content_type_string_lookup[ CONTENT_TYPE_DIRECTORIES ] = 'directories'
content_type_string_lookup[ CONTENT_TYPE_URLS ] = 'urls'
content_type_string_lookup[ CONTENT_TYPE_VETO ] = 'veto'
content_type_string_lookup[ CONTENT_TYPE_ACCOUNTS ] = 'accounts'
content_type_string_lookup[ CONTENT_TYPE_OPTIONS ] = 'options'
content_type_string_lookup[ CONTENT_TYPE_SERVICES ] = 'services'
content_type_string_lookup[ CONTENT_TYPE_UNKNOWN ] = 'unknown'
content_type_string_lookup[ CONTENT_TYPE_ACCOUNT_TYPES ] = 'account types'
content_type_string_lookup[ CONTENT_TYPE_VARIABLE ] = 'variable'
content_type_string_lookup[ CONTENT_TYPE_HASH ] = 'hash'
content_type_string_lookup[ CONTENT_TYPE_TIMESTAMP ] = 'timestamp'
content_type_string_lookup[ CONTENT_TYPE_TITLE ] = 'title'
content_type_string_lookup[ CONTENT_TYPE_NOTES ] = 'notes'
content_type_string_lookup[ CONTENT_TYPE_FILE_VIEWING_STATS ] = 'file viewing stats'
REPOSITORY_CONTENT_TYPES = [ CONTENT_TYPE_FILES, CONTENT_TYPE_MAPPINGS, CONTENT_TYPE_TAG_PARENTS, CONTENT_TYPE_TAG_SIBLINGS ]
CONTENT_UPDATE_ADD = 0
CONTENT_UPDATE_DELETE = 1
CONTENT_UPDATE_PEND = 2
CONTENT_UPDATE_RESCIND_PEND = 3
CONTENT_UPDATE_PETITION = 4
CONTENT_UPDATE_RESCIND_PETITION = 5
CONTENT_UPDATE_EDIT_LOG = 6
CONTENT_UPDATE_ARCHIVE = 7
CONTENT_UPDATE_INBOX = 8
CONTENT_UPDATE_RATING = 9
CONTENT_UPDATE_DENY_PEND = 11
CONTENT_UPDATE_DENY_PETITION = 12
CONTENT_UPDATE_ADVANCED = 13
CONTENT_UPDATE_UNDELETE = 14
CONTENT_UPDATE_SET = 15
CONTENT_UPDATE_FLIP = 16
content_update_string_lookup = {}
content_update_string_lookup[ CONTENT_UPDATE_ADD ] = 'add'
content_update_string_lookup[ CONTENT_UPDATE_DELETE ] = 'delete'
content_update_string_lookup[ CONTENT_UPDATE_PEND ] = 'pending'
content_update_string_lookup[ CONTENT_UPDATE_RESCIND_PEND ] = 'rescind pending'
content_update_string_lookup[ CONTENT_UPDATE_PETITION ] = 'petition'
content_update_string_lookup[ CONTENT_UPDATE_RESCIND_PETITION ] = 'rescind petition'
content_update_string_lookup[ CONTENT_UPDATE_EDIT_LOG ] = 'edit log'
content_update_string_lookup[ CONTENT_UPDATE_ARCHIVE ] = 'archive'
content_update_string_lookup[ CONTENT_UPDATE_INBOX ] = 'inbox'
content_update_string_lookup[ CONTENT_UPDATE_RATING ] = 'rating'
content_update_string_lookup[ CONTENT_UPDATE_DENY_PEND ] = 'deny pend'
content_update_string_lookup[ CONTENT_UPDATE_DENY_PETITION ] = 'deny petition'
content_update_string_lookup[ CONTENT_UPDATE_UNDELETE ] = 'undelete'
content_update_string_lookup[ CONTENT_UPDATE_SET ] = 'set'
content_update_string_lookup[ CONTENT_UPDATE_FLIP ] = 'flip on/off'
DEFINITIONS_TYPE_HASHES = 0
DEFINITIONS_TYPE_TAGS = 1
DUPLICATE_POTENTIAL = 0
DUPLICATE_FALSE_POSITIVE = 1
DUPLICATE_SAME_QUALITY = 2
DUPLICATE_ALTERNATE = 3
DUPLICATE_BETTER = 4
DUPLICATE_SMALLER_BETTER = 5
DUPLICATE_LARGER_BETTER = 6
DUPLICATE_WORSE = 7
DUPLICATE_MEMBER = 8
DUPLICATE_KING = 9
duplicate_type_string_lookup = {}
duplicate_type_string_lookup[ DUPLICATE_POTENTIAL ] = 'potential duplicates'
duplicate_type_string_lookup[ DUPLICATE_FALSE_POSITIVE ] = 'not related/false positive'
duplicate_type_string_lookup[ DUPLICATE_SAME_QUALITY ] = 'same quality'
duplicate_type_string_lookup[ DUPLICATE_ALTERNATE ] = 'alternates'
duplicate_type_string_lookup[ DUPLICATE_BETTER ] = 'this is better'
duplicate_type_string_lookup[ DUPLICATE_SMALLER_BETTER ] = 'smaller hash_id is better'
duplicate_type_string_lookup[ DUPLICATE_LARGER_BETTER ] = 'larger hash_id is better'
duplicate_type_string_lookup[ DUPLICATE_WORSE ] = 'this is worse'
duplicate_type_string_lookup[ DUPLICATE_MEMBER ] = 'duplicates'
duplicate_type_string_lookup[ DUPLICATE_KING ] = 'the best quality duplicate'
ENCODING_RAW = 0
ENCODING_HEX = 1
ENCODING_BASE64 = 2
encoding_string_lookup = {}
encoding_string_lookup[ ENCODING_RAW ] = 'raw bytes'
encoding_string_lookup[ ENCODING_HEX ] = 'hexadecimal'
encoding_string_lookup[ ENCODING_BASE64 ] = 'base64'
IMPORT_FOLDER_TYPE_DELETE = 0
IMPORT_FOLDER_TYPE_SYNCHRONISE = 1
EXPORT_FOLDER_TYPE_REGULAR = 0
EXPORT_FOLDER_TYPE_SYNCHRONISE = 1
HAMMING_EXACT_MATCH = 0
HAMMING_VERY_SIMILAR = 2
HAMMING_SIMILAR = 4
HAMMING_SPECULATIVE = 8
hamming_string_lookup = {}
hamming_string_lookup[ HAMMING_EXACT_MATCH ] = 'exact match'
hamming_string_lookup[ HAMMING_VERY_SIMILAR ] = 'very similar'
hamming_string_lookup[ HAMMING_SIMILAR ] = 'similar'
hamming_string_lookup[ HAMMING_SPECULATIVE ] = 'speculative'
HYDRUS_CLIENT = 0
HYDRUS_SERVER = 1
HYDRUS_TEST = 2
MAINTENANCE_IDLE = 0
MAINTENANCE_SHUTDOWN = 1
MAINTENANCE_FORCED = 2
GET_DATA = 0
POST_DATA = 1
POST_PETITIONS = 2
RESOLVE_PETITIONS = 3
MANAGE_USERS = 4
GENERAL_ADMIN = 5
EDIT_SERVICES = 6
UNKNOWN_PERMISSION = 7
CREATABLE_PERMISSIONS = [ GET_DATA, POST_DATA, POST_PETITIONS, RESOLVE_PETITIONS, MANAGE_USERS, GENERAL_ADMIN ]
ADMIN_PERMISSIONS = [ RESOLVE_PETITIONS, MANAGE_USERS, GENERAL_ADMIN, EDIT_SERVICES ]
permissions_string_lookup = {}
permissions_string_lookup[ GET_DATA ] = 'get data'
permissions_string_lookup[ POST_DATA ] = 'post data'
permissions_string_lookup[ POST_PETITIONS ] = 'post petitions'
permissions_string_lookup[ RESOLVE_PETITIONS ] = 'resolve petitions'
permissions_string_lookup[ MANAGE_USERS ] = 'manage users'
permissions_string_lookup[ GENERAL_ADMIN ] = 'general administration'
permissions_string_lookup[ EDIT_SERVICES ] = 'edit services'
permissions_string_lookup[ UNKNOWN_PERMISSION ] = 'unknown'
# new permissions
PERMISSION_ACTION_PETITION = 0
PERMISSION_ACTION_CREATE = 1
PERMISSION_ACTION_OVERRULE = 2
permission_pair_string_lookup = {}
permission_pair_string_lookup[ ( CONTENT_TYPE_ACCOUNTS, None ) ] = 'cannot change accounts'
permission_pair_string_lookup[ ( CONTENT_TYPE_ACCOUNTS, PERMISSION_ACTION_CREATE ) ] = 'can create accounts'
permission_pair_string_lookup[ ( CONTENT_TYPE_ACCOUNTS, PERMISSION_ACTION_OVERRULE ) ] = 'can manage accounts completely'
permission_pair_string_lookup[ ( CONTENT_TYPE_ACCOUNT_TYPES, None ) ] = 'cannot change account types'
permission_pair_string_lookup[ ( CONTENT_TYPE_ACCOUNT_TYPES, PERMISSION_ACTION_OVERRULE ) ] = 'can manage account types completely'
permission_pair_string_lookup[ ( CONTENT_TYPE_SERVICES, None ) ] = 'cannot change services'
permission_pair_string_lookup[ ( CONTENT_TYPE_SERVICES, PERMISSION_ACTION_OVERRULE ) ] = 'can manage services completely'
permission_pair_string_lookup[ ( CONTENT_TYPE_FILES, None ) ] = 'can only download files'
permission_pair_string_lookup[ ( CONTENT_TYPE_FILES, PERMISSION_ACTION_PETITION ) ] = 'can petition to remove existing files'
permission_pair_string_lookup[ ( CONTENT_TYPE_FILES, PERMISSION_ACTION_CREATE ) ] = 'can upload new files and petition existing ones'
permission_pair_string_lookup[ ( CONTENT_TYPE_FILES, PERMISSION_ACTION_OVERRULE ) ] = 'can upload and delete files and process petitions'
permission_pair_string_lookup[ ( CONTENT_TYPE_MAPPINGS, None ) ] = 'can only download mappings'
permission_pair_string_lookup[ ( CONTENT_TYPE_MAPPINGS, PERMISSION_ACTION_PETITION ) ] = 'can petition to remove existing mappings'
permission_pair_string_lookup[ ( CONTENT_TYPE_MAPPINGS, PERMISSION_ACTION_CREATE ) ] = 'can upload new mappings and petition existing ones'
permission_pair_string_lookup[ ( CONTENT_TYPE_MAPPINGS, PERMISSION_ACTION_OVERRULE ) ] = 'can upload and delete mappings and process petitions'
permission_pair_string_lookup[ ( CONTENT_TYPE_TAG_PARENTS, None ) ] = 'can only download tag parents'
permission_pair_string_lookup[ ( CONTENT_TYPE_TAG_PARENTS, PERMISSION_ACTION_PETITION ) ] = 'can petition to add or remove tag parents'
permission_pair_string_lookup[ ( CONTENT_TYPE_TAG_PARENTS, PERMISSION_ACTION_OVERRULE ) ] = 'can upload and delete tag parents and process petitions'
permission_pair_string_lookup[ ( CONTENT_TYPE_TAG_SIBLINGS, None ) ] = 'can only download tag siblings'
permission_pair_string_lookup[ ( CONTENT_TYPE_TAG_SIBLINGS, PERMISSION_ACTION_PETITION ) ] = 'can petition to add or remove tag siblings'
permission_pair_string_lookup[ ( CONTENT_TYPE_TAG_SIBLINGS, PERMISSION_ACTION_OVERRULE ) ] = 'can upload and delete tag siblings and process petitions'
TAG_REPOSITORY = 0
FILE_REPOSITORY = 1
LOCAL_FILE_DOMAIN = 2
MESSAGE_DEPOT = 3
LOCAL_TAG = 5
LOCAL_RATING_NUMERICAL = 6
LOCAL_RATING_LIKE = 7
RATING_NUMERICAL_REPOSITORY = 8
RATING_LIKE_REPOSITORY = 9
COMBINED_TAG = 10
COMBINED_FILE = 11
LOCAL_BOORU = 12
IPFS = 13
LOCAL_FILE_TRASH_DOMAIN = 14
COMBINED_LOCAL_FILE = 15
TEST_SERVICE = 16
LOCAL_NOTES = 17
CLIENT_API_SERVICE = 18
SERVER_ADMIN = 99
NULL_SERVICE = 100
service_string_lookup = {}
service_string_lookup[ TAG_REPOSITORY ] = 'hydrus tag repository'
service_string_lookup[ FILE_REPOSITORY ] = 'hydrus file repository'
service_string_lookup[ LOCAL_FILE_DOMAIN ] = 'local file domain'
service_string_lookup[ LOCAL_FILE_TRASH_DOMAIN ] = 'local trash file domain'
service_string_lookup[ COMBINED_LOCAL_FILE ] = 'virtual combined local file service'
service_string_lookup[ MESSAGE_DEPOT ] = 'hydrus message depot'
service_string_lookup[ LOCAL_TAG ] = 'local tag service'
service_string_lookup[ LOCAL_RATING_NUMERICAL ] = 'local numerical rating service'
service_string_lookup[ LOCAL_RATING_LIKE ] = 'local like/dislike rating service'
service_string_lookup[ RATING_NUMERICAL_REPOSITORY ] = 'hydrus numerical rating repository'
service_string_lookup[ RATING_LIKE_REPOSITORY ] = 'hydrus like/dislike rating repository'
service_string_lookup[ COMBINED_TAG ] = 'virtual combined tag service'
service_string_lookup[ COMBINED_FILE ] = 'virtual combined file service'
service_string_lookup[ LOCAL_BOORU ] = 'client local booru'
service_string_lookup[ CLIENT_API_SERVICE ] = 'client api'
service_string_lookup[ IPFS ] = 'ipfs daemon'
service_string_lookup[ TEST_SERVICE ] = 'test service'
service_string_lookup[ LOCAL_NOTES ] = 'local file notes service'
service_string_lookup[ SERVER_ADMIN ] = 'hydrus server administration service'
service_string_lookup[ NULL_SERVICE ] = 'null service'
LOCAL_FILE_SERVICES = ( LOCAL_FILE_DOMAIN, LOCAL_FILE_TRASH_DOMAIN, COMBINED_LOCAL_FILE )
LOCAL_TAG_SERVICES = ( LOCAL_TAG, )
LOCAL_SERVICES = LOCAL_FILE_SERVICES + LOCAL_TAG_SERVICES + ( LOCAL_RATING_LIKE, LOCAL_RATING_NUMERICAL, LOCAL_BOORU, LOCAL_NOTES, CLIENT_API_SERVICE )
RATINGS_SERVICES = ( LOCAL_RATING_LIKE, LOCAL_RATING_NUMERICAL, RATING_LIKE_REPOSITORY, RATING_NUMERICAL_REPOSITORY )
REPOSITORIES = ( TAG_REPOSITORY, FILE_REPOSITORY, RATING_LIKE_REPOSITORY, RATING_NUMERICAL_REPOSITORY )
RESTRICTED_SERVICES = REPOSITORIES + ( SERVER_ADMIN, MESSAGE_DEPOT )
REMOTE_SERVICES = RESTRICTED_SERVICES + ( IPFS, )
FILE_SERVICES = LOCAL_FILE_SERVICES + ( FILE_REPOSITORY, IPFS )
TAG_SERVICES = ( LOCAL_TAG, TAG_REPOSITORY )
ADDREMOVABLE_SERVICES = ( LOCAL_RATING_LIKE, LOCAL_RATING_NUMERICAL, FILE_REPOSITORY, TAG_REPOSITORY, SERVER_ADMIN, IPFS )
NONEDITABLE_SERVICES = ( LOCAL_FILE_DOMAIN, LOCAL_FILE_TRASH_DOMAIN, LOCAL_TAG, COMBINED_FILE, COMBINED_TAG, COMBINED_LOCAL_FILE )
AUTOCOMPLETE_CACHE_SPECIFIC_FILE_SERVICES = ( LOCAL_FILE_DOMAIN, LOCAL_FILE_TRASH_DOMAIN, COMBINED_LOCAL_FILE, FILE_REPOSITORY )
ALL_SERVICES = REMOTE_SERVICES + LOCAL_SERVICES + ( COMBINED_FILE, COMBINED_TAG )
SERVICES_WITH_THUMBNAILS = [ FILE_REPOSITORY, LOCAL_FILE_DOMAIN ]
DELETE_FILES_PETITION = 0
DELETE_TAG_PETITION = 1
BAN = 0
SUPERBAN = 1
CHANGE_ACCOUNT_TYPE = 2
ADD_TO_EXPIRES = 3
SET_EXPIRES = 4
SCORE_PETITION = 0
SERVICE_INFO_NUM_FILES = 0
SERVICE_INFO_NUM_INBOX = 1
SERVICE_INFO_NUM_LOCAL = 2
SERVICE_INFO_NUM_MAPPINGS = 3
SERVICE_INFO_NUM_DELETED_MAPPINGS = 4
SERVICE_INFO_NUM_DELETED_FILES = 5
SERVICE_INFO_NUM_THUMBNAILS = 6
SERVICE_INFO_NUM_THUMBNAILS_LOCAL = 7
SERVICE_INFO_TOTAL_SIZE = 8
SERVICE_INFO_NUM_NAMESPACES = 9
SERVICE_INFO_NUM_TAGS = 10
SERVICE_INFO_NUM_PENDING = 11
SERVICE_INFO_NUM_CONVERSATIONS = 12
SERVICE_INFO_NUM_UNREAD = 13
SERVICE_INFO_NUM_DRAFTS = 14
SERVICE_INFO_NUM_PENDING_MAPPINGS = 15
SERVICE_INFO_NUM_PETITIONED_MAPPINGS = 16
SERVICE_INFO_NUM_PENDING_FILES = 15
SERVICE_INFO_NUM_PETITIONED_FILES = 16
SERVICE_INFO_NUM_PENDING_TAG_SIBLINGS = 17
SERVICE_INFO_NUM_PETITIONED_TAG_SIBLINGS = 18
SERVICE_INFO_NUM_PENDING_TAG_PARENTS = 19
SERVICE_INFO_NUM_PETITIONED_TAG_PARENTS = 20
SERVICE_INFO_NUM_SHARES = 21
SERVICE_INFO_NUM_VIEWABLE_FILES = 22
SERVICE_UPDATE_DELETE_PENDING = 0
SERVICE_UPDATE_RESET = 1
ADD = 0
DELETE = 1
EDIT = 2
SET = 3
APPROVE = 0
DENY = 1
GET = 0
POST = 1
OPTIONS = 2
query_type_string_lookup = {}
query_type_string_lookup[ GET ] = 'GET'
query_type_string_lookup[ POST ] = 'POST'
query_type_string_lookup[ OPTIONS ] = 'OPTIONS'
APPLICATION_HYDRUS_CLIENT_COLLECTION = 0
IMAGE_JPEG = 1
IMAGE_PNG = 2
IMAGE_GIF = 3
IMAGE_BMP = 4
APPLICATION_FLASH = 5
APPLICATION_YAML = 6
IMAGE_ICON = 7
TEXT_HTML = 8
VIDEO_FLV = 9
APPLICATION_PDF = 10
APPLICATION_ZIP = 11
APPLICATION_HYDRUS_ENCRYPTED_ZIP = 12
AUDIO_MP3 = 13
VIDEO_MP4 = 14
AUDIO_OGG = 15
AUDIO_FLAC = 16
AUDIO_WMA = 17
VIDEO_WMV = 18
UNDETERMINED_WM = 19
VIDEO_MKV = 20
VIDEO_WEBM = 21
APPLICATION_JSON = 22
IMAGE_APNG = 23
UNDETERMINED_PNG = 24
VIDEO_MPEG = 25
VIDEO_MOV = 26
VIDEO_AVI = 27
APPLICATION_HYDRUS_UPDATE_DEFINITIONS = 28
APPLICATION_HYDRUS_UPDATE_CONTENT = 29
TEXT_PLAIN = 30
APPLICATION_RAR = 31
APPLICATION_7Z = 32
IMAGE_WEBP = 33
IMAGE_TIFF = 34
APPLICATION_PSD = 35
APPLICATION_OCTET_STREAM = 100
APPLICATION_UNKNOWN = 101
ALLOWED_MIMES = ( IMAGE_JPEG, IMAGE_PNG, IMAGE_APNG, IMAGE_GIF, IMAGE_BMP, IMAGE_WEBP, IMAGE_TIFF, IMAGE_ICON, APPLICATION_FLASH, VIDEO_AVI, VIDEO_FLV, VIDEO_MOV, VIDEO_MP4, VIDEO_MKV, VIDEO_WEBM, VIDEO_MPEG, APPLICATION_PSD, APPLICATION_PDF, APPLICATION_ZIP, APPLICATION_RAR, APPLICATION_7Z, AUDIO_MP3, AUDIO_OGG, AUDIO_FLAC, AUDIO_WMA, VIDEO_WMV, APPLICATION_HYDRUS_UPDATE_CONTENT, APPLICATION_HYDRUS_UPDATE_DEFINITIONS )
SEARCHABLE_MIMES = ( IMAGE_JPEG, IMAGE_PNG, IMAGE_APNG, IMAGE_GIF, IMAGE_WEBP, IMAGE_TIFF, IMAGE_ICON, APPLICATION_FLASH, VIDEO_AVI, VIDEO_FLV, VIDEO_MOV, VIDEO_MP4, VIDEO_MKV, VIDEO_WEBM, VIDEO_MPEG, APPLICATION_PSD, APPLICATION_PDF, APPLICATION_ZIP, APPLICATION_RAR, APPLICATION_7Z, AUDIO_MP3, AUDIO_OGG, AUDIO_FLAC, AUDIO_WMA, VIDEO_WMV )
DECOMPRESSION_BOMB_IMAGES = ( IMAGE_JPEG, IMAGE_PNG )
IMAGES = ( IMAGE_JPEG, IMAGE_PNG, IMAGE_APNG, IMAGE_GIF, IMAGE_BMP, IMAGE_WEBP, IMAGE_TIFF, IMAGE_ICON )
AUDIO = ( AUDIO_MP3, AUDIO_OGG, AUDIO_FLAC, AUDIO_WMA )
VIDEO = ( VIDEO_AVI, VIDEO_FLV, VIDEO_MOV, VIDEO_MP4, VIDEO_WMV, VIDEO_MKV, VIDEO_WEBM, VIDEO_MPEG )
NATIVE_VIDEO = ( IMAGE_APNG, VIDEO_AVI, VIDEO_FLV, VIDEO_MOV, VIDEO_MP4, VIDEO_WMV, VIDEO_MKV, VIDEO_WEBM, VIDEO_MPEG )
APPLICATIONS = ( APPLICATION_FLASH, APPLICATION_PSD, APPLICATION_PDF, APPLICATION_ZIP, APPLICATION_RAR, APPLICATION_7Z )
NOISY_MIMES = tuple( [ APPLICATION_FLASH ] + list( AUDIO ) + list( VIDEO ) )
ARCHIVES = ( APPLICATION_ZIP, APPLICATION_HYDRUS_ENCRYPTED_ZIP, APPLICATION_RAR, APPLICATION_7Z )
MIMES_WITH_THUMBNAILS = ( APPLICATION_FLASH, IMAGE_JPEG, IMAGE_PNG, IMAGE_APNG, IMAGE_GIF, IMAGE_BMP, IMAGE_WEBP, IMAGE_TIFF, IMAGE_ICON, VIDEO_AVI, VIDEO_FLV, VIDEO_MOV, VIDEO_MP4, VIDEO_WMV, VIDEO_MKV, VIDEO_WEBM, VIDEO_MPEG )
HYDRUS_UPDATE_FILES = ( APPLICATION_HYDRUS_UPDATE_DEFINITIONS, APPLICATION_HYDRUS_UPDATE_CONTENT )
MIMES_WE_CAN_PHASH = ( IMAGE_JPEG, IMAGE_PNG, IMAGE_WEBP, IMAGE_TIFF, IMAGE_ICON )
mime_enum_lookup = {}
mime_enum_lookup[ 'collection' ] = APPLICATION_HYDRUS_CLIENT_COLLECTION
mime_enum_lookup[ 'image/jpe' ] = IMAGE_JPEG
mime_enum_lookup[ 'image/jpeg' ] = IMAGE_JPEG
mime_enum_lookup[ 'image/jpg' ] = IMAGE_JPEG
mime_enum_lookup[ 'image/x-png' ] = IMAGE_PNG
mime_enum_lookup[ 'image/png' ] = IMAGE_PNG
mime_enum_lookup[ 'image/apng' ] = IMAGE_APNG
mime_enum_lookup[ 'image/gif' ] = IMAGE_GIF
mime_enum_lookup[ 'image/bmp' ] = IMAGE_BMP
mime_enum_lookup[ 'image/webp' ] = IMAGE_WEBP
mime_enum_lookup[ 'image/tiff' ] = IMAGE_TIFF
mime_enum_lookup[ 'image/x-icon' ] = IMAGE_ICON
mime_enum_lookup[ 'image/vnd.microsoft.icon' ] = IMAGE_ICON
mime_enum_lookup[ 'image' ] = IMAGES
mime_enum_lookup[ 'application/x-shockwave-flash' ] = APPLICATION_FLASH
mime_enum_lookup[ 'application/x-photoshop' ] = APPLICATION_PSD
mime_enum_lookup[ 'image/vnd.adobe.photoshop' ] = APPLICATION_PSD
mime_enum_lookup[ 'application/octet-stream' ] = APPLICATION_OCTET_STREAM
mime_enum_lookup[ 'application/x-yaml' ] = APPLICATION_YAML
mime_enum_lookup[ 'PDF document' ] = APPLICATION_PDF
mime_enum_lookup[ 'application/pdf' ] = APPLICATION_PDF
mime_enum_lookup[ 'application/zip' ] = APPLICATION_ZIP
mime_enum_lookup[ 'application/vnd.rar' ] = APPLICATION_RAR
mime_enum_lookup[ 'application/x-7z-compressed' ] = APPLICATION_7Z
mime_enum_lookup[ 'application/json' ] = APPLICATION_JSON
mime_enum_lookup[ 'application/hydrus-encrypted-zip' ] = APPLICATION_HYDRUS_ENCRYPTED_ZIP
mime_enum_lookup[ 'application/hydrus-update-content' ] = APPLICATION_HYDRUS_UPDATE_CONTENT
mime_enum_lookup[ 'application/hydrus-update-definitions' ] = APPLICATION_HYDRUS_UPDATE_DEFINITIONS
mime_enum_lookup[ 'application' ] = APPLICATIONS
mime_enum_lookup[ 'audio/mp3' ] = AUDIO_MP3
mime_enum_lookup[ 'audio/ogg' ] = AUDIO_OGG
mime_enum_lookup[ 'audio/flac' ] = AUDIO_FLAC
mime_enum_lookup[ 'audio/x-ms-wma' ] = AUDIO_WMA
mime_enum_lookup[ 'text/html' ] = TEXT_HTML
mime_enum_lookup[ 'text/plain' ] = TEXT_PLAIN
mime_enum_lookup[ 'video/x-msvideo' ] = VIDEO_AVI
mime_enum_lookup[ 'video/x-flv' ] = VIDEO_FLV
mime_enum_lookup[ 'video/quicktime' ] = VIDEO_MOV
mime_enum_lookup[ 'video/mp4' ] = VIDEO_MP4
mime_enum_lookup[ 'video/mpeg' ] = VIDEO_MPEG
mime_enum_lookup[ 'video/x-ms-wmv' ] = VIDEO_WMV
mime_enum_lookup[ 'video/x-matroska' ] = VIDEO_MKV
mime_enum_lookup[ 'video/webm' ] = VIDEO_WEBM
mime_enum_lookup[ 'video' ] = VIDEO
mime_enum_lookup[ 'unknown mime' ] = APPLICATION_UNKNOWN
mime_string_lookup = {}
mime_string_lookup[ APPLICATION_HYDRUS_CLIENT_COLLECTION ] = 'collection'
mime_string_lookup[ IMAGE_JPEG ] = 'image/jpg'
mime_string_lookup[ IMAGE_PNG ] = 'image/png'
mime_string_lookup[ IMAGE_APNG ] = 'image/apng'
mime_string_lookup[ IMAGE_GIF ] = 'image/gif'
mime_string_lookup[ IMAGE_BMP ] = 'image/bmp'
mime_string_lookup[ IMAGE_WEBP ] = 'image/webp'
mime_string_lookup[ IMAGE_TIFF ] = 'image/tiff'
mime_string_lookup[ IMAGE_ICON ] = 'image/x-icon'
mime_string_lookup[ IMAGES ] = 'image'
mime_string_lookup[ APPLICATION_FLASH ] = 'application/x-shockwave-flash'
mime_string_lookup[ APPLICATION_OCTET_STREAM ] = 'application/octet-stream'
mime_string_lookup[ APPLICATION_YAML ] = 'application/x-yaml'
mime_string_lookup[ APPLICATION_JSON ] = 'application/json'
mime_string_lookup[ APPLICATION_PDF ] = 'application/pdf'
mime_string_lookup[ APPLICATION_PSD ] = 'application/x-photoshop'
mime_string_lookup[ APPLICATION_ZIP ] = 'application/zip'
mime_string_lookup[ APPLICATION_RAR ] = 'application/vnd.rar'
mime_string_lookup[ APPLICATION_7Z ] = 'application/x-7z-compressed'
mime_string_lookup[ APPLICATION_HYDRUS_ENCRYPTED_ZIP ] = 'application/hydrus-encrypted-zip'
mime_string_lookup[ APPLICATION_HYDRUS_UPDATE_CONTENT ] = 'application/hydrus-update-content'
mime_string_lookup[ APPLICATION_HYDRUS_UPDATE_DEFINITIONS ] = 'application/hydrus-update-definitions'
mime_string_lookup[ APPLICATIONS ] = 'application'
mime_string_lookup[ AUDIO_MP3 ] = 'audio/mp3'
mime_string_lookup[ AUDIO_OGG ] = 'audio/ogg'
mime_string_lookup[ AUDIO_FLAC ] = 'audio/flac'
mime_string_lookup[ AUDIO_WMA ] = 'audio/x-ms-wma'
mime_string_lookup[ AUDIO ] = 'audio'
mime_string_lookup[ TEXT_HTML ] = 'text/html'
mime_string_lookup[ TEXT_PLAIN ] = 'text/plain'
mime_string_lookup[ VIDEO_AVI ] = 'video/x-msvideo'
mime_string_lookup[ VIDEO_FLV ] = 'video/x-flv'
mime_string_lookup[ VIDEO_MOV ] = 'video/quicktime'
mime_string_lookup[ VIDEO_MP4 ] = 'video/mp4'
mime_string_lookup[ VIDEO_MPEG ] = 'video/mpeg'
mime_string_lookup[ VIDEO_WMV ] = 'video/x-ms-wmv'
mime_string_lookup[ VIDEO_MKV ] = 'video/x-matroska'
mime_string_lookup[ VIDEO_WEBM ] = 'video/webm'
mime_string_lookup[ VIDEO ] = 'video'
mime_string_lookup[ UNDETERMINED_WM ] = 'audio/x-ms-wma or video/x-ms-wmv'
mime_string_lookup[ APPLICATION_UNKNOWN ] = 'unknown mime'
mime_ext_lookup = {}
mime_ext_lookup[ APPLICATION_HYDRUS_CLIENT_COLLECTION ] = '.collection'
mime_ext_lookup[ IMAGE_JPEG ] = '.jpg'
mime_ext_lookup[ IMAGE_PNG ] = '.png'
mime_ext_lookup[ IMAGE_APNG ] = '.png'
mime_ext_lookup[ IMAGE_GIF ] = '.gif'
mime_ext_lookup[ IMAGE_BMP ] = '.bmp'
mime_ext_lookup[ IMAGE_WEBP ] = '.webp'
mime_ext_lookup[ IMAGE_TIFF ] = '.tiff'
mime_ext_lookup[ IMAGE_ICON ] = '.ico'
mime_ext_lookup[ APPLICATION_FLASH ] = '.swf'
mime_ext_lookup[ APPLICATION_OCTET_STREAM ] = '.bin'
mime_ext_lookup[ APPLICATION_YAML ] = '.yaml'
mime_ext_lookup[ APPLICATION_JSON ] = '.json'
mime_ext_lookup[ APPLICATION_PDF ] = '.pdf'
mime_ext_lookup[ APPLICATION_PSD ] = '.psd'
mime_ext_lookup[ APPLICATION_ZIP ] = '.zip'
mime_ext_lookup[ APPLICATION_RAR ] = '.rar'
mime_ext_lookup[ APPLICATION_7Z ] = '.7z'
mime_ext_lookup[ APPLICATION_HYDRUS_ENCRYPTED_ZIP ] = '.zip.encrypted'
mime_ext_lookup[ APPLICATION_HYDRUS_UPDATE_CONTENT ] = ''
mime_ext_lookup[ APPLICATION_HYDRUS_UPDATE_DEFINITIONS ] = ''
mime_ext_lookup[ AUDIO_MP3 ] = '.mp3'
mime_ext_lookup[ AUDIO_OGG ] = '.ogg'
mime_ext_lookup[ AUDIO_FLAC ] = '.flac'
mime_ext_lookup[ AUDIO_WMA ] = '.wma'
mime_ext_lookup[ TEXT_HTML ] = '.html'
mime_ext_lookup[ TEXT_PLAIN ] = '.txt'
mime_ext_lookup[ VIDEO_AVI ] = '.avi'
mime_ext_lookup[ VIDEO_FLV ] = '.flv'
mime_ext_lookup[ VIDEO_MOV ] = '.mov'
mime_ext_lookup[ VIDEO_MP4 ] = '.mp4'
mime_ext_lookup[ VIDEO_MPEG ] = '.mpeg'
mime_ext_lookup[ VIDEO_WMV ] = '.wmv'
mime_ext_lookup[ VIDEO_MKV ] = '.mkv'
mime_ext_lookup[ VIDEO_WEBM ] = '.webm'
mime_ext_lookup[ APPLICATION_UNKNOWN ] = ''
#mime_ext_lookup[ 'application/x-rar-compressed' ] = '.rar'
ALLOWED_MIME_EXTENSIONS = [ mime_ext_lookup[ mime ] for mime in ALLOWED_MIMES ]
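# A small illustration, not part of the original module, of how the three mime tables
# above compose: a reported mime string maps to an enum, which maps back to a canonical
# string and a file extension.
# mime_enum_lookup[ 'image/x-png' ] == IMAGE_PNG
# mime_string_lookup[ IMAGE_PNG ] == 'image/png'
# mime_ext_lookup[ IMAGE_PNG ] == '.png'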
PREDICATE_TYPE_TAG = 0
PREDICATE_TYPE_NAMESPACE = 1
PREDICATE_TYPE_PARENT = 2
PREDICATE_TYPE_WILDCARD = 3
PREDICATE_TYPE_SYSTEM_EVERYTHING = 4
PREDICATE_TYPE_SYSTEM_INBOX = 5
PREDICATE_TYPE_SYSTEM_ARCHIVE = 6
PREDICATE_TYPE_SYSTEM_UNTAGGED = 7
PREDICATE_TYPE_SYSTEM_NUM_TAGS = 8
PREDICATE_TYPE_SYSTEM_LIMIT = 9
PREDICATE_TYPE_SYSTEM_SIZE = 10
PREDICATE_TYPE_SYSTEM_AGE = 11
PREDICATE_TYPE_SYSTEM_HASH = 12
PREDICATE_TYPE_SYSTEM_WIDTH = 13
PREDICATE_TYPE_SYSTEM_HEIGHT = 14
PREDICATE_TYPE_SYSTEM_RATIO = 15
PREDICATE_TYPE_SYSTEM_DURATION = 16
PREDICATE_TYPE_SYSTEM_MIME = 17
PREDICATE_TYPE_SYSTEM_RATING = 18
PREDICATE_TYPE_SYSTEM_SIMILAR_TO = 19
PREDICATE_TYPE_SYSTEM_LOCAL = 20
PREDICATE_TYPE_SYSTEM_NOT_LOCAL = 21
PREDICATE_TYPE_SYSTEM_NUM_WORDS = 22
PREDICATE_TYPE_SYSTEM_FILE_SERVICE = 23
PREDICATE_TYPE_SYSTEM_NUM_PIXELS = 24
PREDICATE_TYPE_SYSTEM_DIMENSIONS = 25
PREDICATE_TYPE_SYSTEM_DUPLICATE_RELATIONSHIP_COUNT = 26
PREDICATE_TYPE_SYSTEM_TAG_AS_NUMBER = 27
PREDICATE_TYPE_SYSTEM_KNOWN_URLS = 28
PREDICATE_TYPE_SYSTEM_FILE_VIEWING_STATS = 29
PREDICATE_TYPE_OR_CONTAINER = 30
SYSTEM_PREDICATES = [ PREDICATE_TYPE_SYSTEM_EVERYTHING, PREDICATE_TYPE_SYSTEM_INBOX, PREDICATE_TYPE_SYSTEM_ARCHIVE, PREDICATE_TYPE_SYSTEM_UNTAGGED, PREDICATE_TYPE_SYSTEM_NUM_TAGS, PREDICATE_TYPE_SYSTEM_LIMIT, PREDICATE_TYPE_SYSTEM_SIZE, PREDICATE_TYPE_SYSTEM_AGE, PREDICATE_TYPE_SYSTEM_HASH, PREDICATE_TYPE_SYSTEM_WIDTH, PREDICATE_TYPE_SYSTEM_HEIGHT, PREDICATE_TYPE_SYSTEM_RATIO, PREDICATE_TYPE_SYSTEM_DURATION, PREDICATE_TYPE_SYSTEM_MIME, PREDICATE_TYPE_SYSTEM_RATING, PREDICATE_TYPE_SYSTEM_SIMILAR_TO, PREDICATE_TYPE_SYSTEM_LOCAL, PREDICATE_TYPE_SYSTEM_NOT_LOCAL, PREDICATE_TYPE_SYSTEM_NUM_WORDS, PREDICATE_TYPE_SYSTEM_FILE_SERVICE, PREDICATE_TYPE_SYSTEM_NUM_PIXELS, PREDICATE_TYPE_SYSTEM_DIMENSIONS, PREDICATE_TYPE_SYSTEM_TAG_AS_NUMBER, PREDICATE_TYPE_SYSTEM_DUPLICATE_RELATIONSHIP_COUNT, PREDICATE_TYPE_SYSTEM_KNOWN_URLS, PREDICATE_TYPE_SYSTEM_FILE_VIEWING_STATS ]
SITE_TYPE_DEVIANT_ART = 0
SITE_TYPE_GIPHY = 1
SITE_TYPE_PIXIV = 2
SITE_TYPE_BOORU = 3
SITE_TYPE_TUMBLR = 4
SITE_TYPE_HENTAI_FOUNDRY = 5
SITE_TYPE_NEWGROUNDS = 6
SITE_TYPE_NEWGROUNDS_MOVIES = 7
SITE_TYPE_NEWGROUNDS_GAMES = 8
SITE_TYPE_HENTAI_FOUNDRY_ARTIST = 9
SITE_TYPE_HENTAI_FOUNDRY_ARTIST_PICTURES = 10
SITE_TYPE_HENTAI_FOUNDRY_ARTIST_SCRAPS = 11
SITE_TYPE_HENTAI_FOUNDRY_TAGS = 12
SITE_TYPE_PIXIV_ARTIST_ID = 13
SITE_TYPE_PIXIV_TAG = 14
SITE_TYPE_DEFAULT = 15
SITE_TYPE_WATCHER = 16
site_type_string_lookup = {}
site_type_string_lookup[ SITE_TYPE_DEFAULT ] = 'default'
site_type_string_lookup[ SITE_TYPE_BOORU ] = 'booru'
site_type_string_lookup[ SITE_TYPE_DEVIANT_ART ] = 'deviant art'
site_type_string_lookup[ SITE_TYPE_GIPHY ] = 'giphy'
site_type_string_lookup[ SITE_TYPE_HENTAI_FOUNDRY ] = 'hentai foundry'
site_type_string_lookup[ SITE_TYPE_HENTAI_FOUNDRY_ARTIST ] = 'hentai foundry artist'
site_type_string_lookup[ SITE_TYPE_HENTAI_FOUNDRY_ARTIST_PICTURES ] = 'hentai foundry artist pictures'
site_type_string_lookup[ SITE_TYPE_HENTAI_FOUNDRY_ARTIST_SCRAPS ] = 'hentai foundry artist scraps'
site_type_string_lookup[ SITE_TYPE_HENTAI_FOUNDRY_TAGS ] = 'hentai foundry tags'
site_type_string_lookup[ SITE_TYPE_NEWGROUNDS ] = 'newgrounds'
site_type_string_lookup[ SITE_TYPE_NEWGROUNDS_GAMES ] = 'newgrounds games'
site_type_string_lookup[ SITE_TYPE_NEWGROUNDS_MOVIES ] = 'newgrounds movies'
site_type_string_lookup[ SITE_TYPE_PIXIV ] = 'pixiv'
site_type_string_lookup[ SITE_TYPE_PIXIV_ARTIST_ID ] = 'pixiv artist id'
site_type_string_lookup[ SITE_TYPE_PIXIV_TAG ] = 'pixiv tag'
site_type_string_lookup[ SITE_TYPE_TUMBLR ] = 'tumblr'
site_type_string_lookup[ SITE_TYPE_WATCHER ] = 'watcher'
TIMESTAMP_TYPE_SOURCE = 0
TIMEZONE_GMT = 0
TIMEZONE_LOCAL = 1
TIMEZONE_OFFSET = 2
URL_TYPE_POST = 0
URL_TYPE_API = 1
URL_TYPE_FILE = 2
URL_TYPE_GALLERY = 3
URL_TYPE_WATCHABLE = 4
URL_TYPE_UNKNOWN = 5
URL_TYPE_NEXT = 6
URL_TYPE_DESIRED = 7
URL_TYPE_SOURCE = 8
url_type_string_lookup = {}
url_type_string_lookup[ URL_TYPE_POST ] = 'post url'
url_type_string_lookup[ URL_TYPE_API ] = 'api url'
url_type_string_lookup[ URL_TYPE_FILE ] = 'file url'
url_type_string_lookup[ URL_TYPE_GALLERY ] = 'gallery url'
url_type_string_lookup[ URL_TYPE_WATCHABLE ] = 'watchable url'
url_type_string_lookup[ URL_TYPE_UNKNOWN ] = 'unknown url'
url_type_string_lookup[ URL_TYPE_NEXT ] = 'next page url'
url_type_string_lookup[ URL_TYPE_DESIRED ] = 'downloadable/pursuable url'
url_type_string_lookup[ URL_TYPE_SOURCE ] = 'associable/source url'
# default options
DEFAULT_SERVER_ADMIN_PORT = 45870
DEFAULT_SERVICE_PORT = 45871
SERVER_ADMIN_KEY = b'server admin'
def construct_python_tuple( self, node ):
    return tuple( self.construct_sequence( node ) )

def represent_python_tuple( self, data ):
    return self.represent_sequence( 'tag:yaml.org,2002:python/tuple', data )
yaml.SafeLoader.add_constructor( 'tag:yaml.org,2002:python/tuple', construct_python_tuple )
yaml.SafeDumper.add_representer( tuple, represent_python_tuple )
with open('config.yaml') as f:
config = yaml.safe_load(f.read())
MYSQL_HOST = config['mysql_db']['host']
MYSQL_USER = config['mysql_db']['user']
MYSQL_PASSWORD = config['mysql_db']['password']
MYSQL_DB = config['mysql_db']['database']
JSON_PATH = config['json_path']
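# A minimal round-trip sketch, not part of the original module, showing the effect of the
# tuple constructor/representer registered above: tuples now survive safe_dump/safe_load
# as tuples (without the registration safe_dump writes them as plain YAML sequences and
# safe_load hands back lists).
# yaml.safe_load( yaml.safe_dump( ( 1, 2, 3 ) ) )   # -> ( 1, 2, 3 )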
| 40.953642
| 868
| 0.810511
|
9009b876e8ef7329095c6051afbc5a7fafd8565b
| 5,598
|
py
|
Python
|
Lib/wsgiref/util.py
|
jasonadu/Python-2.5
|
93e24b88564de120b1296165b5c55975fdcb8a3c
|
[
"PSF-2.0"
] | 69
|
2015-01-16T13:12:55.000Z
|
2022-02-14T12:55:27.000Z
|
Lib/wsgiref/util.py
|
jasonadu/Python-2.5
|
93e24b88564de120b1296165b5c55975fdcb8a3c
|
[
"PSF-2.0"
] | 3
|
2019-07-19T18:02:02.000Z
|
2021-04-25T06:35:42.000Z
|
Lib/wsgiref/util.py
|
jasonadu/Python-2.5
|
93e24b88564de120b1296165b5c55975fdcb8a3c
|
[
"PSF-2.0"
] | 32
|
2015-02-06T12:10:32.000Z
|
2019-06-18T03:21:36.000Z
|
"""Miscellaneous WSGI-related Utilities"""
import posixpath
__all__ = [
'FileWrapper', 'guess_scheme', 'application_uri', 'request_uri',
'shift_path_info', 'setup_testing_defaults',
]
class FileWrapper:
"""Wrapper to convert file-like objects to iterables"""
def __init__(self, filelike, blksize=8192):
self.filelike = filelike
self.blksize = blksize
if hasattr(filelike,'close'):
self.close = filelike.close
def __getitem__(self,key):
data = self.filelike.read(self.blksize)
if data:
return data
raise IndexError
def __iter__(self):
return self
def next(self):
data = self.filelike.read(self.blksize)
if data:
return data
raise StopIteration
def guess_scheme(environ):
"""Return a guess for whether 'wsgi.url_scheme' should be 'http' or 'https'
"""
if environ.get("HTTPS") in ('yes','on','1'):
return 'https'
else:
return 'http'
def application_uri(environ):
"""Return the application's base URI (no PATH_INFO or QUERY_STRING)"""
url = environ['wsgi.url_scheme']+'://'
from urllib import quote
if environ.get('HTTP_HOST'):
url += environ['HTTP_HOST']
else:
url += environ['SERVER_NAME']
if environ['wsgi.url_scheme'] == 'https':
if environ['SERVER_PORT'] != '443':
url += ':' + environ['SERVER_PORT']
else:
if environ['SERVER_PORT'] != '80':
url += ':' + environ['SERVER_PORT']
url += quote(environ.get('SCRIPT_NAME') or '/')
return url
def request_uri(environ, include_query=1):
"""Return the full request URI, optionally including the query string"""
url = application_uri(environ)
from urllib import quote
path_info = quote(environ.get('PATH_INFO',''))
if not environ.get('SCRIPT_NAME'):
url += path_info[1:]
else:
url += path_info
if include_query and environ.get('QUERY_STRING'):
url += '?' + environ['QUERY_STRING']
return url
def shift_path_info(environ):
"""Shift a name from PATH_INFO to SCRIPT_NAME, returning it
If there are no remaining path segments in PATH_INFO, return None.
Note: 'environ' is modified in-place; use a copy if you need to keep
the original PATH_INFO or SCRIPT_NAME.
Note: when PATH_INFO is just a '/', this returns '' and appends a trailing
'/' to SCRIPT_NAME, even though empty path segments are normally ignored,
and SCRIPT_NAME doesn't normally end in a '/'. This is intentional
behavior, to ensure that an application can tell the difference between
'/x' and '/x/' when traversing to objects.
"""
path_info = environ.get('PATH_INFO','')
if not path_info:
return None
path_parts = path_info.split('/')
path_parts[1:-1] = [p for p in path_parts[1:-1] if p and p<>'.']
name = path_parts[1]
del path_parts[1]
script_name = environ.get('SCRIPT_NAME','')
script_name = posixpath.normpath(script_name+'/'+name)
if script_name.endswith('/'):
script_name = script_name[:-1]
if not name and not script_name.endswith('/'):
script_name += '/'
environ['SCRIPT_NAME'] = script_name
environ['PATH_INFO'] = '/'.join(path_parts)
# Special case: '/.' on PATH_INFO doesn't get stripped,
# because we don't strip the last element of PATH_INFO
# if there's only one path part left. Instead of fixing this
# above, we fix it here so that PATH_INFO gets normalized to
# an empty string in the environ.
if name=='.':
name = None
return name
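# A small worked example, not part of the original module, of the shifting behaviour
# described in the docstring above:
#
# environ = {'SCRIPT_NAME': '', 'PATH_INFO': '/app/users'}
# shift_path_info(environ)  # -> 'app'   ; environ: SCRIPT_NAME='/app', PATH_INFO='/users'
# shift_path_info(environ)  # -> 'users' ; environ: SCRIPT_NAME='/app/users', PATH_INFO=''
# shift_path_info(environ)  # -> None (no path segments left)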
def setup_testing_defaults(environ):
"""Update 'environ' with trivial defaults for testing purposes
This adds various parameters required for WSGI, including HTTP_HOST,
SERVER_NAME, SERVER_PORT, REQUEST_METHOD, SCRIPT_NAME, PATH_INFO,
and all of the wsgi.* variables. It only supplies default values,
and does not replace any existing settings for these variables.
This routine is intended to make it easier for unit tests of WSGI
servers and applications to set up dummy environments. It should *not*
be used by actual WSGI servers or applications, since the data is fake!
"""
environ.setdefault('SERVER_NAME','127.0.0.1')
environ.setdefault('SERVER_PROTOCOL','HTTP/1.0')
environ.setdefault('HTTP_HOST',environ['SERVER_NAME'])
environ.setdefault('REQUEST_METHOD','GET')
if 'SCRIPT_NAME' not in environ and 'PATH_INFO' not in environ:
environ.setdefault('SCRIPT_NAME','')
environ.setdefault('PATH_INFO','/')
environ.setdefault('wsgi.version', (1,0))
environ.setdefault('wsgi.run_once', 0)
environ.setdefault('wsgi.multithread', 0)
environ.setdefault('wsgi.multiprocess', 0)
from StringIO import StringIO
environ.setdefault('wsgi.input', StringIO(""))
environ.setdefault('wsgi.errors', StringIO())
environ.setdefault('wsgi.url_scheme',guess_scheme(environ))
if environ['wsgi.url_scheme']=='http':
environ.setdefault('SERVER_PORT', '80')
elif environ['wsgi.url_scheme']=='https':
environ.setdefault('SERVER_PORT', '443')
_hoppish = {
'connection':1, 'keep-alive':1, 'proxy-authenticate':1,
'proxy-authorization':1, 'te':1, 'trailers':1, 'transfer-encoding':1,
'upgrade':1
}.has_key
def is_hop_by_hop(header_name):
"""Return true if 'header_name' is an HTTP/1.1 "Hop-by-Hop" header"""
return _hoppish(header_name.lower())
#
| 27.174757
| 79
| 0.650947
|
6379780f573369cb05cfcd73318f05a55e3432c6
| 9,658
|
py
|
Python
|
venvlink/__init__.py
|
np-8/venvlink
|
099ec9845a51697fb15b40f483cd69d030efd518
|
[
"MIT"
] | 4
|
2021-01-31T12:55:07.000Z
|
2021-12-20T20:23:02.000Z
|
venvlink/__init__.py
|
np-8/venvlink
|
099ec9845a51697fb15b40f483cd69d030efd518
|
[
"MIT"
] | 9
|
2020-12-28T13:46:17.000Z
|
2022-01-05T17:10:44.000Z
|
venvlink/__init__.py
|
np-8/venvlink
|
099ec9845a51697fb15b40f483cd69d030efd518
|
[
"MIT"
] | 1
|
2020-12-24T00:40:14.000Z
|
2020-12-24T00:40:14.000Z
|
import logging
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s %(name)-12s %(levelname)-8s %(message)s",
datefmt="%m-%d %H:%M",
)
from functools import partial
import os
from pathlib import Path
import subprocess
import sys
import shutil
from venvlink.__version__ import __version__
from venvlink.config import Configuration
from venvlink.exceptions import UserAborted
from venvlink.utils import is_in_accepted_values, get_input
logger = logging.getLogger(__name__)
def get_scripts_dir(venv_dir):
"""
Parameters
---------
venv_dir: pathlib.Path
The virtual environment root dir.
"""
return venv_dir / "Scripts"
def get_from_scripts_dir(venv_dir, file="activate"):
return get_scripts_dir(venv_dir) / file
def get_from_venv_dir(venv_dir, file="pyvenv.cfg"):
return venv_dir / file
def get_venvlink_header(entering=True, comment_symbol='#', shell='Powershell'):
tmp = 'entered' if entering else 'left'
text = f"{comment_symbol} This file is automatically generated with venvlink v.{__version__}\n"
text += f"{comment_symbol} The contents of this file are automatically executed\n"
text += f"{comment_symbol} when the project folder is {tmp} with {shell}.\n"
return text
def get_venvlink_env_text():
text = get_venvlink_header(entering=True, comment_symbol='#')
text += '\n' + r'& "$workdir\Scripts\Activate.ps1"'
return text
def get_venvlink_leave_text():
text = get_venvlink_header(entering=False, comment_symbol='#')
text += "\ntry { & deactivate }\n"
text += "catch { }"
return text
def get_activate_ps_text(venvdir_src, real_activate_ps1):
return f"""
Write-Host "[venvlink]" -NoNewline -ForegroundColor Cyan
Write-Host " activate " -NoNewline -ForegroundColor White
Write-Host "{venvdir_src}" -NoNewline -ForegroundColor White
& '{real_activate_ps1}'"""
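# Illustrative helper (not part of venvlink): show the generated
# auto-activation snippets that _create_venv_to_workdir later writes into
# venv/venvlink-autoenv.ps1 and venv/venvlink-autoenv.leave.ps1.
def _print_autoenv_snippets():
    print(get_venvlink_env_text())  # header + '& "$workdir\Scripts\Activate.ps1"'
    print(get_venvlink_leave_text())  # header + 'try { & deactivate }' / 'catch { }'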
class VenvLink:
def __init__(self, config_file=None):
config_file = Path(config_file) if config_file is not None else None
self.config = Configuration(config_file)
def delete_env(self, project_name):
"""
        Removes a virtual environment created with create_venv.
Parameters
----------
project_name: str
            The project name for which the virtual environment
was created. This should be the virtual environment
name in the folder where all the virtual environments
are located.
"""
folder = self.venv_folder / project_name
if folder.exists():
logger.info("Removing %s", folder)
shutil.rmtree(folder)
else:
logger.info("Could not remove %s! No such folder.", folder)
def create_venv(self, project_name, workdir, system_site_packages=False):
"""
Parameters
----------
project_name: str
            The project name for which the virtual environment
is created. This will be the virtual environment
name in the folder where all the virtual environments
are located.
workdir: pathlib.Path
The working directory; the directory into
which the "linked virtual environment" is created
system_site_packages: bool
The --system_site_packages of "python -m venv"
"""
try:
self._check_no_venv_in_workdir(workdir, project_name)
except UserAborted:
print("Canceled.")
return
try:
# Create the virtual environment in the "centralized location"
# Note: project_name may change
project_name = self._create_venv_to_venv_folder(
project_name, system_site_packages=system_site_packages)
except UserAborted:
print("Canceled.")
return
# Create the "proxied" virtual environment in the workdir
self._create_venv_to_workdir(workdir, project_name)
def _create_venv_to_venv_folder(self,
project_name,
system_site_packages=False):
# Create the folder for all the virtual environments
self.venv_folder.mkdir(exist_ok=True, parents=True)
ret = self._check_that_venv_does_not_exist(project_name)
if ret == "skipcreate":
return project_name
if ret == "newname":
project_name = self._get_new_venv_name()
subprocess_cmd = [sys.executable, "-m", "venv", project_name]
if system_site_packages:
subprocess_cmd.append("--system-site-packages")
print('Running: "{}" with cwd="{}"'.format(" ".join(subprocess_cmd),
self.venv_folder))
subprocess.run(subprocess_cmd, cwd=self.venv_folder)
return project_name
def __venv_exists(self, project_name):
return (self.venv_folder / project_name).exists()
def _check_that_venv_does_not_exist(self, project_name):
"""
Returns
ret: str
'continue' 'skipcreate' or 'newname'
"""
if not self.__venv_exists(project_name):
return "continue"
return self._when_venv_exists_already(project_name)
def _get_new_venv_name(self):
prompt = "Give a new projectname: "
errortxt = "Sorry, that projectname exists already. "
return get_input(
prompt,
validator=lambda x: not self.__venv_exists(x),
on_validationerror=errortxt,
)
def _when_venv_exists_already(self, project_name):
"""
When venv already exists with the project_name, ask
user opinion.
Returns
-------
ret: str
'skipcreate' or 'newname'
Raises
------
UserAborted, if user aborted.
"""
text = f'The virtual environment for a projectname "{project_name}" exists already. '
text += f'If you use the name "{project_name}", you will SHARE the virtual environment '
text += "with the other project(s) using the same name." + os.linesep
text += "Continue?" + os.linesep
print(text)
prompt = "[Y] Yes [N] No, give new name. [A] Abort: "
validator = partial(is_in_accepted_values,
accepted_values={"Y", "N", "A"})
value = get_input(prompt, validator).upper()
if value == "Y":
return "skipcreate"
elif value == "N":
return "newname"
elif value == "A":
raise UserAborted()
def _check_no_venv_in_workdir(self,
workdir,
project_name,
venvname="venv"):
venvdir_dst = workdir / venvname
if venvdir_dst.exists():
print(f'The activate proxy inside "{venvdir_dst}" exists already!')
print(
"Do you want to remove it? (This will remove just the link, not the virtual environment it is linked to)"
)
prompt = "[Y] Yes [A] Abort: "
validator = partial(is_in_accepted_values,
accepted_values={"Y", "A"})
value = get_input(prompt, validator).upper()
if value == "A":
raise UserAborted()
elif value == "Y":
print("Removing ", str(venvdir_dst))
shutil.rmtree(venvdir_dst)
            else:  # should not ever happen.
                raise ValueError()
return True
def _create_venv_to_workdir(self, workdir, project_name, venvname="venv"):
# Create the working directory if it does not
        # exist (should probably always exist, but anyway.)
workdir.mkdir(exist_ok=True, parents=True)
venvdir_dst = workdir / venvname
venvdir_dst.mkdir()
# Make proxies to needed files
venvdir_src = self.venv_folder / project_name
real_activate_ps1 = venvdir_src / "Scripts" / "Activate.ps1"
real_activate_bat = venvdir_src / "Scripts" / "activate.bat"
files_and_contents = (
(
"Scripts" + os.path.sep + "Activate.ps1",
get_activate_ps_text(venvdir_src, real_activate_ps1),
),
(
"Scripts" + os.path.sep + "activate.bat",
("@echo off\n"
f"""echo venvlink: Activating virtual env in \"{venvdir_src}\"\n"""
f"""call \"{real_activate_bat}\""""),
), # Dummy file which makes possible to run simply "venv\Scripts\activate"
("Scripts" + os.path.sep + "activate", ""),
)
for file, content in files_and_contents:
file_dst = venvdir_dst / file
file_dst.parent.mkdir(exist_ok=True, parents=True)
with open(file_dst, "w", encoding="utf-8") as f:
print(content, file=f)
with open(venvdir_dst / "venvlink-autoenv.ps1", "w") as f:
print(get_venvlink_env_text(), file=f, end='')
with open(venvdir_dst / "venvlink-autoenv.leave.ps1", "w") as f:
print(get_venvlink_leave_text(), file=f, end='')
@property
def venv_folder(self):
return Path(self.config.venv_folder)
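# Illustrative usage sketch (not part of venvlink itself); the project name and
# working directory below are hypothetical.
def _example_create_and_remove_venv():
    vl = VenvLink()  # uses the default venvlink configuration
    vl.create_venv(
        project_name="myproject",
        workdir=Path(r"C:\code\myproject"),
        system_site_packages=False,
    )
    # Later, to drop the centrally stored environment again:
    vl.delete_env("myproject")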
| 34.616487 | 122 | 0.583144 |
8ecb03639c0310faa3d006a6db97d86ec3af7e07 | 1,693 | py | Python | workloads/tune_grid_cifar.py | SymbioticLab/Fluid | 3598be4f640514a72b4d67e18d7cfbc4a8cd7fd8 | ["Apache-2.0"] | 12 | 2021-03-10T15:36:04.000Z | 2022-01-07T16:07:24.000Z | workloads/tune_grid_cifar.py | SymbioticLab/Fluid | 3598be4f640514a72b4d67e18d7cfbc4a8cd7fd8 | ["Apache-2.0"] | 2 | 2022-01-07T16:03:36.000Z | 2022-01-14T08:21:06.000Z | workloads/tune_grid_cifar.py | SymbioticLab/Fluid | 3598be4f640514a72b4d67e18d7cfbc4a8cd7fd8 | ["Apache-2.0"] | 3 | 2021-05-16T03:15:08.000Z | 2021-12-10T11:50:16.000Z |
from pathlib import Path
from ray import tune
from ray.util.sgd.utils import BATCH_SIZE
import workloads.common as com
from fluid.trainer import TorchTrainer
from workloads.common import cifar as workload
DATA_PATH, RESULTS_PATH = com.detect_paths()
EXP_NAME = com.remove_prefix(Path(__file__).stem, "tune_")
def create_grid_search_space(exp_no):
# mutations = space.create_grid_space_1()
method_name = "space.create_grid_space_" + str(exp_no)
mutations = eval(method_name)()
return {key: tune.sample_from(val) for key, val in mutations.items()}
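# Sketch of an eval-free alternative to create_grid_search_space (illustrative
# only). It assumes a `space` module exposing create_grid_space_1(),
# create_grid_space_2(), ... -- referenced above but not imported in this
# excerpt.
def create_grid_search_space_no_eval(exp_no, space_module):
    factory = getattr(space_module, "create_grid_space_{}".format(exp_no))
    mutations = factory()
    return {key: tune.sample_from(val) for key, val in mutations.items()}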
def setup_tune_scheduler(exp_no):
search_space = create_grid_search_space(exp_no)
sync_to_driver = not RESULTS_PATH.startswith("/nfs")
return dict(
config=search_space,
resources_per_trial={"gpu": 1},
sync_to_driver=sync_to_driver,
local_dir=RESULTS_PATH,
name=EXP_NAME + str(exp_no),
)
def main():
exp_no, sd = com.init_ray()
MyTrainable = TorchTrainer.as_trainable(
data_creator=workload.data_creator,
model_creator=workload.model_creator,
loss_creator=workload.loss_creator,
optimizer_creator=workload.optimizer_creator,
config={"seed": sd, BATCH_SIZE: 64, "extra_fluid_trial_resources": {}},
)
params = {
# **com.run_options(__file__),
# 'stop': workload.create_stopper(),
**setup_tune_scheduler(exp_no),
}
analysis = tune.run(MyTrainable, stop={"training_iteration": 81}, **params)
dfs = analysis.trial_dataframes
for logdir, df in dfs.items():
ld = Path(logdir)
df.to_csv(ld / "trail_dataframe.csv")
if __name__ == "__main__":
main()
| 28.216667 | 79 | 0.691672 |
72805f43ee059259d038e1a58381805f3b4dc571 | 2,875 | py | Python | run_webcam.py | connor-john/tf-pose-estimation | 0a1a07864a42e7cc75d15b739cd2452b5eabd8d4 | ["Apache-2.0"] | 1 | 2021-02-02T03:08:44.000Z | 2021-02-02T03:08:44.000Z | run_webcam.py | connor-john/tf-pose-estimation | 0a1a07864a42e7cc75d15b739cd2452b5eabd8d4 | ["Apache-2.0"] | null | null | null | run_webcam.py | connor-john/tf-pose-estimation | 0a1a07864a42e7cc75d15b739cd2452b5eabd8d4 | ["Apache-2.0"] | null | null | null |
import argparse
import logging
import time
import cv2
import numpy as np
from tf_pose.estimator import TfPoseEstimator
from tf_pose.networks import get_graph_path, model_wh
logger = logging.getLogger('TfPoseEstimator-WebCam')
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
fps_time = 0
def str2bool(v):
return v.lower() in ("yes", "true", "t", "1")
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='tf-pose-estimation realtime webcam')
parser.add_argument('--camera', type=str, default=0)
parser.add_argument('--resize', type=str, default='0x0',
                        help='if provided, resize images before they are processed. default=0x0, recommended: 432x368, 656x368 or 1312x736')
parser.add_argument('--resize-out-ratio', type=float, default=4.0,
                        help='if provided, resize heatmaps before they are post-processed. default=4.0')
parser.add_argument('--model', type=str, default='mobilenet_thin', help='cmu / mobilenet_thin / mobilenet_v2_large / mobilenet_v2_small')
parser.add_argument('--show-process', type=bool, default=False,
help='for debug purpose, if enabled, speed for inference is dropped.')
parser.add_argument('--tensorrt', type=str, default="False",
help='for tensorrt process.')
args = parser.parse_args()
logger.debug('initialization %s : %s' % (args.model, get_graph_path(args.model)))
w, h = model_wh(args.resize)
if w > 0 and h > 0:
e = TfPoseEstimator(get_graph_path(args.model), target_size=(w, h), trt_bool=str2bool(args.tensorrt))
else:
e = TfPoseEstimator(get_graph_path(args.model), target_size=(432, 368), trt_bool=str2bool(args.tensorrt))
logger.debug('cam read+')
cam = cv2.VideoCapture(args.camera)
ret_val, image = cam.read()
logger.info('cam image=%dx%d' % (image.shape[1], image.shape[0]))
while True:
ret_val, image = cam.read()
logger.debug('image process+')
humans = e.inference(image, resize_to_default=(w > 0 and h > 0), upsample_size=args.resize_out_ratio)
logger.debug('postprocess+')
image = TfPoseEstimator.draw_humans(image, humans, imgcopy=False)
# TfPoseEstimator.logpunches()
logger.debug('show+')
cv2.putText(image,
"FPS: %f" % (1.0 / (time.time() - fps_time)),
(10, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
(0, 255, 0), 2)
cv2.imshow('tf-pose-estimation result', image)
fps_time = time.time()
if cv2.waitKey(1) == 27:
break
logger.debug('finished+')
cv2.destroyAllWindows()
| 38.333333 | 143 | 0.649391 |
4f324c94c410c97be07ee99a0a8f054c703c7f2b | 740 | py | Python | setup.py | rassamyjs/lambdata-20-try2 | e334d52c6f48db31877a29c41c2879127338f9c6 | ["MIT"] | null | null | null | setup.py | rassamyjs/lambdata-20-try2 | e334d52c6f48db31877a29c41c2879127338f9c6 | ["MIT"] | null | null | null | setup.py | rassamyjs/lambdata-20-try2 | e334d52c6f48db31877a29c41c2879127338f9c6 | ["MIT"] | null | null | null |
import setuptools
REQUIRED = ['numpy', 'pandas']
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="lambdata_rassamyjs", # Replace with your own username
version="0.0.4",
author="rassamyjs",
description="A collection of data science functions",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/rassamyjs/lambdata_20",
packages=setuptools.find_packages(),
install_requires=REQUIRED,
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
)
| 30.833333 | 64 | 0.655405 |
329d7611387ed90d67dc0349b145e484f2e97210 | 4,466 | py | Python | analysis/anesthetized/deconv/deconvolve-anesthetized-ketamine.py | goldman-lab/oculomotor-response-timescales | fa9f73e6f6a256a5983248b30c6348a50f1fdfcf | ["BSD-3-Clause"] | null | null | null | analysis/anesthetized/deconv/deconvolve-anesthetized-ketamine.py | goldman-lab/oculomotor-response-timescales | fa9f73e6f6a256a5983248b30c6348a50f1fdfcf | ["BSD-3-Clause"] | null | null | null | analysis/anesthetized/deconv/deconvolve-anesthetized-ketamine.py | goldman-lab/oculomotor-response-timescales | fa9f73e6f6a256a5983248b30c6348a50f1fdfcf | ["BSD-3-Clause"] | 1 | 2022-01-05T18:26:33.000Z | 2022-01-05T18:26:33.000Z |
import numpy as np
import sys
sys.path.append('../../../tools/')
import fitting_functions
import os
import scipy.io as sio
import scipy.optimize
import tqdm
def loadFitResult(filename):
fit_file = sio.loadmat(filename)
sses = fit_file['sses']
lls = fit_file['lls']
fits = fit_file['fits']
return fits, lls, sses
def loadBestFits(filename, n):
fit_file = sio.loadmat(filename)
lls = fit_file['lls']
fits = fit_file['fits']
best_trace_ind = np.argmax(lls[n-1,:])
best_fits = np.zeros((fits.shape[1], fits[n-1,0].shape[1]))
for i in range(fits.shape[1]):
best_fits[i,:] = fits[n-1,i][best_trace_ind,:]
return best_fits
short_duration = 15
long_duration = 90
num_ics = 50
if __name__ == "__main__":
    ketamine_traces = [ '82411p', '82411r', '63011d','70911i', '70911l', '70911m', '82411p', '82411r']
    # NOTE: start_trace_num is used in the IC loop below but was not defined in
    # this file; assume every fish starts at IC 0 (adjust to resume partial runs).
    start_trace_num = [0] * len(ketamine_traces)
if not os.path.isdir('results'):
os.makedirs('results')
if not os.path.isdir('results/Ketamine'):
os.makedirs('results/Ketamine')
if not os.path.isdir('results/Ketamine/distributed'):
os.makedirs('results/Ketamine/distributed')
for fish_num in tqdm.trange(len(ketamine_traces), desc='Fish no.'):
trange, pe_short, pe_long = fitting_functions.importDataKetamine('../../../data/anesthetized/fixed/Ketamine/'+ketamine_traces[fish_num]+'.mat')
best_fits = loadBestFits('../fit/results/Ketamine/'+ketamine_traces[fish_num]+'.mat', 4)
timeconstants = best_fits[0, 4:]
# Bridge the gap between release time and real data
trange_deconv_mid = np.arange(0, int(0.2304/(72*2e-4))+1)*72*2e-4
pe_short_deconv_mid = fitting_functions.exponentialModel(trange_deconv_mid, best_fits[0,:])
pe_long_deconv_mid = fitting_functions.exponentialModel(trange_deconv_mid, best_fits[1,:])
# Construct 10s hold
trange_short_deconv_pre = np.arange(0, short_duration, 72*2e-4)
pe_short_deconv_pre = np.ones(len(trange_short_deconv_pre))
trange_long_deconv_pre = np.arange(0, long_duration, 72*2e-4)
pe_long_deconv_pre = np.ones(len(trange_long_deconv_pre))
trange_short_deconv = np.concatenate((trange_short_deconv_pre, short_duration+trange_deconv_mid, short_duration+0.2304+trange[1:]))
trange_long_deconv = np.concatenate((trange_long_deconv_pre, long_duration+trange_deconv_mid, long_duration+0.2304+trange[1:]))
pe_short_deconv = np.concatenate((pe_short_deconv_pre, pe_short_deconv_mid, pe_short[1:]))
pe_long_deconv = np.concatenate((pe_long_deconv_pre, pe_long_deconv_mid, pe_long[1:]))
### For nonlinear fitting
if not os.path.isdir('results/Ketamine/distributed/'+ketamine_traces[fish_num]):
os.makedirs('results/Ketamine/distributed/'+ketamine_traces[fish_num])
cs_nonlin = np.zeros((num_ics, 4))
costs_nonlin = np.zeros(num_ics)
for i in tqdm.trange(start_trace_num[fish_num], num_ics, desc='IC no.'):
ics_ = np.random.rand(4)
ics_ /= np.sum(ics_)
f, p, c, cost, grad = fitting_functions.blindDeconvN_NonLin([trange_short_deconv, trange_long_deconv],\
[pe_short_deconv, pe_long_deconv],\
[len(trange_short_deconv_pre)+1, len(trange_long_deconv_pre)+1],\
72*2e-4, np.concatenate((ics_, timeconstants)), method='TNC')
sio.savemat('results/Ketamine/distributed/'+ketamine_traces[fish_num]+'/nonlinear'+str(i+1)+'.mat', {'c':c, 'cost':cost, 'grad':grad})
cs_nonlin[i,:] = c
costs_nonlin[i] = cost
### For linear fitting
f_linear, p_linear, c_linear, cost_f_linear, cost_p_linear, grad_linear = fitting_functions.blindDeconvN_Linear([trange_short_deconv, trange_long_deconv],\
[pe_short_deconv, pe_long_deconv],\
[len(trange_short_deconv_pre)+1, len(trange_long_deconv_pre)+1],\
72*2e-4, np.concatenate((cs_nonlin[np.argmin(costs_nonlin), :], timeconstants)), dense=True)
sio.savemat('results/Ketamine/distributed/'+ketamine_traces[fish_num]+'.mat', {'c':c_linear, 'cost_f':cost_f_linear, 'cost_p':cost_p_linear, 'grad':grad_linear})
| 52.541176
| 170
| 0.638603
|
9bfe888fd1b5f0a549232674c7480371613a0fd3
| 2,805
|
py
|
Python
|
barcode/upc.py
|
fxjung/python-barcode
|
926041158fea2edc83d32a4e6b332ad4eadd663c
|
[
"MIT"
] | null | null | null |
barcode/upc.py
|
fxjung/python-barcode
|
926041158fea2edc83d32a4e6b332ad4eadd663c
|
[
"MIT"
] | null | null | null |
barcode/upc.py
|
fxjung/python-barcode
|
926041158fea2edc83d32a4e6b332ad4eadd663c
|
[
"MIT"
] | null | null | null |
"""Module: barcode.upc
:Provided barcodes: UPC-A
"""
__docformat__ = "restructuredtext en"
from functools import reduce
from barcode.base import Barcode
from barcode.charsets import upc as _upc
from barcode.errors import IllegalCharacterError, NumberOfDigitsError
class UniversalProductCodeA(Barcode):
"""Initializes new UPC-A barcode.
:parameters:
upc : String
The upc number as string.
writer : barcode.writer Instance
The writer to render the barcode (default: SVGWriter).
make_ean: boolean
"""
name = "UPC-A"
digits = 11
def __init__(self, upc, writer=None, make_ean=False):
self.ean = make_ean
upc = upc[: self.digits]
if not upc.isdigit():
raise IllegalCharacterError("UPC code can only contain numbers.")
if len(upc) != self.digits:
raise NumberOfDigitsError(
"UPC must have {0} digits, not " "{1}.".format(self.digits, len(upc))
)
self.upc = upc
self.upc = "{}{}".format(upc, self.calculate_checksum())
self.writer = writer or Barcode.default_writer()
def __unicode__(self):
if self.ean:
return "0" + self.upc
else:
return self.upc
__str__ = __unicode__
def get_fullcode(self):
if self.ean:
return "0" + self.upc
else:
return self.upc
def calculate_checksum(self):
"""Calculates the checksum for UPCA/UPC codes
:return: The checksum for 'self.upc'
:rtype: Integer
"""
def sum_(x, y):
return int(x) + int(y)
upc = self.upc[0 : self.digits]
oddsum = reduce(sum_, upc[::2])
evensum = reduce(sum_, upc[1::2])
check = (evensum + oddsum * 3) % 10
if check == 0:
return 0
else:
return 10 - check
def build(self):
"""Builds the barcode pattern from 'self.upc'
:return: The pattern as string
:rtype: String
"""
code = _upc.EDGE[:]
for _i, number in enumerate(self.upc[0:6]):
code += _upc.CODES["L"][int(number)]
code += _upc.MIDDLE
for number in self.upc[6:]:
code += _upc.CODES["R"][int(number)]
code += _upc.EDGE
return [code]
def to_ascii(self):
"""Returns an ascii representation of the barcode.
:rtype: String
"""
code = self.build()
for i, line in enumerate(code):
code[i] = line.replace("1", "|").replace("0", "_")
return "\n".join(code)
def render(self, text=None):
self.writer.set_options({"module_width": 0.33})
return Barcode.render(self, text)
UPCA = UniversalProductCodeA
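# Standalone sketch (not part of python-barcode) of the UPC-A check digit rule
# implemented in calculate_checksum() above: digits at even string indices
# (odd positions, 1-based) are weighted by 3.
def upc_check_digit(payload):
    """Return the UPC-A check digit for an 11-digit numeric payload."""
    assert len(payload) == 11 and payload.isdigit()
    odd = sum(int(d) for d in payload[::2])  # 1st, 3rd, ... digit
    even = sum(int(d) for d in payload[1::2])  # 2nd, 4th, ... digit
    check = (even + odd * 3) % 10
    return 0 if check == 0 else 10 - check

# Worked example: upc_check_digit("03600029145") == 2, so the full UPC-A code
# would be "036000291452".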
| 25.044643 | 85 | 0.56328 |
d66f589759bdc1737645d7b5c8361b5940a33182 | 53,353 | py | Python | tools/configure.py | brccabral/caer | 2ddb84095202aa98224b04612eff9e97c8680309 | ["MIT"] | 3 | 2021-01-23T19:53:28.000Z | 2021-01-23T19:53:53.000Z | tools/configure.py | brccabral/caer | 2ddb84095202aa98224b04612eff9e97c8680309 | ["MIT"] | null | null | null | tools/configure.py | brccabral/caer | 2ddb84095202aa98224b04612eff9e97c8680309 | ["MIT"] | 1 | 2021-01-01T10:37:55.000Z | 2021-01-01T10:37:55.000Z |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""configure script to get build parameters from user."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import errno
import os
import platform
import re
import subprocess
import sys
# pylint: disable=g-import-not-at-top
try:
from shutil import which
except ImportError:
from distutils.spawn import find_executable as which
# pylint: enable=g-import-not-at-top
_DEFAULT_CUDA_VERSION = '10'
_DEFAULT_CUDNN_VERSION = '7'
_DEFAULT_TENSORRT_VERSION = '6'
_DEFAULT_CUDA_COMPUTE_CAPABILITIES = '3.5,7.0'
_SUPPORTED_ANDROID_NDK_VERSIONS = [10, 11, 12, 13, 14, 15, 16, 17, 18]
_DEFAULT_PROMPT_ASK_ATTEMPTS = 10
_TF_BAZELRC_FILENAME = '.tf_configure.bazelrc'
_TF_WORKSPACE_ROOT = ''
_TF_BAZELRC = ''
_TF_CURRENT_BAZEL_VERSION = None
_TF_MIN_BAZEL_VERSION = '3.1.0'
_TF_MAX_BAZEL_VERSION = '3.99.0'
NCCL_LIB_PATHS = [
'lib64/', 'lib/powerpc64le-linux-gnu/', 'lib/x86_64-linux-gnu/', ''
]
# List of files to configure when building Bazel on Apple platforms.
APPLE_BAZEL_FILES = [
'tensorflow/lite/experimental/ios/BUILD',
'tensorflow/lite/experimental/objc/BUILD',
'tensorflow/lite/experimental/swift/BUILD',
'tensorflow/lite/tools/benchmark/experimental/ios/BUILD'
]
# List of files to move when building for iOS.
IOS_FILES = [
'tensorflow/lite/experimental/objc/TensorFlowLiteObjC.podspec',
'tensorflow/lite/experimental/swift/TensorFlowLiteSwift.podspec',
]
class UserInputError(Exception):
pass
def is_windows():
return platform.system() == 'Windows'
def is_linux():
return platform.system() == 'Linux'
def is_macos():
return platform.system() == 'Darwin'
def is_ppc64le():
return platform.machine() == 'ppc64le'
def is_cygwin():
return platform.system().startswith('CYGWIN_NT')
def get_input(question):
try:
try:
answer = raw_input(question)
except NameError:
answer = input(question) # pylint: disable=bad-builtin
except EOFError:
answer = ''
return answer
def symlink_force(target, link_name):
"""Force symlink, equivalent of 'ln -sf'.
Args:
target: items to link to.
link_name: name of the link.
"""
try:
os.symlink(target, link_name)
except OSError as e:
if e.errno == errno.EEXIST:
os.remove(link_name)
os.symlink(target, link_name)
else:
raise e
def sed_in_place(filename, old, new):
"""Replace old string with new string in file.
Args:
filename: string for filename.
old: string to replace.
new: new string to replace to.
"""
with open(filename, 'r') as f:
filedata = f.read()
newdata = filedata.replace(old, new)
with open(filename, 'w') as f:
f.write(newdata)
def write_to_bazelrc(line):
with open(_TF_BAZELRC, 'a') as f:
f.write(line + '\n')
def write_action_env_to_bazelrc(var_name, var):
write_to_bazelrc('build --action_env {}="{}"'.format(var_name, str(var)))
def run_shell(cmd, allow_non_zero=False, stderr=None):
if stderr is None:
stderr = sys.stdout
if allow_non_zero:
try:
output = subprocess.check_output(cmd, stderr=stderr)
except subprocess.CalledProcessError as e:
output = e.output
else:
output = subprocess.check_output(cmd, stderr=stderr)
return output.decode('UTF-8').strip()
def cygpath(path):
"""Convert path from posix to windows."""
return os.path.abspath(path).replace('\\', '/')
def get_python_path(environ_cp, python_bin_path):
"""Get the python site package paths."""
python_paths = []
if environ_cp.get('PYTHONPATH'):
python_paths = environ_cp.get('PYTHONPATH').split(':')
try:
stderr = open(os.devnull, 'wb')
library_paths = run_shell([
python_bin_path, '-c',
'import site; print("\\n".join(site.getsitepackages()))'
],
stderr=stderr).split('\n')
except subprocess.CalledProcessError:
library_paths = [
run_shell([
python_bin_path, '-c',
'from distutils.sysconfig import get_python_lib;'
'print(get_python_lib())'
])
]
all_paths = set(python_paths + library_paths)
paths = []
for path in all_paths:
if os.path.isdir(path):
paths.append(path)
return paths
def get_python_major_version(python_bin_path):
"""Get the python major version."""
return run_shell([python_bin_path, '-c', 'import sys; print(sys.version[0])'])
def setup_python(environ_cp):
"""Setup python related env variables."""
# Get PYTHON_BIN_PATH, default is the current running python.
default_python_bin_path = sys.executable
ask_python_bin_path = ('Please specify the location of python. [Default is '
'{}]: ').format(default_python_bin_path)
while True:
python_bin_path = get_from_env_or_user_or_default(environ_cp,
'PYTHON_BIN_PATH',
ask_python_bin_path,
default_python_bin_path)
# Check if the path is valid
if os.path.isfile(python_bin_path) and os.access(python_bin_path, os.X_OK):
break
elif not os.path.exists(python_bin_path):
print('Invalid python path: {} cannot be found.'.format(python_bin_path))
else:
print('{} is not executable. Is it the python binary?'.format(
python_bin_path))
environ_cp['PYTHON_BIN_PATH'] = ''
# Convert python path to Windows style before checking lib and version
if is_windows() or is_cygwin():
python_bin_path = cygpath(python_bin_path)
# Get PYTHON_LIB_PATH
python_lib_path = environ_cp.get('PYTHON_LIB_PATH')
if not python_lib_path:
python_lib_paths = get_python_path(environ_cp, python_bin_path)
if environ_cp.get('USE_DEFAULT_PYTHON_LIB_PATH') == '1':
python_lib_path = python_lib_paths[0]
else:
print('Found possible Python library paths:\n %s' %
'\n '.join(python_lib_paths))
default_python_lib_path = python_lib_paths[0]
python_lib_path = get_input(
'Please input the desired Python library path to use. '
'Default is [{}]\n'.format(python_lib_paths[0]))
if not python_lib_path:
python_lib_path = default_python_lib_path
environ_cp['PYTHON_LIB_PATH'] = python_lib_path
python_major_version = get_python_major_version(python_bin_path)
if python_major_version == '2':
write_to_bazelrc('build --host_force_python=PY2')
# Convert python path to Windows style before writing into bazel.rc
if is_windows() or is_cygwin():
python_lib_path = cygpath(python_lib_path)
# Set-up env variables used by python_configure.bzl
write_action_env_to_bazelrc('PYTHON_BIN_PATH', python_bin_path)
write_action_env_to_bazelrc('PYTHON_LIB_PATH', python_lib_path)
write_to_bazelrc('build --python_path=\"{}"'.format(python_bin_path))
environ_cp['PYTHON_BIN_PATH'] = python_bin_path
  # If chosen python_lib_path is from a path specified in the PYTHONPATH
# variable, need to tell bazel to include PYTHONPATH
if environ_cp.get('PYTHONPATH'):
python_paths = environ_cp.get('PYTHONPATH').split(':')
if python_lib_path in python_paths:
write_action_env_to_bazelrc('PYTHONPATH', environ_cp.get('PYTHONPATH'))
# Write tools/python_bin_path.sh
with open(
os.path.join(_TF_WORKSPACE_ROOT, 'tools', 'python_bin_path.sh'),
'w') as f:
f.write('export PYTHON_BIN_PATH="{}"'.format(python_bin_path))
def reset_tf_configure_bazelrc():
"""Reset file that contains customized config settings."""
open(_TF_BAZELRC, 'w').close()
def cleanup_makefile():
"""Delete any leftover BUILD files from the Makefile build.
These files could interfere with Bazel parsing.
"""
makefile_download_dir = os.path.join(_TF_WORKSPACE_ROOT, 'tensorflow',
'contrib', 'makefile', 'downloads')
if os.path.isdir(makefile_download_dir):
for root, _, filenames in os.walk(makefile_download_dir):
for f in filenames:
if f.endswith('BUILD'):
os.remove(os.path.join(root, f))
def get_var(environ_cp,
var_name,
query_item,
enabled_by_default,
question=None,
yes_reply=None,
no_reply=None):
"""Get boolean input from user.
If var_name is not set in env, ask user to enable query_item or not. If the
response is empty, use the default.
Args:
environ_cp: copy of the os.environ.
var_name: string for name of environment variable, e.g. "TF_NEED_CUDA".
query_item: string for feature related to the variable, e.g. "CUDA for
Nvidia GPUs".
enabled_by_default: boolean for default behavior.
question: optional string for how to ask for user input.
yes_reply: optional string for reply when feature is enabled.
no_reply: optional string for reply when feature is disabled.
Returns:
boolean value of the variable.
Raises:
UserInputError: if an environment variable is set, but it cannot be
interpreted as a boolean indicator, assume that the user has made a
scripting error, and will continue to provide invalid input.
Raise the error to avoid infinitely looping.
"""
if not question:
question = 'Do you wish to build TensorFlow with {} support?'.format(
query_item)
if not yes_reply:
yes_reply = '{} support will be enabled for TensorFlow.'.format(query_item)
if not no_reply:
no_reply = 'No {}'.format(yes_reply)
yes_reply += '\n'
no_reply += '\n'
if enabled_by_default:
question += ' [Y/n]: '
else:
question += ' [y/N]: '
var = environ_cp.get(var_name)
if var is not None:
var_content = var.strip().lower()
true_strings = ('1', 't', 'true', 'y', 'yes')
false_strings = ('0', 'f', 'false', 'n', 'no')
if var_content in true_strings:
var = True
elif var_content in false_strings:
var = False
else:
raise UserInputError(
'Environment variable %s must be set as a boolean indicator.\n'
'The following are accepted as TRUE : %s.\n'
'The following are accepted as FALSE: %s.\n'
'Current value is %s.' %
(var_name, ', '.join(true_strings), ', '.join(false_strings), var))
while var is None:
user_input_origin = get_input(question)
user_input = user_input_origin.strip().lower()
if user_input == 'y':
print(yes_reply)
var = True
elif user_input == 'n':
print(no_reply)
var = False
elif not user_input:
if enabled_by_default:
print(yes_reply)
var = True
else:
print(no_reply)
var = False
else:
print('Invalid selection: {}'.format(user_input_origin))
return var
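# Illustrative only (not part of the original script): how get_var() treats a
# variable that is already set in the environment copy. The values below are
# hypothetical.
def _demo_get_var():
  env = dict(os.environ)
  env['TF_NEED_CUDA'] = 'Yes'  # any of 1/t/true/y/yes parses as True
  assert get_var(env, 'TF_NEED_CUDA', 'CUDA for Nvidia GPUs', False) is True
  env['TF_NEED_CUDA'] = '0'  # any of 0/f/false/n/no parses as False
  assert get_var(env, 'TF_NEED_CUDA', 'CUDA for Nvidia GPUs', False) is False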
def set_build_var(environ_cp,
var_name,
query_item,
option_name,
enabled_by_default,
bazel_config_name=None):
"""Set if query_item will be enabled for the build.
Ask user if query_item will be enabled. Default is used if no input is given.
Set subprocess environment variable and write to .bazelrc if enabled.
Args:
environ_cp: copy of the os.environ.
var_name: string for name of environment variable, e.g. "TF_NEED_CUDA".
query_item: string for feature related to the variable, e.g. "CUDA for
Nvidia GPUs".
option_name: string for option to define in .bazelrc.
enabled_by_default: boolean for default behavior.
bazel_config_name: Name for Bazel --config argument to enable build feature.
"""
var = str(int(get_var(environ_cp, var_name, query_item, enabled_by_default)))
environ_cp[var_name] = var
if var == '1':
write_to_bazelrc('build:%s --define %s=true' %
(bazel_config_name, option_name))
write_to_bazelrc('build --config=%s' % bazel_config_name)
elif bazel_config_name is not None:
# TODO(mikecase): Migrate all users of configure.py to use --config Bazel
# options and not to set build configs through environment variables.
write_to_bazelrc('build:%s --define %s=true' %
(bazel_config_name, option_name))
def set_action_env_var(environ_cp,
var_name,
query_item,
enabled_by_default,
question=None,
yes_reply=None,
no_reply=None,
bazel_config_name=None):
"""Set boolean action_env variable.
Ask user if query_item will be enabled. Default is used if no input is given.
Set environment variable and write to .bazelrc.
Args:
environ_cp: copy of the os.environ.
var_name: string for name of environment variable, e.g. "TF_NEED_CUDA".
query_item: string for feature related to the variable, e.g. "CUDA for
Nvidia GPUs".
enabled_by_default: boolean for default behavior.
question: optional string for how to ask for user input.
yes_reply: optional string for reply when feature is enabled.
no_reply: optional string for reply when feature is disabled.
bazel_config_name: adding config to .bazelrc instead of action_env.
"""
var = int(
get_var(environ_cp, var_name, query_item, enabled_by_default, question,
yes_reply, no_reply))
if not bazel_config_name:
write_action_env_to_bazelrc(var_name, var)
elif var:
write_to_bazelrc('build --config=%s' % bazel_config_name)
environ_cp[var_name] = str(var)
def convert_version_to_int(version):
"""Convert a version number to a integer that can be used to compare.
Version strings of the form X.YZ and X.Y.Z-xxxxx are supported. The
'xxxxx' part, for instance 'homebrew' on OS/X, is ignored.
Args:
version: a version to be converted
Returns:
An integer if converted successfully, otherwise return None.
"""
version = version.split('-')[0]
version_segments = version.split('.')
# Treat "0.24" as "0.24.0"
if len(version_segments) == 2:
version_segments.append('0')
for seg in version_segments:
if not seg.isdigit():
return None
version_str = ''.join(['%03d' % int(seg) for seg in version_segments])
return int(version_str)
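# Illustrative only (not part of the original script): expected values from
# convert_version_to_int's zero-padding scheme, where each version segment
# becomes a three-digit field.
def _demo_convert_version_to_int():
  assert convert_version_to_int('3.1.0') == 3001000  # '003001000'
  assert convert_version_to_int('0.24') == 24000  # treated as '0.24.0'
  assert convert_version_to_int('3.7.0-homebrew') == 3007000  # '-suffix' ignored
  assert convert_version_to_int('not-a-version') is None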
def check_bazel_version(min_version, max_version):
"""Check installed bazel version is between min_version and max_version.
Args:
min_version: string for minimum bazel version (must exist!).
max_version: string for maximum bazel version (must exist!).
Returns:
The bazel version detected.
"""
if which('bazel') is None:
print('Cannot find bazel. Please install bazel.')
sys.exit(1)
stderr = open(os.devnull, 'wb')
curr_version = run_shell(['bazel', '--version'],
allow_non_zero=True,
stderr=stderr)
if curr_version.startswith('bazel '):
curr_version = curr_version.split('bazel ')[1]
min_version_int = convert_version_to_int(min_version)
curr_version_int = convert_version_to_int(curr_version)
max_version_int = convert_version_to_int(max_version)
# Check if current bazel version can be detected properly.
if not curr_version_int:
print('WARNING: current bazel installation is not a release version.')
print('Make sure you are running at least bazel %s' % min_version)
return curr_version
print('You have bazel %s installed.' % curr_version)
if curr_version_int < min_version_int:
print('Please upgrade your bazel installation to version %s or higher to '
'build TensorFlow!' % min_version)
sys.exit(1)
if (curr_version_int > max_version_int and
'TF_IGNORE_MAX_BAZEL_VERSION' not in os.environ):
print('Please downgrade your bazel installation to version %s or lower to '
'build TensorFlow! To downgrade: download the installer for the old '
'version (from https://github.com/bazelbuild/bazel/releases) then '
'run the installer.' % max_version)
sys.exit(1)
return curr_version
def set_cc_opt_flags(environ_cp):
"""Set up architecture-dependent optimization flags.
  Also append CC optimization flags to bazel.rc.
Args:
environ_cp: copy of the os.environ.
"""
if is_ppc64le():
# gcc on ppc64le does not support -march, use mcpu instead
default_cc_opt_flags = '-mcpu=native'
elif is_windows():
default_cc_opt_flags = '/arch:AVX'
else:
default_cc_opt_flags = '-march=native -Wno-sign-compare'
question = ('Please specify optimization flags to use during compilation when'
' bazel option "--config=opt" is specified [Default is %s]: '
) % default_cc_opt_flags
cc_opt_flags = get_from_env_or_user_or_default(environ_cp, 'CC_OPT_FLAGS',
question, default_cc_opt_flags)
for opt in cc_opt_flags.split():
write_to_bazelrc('build:opt --copt=%s' % opt)
# It should be safe on the same build host.
if not is_ppc64le() and not is_windows():
write_to_bazelrc('build:opt --host_copt=-march=native')
write_to_bazelrc('build:opt --define with_default_optimizations=true')
def set_tf_cuda_clang(environ_cp):
"""set TF_CUDA_CLANG action_env.
Args:
environ_cp: copy of the os.environ.
"""
question = 'Do you want to use clang as CUDA compiler?'
yes_reply = 'Clang will be used as CUDA compiler.'
no_reply = 'nvcc will be used as CUDA compiler.'
set_action_env_var(
environ_cp,
'TF_CUDA_CLANG',
None,
False,
question=question,
yes_reply=yes_reply,
no_reply=no_reply,
bazel_config_name='cuda_clang')
def set_tf_download_clang(environ_cp):
"""Set TF_DOWNLOAD_CLANG action_env."""
question = 'Do you wish to download a fresh release of clang? (Experimental)'
yes_reply = 'Clang will be downloaded and used to compile tensorflow.'
no_reply = 'Clang will not be downloaded.'
set_action_env_var(
environ_cp,
'TF_DOWNLOAD_CLANG',
None,
False,
question=question,
yes_reply=yes_reply,
no_reply=no_reply,
bazel_config_name='download_clang')
def get_from_env_or_user_or_default(environ_cp, var_name, ask_for_var,
var_default):
"""Get var_name either from env, or user or default.
If var_name has been set as environment variable, use the preset value, else
ask for user input. If no input is provided, the default is used.
Args:
environ_cp: copy of the os.environ.
var_name: string for name of environment variable, e.g. "TF_NEED_CUDA".
ask_for_var: string for how to ask for user input.
var_default: default value string.
Returns:
string value for var_name
"""
var = environ_cp.get(var_name)
if not var:
var = get_input(ask_for_var)
print('\n')
if not var:
var = var_default
return var
def set_clang_cuda_compiler_path(environ_cp):
"""Set CLANG_CUDA_COMPILER_PATH."""
default_clang_path = which('clang') or ''
ask_clang_path = ('Please specify which clang should be used as device and '
'host compiler. [Default is %s]: ') % default_clang_path
while True:
clang_cuda_compiler_path = get_from_env_or_user_or_default(
environ_cp, 'CLANG_CUDA_COMPILER_PATH', ask_clang_path,
default_clang_path)
if os.path.exists(clang_cuda_compiler_path):
break
# Reset and retry
print('Invalid clang path: %s cannot be found.' % clang_cuda_compiler_path)
environ_cp['CLANG_CUDA_COMPILER_PATH'] = ''
# Set CLANG_CUDA_COMPILER_PATH
environ_cp['CLANG_CUDA_COMPILER_PATH'] = clang_cuda_compiler_path
write_action_env_to_bazelrc('CLANG_CUDA_COMPILER_PATH',
clang_cuda_compiler_path)
def prompt_loop_or_load_from_env(environ_cp,
var_name,
var_default,
ask_for_var,
check_success,
error_msg,
suppress_default_error=False,
resolve_symlinks=False,
n_ask_attempts=_DEFAULT_PROMPT_ASK_ATTEMPTS):
"""Loop over user prompts for an ENV param until receiving a valid response.
For the env param var_name, read from the environment or verify user input
until receiving valid input. When done, set var_name in the environ_cp to its
new value.
Args:
environ_cp: (Dict) copy of the os.environ.
var_name: (String) string for name of environment variable, e.g. "TF_MYVAR".
var_default: (String) default value string.
ask_for_var: (String) string for how to ask for user input.
check_success: (Function) function that takes one argument and returns a
boolean. Should return True if the value provided is considered valid. May
contain a complex error message if error_msg does not provide enough
information. In that case, set suppress_default_error to True.
error_msg: (String) String with one and only one '%s'. Formatted with each
invalid response upon check_success(input) failure.
suppress_default_error: (Bool) Suppress the above error message in favor of
one from the check_success function.
resolve_symlinks: (Bool) Translate symbolic links into the real filepath.
n_ask_attempts: (Integer) Number of times to query for valid input before
raising an error and quitting.
Returns:
[String] The value of var_name after querying for input.
Raises:
UserInputError: if a query has been attempted n_ask_attempts times without
success, assume that the user has made a scripting error, and will
continue to provide invalid input. Raise the error to avoid infinitely
looping.
"""
default = environ_cp.get(var_name) or var_default
full_query = '%s [Default is %s]: ' % (
ask_for_var,
default,
)
for _ in range(n_ask_attempts):
val = get_from_env_or_user_or_default(environ_cp, var_name, full_query,
default)
if check_success(val):
break
if not suppress_default_error:
print(error_msg % val)
environ_cp[var_name] = ''
else:
raise UserInputError('Invalid %s setting was provided %d times in a row. '
'Assuming to be a scripting mistake.' %
(var_name, n_ask_attempts))
if resolve_symlinks and os.path.islink(val):
val = os.path.realpath(val)
environ_cp[var_name] = val
return val
def create_android_ndk_rule(environ_cp):
"""Set ANDROID_NDK_HOME and write Android NDK WORKSPACE rule."""
if is_windows() or is_cygwin():
default_ndk_path = cygpath('%s/Android/Sdk/ndk-bundle' %
environ_cp['APPDATA'])
elif is_macos():
default_ndk_path = '%s/library/Android/Sdk/ndk-bundle' % environ_cp['HOME']
else:
default_ndk_path = '%s/Android/Sdk/ndk-bundle' % environ_cp['HOME']
def valid_ndk_path(path):
return (os.path.exists(path) and
os.path.exists(os.path.join(path, 'source.properties')))
android_ndk_home_path = prompt_loop_or_load_from_env(
environ_cp,
var_name='ANDROID_NDK_HOME',
var_default=default_ndk_path,
ask_for_var='Please specify the home path of the Android NDK to use.',
check_success=valid_ndk_path,
error_msg=('The path %s or its child file "source.properties" '
'does not exist.'))
write_action_env_to_bazelrc('ANDROID_NDK_HOME', android_ndk_home_path)
write_action_env_to_bazelrc(
'ANDROID_NDK_API_LEVEL',
get_ndk_api_level(environ_cp, android_ndk_home_path))
def create_android_sdk_rule(environ_cp):
"""Set Android variables and write Android SDK WORKSPACE rule."""
if is_windows() or is_cygwin():
default_sdk_path = cygpath('%s/Android/Sdk' % environ_cp['APPDATA'])
elif is_macos():
default_sdk_path = '%s/library/Android/Sdk' % environ_cp['HOME']
else:
default_sdk_path = '%s/Android/Sdk' % environ_cp['HOME']
def valid_sdk_path(path):
return (os.path.exists(path) and
os.path.exists(os.path.join(path, 'platforms')) and
os.path.exists(os.path.join(path, 'build-tools')))
android_sdk_home_path = prompt_loop_or_load_from_env(
environ_cp,
var_name='ANDROID_SDK_HOME',
var_default=default_sdk_path,
ask_for_var='Please specify the home path of the Android SDK to use.',
check_success=valid_sdk_path,
error_msg=('Either %s does not exist, or it does not contain the '
'subdirectories "platforms" and "build-tools".'))
platforms = os.path.join(android_sdk_home_path, 'platforms')
api_levels = sorted(os.listdir(platforms))
api_levels = [x.replace('android-', '') for x in api_levels]
def valid_api_level(api_level):
return os.path.exists(
os.path.join(android_sdk_home_path, 'platforms',
'android-' + api_level))
android_api_level = prompt_loop_or_load_from_env(
environ_cp,
var_name='ANDROID_API_LEVEL',
var_default=api_levels[-1],
ask_for_var=('Please specify the Android SDK API level to use. '
'[Available levels: %s]') % api_levels,
check_success=valid_api_level,
error_msg='Android-%s is not present in the SDK path.')
build_tools = os.path.join(android_sdk_home_path, 'build-tools')
versions = sorted(os.listdir(build_tools))
def valid_build_tools(version):
return os.path.exists(
os.path.join(android_sdk_home_path, 'build-tools', version))
android_build_tools_version = prompt_loop_or_load_from_env(
environ_cp,
var_name='ANDROID_BUILD_TOOLS_VERSION',
var_default=versions[-1],
ask_for_var=('Please specify an Android build tools version to use. '
'[Available versions: %s]') % versions,
check_success=valid_build_tools,
error_msg=('The selected SDK does not have build-tools version %s '
'available.'))
write_action_env_to_bazelrc('ANDROID_BUILD_TOOLS_VERSION',
android_build_tools_version)
write_action_env_to_bazelrc('ANDROID_SDK_API_LEVEL', android_api_level)
write_action_env_to_bazelrc('ANDROID_SDK_HOME', android_sdk_home_path)
def get_ndk_api_level(environ_cp, android_ndk_home_path):
"""Gets the appropriate NDK API level to use for the provided Android NDK path."""
# First check to see if we're using a blessed version of the NDK.
properties_path = '%s/source.properties' % android_ndk_home_path
if is_windows() or is_cygwin():
properties_path = cygpath(properties_path)
with open(properties_path, 'r') as f:
filedata = f.read()
revision = re.search(r'Pkg.Revision = (\d+)', filedata)
if revision:
ndk_version = revision.group(1)
else:
raise Exception('Unable to parse NDK revision.')
if int(ndk_version) not in _SUPPORTED_ANDROID_NDK_VERSIONS:
print('WARNING: The NDK version in %s is %s, which is not '
'supported by Bazel (officially supported versions: %s). Please use '
'another version. Compiling Android targets may result in confusing '
'errors.\n' %
(android_ndk_home_path, ndk_version, _SUPPORTED_ANDROID_NDK_VERSIONS))
# Now grab the NDK API level to use. Note that this is different from the
# SDK API level, as the NDK API level is effectively the *min* target SDK
# version.
platforms = os.path.join(android_ndk_home_path, 'platforms')
api_levels = sorted(os.listdir(platforms))
api_levels = [
x.replace('android-', '') for x in api_levels if 'android-' in x
]
def valid_api_level(api_level):
return os.path.exists(
os.path.join(android_ndk_home_path, 'platforms',
'android-' + api_level))
android_ndk_api_level = prompt_loop_or_load_from_env(
environ_cp,
var_name='ANDROID_NDK_API_LEVEL',
var_default='21', # 21 is required for ARM64 support.
ask_for_var=('Please specify the (min) Android NDK API level to use. '
'[Available levels: %s]') % api_levels,
check_success=valid_api_level,
error_msg='Android-%s is not present in the NDK path.')
return android_ndk_api_level
def set_gcc_host_compiler_path(environ_cp):
"""Set GCC_HOST_COMPILER_PATH."""
default_gcc_host_compiler_path = which('gcc') or ''
cuda_bin_symlink = '%s/bin/gcc' % environ_cp.get('CUDA_TOOLKIT_PATH')
if os.path.islink(cuda_bin_symlink):
# os.readlink is only available in linux
default_gcc_host_compiler_path = os.path.realpath(cuda_bin_symlink)
gcc_host_compiler_path = prompt_loop_or_load_from_env(
environ_cp,
var_name='GCC_HOST_COMPILER_PATH',
var_default=default_gcc_host_compiler_path,
ask_for_var='Please specify which gcc should be used by nvcc as the host compiler.',
check_success=os.path.exists,
resolve_symlinks=True,
error_msg='Invalid gcc path. %s cannot be found.',
)
write_action_env_to_bazelrc('GCC_HOST_COMPILER_PATH', gcc_host_compiler_path)
def reformat_version_sequence(version_str, sequence_count):
"""Reformat the version string to have the given number of sequences.
For example:
Given (7, 2) -> 7.0
(7.0.1, 2) -> 7.0
(5, 1) -> 5
(5.0.3.2, 1) -> 5
Args:
version_str: String, the version string.
sequence_count: int, an integer.
Returns:
string, reformatted version string.
"""
v = version_str.split('.')
if len(v) < sequence_count:
v = v + (['0'] * (sequence_count - len(v)))
return '.'.join(v[:sequence_count])
def set_tf_cuda_paths(environ_cp):
"""Set TF_CUDA_PATHS."""
ask_cuda_paths = (
'Please specify the comma-separated list of base paths to look for CUDA '
'libraries and headers. [Leave empty to use the default]: ')
tf_cuda_paths = get_from_env_or_user_or_default(environ_cp, 'TF_CUDA_PATHS',
ask_cuda_paths, '')
if tf_cuda_paths:
environ_cp['TF_CUDA_PATHS'] = tf_cuda_paths
def set_tf_cuda_version(environ_cp):
"""Set TF_CUDA_VERSION."""
ask_cuda_version = (
'Please specify the CUDA SDK version you want to use. '
'[Leave empty to default to CUDA %s]: ') % _DEFAULT_CUDA_VERSION
tf_cuda_version = get_from_env_or_user_or_default(environ_cp,
'TF_CUDA_VERSION',
ask_cuda_version,
_DEFAULT_CUDA_VERSION)
environ_cp['TF_CUDA_VERSION'] = tf_cuda_version
def set_tf_cudnn_version(environ_cp):
"""Set TF_CUDNN_VERSION."""
ask_cudnn_version = (
'Please specify the cuDNN version you want to use. '
'[Leave empty to default to cuDNN %s]: ') % _DEFAULT_CUDNN_VERSION
tf_cudnn_version = get_from_env_or_user_or_default(environ_cp,
'TF_CUDNN_VERSION',
ask_cudnn_version,
_DEFAULT_CUDNN_VERSION)
environ_cp['TF_CUDNN_VERSION'] = tf_cudnn_version
def is_cuda_compatible(lib, cuda_ver, cudnn_ver):
"""Check compatibility between given library and cudnn/cudart libraries."""
ldd_bin = which('ldd') or '/usr/bin/ldd'
ldd_out = run_shell([ldd_bin, lib], True)
ldd_out = ldd_out.split(os.linesep)
cudnn_pattern = re.compile('.*libcudnn.so\\.?(.*) =>.*$')
cuda_pattern = re.compile('.*libcudart.so\\.?(.*) =>.*$')
cudnn = None
cudart = None
cudnn_ok = True # assume no cudnn dependency by default
cuda_ok = True # assume no cuda dependency by default
for line in ldd_out:
if 'libcudnn.so' in line:
cudnn = cudnn_pattern.search(line)
cudnn_ok = False
elif 'libcudart.so' in line:
cudart = cuda_pattern.search(line)
cuda_ok = False
if cudnn and len(cudnn.group(1)):
cudnn = convert_version_to_int(cudnn.group(1))
if cudart and len(cudart.group(1)):
cudart = convert_version_to_int(cudart.group(1))
if cudnn is not None:
cudnn_ok = (cudnn == cudnn_ver)
if cudart is not None:
cuda_ok = (cudart == cuda_ver)
return cudnn_ok and cuda_ok
def set_tf_tensorrt_version(environ_cp):
"""Set TF_TENSORRT_VERSION."""
if not is_linux():
raise ValueError('Currently TensorRT is only supported on Linux platform.')
if not int(environ_cp.get('TF_NEED_TENSORRT', False)):
return
ask_tensorrt_version = (
'Please specify the TensorRT version you want to use. '
'[Leave empty to default to TensorRT %s]: ') % _DEFAULT_TENSORRT_VERSION
tf_tensorrt_version = get_from_env_or_user_or_default(
environ_cp, 'TF_TENSORRT_VERSION', ask_tensorrt_version,
_DEFAULT_TENSORRT_VERSION)
environ_cp['TF_TENSORRT_VERSION'] = tf_tensorrt_version
def set_tf_nccl_version(environ_cp):
"""Set TF_NCCL_VERSION."""
if not is_linux():
raise ValueError('Currently NCCL is only supported on Linux platform.')
if 'TF_NCCL_VERSION' in environ_cp:
return
ask_nccl_version = (
'Please specify the locally installed NCCL version you want to use. '
'[Leave empty to use http://github.com/nvidia/nccl]: ')
tf_nccl_version = get_from_env_or_user_or_default(environ_cp,
'TF_NCCL_VERSION',
ask_nccl_version, '')
environ_cp['TF_NCCL_VERSION'] = tf_nccl_version
def get_native_cuda_compute_capabilities(environ_cp):
"""Get native cuda compute capabilities.
Args:
environ_cp: copy of the os.environ.
Returns:
string of native cuda compute capabilities, separated by comma.
"""
device_query_bin = os.path.join(
environ_cp.get('CUDA_TOOLKIT_PATH'), 'extras/demo_suite/deviceQuery')
if os.path.isfile(device_query_bin) and os.access(device_query_bin, os.X_OK):
try:
output = run_shell(device_query_bin).split('\n')
pattern = re.compile('[0-9]*\\.[0-9]*')
output = [pattern.search(x) for x in output if 'Capability' in x]
output = ','.join(x.group() for x in output if x is not None)
except subprocess.CalledProcessError:
output = ''
else:
output = ''
return output
def set_tf_cuda_compute_capabilities(environ_cp):
"""Set TF_CUDA_COMPUTE_CAPABILITIES."""
while True:
native_cuda_compute_capabilities = get_native_cuda_compute_capabilities(
environ_cp)
if not native_cuda_compute_capabilities:
default_cuda_compute_capabilities = _DEFAULT_CUDA_COMPUTE_CAPABILITIES
else:
default_cuda_compute_capabilities = native_cuda_compute_capabilities
ask_cuda_compute_capabilities = (
'Please specify a list of comma-separated CUDA compute capabilities '
'you want to build with.\nYou can find the compute capability of your '
'device at: https://developer.nvidia.com/cuda-gpus. Each capability '
'can be specified as "x.y" or "compute_xy" to include both virtual and'
' binary GPU code, or as "sm_xy" to only include the binary '
'code.\nPlease note that each additional compute capability '
'significantly increases your build time and binary size, and that '
'TensorFlow only supports compute capabilities >= 3.5 [Default is: '
'%s]: ' % default_cuda_compute_capabilities)
tf_cuda_compute_capabilities = get_from_env_or_user_or_default(
environ_cp, 'TF_CUDA_COMPUTE_CAPABILITIES',
ask_cuda_compute_capabilities, default_cuda_compute_capabilities)
# Check whether all capabilities from the input is valid
all_valid = True
# Remove all whitespace characters before splitting the string
# that users may insert by accident, as this will result in error
tf_cuda_compute_capabilities = ''.join(tf_cuda_compute_capabilities.split())
for compute_capability in tf_cuda_compute_capabilities.split(','):
m = re.match('[0-9]+.[0-9]+', compute_capability)
if not m:
# We now support sm_35,sm_50,sm_60,compute_70.
sm_compute_match = re.match('(sm|compute)_?([0-9]+[0-9]+)',
compute_capability)
if not sm_compute_match:
print('Invalid compute capability: %s' % compute_capability)
all_valid = False
else:
ver = int(sm_compute_match.group(2))
if ver < 30:
print(
'ERROR: TensorFlow only supports small CUDA compute'
' capabilities of sm_30 and higher. Please re-specify the list'
' of compute capabilities excluding version %s.' % ver)
all_valid = False
if ver < 35:
print('WARNING: XLA does not support CUDA compute capabilities '
'lower than sm_35. Disable XLA when running on older GPUs.')
else:
ver = float(m.group(0))
if ver < 3.0:
print('ERROR: TensorFlow only supports CUDA compute capabilities 3.0 '
'and higher. Please re-specify the list of compute '
'capabilities excluding version %s.' % ver)
all_valid = False
if ver < 3.5:
print('WARNING: XLA does not support CUDA compute capabilities '
'lower than 3.5. Disable XLA when running on older GPUs.')
if all_valid:
break
# Reset and Retry
environ_cp['TF_CUDA_COMPUTE_CAPABILITIES'] = ''
# Set TF_CUDA_COMPUTE_CAPABILITIES
environ_cp['TF_CUDA_COMPUTE_CAPABILITIES'] = tf_cuda_compute_capabilities
write_action_env_to_bazelrc('TF_CUDA_COMPUTE_CAPABILITIES',
tf_cuda_compute_capabilities)
def set_other_cuda_vars(environ_cp):
"""Set other CUDA related variables."""
# If CUDA is enabled, always use GPU during build and test.
if environ_cp.get('TF_CUDA_CLANG') == '1':
write_to_bazelrc('build --config=cuda_clang')
else:
write_to_bazelrc('build --config=cuda')
def set_host_cxx_compiler(environ_cp):
"""Set HOST_CXX_COMPILER."""
default_cxx_host_compiler = which('g++') or ''
host_cxx_compiler = prompt_loop_or_load_from_env(
environ_cp,
var_name='HOST_CXX_COMPILER',
var_default=default_cxx_host_compiler,
ask_for_var=('Please specify which C++ compiler should be used as the '
'host C++ compiler.'),
check_success=os.path.exists,
error_msg='Invalid C++ compiler path. %s cannot be found.',
)
write_action_env_to_bazelrc('HOST_CXX_COMPILER', host_cxx_compiler)
def set_host_c_compiler(environ_cp):
"""Set HOST_C_COMPILER."""
default_c_host_compiler = which('gcc') or ''
host_c_compiler = prompt_loop_or_load_from_env(
environ_cp,
var_name='HOST_C_COMPILER',
var_default=default_c_host_compiler,
ask_for_var=('Please specify which C compiler should be used as the host '
'C compiler.'),
check_success=os.path.exists,
error_msg='Invalid C compiler path. %s cannot be found.',
)
write_action_env_to_bazelrc('HOST_C_COMPILER', host_c_compiler)
def system_specific_test_config(environ_cp):
"""Add default build and test flags required for TF tests to bazelrc."""
write_to_bazelrc('test --flaky_test_attempts=3')
write_to_bazelrc('test --test_size_filters=small,medium')
# Each instance of --test_tag_filters or --build_tag_filters overrides all
# previous instances, so we need to build up a complete list and write a
# single list of filters for the .bazelrc file.
# Filters to use with both --test_tag_filters and --build_tag_filters
test_and_build_filters = ['-benchmark-test', '-no_oss']
# Additional filters for --test_tag_filters beyond those in
# test_and_build_filters
test_only_filters = ['-oss_serial']
if is_windows():
test_and_build_filters.append('-no_windows')
if ((environ_cp.get('TF_NEED_CUDA', None) == '1') or
(environ_cp.get('TF_NEED_ROCM', None) == '1')):
test_and_build_filters += ['-no_windows_gpu', '-no_gpu']
else:
test_and_build_filters.append('-gpu')
elif is_macos():
test_and_build_filters += ['-gpu', '-nomac', '-no_mac']
elif is_linux():
if ((environ_cp.get('TF_NEED_CUDA', None) == '1') or
(environ_cp.get('TF_NEED_ROCM', None) == '1')):
test_and_build_filters.append('-no_gpu')
write_to_bazelrc('test --test_env=LD_LIBRARY_PATH')
else:
test_and_build_filters.append('-gpu')
# Disable tests with "v1only" tag in "v2" Bazel config, but not in "v1" config
write_to_bazelrc('test:v1 --test_tag_filters=%s' %
','.join(test_and_build_filters + test_only_filters))
write_to_bazelrc('test:v1 --build_tag_filters=%s' %
','.join(test_and_build_filters))
write_to_bazelrc(
'test:v2 --test_tag_filters=%s' %
','.join(test_and_build_filters + test_only_filters + ['-v1only']))
write_to_bazelrc('test:v2 --build_tag_filters=%s' %
','.join(test_and_build_filters + ['-v1only']))
def set_system_libs_flag(environ_cp):
syslibs = environ_cp.get('TF_SYSTEM_LIBS', '')
if syslibs:
if ',' in syslibs:
syslibs = ','.join(sorted(syslibs.split(',')))
else:
syslibs = ','.join(sorted(syslibs.split()))
write_action_env_to_bazelrc('TF_SYSTEM_LIBS', syslibs)
for varname in ('PREFIX', 'LIBDIR', 'INCLUDEDIR', 'PROTOBUF_INCLUDE_PATH'):
if varname in environ_cp:
write_to_bazelrc('build --define=%s=%s' % (varname, environ_cp[varname]))
def set_windows_build_flags(environ_cp):
"""Set Windows specific build options."""
# First available in VS 16.4. Speeds up Windows compile times by a lot. See
# https://groups.google.com/a/tensorflow.org/d/topic/build/SsW98Eo7l3o/discussion
# pylint: disable=line-too-long
write_to_bazelrc('build --copt=/d2ReducedOptimizeHugeFunctions --host_copt=/d2ReducedOptimizeHugeFunctions')
if get_var(
environ_cp, 'TF_OVERRIDE_EIGEN_STRONG_INLINE', 'Eigen strong inline',
True, ('Would you like to override eigen strong inline for some C++ '
'compilation to reduce the compilation time?'),
'Eigen strong inline overridden.', 'Not overriding eigen strong inline, '
'some compilations could take more than 20 mins.'):
# Due to a known MSVC compiler issue
# https://github.com/tensorflow/tensorflow/issues/10521
# Overriding eigen strong inline speeds up the compiling of
# conv_grad_ops_3d.cc and conv_ops_3d.cc by 20 minutes,
# but this also hurts the performance. Let users decide what they want.
write_to_bazelrc('build --define=override_eigen_strong_inline=true')
def config_info_line(name, help_text):
"""Helper function to print formatted help text for Bazel config options."""
print('\t--config=%-12s\t# %s' % (name, help_text))
def configure_ios():
"""Configures TensorFlow for iOS builds.
This function will only be executed if `is_macos()` is true.
"""
if not is_macos():
return
for filepath in APPLE_BAZEL_FILES:
existing_filepath = os.path.join(_TF_WORKSPACE_ROOT, filepath + '.apple')
renamed_filepath = os.path.join(_TF_WORKSPACE_ROOT, filepath)
symlink_force(existing_filepath, renamed_filepath)
for filepath in IOS_FILES:
filename = os.path.basename(filepath)
new_filepath = os.path.join(_TF_WORKSPACE_ROOT, filename)
symlink_force(filepath, new_filepath)
def validate_cuda_config(environ_cp):
"""Run find_cuda_config.py and return cuda_toolkit_path, or None."""
def maybe_encode_env(env):
"""Encodes unicode in env to str on Windows python 2.x."""
if not is_windows() or sys.version_info[0] != 2:
return env
for k, v in env.items():
if isinstance(k, unicode):
k = k.encode('ascii')
if isinstance(v, unicode):
v = v.encode('ascii')
env[k] = v
return env
cuda_libraries = ['cuda', 'cudnn']
if is_linux():
if int(environ_cp.get('TF_NEED_TENSORRT', False)):
cuda_libraries.append('tensorrt')
if environ_cp.get('TF_NCCL_VERSION', None):
cuda_libraries.append('nccl')
proc = subprocess.Popen(
[environ_cp['PYTHON_BIN_PATH'], 'third_party/gpus/find_cuda_config.py'] +
cuda_libraries,
stdout=subprocess.PIPE,
env=maybe_encode_env(environ_cp))
if proc.wait():
# Errors from find_cuda_config.py were sent to stderr.
print('Asking for detailed CUDA configuration...\n')
return False
config = dict(
tuple(line.decode('ascii').rstrip().split(': ')) for line in proc.stdout)
print('Found CUDA %s in:' % config['cuda_version'])
print(' %s' % config['cuda_library_dir'])
print(' %s' % config['cuda_include_dir'])
print('Found cuDNN %s in:' % config['cudnn_version'])
print(' %s' % config['cudnn_library_dir'])
print(' %s' % config['cudnn_include_dir'])
if 'tensorrt_version' in config:
print('Found TensorRT %s in:' % config['tensorrt_version'])
print(' %s' % config['tensorrt_library_dir'])
print(' %s' % config['tensorrt_include_dir'])
if config.get('nccl_version', None):
print('Found NCCL %s in:' % config['nccl_version'])
print(' %s' % config['nccl_library_dir'])
print(' %s' % config['nccl_include_dir'])
print('\n')
environ_cp['CUDA_TOOLKIT_PATH'] = config['cuda_toolkit_path']
return True
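# For reference (hedged sketch; the exact values are illustrative): the parsing
# above expects find_cuda_config.py to print one "key: value" pair per line, e.g.
#   cuda_version: 11.2
#   cuda_library_dir: /usr/local/cuda/lib64
#   cuda_include_dir: /usr/local/cuda/include
#   cudnn_version: 8
#   cudnn_library_dir: /usr/lib/x86_64-linux-gnu
#   cudnn_include_dir: /usr/include
#   cuda_toolkit_path: /usr/local/cuda
# plus optional tensorrt_* / nccl_* entries when those libraries are requested.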
def main():
global _TF_WORKSPACE_ROOT
global _TF_BAZELRC
global _TF_CURRENT_BAZEL_VERSION
parser = argparse.ArgumentParser()
parser.add_argument(
'--workspace',
type=str,
default=os.path.abspath(os.path.dirname(__file__)),
help='The absolute path to your active Bazel workspace.')
args = parser.parse_args()
_TF_WORKSPACE_ROOT = args.workspace
_TF_BAZELRC = os.path.join(_TF_WORKSPACE_ROOT, _TF_BAZELRC_FILENAME)
  # Make a copy of os.environ so that it is clear when functions are getting and
  # setting environment variables.
environ_cp = dict(os.environ)
try:
current_bazel_version = check_bazel_version(_TF_MIN_BAZEL_VERSION,
_TF_MAX_BAZEL_VERSION)
except subprocess.CalledProcessError as e:
print('Error checking bazel version: ', e.output.decode('UTF-8').strip())
raise e
_TF_CURRENT_BAZEL_VERSION = convert_version_to_int(current_bazel_version)
reset_tf_configure_bazelrc()
cleanup_makefile()
setup_python(environ_cp)
if is_windows():
environ_cp['TF_NEED_OPENCL'] = '0'
environ_cp['TF_CUDA_CLANG'] = '0'
environ_cp['TF_NEED_TENSORRT'] = '0'
# TODO(ibiryukov): Investigate using clang as a cpu or cuda compiler on
# Windows.
environ_cp['TF_DOWNLOAD_CLANG'] = '0'
environ_cp['TF_NEED_MPI'] = '0'
if is_macos():
environ_cp['TF_NEED_TENSORRT'] = '0'
else:
environ_cp['TF_CONFIGURE_IOS'] = '0'
if environ_cp.get('TF_ENABLE_XLA', '1') == '1':
write_to_bazelrc('build --config=xla')
set_action_env_var(
environ_cp, 'TF_NEED_ROCM', 'ROCm', False, bazel_config_name='rocm')
if (environ_cp.get('TF_NEED_ROCM') == '1' and
'LD_LIBRARY_PATH' in environ_cp and
environ_cp.get('LD_LIBRARY_PATH') != '1'):
write_action_env_to_bazelrc('LD_LIBRARY_PATH',
environ_cp.get('LD_LIBRARY_PATH'))
if (environ_cp.get('TF_NEED_ROCM') == '1' and environ_cp.get('ROCM_PATH')):
write_action_env_to_bazelrc('ROCM_PATH', environ_cp.get('ROCM_PATH'))
write_action_env_to_bazelrc('ROCM_ROOT', environ_cp.get('ROCM_PATH'))
if ((environ_cp.get('TF_NEED_ROCM') == '1') and
(environ_cp.get('TF_ENABLE_MLIR_GENERATED_GPU_KERNELS') == '1')):
write_to_bazelrc(
'build:rocm --define tensorflow_enable_mlir_generated_gpu_kernels=1')
environ_cp['TF_NEED_CUDA'] = str(
int(get_var(environ_cp, 'TF_NEED_CUDA', 'CUDA', False)))
if (environ_cp.get('TF_NEED_CUDA') == '1' and
'TF_CUDA_CONFIG_REPO' not in environ_cp):
set_action_env_var(
environ_cp,
'TF_NEED_TENSORRT',
'TensorRT',
False,
bazel_config_name='tensorrt')
environ_save = dict(environ_cp)
for _ in range(_DEFAULT_PROMPT_ASK_ATTEMPTS):
if validate_cuda_config(environ_cp):
cuda_env_names = [
'TF_CUDA_VERSION',
'TF_CUBLAS_VERSION',
'TF_CUDNN_VERSION',
'TF_TENSORRT_VERSION',
'TF_NCCL_VERSION',
'TF_CUDA_PATHS',
# Items below are for backwards compatibility when not using
# TF_CUDA_PATHS.
'CUDA_TOOLKIT_PATH',
'CUDNN_INSTALL_PATH',
'NCCL_INSTALL_PATH',
'NCCL_HDR_PATH',
'TENSORRT_INSTALL_PATH'
]
# Note: set_action_env_var above already writes to bazelrc.
for name in cuda_env_names:
if name in environ_cp:
write_action_env_to_bazelrc(name, environ_cp[name])
break
# Restore settings changed below if CUDA config could not be validated.
environ_cp = dict(environ_save)
set_tf_cuda_version(environ_cp)
set_tf_cudnn_version(environ_cp)
if is_linux():
set_tf_tensorrt_version(environ_cp)
set_tf_nccl_version(environ_cp)
set_tf_cuda_paths(environ_cp)
else:
raise UserInputError(
          'Invalid CUDA settings were provided %d '
          'times in a row. Assuming this to be a scripting mistake.' %
_DEFAULT_PROMPT_ASK_ATTEMPTS)
set_tf_cuda_compute_capabilities(environ_cp)
if 'LD_LIBRARY_PATH' in environ_cp and environ_cp.get(
'LD_LIBRARY_PATH') != '1':
write_action_env_to_bazelrc('LD_LIBRARY_PATH',
environ_cp.get('LD_LIBRARY_PATH'))
set_tf_cuda_clang(environ_cp)
if environ_cp.get('TF_CUDA_CLANG') == '1':
# Ask whether we should download the clang toolchain.
set_tf_download_clang(environ_cp)
if environ_cp.get('TF_DOWNLOAD_CLANG') != '1':
# Set up which clang we should use as the cuda / host compiler.
set_clang_cuda_compiler_path(environ_cp)
else:
# Use downloaded LLD for linking.
write_to_bazelrc('build:cuda_clang --config=download_clang_use_lld')
else:
# Set up which gcc nvcc should use as the host compiler
# No need to set this on Windows
if not is_windows():
set_gcc_host_compiler_path(environ_cp)
set_other_cuda_vars(environ_cp)
else:
# CUDA not required. Ask whether we should download the clang toolchain and
# use it for the CPU build.
set_tf_download_clang(environ_cp)
# ROCm / CUDA are mutually exclusive.
# At most 1 GPU platform can be configured.
gpu_platform_count = 0
if environ_cp.get('TF_NEED_ROCM') == '1':
gpu_platform_count += 1
if environ_cp.get('TF_NEED_CUDA') == '1':
gpu_platform_count += 1
if gpu_platform_count >= 2:
    raise UserInputError('CUDA / ROCm are mutually exclusive. '
'At most 1 GPU platform can be configured.')
set_cc_opt_flags(environ_cp)
set_system_libs_flag(environ_cp)
if is_windows():
set_windows_build_flags(environ_cp)
if get_var(environ_cp, 'TF_SET_ANDROID_WORKSPACE', 'android workspace', False,
('Would you like to interactively configure ./WORKSPACE for '
'Android builds?'), 'Searching for NDK and SDK installations.',
'Not configuring the WORKSPACE for Android builds.'):
create_android_ndk_rule(environ_cp)
create_android_sdk_rule(environ_cp)
system_specific_test_config(environ_cp)
set_action_env_var(environ_cp, 'TF_CONFIGURE_IOS', 'iOS', False)
if environ_cp.get('TF_CONFIGURE_IOS') == '1':
configure_ios()
print('Preconfigured Bazel build configs. You can use any of the below by '
'adding "--config=<>" to your build command. See .bazelrc for more '
'details.')
config_info_line('mkl', 'Build with MKL support.')
config_info_line('mkl_aarch64', 'Build with oneDNN support for Aarch64.')
config_info_line('monolithic', 'Config for mostly static monolithic build.')
config_info_line('numa', 'Build with NUMA support.')
config_info_line(
'dynamic_kernels',
'(Experimental) Build kernels into separate shared objects.')
config_info_line('v2', 'Build TensorFlow 2.x instead of 1.x.')
print('Preconfigured Bazel build configs to DISABLE default on features:')
config_info_line('noaws', 'Disable AWS S3 filesystem support.')
config_info_line('nogcp', 'Disable GCP support.')
config_info_line('nohdfs', 'Disable HDFS support.')
config_info_line('nonccl', 'Disable NVIDIA NCCL support.')
if __name__ == '__main__':
main()
| 37.024983
| 110
| 0.678537
|
1f8ba258ae0a1c5754d0f1dcd71c148270e0b392
| 4,232
|
py
|
Python
|
gdal/swig/python/samples/val_at_coord.py
|
SarahVasher1/gdal
|
a370ec25625584856886f02fd04e540de048112f
|
[
"MIT"
] | null | null | null |
gdal/swig/python/samples/val_at_coord.py
|
SarahVasher1/gdal
|
a370ec25625584856886f02fd04e540de048112f
|
[
"MIT"
] | null | null | null |
gdal/swig/python/samples/val_at_coord.py
|
SarahVasher1/gdal
|
a370ec25625584856886f02fd04e540de048112f
|
[
"MIT"
] | 1
|
2021-04-26T14:47:38.000Z
|
2021-04-26T14:47:38.000Z
|
#!/usr/bin/env python
###############################################################################
# $Id$
#
# Project: GDAL Python samples
# Purpose: Outputs the value of the raster bands at a given
# (longitude, latitude) or (X, Y) location.
# Author: Even Rouault
#
###############################################################################
# Copyright (c) 2010, Even Rouault <even dot rouault at spatialys.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import sys
from osgeo import gdal
from osgeo import osr
# =============================================================================
def Usage():
print('Usage: val_at_coord.py [-display_xy] [longitude latitude | -coordtype=georef X Y] filename')
print('')
    print('By default, the first two arguments are interpreted as the location')
    print('in longitude, latitude order. If -coordtype=georef is specified,')
    print('the next two values are interpreted as the X and Y coordinates')
    print('in the dataset spatial reference system.')
sys.exit(1)
# =============================================================================
display_xy = False
coordtype_georef = False
longitude = None
latitude = None
filename = None
# =============================================================================
# Parse command line arguments.
# =============================================================================
i = 1
while i < len(sys.argv):
arg = sys.argv[i]
if arg == '-coordtype=georef':
coordtype_georef = True
elif arg == '-display_xy':
display_xy = True
elif longitude is None:
longitude = float(arg)
elif latitude is None:
latitude = float(arg)
elif filename is None:
filename = arg
else:
Usage()
i = i + 1
if longitude is None:
Usage()
if latitude is None:
Usage()
if filename is None:
    Usage()
# Open input dataset
ds = gdal.Open(filename, gdal.GA_ReadOnly)
if ds is None:
print('Cannot open %s' % filename)
sys.exit(1)
# Build Spatial Reference object based on coordinate system, fetched from the
# opened dataset
if coordtype_georef:
X = longitude
Y = latitude
else:
srs = osr.SpatialReference()
srs.ImportFromWkt(ds.GetProjection())
srsLatLong = srs.CloneGeogCS()
# Convert from (longitude,latitude) to projected coordinates
ct = osr.CoordinateTransformation(srsLatLong, srs)
(X, Y, height) = ct.TransformPoint(longitude, latitude)
# Read geotransform matrix and calculate corresponding pixel coordinates
geomatrix = ds.GetGeoTransform()
(success, inv_geomatrix) = gdal.InvGeoTransform(geomatrix)
x = int(inv_geomatrix[0] + inv_geomatrix[1] * X + inv_geomatrix[2] * Y)
y = int(inv_geomatrix[3] + inv_geomatrix[4] * X + inv_geomatrix[5] * Y)
if display_xy:
print('x=%d, y=%d' % (x, y))
if x < 0 or x >= ds.RasterXSize or y < 0 or y >= ds.RasterYSize:
print('Passed coordinates are not in dataset extent')
sys.exit(1)
res = ds.ReadAsArray(x, y, 1, 1)
if len(res.shape) == 2:
print(res[0][0])
else:
for val in res:
print(val[0][0])
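# Example invocations (illustrative file names and coordinates, not part of the
# original sample):
#   python val_at_coord.py -display_xy 2.2945 48.8584 dem.tif
#   python val_at_coord.py -coordtype=georef 448262.0 5411932.0 dem.tif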
| 32.305344
| 103
| 0.61035
|
ec8df9b0c2464979a3176862070c6202b9198305
| 768
|
py
|
Python
|
sa/profiles/Alstec/7200/get_capabilities.py
|
prorevizor/noc
|
37e44b8afc64318b10699c06a1138eee9e7d6a4e
|
[
"BSD-3-Clause"
] | 84
|
2017-10-22T11:01:39.000Z
|
2022-02-27T03:43:48.000Z
|
sa/profiles/Alstec/7200/get_capabilities.py
|
prorevizor/noc
|
37e44b8afc64318b10699c06a1138eee9e7d6a4e
|
[
"BSD-3-Clause"
] | 22
|
2017-12-11T07:21:56.000Z
|
2021-09-23T02:53:50.000Z
|
sa/profiles/Alstec/7200/get_capabilities.py
|
prorevizor/noc
|
37e44b8afc64318b10699c06a1138eee9e7d6a4e
|
[
"BSD-3-Clause"
] | 23
|
2017-12-06T06:59:52.000Z
|
2022-02-24T00:02:25.000Z
|
# ---------------------------------------------------------------------
# Alstec.7200.get_capabilities
# ---------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# NOC modules
from noc.sa.profiles.Generic.get_capabilities import Script as BaseScript
from noc.sa.profiles.Generic.get_capabilities import false_on_cli_error
class Script(BaseScript):
name = "Alstec.7200.get_capabilities"
@false_on_cli_error
def has_lldp_cli(self):
"""
        Check whether the box has LLDP enabled
"""
r = self.cli("show lldp")
return "LLDP transmit/receive disabled on all interfaces." not in r
| 33.391304
| 75
| 0.514323
|
fbd5d1e2e6d9d69fb788b8e63269ede64d71f99b
| 6,796
|
py
|
Python
|
image_master.py
|
YOULOF2/OpenWater
|
646e7a344ccd6b872fe802852866989534931ddc
|
[
"MIT"
] | 1
|
2021-04-12T18:23:30.000Z
|
2021-04-12T18:23:30.000Z
|
image_master.py
|
YOULOF2/OpenWater
|
646e7a344ccd6b872fe802852866989534931ddc
|
[
"MIT"
] | null | null | null |
image_master.py
|
YOULOF2/OpenWater
|
646e7a344ccd6b872fe802852866989534931ddc
|
[
"MIT"
] | null | null | null |
import os
from PIL import Image, ImageFont, ImageDraw, ImageTk
import shutil
from time import sleep
from typing import Union
from pathlib import Path
from datetime import datetime
class ImageMaster:
def __init__(self):
self.default_img = "assets/images/default_imgs/default_img.png"
self.preview_img_loc = "assets/images/preview_imgs"
self.uploaded_img_loc = r"assets/images/uploaded/uploaded_img.png"
self.preview_watermarks_loc = "assets/images/preview_watermarks"
self.custom_fonts_loc = "assets/fonts/uploaded_fonts"
self.default_font = "Arial.ttf"
self.current_font_list = sorted(Path(self.custom_fonts_loc).iterdir(), key=os.path.getmtime)
self.preview_watermarks = sorted(Path(self.preview_watermarks_loc).iterdir(), key=os.path.getmtime)
self.width, self.height = self.get_internal_image()
self.image_scale = 1
        self.default_sizing = self.default_sizing()
def default_sizing(self):
if int(self.width) > 2000:
self.image_scale = 6
return int(self.width / 6), int(self.height / 6)
elif 800 <= int(self.width) < 2000:
self.image_scale = 2
return int(self.width / 2), int(self.height / 2)
elif self.height > 800:
self.image_scale = 1.5
return int(self.width / 1.5), int(self.height / 1.5)
else:
self.image_scale = 1
return self.width, self.height
def get_internal_image(self):
try:
image_tuple = Image.open(self.uploaded_img_loc).size
except FileNotFoundError:
image_tuple = Image.open(self.default_img).size
return image_tuple
else:
return image_tuple
def refresh_pre_watermarks(self):
self.preview_watermarks = sorted(Path(self.preview_watermarks_loc).iterdir(), key=os.path.getmtime)
def transfer_file(self, original_dir, file_type):
if file_type == "img":
original = original_dir
target = self.uploaded_img_loc
shutil.copyfile(original, target)
elif file_type == "font":
with open(original_dir) as file:
file_name = os.path.basename(file.name)
original = original_dir
target = f"{self.custom_fonts_loc}/{file_name}"
shutil.copyfile(original, target)
self.current_font_list = sorted(Path(self.custom_fonts_loc).iterdir(), key=os.path.getmtime)
def get_last_preview_watermarks(self):
self.refresh_pre_watermarks()
self.preview_watermarks.reverse()
last_watermark_name = self.preview_watermarks[0]
return last_watermark_name
def get_image_object(self):
"""
        Returns a PhotoImage of the latest preview image. If no preview images are
        found, falls back to the uploaded image, or to the default image when
        nothing has been uploaded.
        :return: an ImageTk.PhotoImage resized to the default sizing
"""
self.refresh_pre_watermarks()
if len(self.preview_watermarks) > 0:
max_no = 0
for watermark_file in self.preview_watermarks:
splitted_file_no = int(watermark_file.stem.split("-")[1].split(".")[0])
if splitted_file_no > max_no:
max_no = splitted_file_no
return ImageTk.PhotoImage(Image.open(f"assets/images/preview_imgs/preview_img-{max_no}.png")
.resize(self.default_sizing, Image.ANTIALIAS))
elif len(os.listdir("assets/images/uploaded")) == 1:
return ImageTk.PhotoImage(Image.open(self.uploaded_img_loc)
.resize(self.default_sizing, Image.ANTIALIAS))
else:
return ImageTk.PhotoImage(Image.open(self.default_img)
.resize(self.default_sizing, Image.ANTIALIAS))
def text_to_image(self, font_name: str, text: str, coordinates: tuple, colour: Union[str, tuple], font_size: int,
rotation_angle):
"""
This method does 2 things:
        1. It takes the parameters given and generates a transparent image with text on it
        2. It then merges the uploaded image with the generated transparent watermark image and saves it.
        If the colour argument is None, it defaults to black
:param rotation_angle:
:param font_name:
:param text:
:param coordinates:
:param colour:
:param font_size:
:return:
"""
self.refresh_pre_watermarks()
font = ImageFont.truetype(f"assets/fonts/uploaded_fonts/{font_name}", font_size)
img = Image.new("RGBA", (self.width, self.height))
draw = ImageDraw.Draw(img)
draw.text(coordinates, text, colour, font=font)
final_img = img.rotate(rotation_angle)
if len(self.preview_watermarks) == 0:
final_img.save("assets/images/preview_watermarks/watermark-1.png")
else:
last_img_name = self.get_last_preview_watermarks()
splitted_img_name = last_img_name.stem.split("-")[1].split(".")[0]
new_str = int(splitted_img_name) + 1
final_img.save(f"assets/images/preview_watermarks/watermark-{new_str}.png")
self.preview_watermarks.reverse()
self.refresh_pre_watermarks()
last_watermark = str(self.get_last_preview_watermarks()).split("\\")[-1]
foreground = Image.open(f"assets/images/preview_watermarks/{last_watermark}")
raw_img = Image.open("assets/images/uploaded/uploaded_img.png")
self.preview_watermarks.reverse()
background = raw_img.convert("RGBA")
sleep(1)
max_no = 0
for file in self.preview_watermarks:
splitted_file_no = int(file.stem.split("-")[1].split(".")[0])
if splitted_file_no > max_no:
max_no = splitted_file_no
Image.alpha_composite(background, foreground).save(f"assets/images/preview_imgs/preview_img-{max_no}.png")
def save_image(self, save_to_loc):
time = str(datetime.now().strftime("%I%p-%d-%m"))
max_no = 1
for file in self.preview_watermarks:
splitted_file_no = int(file.stem.split("-")[1].split(".")[0])
if splitted_file_no > max_no:
max_no = splitted_file_no
original = f"{self.preview_img_loc}/preview_img-{max_no}.png"
target = f"{save_to_loc}/{time}-watermarked.png"
try:
shutil.copyfile(original, target)
self.clean_up()
except FileNotFoundError:
pass
def clean_up(self):
for file in os.listdir(self.preview_watermarks_loc):
os.remove(f"assets/images/preview_watermarks/{file}")
for file in os.listdir(self.preview_img_loc):
os.remove(f"assets/images/preview_imgs/{file}")
| 43.564103
| 117
| 0.631254
|
cef57cf50cf4f428bd015d470a55ea566aa2e652
| 2,912
|
py
|
Python
|
solargis/request/system.py
|
power-guard/solargis-py
|
42c919fccc6f9abf05441a1ffa75633cff1eab23
|
[
"MIT"
] | null | null | null |
solargis/request/system.py
|
power-guard/solargis-py
|
42c919fccc6f9abf05441a1ffa75633cff1eab23
|
[
"MIT"
] | null | null | null |
solargis/request/system.py
|
power-guard/solargis-py
|
42c919fccc6f9abf05441a1ffa75633cff1eab23
|
[
"MIT"
] | null | null | null |
import datetime as dt
import xml.etree.ElementTree as ET
from enum import Enum
from solargis.abstractelement import AbstractElement
from solargis.validator import Validator
class InstallationType(Enum):
"""This property of the PV system helps to estimate how
modules are cooled by air. For sloped roof with PV modules
on rails tilted at the same angle as the roof choose
'ROOF_MOUNTED' value. For PV modules incorporated into
building facade choose 'BUILDING_INTEGRATED' value.
    This option is considered the worst ventilated. The best
    ventilated option is 'FREE_STANDING' installation, which
    typically means a stand-alone installation on tilted racks
    anchored into the ground. Also choose this
option if a PV system is installed on a flat roof."""
FREE_STANDING = 'FREE_STANDING'
ROOF_MOUNTED = 'ROOF_MOUNTED'
BUILDING_INTEGRATED = 'BUILDING_INTEGRATED'
class System(AbstractElement):
"""Parametrization of the PV system. Required for simulating PVOUT parameter."""
DATE_FORMAT = '%Y-%m-%d'
def __init__(self,
installed_power: float,
module,
inverter,
losses,
topology=None,
installation_type: InstallationType = None,
date_startup: dt.date = None,
self_shading: bool = None):
Validator.not_none(module, 'module')
Validator.not_none(inverter, 'inverter')
Validator.not_none(losses, 'losses')
Validator.greater_than(installed_power, 0, 'installed power')
if installation_type is not None:
Validator.value_in_enum(installation_type, InstallationType)
self.installed_power = installed_power
self.module = module
self.inverter = inverter
self.losses = losses
self.topology = topology
self.installation_type = installation_type
self.date_startup = date_startup
self.self_shading = self_shading
self.element_name = 'system'
self.prefix = 'pv'
def to_element(self):
attributes = dict()
attributes['installedPower'] = str(self.installed_power)
if self.installation_type is not None:
attributes['installationType'] = self.installation_type.value
if self.date_startup is not None:
attributes['dateStartup'] = self.date_startup.strftime(self.DATE_FORMAT)
if self.self_shading is not None:
attributes['selfShading'] = str(self.self_shading).lower()
system = ET.Element(self.get_element_name(), attrib=attributes)
system.append(self.module.to_element())
system.append(self.inverter.to_element())
system.append(self.losses.to_element())
if self.topology is not None:
system.append(self.topology.to_element())
return system
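# Hedged sketch of how this element is typically assembled (the module, inverter
# and losses objects come from sibling modules that are not shown here):
#   system = System(installed_power=5.0, module=module, inverter=inverter,
#                   losses=losses,
#                   installation_type=InstallationType.ROOF_MOUNTED,
#                   date_startup=dt.date(2020, 1, 1), self_shading=True)
#   xml_bytes = ET.tostring(system.to_element())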
| 35.950617
| 84
| 0.673077
|
84ce3ed06e5c4fe52d1ab48ea94e10c9f8a015a8
| 2,370
|
py
|
Python
|
src/lm_based/extract_start_end_from_lm.py
|
vered1986/time_expressions
|
32d182d7f741eec007141f5ca89c0d419e23a9a7
|
[
"Apache-2.0"
] | 1
|
2022-02-25T15:00:42.000Z
|
2022-02-25T15:00:42.000Z
|
src/lm_based/extract_start_end_from_lm.py
|
vered1986/time_expressions
|
32d182d7f741eec007141f5ca89c0d419e23a9a7
|
[
"Apache-2.0"
] | null | null | null |
src/lm_based/extract_start_end_from_lm.py
|
vered1986/time_expressions
|
32d182d7f741eec007141f5ca89c0d419e23a9a7
|
[
"Apache-2.0"
] | null | null | null |
import os
import json
import argparse
from transformers import pipeline
from src.lm_based.common import compute_distribution
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--out_dir", default="output/lm_based", type=str, required=False, help="Output directory")
parser.add_argument("--device", default=-1, type=int, required=False, help="GPU device or -1 for CPU")
parser.add_argument("--lang", default=None, type=str, required=False,
help="Language code. If not specified, computes for all")
args = parser.parse_args()
# Load multilingual BERT
unmasker = pipeline('fill-mask', model='bert-base-multilingual-cased', device=args.device)
    # Determine which languages to process
if args.lang is not None:
langs = [args.lang]
else:
langs = [file.replace(".txt", "") for file in os.listdir("data/templates/distribution")]
# Iterate over languages
for lang in langs:
print(lang)
templates = json.load(open(f"data/templates/start_end/{lang}.json"))
templates = {edge: [template for template in curr_templates if "[MASK]" in template]
for edge, curr_templates in templates.items()}
ampm_map = None
# This language uses 12hr clock
if os.path.exists(f"data/ampm/{lang}.json"):
ampm_map = json.load(open(f"data/ampm/{lang}.json"))
max_num = 12
else:
max_num = 23
# Build the numbers map
numbers_map = {str(num): num for num in range(0, max_num + 1)}
numbers_map.update({"0" + str(num): num for num in range(0, 10)})
time_expressions = [line.strip().split("\t") for line in open(f"data/time_expressions/{lang}.txt")]
time_expressions_map = {en: other.split("|") for en, other in time_expressions}
# Compute the distribution
grounding = {}
for edge, curr_templates in templates.items():
grounding[edge] = compute_distribution(
unmasker, curr_templates, numbers_map, time_expressions_map, ampm_map)
grounding = {exp: {edge: grounding[edge][exp] for edge in ["start", "end"]} for exp in grounding["end"].keys()}
with open(f"{args.out_dir}/{lang}_start_end.json", "w") as f_out:
json.dump(grounding, f_out)
if __name__ == '__main__':
main()
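# For reference (illustrative output; the scores are made up): the fill-mask
# pipeline used above returns a ranked list of candidate tokens per template, e.g.
#   unmasker("Morning starts at [MASK] o'clock.")
#   # -> [{"token_str": "8", "score": 0.21, ...}, {"token_str": "7", "score": 0.13, ...}, ...]
# compute_distribution (imported above) is assumed to aggregate these candidate
# scores over the numbers_map / time_expressions_map entries.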
| 38.225806
| 119
| 0.637553
|
f13ecd4c6902d12617217a60efc2e87bcef0f621
| 805
|
py
|
Python
|
openstack_dashboard/dashboards/admin/volume_groups/panel.py
|
stackhpc/horizon
|
0899f67657e0be62dd9e6be327c63bccb4607dc6
|
[
"Apache-2.0"
] | 930
|
2015-01-04T08:06:03.000Z
|
2022-03-13T18:47:13.000Z
|
openstack_dashboard/dashboards/admin/volume_groups/panel.py
|
stackhpc/horizon
|
0899f67657e0be62dd9e6be327c63bccb4607dc6
|
[
"Apache-2.0"
] | 26
|
2015-02-23T16:37:31.000Z
|
2020-07-02T08:37:41.000Z
|
openstack_dashboard/dashboards/admin/volume_groups/panel.py
|
stackhpc/horizon
|
0899f67657e0be62dd9e6be327c63bccb4607dc6
|
[
"Apache-2.0"
] | 1,040
|
2015-01-01T18:48:28.000Z
|
2022-03-19T08:35:18.000Z
|
# Copyright 2019 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack_dashboard.dashboards.project.volume_groups \
import panel as project_panel
class VolumeGroups(project_panel.VolumeGroups):
policy_rules = (("volume", "context_is_admin"),)
| 38.333333
| 78
| 0.742857
|
1d769523375fb89448159c201d312ff54454f67d
| 1,193
|
py
|
Python
|
src/pylero/wiki_page_attachment.py
|
yuxisun1217/pylero
|
0210eb2243e02ab218f19a224e94eb63081f13e3
|
[
"MIT"
] | null | null | null |
src/pylero/wiki_page_attachment.py
|
yuxisun1217/pylero
|
0210eb2243e02ab218f19a224e94eb63081f13e3
|
[
"MIT"
] | null | null | null |
src/pylero/wiki_page_attachment.py
|
yuxisun1217/pylero
|
0210eb2243e02ab218f19a224e94eb63081f13e3
|
[
"MIT"
] | null | null | null |
# -*- coding: utf8 -*-
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
from pylero.base_polarion import BasePolarion
from pylero.user import User
class WikiPageAttachment(BasePolarion):
"""Object to handle the Polarion WSDL tns3:WikiPageAttachment class
Attributes:
author (User)
file_name (string)
wiki_page_attachment_id (string)
length (long)
title (string)
updated (dateTime)
url (string)
"""
_cls_suds_map = {"author":
{"field_name": "author",
"cls": User},
"file_name": "fileName",
"wiki_page_attachment_id": "id",
"length": "length",
"title": "title",
"updated": "updated",
"url": "url",
"uri": "_uri",
"_unresolved": "_unresolved"}
_obj_client = "tracker_client"
_obj_struct = "tns3:WikiPageAttachment"
class ArrayOfWikiPageAttachment(BasePolarion):
_obj_client = "tracker_client"
_obj_struct = "tns3:ArrayOfWikiPageAttachment"
| 31.394737
| 71
| 0.576697
|
523c44d83408edb8429c86bd3708146454206d4b
| 1,708
|
py
|
Python
|
profilesapi/migrations/0001_initial.py
|
diyajaiswal11/Profile-rest-api
|
b5acbd97be7b1186c9ee9063a96efd1148419229
|
[
"MIT"
] | 3
|
2020-06-04T05:22:39.000Z
|
2020-09-23T19:44:07.000Z
|
profilesapi/migrations/0001_initial.py
|
diyajaiswal11/Profile-rest-api
|
b5acbd97be7b1186c9ee9063a96efd1148419229
|
[
"MIT"
] | null | null | null |
profilesapi/migrations/0001_initial.py
|
diyajaiswal11/Profile-rest-api
|
b5acbd97be7b1186c9ee9063a96efd1148419229
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.6 on 2020-05-05 19:28
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0011_update_proxy_permissions'),
]
operations = [
migrations.CreateModel(
name='UserProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('email', models.EmailField(max_length=255, unique=True)),
('name', models.CharField(max_length=255)),
('is_active', models.BooleanField(default=True)),
('is_staff', models.BooleanField(default=False)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'abstract': False,
},
),
]
| 50.235294
| 266
| 0.638759
|
85c038a796dbb37dd83276f7c0667fa4a560ce58
| 1,499
|
py
|
Python
|
db/sqlite.py
|
DeepCISO/demo-aitextgen-2022-01-31
|
9aee1d7413e8ef4a672dfa7f3efbd4dd2151b285
|
[
"MIT"
] | null | null | null |
db/sqlite.py
|
DeepCISO/demo-aitextgen-2022-01-31
|
9aee1d7413e8ef4a672dfa7f3efbd4dd2151b285
|
[
"MIT"
] | null | null | null |
db/sqlite.py
|
DeepCISO/demo-aitextgen-2022-01-31
|
9aee1d7413e8ef4a672dfa7f3efbd4dd2151b285
|
[
"MIT"
] | null | null | null |
import sqlite3
import time
from sqlite3 import Error
from pprint import pprint
def create_connection(db_file):
conn = None
try:
conn = sqlite3.connect(db_file)
conn.row_factory = sqlite3.Row
except Error as e:
print(e)
return conn
def get_all_tweets(conn):
cursor = conn.cursor()
cursor.execute("SELECT * FROM tweets")
rows = [dict(row) for row in cursor.fetchall()]
return rows
def postprocess_tweets_to_common_format(rows):
common = []
for row in rows:
common_row = create_common_format_dict(
"twitter",
time.mktime(time.strptime(row["created_at"], "%Y-%m-%d %H:%M:%S %Z")),
row["screen_name"],
( # 10x weight for RTs, arbitrarily set
(row["retweets_count"] * 10) + row["likes_count"]
),
row["tweet"],
)
common.append(common_row)
return common
def create_common_format_dict(source, epoch, author, score, content):
return {
"source": source,
"epoch": epoch,
"author": author,
"score": score,
"content": content,
}
def get_sqlite_twint(db_file):
print("Loading tweets from sqlite database")
conn = create_connection(db_file)
rows = get_all_tweets(conn)
print("Got tweets from sqlite database, preprocessing")
common = postprocess_tweets_to_common_format(rows)
print("Returning common-format tweet data from sqlite")
return common
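# Example of a single common-format record produced above (illustrative values):
#   {"source": "twitter", "epoch": 1588706880.0, "author": "someuser",
#    "score": 132, "content": "example tweet text"}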
| 24.983333
| 82
| 0.624416
|
55a6c9f34c504d90f8f386ba771b55dff7a795a9
| 70,980
|
py
|
Python
|
cerberus/base.py
|
intrinseca/cerberus
|
5c267b554a39b5a8650659d3eea0cf383e47a166
|
[
"0BSD"
] | 1
|
2020-07-24T04:23:14.000Z
|
2020-07-24T04:23:14.000Z
|
cerberus/base.py
|
intrinseca/cerberus
|
5c267b554a39b5a8650659d3eea0cf383e47a166
|
[
"0BSD"
] | null | null | null |
cerberus/base.py
|
intrinseca/cerberus
|
5c267b554a39b5a8650659d3eea0cf383e47a166
|
[
"0BSD"
] | null | null | null |
import re
import typing
from ast import literal_eval
from collections import abc, ChainMap
from datetime import date, datetime
from typing import (
Any,
Callable,
ClassVar,
Container,
Dict,
Generic,
Iterable,
List,
Mapping,
NamedTuple,
Optional,
Sequence,
Set,
Sized,
Tuple,
Type,
Union,
)
from warnings import warn
from cerberus import errors
from cerberus.platform import get_type_args, get_type_origin, ForwardRef, _GenericAlias
from cerberus.typing import (
AllowUnknown,
Document,
DocumentPath,
ErrorHandlerConfig,
FieldName,
NoneType,
RegistryItem,
RegistryItems,
RulesSet,
Schema,
TypesMapping,
)
from cerberus.utils import drop_item_from_tuple, readonly_classproperty, schema_hash
RULE_SCHEMA_SEPARATOR = "The rule's arguments are validated against this schema:"
toy_error_handler = errors.ToyErrorHandler()
_ellipsis = typing.Tuple[int, ...].__args__[-1]
def dummy_for_rule_validation(rule_constraints: str) -> Callable:
def dummy(self, constraint, field, value):
raise RuntimeError(
            'Dummy method called. Its purpose is to hold just '
'validation constraints for a rule in its '
'docstring.'
)
f = dummy
f.__doc__ = rule_constraints
return f
# Exceptions
class DocumentError(Exception):
""" Raised when the target document is missing or has the wrong format """
class SchemaError(Exception):
""" Raised when the validation schema is missing, has the wrong format or
contains errors. """
# Schema mangling
_normalized_rulesset_cache = {} # type: Dict[int, Dict[str, Any]]
def normalize_rulesset(rules: RulesSet) -> RulesSet:
""" Transforms a set of rules into a canonical form. """
if not isinstance(rules, abc.Mapping):
return rules
_hash = schema_hash(rules)
if _hash in _normalized_rulesset_cache:
return _normalized_rulesset_cache[_hash]
rules = dict(rules)
rules_with_whitespace = [x for x in rules if " " in x]
if rules_with_whitespace:
for rule in rules_with_whitespace:
rules[rule.replace(" ", "_")] = rules.pop(rule)
if isinstance(rules.get("dependencies"), str):
rules["dependencies"] = (rules["dependencies"],)
if "excludes" in rules:
constraint = rules["excludes"]
if isinstance(constraint, str) or not isinstance(constraint, Container):
rules["excludes"] = (constraint,)
if "type" in rules:
constraint = rules["type"]
if not (isinstance(constraint, Iterable) and not isinstance(constraint, str)):
rules["type"] = (constraint,)
_expand_generic_type_aliases(rules)
_expand_composed_of_rules(rules)
_normalize_contained_rulessets(rules)
_normalized_rulesset_cache[_hash] = rules
return rules
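# Illustrative example (hypothetical rules set; output shown roughly):
#   normalize_rulesset({"type": str, "allowed values": [1, 2], "dependencies": "foo"})
#   # -> {'dependencies': ('foo',), 'allowed_values': [1, 2], 'type': (str,)}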
def normalize_schema(schema: Schema) -> Schema:
""" Transforms a schema into a canonical form. """
return {field: normalize_rulesset(rules) for field, rules in schema.items()}
def _expand_generic_type_aliases(rules: Dict[str, Any]) -> None:
compound_types = []
plain_types = []
is_nullable = False
for constraint in _flatten_Union_and_Optional(rules.pop("type")):
if isinstance(constraint, _GenericAlias):
origin = get_type_origin(constraint)
args = get_type_args(constraint)
# mappings, e.g. Mapping[int, str]
if issubclass(origin, abc.Mapping) and not constraint.__parameters__:
compound_types.append(
{
"type": origin,
"keysrules": {"type": args[0]},
"valuesrules": {"type": args[1]},
}
)
# list-like and sets, e.g. List[str]
elif (
issubclass(origin, (abc.MutableSequence, abc.Set))
and not constraint.__parameters__
):
compound_types.append({"type": origin, "itemsrules": {"type": args[0]}})
# tuples
elif issubclass(origin, tuple) and args:
# e.g. Tuple[str, ...]
if args[-1] is _ellipsis:
compound_types.append(
{"type": origin, "itemsrules": {"type": args[0]}}
)
# e.g. Tuple[int, str, Tuple]
else:
compound_types.append(
{"type": origin, "items": tuple({"type": x} for x in args)}
)
else:
plain_types.append(origin)
# from typing.Optional
elif constraint is NoneType: # type: ignore
is_nullable = True
elif isinstance(constraint, ForwardRef):
plain_types.append(constraint.__forward_arg__)
else:
plain_types.append(constraint)
if compound_types or is_nullable:
if "anyof" in rules:
raise SchemaError(
"The usage of the `anyof` rule is not possible in a rulesset where the"
"`type` rule specifies compound types as constraints."
)
if plain_types:
compound_types.append({"type": tuple(plain_types)})
if is_nullable:
compound_types.append({"nullable": True})
rules["anyof"] = tuple(compound_types)
else:
rules["type"] = tuple(plain_types)
def _flatten_Union_and_Optional(type_constraints):
for constraint in type_constraints:
if get_type_origin(constraint) is typing.Union:
yield from _flatten_Union_and_Optional(get_type_args(constraint))
else:
yield constraint
def _expand_composed_of_rules(rules: Dict[str, Any]) -> None:
""" Expands of-rules that have another rule agglutinated in a rules set. """
composed_rules = [
x for x in rules if x.startswith(('allof_', 'anyof_', 'noneof_', 'oneof_'))
]
if not composed_rules:
return
for composed_rule in composed_rules:
of_rule, rule = composed_rule.split('_', 1)
rules[of_rule] = tuple({rule: x} for x in rules[composed_rule])
for rule in composed_rules:
rules.pop(rule)
def _normalize_contained_rulessets(rules: Dict[str, Any]) -> None:
if isinstance(rules.get("schema"), abc.Mapping):
rules['schema'] = normalize_schema(rules['schema'])
for rule in ("allow_unknown", "itemsrules", "keysrules", "valuesrules"):
if rule in rules:
rules[rule] = normalize_rulesset(rules[rule])
for rule in ('allof', 'anyof', 'items', 'noneof', 'oneof'):
if not isinstance(rules.get(rule), Sequence):
continue
rules[rule] = tuple(normalize_rulesset(x) for x in rules[rule])
# Registries
class Registry(Generic[RegistryItem]):
""" A registry to store and retrieve schemas and parts of it by a name
that can be used in validation schemas.
:param definitions: Optional, initial definitions.
"""
def __init__(
self, definitions: Union[RegistryItems, Iterable[Tuple[str, RegistryItem]]] = ()
):
self._storage = {} # type: Dict[str, RegistryItem]
self.extend(definitions)
def add(self, name: str, definition: RegistryItem) -> None:
""" Register a definition to the registry. Existing definitions are
replaced silently.
:param name: The name which can be used as reference in a validation
schema.
:param definition: The definition.
"""
if not isinstance(definition, abc.Mapping):
raise TypeError("Value must be of Mapping type.")
        # TODO add `_normalize_value: staticmethod` as a class attribute declaration
        # when Python 3.5 support is dropped, and remove this # type: ignore
self._storage[name] = self._normalize_value(definition) # type: ignore
def all(self) -> RegistryItems:
""" Returns a :class:`dict` with all registered definitions mapped to
their name. """
return self._storage
def clear(self):
""" Purge all definitions in the registry. """
self._storage.clear()
def extend(
self, definitions: Union[RegistryItems, Iterable[Tuple[str, RegistryItem]]]
) -> None:
""" Add several definitions at once. Existing definitions are
replaced silently.
:param definitions: The names and definitions.
"""
for name, definition in dict(definitions).items():
self.add(name, definition)
def get(
self, name: str, default: Optional[RegistryItem] = None
) -> Optional[RegistryItem]:
""" Retrieve a definition from the registry.
:param name: The reference that points to the definition.
:param default: Return value if the reference isn't registered. """
return self._storage.get(name, default)
def remove(self, *names: str) -> None:
""" Unregister definitions from the registry.
:param names: The names of the definitions that are to be
unregistered. """
for name in names:
self._storage.pop(name, None)
class SchemaRegistry(Registry):
_normalize_value = staticmethod(normalize_schema)
class RulesSetRegistry(Registry):
_normalize_value = staticmethod(normalize_rulesset)
schema_registry, rules_set_registry = SchemaRegistry(), RulesSetRegistry()
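# Hedged usage sketch (the names and rules are hypothetical):
#   rules_set_registry.add('boolean or integer', {'anyof_type': ('boolean', 'integer')})
#   schema_registry.add('non-system user', {'uid': {'min': 1000, 'max': 0xffff}})
# A schema can then reference these entries by name wherever a schema or a rules
# set is expected, e.g. {'user': {'type': 'dict', 'schema': 'non-system user'}}.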
# Defining types
TypeDefinition = NamedTuple(
'TypeDefinition',
(
('name', str),
('included_types', Tuple[Type[Any], ...]),
('excluded_types', Tuple[Type[Any], ...]),
),
)
"""
This class is used to define types that can be used as value in the
:attr:`~cerberus.Validator.types_mapping` property.
The ``name`` should be descriptive and match the key it is going to be assigned
to.
A value that is validated against such definition must be an instance of any of
the types contained in ``included_types`` and must not match any of the types
contained in ``excluded_types``.
"""
# The Validator
class ValidatorMeta(type):
""" Metaclass for all validators """
def __new__(mcls, name, bases, namespace):
if '__doc__' not in namespace:
namespace['__doc__'] = bases[0].__doc__
return super().__new__(mcls, name, bases, namespace)
def __init__(cls, name, bases, namespace):
def attributes_with_prefix(prefix):
return tuple(
x[len(prefix) + 2 :]
for x in dir(cls)
if x.startswith('_' + prefix + '_')
)
super().__init__(name, bases, namespace)
validation_rules = {
attribute: cls.__get_rule_schema('_validate_' + attribute)
for attribute in attributes_with_prefix('validate')
}
cls.checkers = tuple(x for x in attributes_with_prefix('check_with'))
x = validation_rules['check_with']['oneof']
x[1]['itemsrules']['oneof'][1]['allowed'] = x[2]['allowed'] = cls.checkers
for rule in (x for x in cls.mandatory_validations if x != 'nullable'):
validation_rules[rule]['required'] = True
cls.coercers, cls.default_setters, normalization_rules = (), (), {}
for attribute in attributes_with_prefix('normalize'):
if attribute.startswith('coerce_'):
cls.coercers += (attribute[len('coerce_') :],)
elif attribute.startswith('default_setter_'):
cls.default_setters += (attribute[len('default_setter_') :],)
else:
normalization_rules[attribute] = cls.__get_rule_schema(
'_normalize_' + attribute
)
for rule in ('coerce', 'rename_handler'):
x = normalization_rules[rule]['oneof']
x[1]['itemsrules']['oneof'][1]['allowed'] = x[2]['allowed'] = cls.coercers
normalization_rules['default_setter']['oneof'][1][
'allowed'
] = cls.default_setters
cls.normalization_rules = normalize_schema(normalization_rules)
cls.validation_rules = normalize_schema(validation_rules)
cls.rules = ChainMap(cls.normalization_rules, cls.validation_rules)
def __get_rule_schema(mcls, method_name):
docstring = getattr(mcls, method_name).__doc__
if docstring is None:
result = {}
else:
if RULE_SCHEMA_SEPARATOR in docstring:
docstring = docstring.split(RULE_SCHEMA_SEPARATOR)[1]
try:
result = literal_eval(docstring.strip())
except Exception:
result = {}
if not result and method_name != '_validate_meta':
warn(
"No validation schema is defined for the arguments of rule "
"'%s'" % method_name.split('_', 2)[-1]
)
return result
class UnconcernedValidator(metaclass=ValidatorMeta):
""" Validator class. Normalizes and/or validates any mapping against a
validation-schema which is provided as an argument at class instantiation
or upon calling the :meth:`~cerberus.Validator.validate`,
:meth:`~cerberus.Validator.validated` or
:meth:`~cerberus.Validator.normalized` method. An instance itself is
callable and executes a validation.
All instantiation parameters are optional.
There are the introspective properties :attr:`types`, :attr:`validators`,
:attr:`coercers`, :attr:`default_setters`, :attr:`rules`,
:attr:`normalization_rules` and :attr:`validation_rules`.
The attributes reflecting the available rules are assembled considering
constraints that are defined in the docstrings of rules' methods and is
effectively used as validation schema for :attr:`schema`.
:param schema: See :attr:`~cerberus.Validator.schema`.
Defaults to :obj:`None`.
:param ignore_none_values: See :attr:`~cerberus.Validator.ignore_none_values`.
Defaults to ``False``.
:param allow_unknown: See :attr:`~cerberus.Validator.allow_unknown`.
Defaults to ``False``.
:param require_all: See :attr:`~cerberus.Validator.require_all`.
Defaults to ``False``.
:param purge_unknown: See :attr:`~cerberus.Validator.purge_unknown`.
Defaults to to ``False``.
:param purge_readonly: Removes all fields that are defined as ``readonly`` in the
normalization phase.
:param error_handler: The error handler that formats the result of
:attr:`~cerberus.Validator.errors`.
When given as two-value tuple with an error-handler
class and a dictionary, the latter is passed to the
initialization of the error handler.
Default: :class:`~cerberus.errors.BasicErrorHandler`.
"""
mandatory_validations = ('nullable',) # type: ClassVar[Tuple[str, ...]]
""" Rules that are evaluated on any field, regardless whether defined in
the schema or not."""
priority_validations = (
'nullable',
'readonly',
'type',
'empty',
) # type: ClassVar[Tuple[str, ...]]
""" Rules that will be processed in that order before any other. """
types_mapping = {
'boolean': TypeDefinition('boolean', (bool,), ()),
'bytearray': TypeDefinition('bytearray', (bytearray,), ()),
'bytes': TypeDefinition('bytes', (bytes,), ()),
'complex': TypeDefinition('complex', (complex,), ()),
'date': TypeDefinition('date', (date,), (datetime,)),
'datetime': TypeDefinition('datetime', (datetime,), ()),
'dict': TypeDefinition('dict', (Mapping,), ()),
'float': TypeDefinition('float', (float,), ()),
'frozenset': TypeDefinition('frozenset', (frozenset,), ()),
'integer': TypeDefinition('integer', (int,), (bool,)),
'list': TypeDefinition('list', (list,), ()),
'number': TypeDefinition('number', (int, float), (bool,)),
'set': TypeDefinition('set', (set,), ()),
'string': TypeDefinition('string', (str,), ()),
'tuple': TypeDefinition('tuple', (tuple,), ()),
'type': TypeDefinition('type', (type,), ()),
} # type: ClassVar[TypesMapping]
""" This mapping holds all available constraints for the type rule and
their assigned :class:`~cerberus.TypeDefinition`. """
types_mapping.update(
(x, TypeDefinition(x, (getattr(abc, x),), ()))
for x in abc.__all__ # type: ignore
)
_valid_schemas = set() # type: ClassVar[Set[Tuple[int, int]]]
""" A :class:`set` of hashes derived from validation schemas that are
legit for a particular ``Validator`` class. """
# these will be set by the metaclass, here type hints are given:
checkers = () # type: ClassVar[Tuple[str, ...]]
coercers = () # type: ClassVar[Tuple[str, ...]]
default_setters = () # type: ClassVar[Tuple[str, ...]]
normalization_rules = {} # type: ClassVar[Schema]
rules = {} # type: ClassVar[Dict[str, RulesSet]]
validation_rules = {} # type: ClassVar[Schema]
def __init__(
self,
schema: Schema = None,
*,
allow_unknown: AllowUnknown = False,
error_handler: ErrorHandlerConfig = errors.BasicErrorHandler,
ignore_none_values: bool = False,
purge_unknown: bool = False,
purge_readonly: bool = False,
require_all: bool = False,
rules_set_registry: RulesSetRegistry = rules_set_registry,
schema_registry: SchemaRegistry = schema_registry,
**extra_config: Any
):
self._config = extra_config # type: Dict[str, Any]
""" This dictionary holds the configuration arguments that were used to
initialize the :class:`Validator` instance except the ``error_handler``. """
self._config.update(
{
"error_handler": error_handler,
"ignore_none_values": ignore_none_values,
"purge_readonly": purge_readonly,
"purge_unknown": purge_unknown,
"require_all": require_all,
"rules_set_registry": rules_set_registry,
"schema_registry": schema_registry,
}
)
self.document = None # type: Optional[Document]
""" The document that is or was recently processed.
Type: any :term:`mapping` """
self._errors = errors.ErrorList()
""" The list of errors that were encountered since the last document
processing was invoked.
Type: :class:`~cerberus.errors.ErrorList` """
self.recent_error = None # type: Optional[errors.ValidationError]
""" The last individual error that was submitted.
Type: :class:`~cerberus.errors.ValidationError` or ``None`` """
self.document_error_tree = errors.DocumentErrorTree()
""" A tree representiation of encountered errors following the
structure of the document.
Type: :class:`~cerberus.errors.DocumentErrorTree` """
self.schema_error_tree = errors.SchemaErrorTree()
""" A tree representiation of encountered errors following the
structure of the schema.
Type: :class:`~cerberus.errors.SchemaErrorTree` """
self.document_path = () # type: DocumentPath
""" The path within the document to the current sub-document.
Type: :class:`tuple` """
self.schema_path = () # type: DocumentPath
""" The path within the schema to the current sub-schema.
Type: :class:`tuple` """
self.update = False
self.error_handler = self.__init_error_handler(error_handler)
""" The error handler used to format :attr:`~cerberus.Validator.errors`
and process submitted errors with
:meth:`~cerberus.Validator._error`.
Type: :class:`~cerberus.errors.BaseErrorHandler` """
self.schema = schema
self.allow_unknown = allow_unknown
self._remaining_rules = [] # type: List[str]
""" Keeps track of the rules that are next in line to be evaluated during the
validation of a field. Type: :class:`list` """
super().__init__()
@staticmethod
def __init_error_handler(config: ErrorHandlerConfig) -> errors.BaseErrorHandler:
if isinstance(config, errors.BaseErrorHandler):
return config
if isinstance(config, tuple):
error_handler, eh_config = config
else:
error_handler, eh_config = config, {}
if isinstance(error_handler, type) and issubclass(
error_handler, errors.BaseErrorHandler
):
return error_handler(**eh_config)
else:
raise RuntimeError('Invalid error_handler configuration.')
@classmethod
def clear_caches(cls):
""" Purge the cache of known valid schemas. """
cls._valid_schemas.clear()
def _error(self, *args):
""" Creates and adds one or multiple errors.
:param args: Accepts different argument's signatures.
*1. Bulk addition of errors:*
- :term:`iterable` of
:class:`~cerberus.errors.ValidationError`-instances
The errors will be added to
:attr:`~cerberus.Validator._errors`.
*2. Custom error:*
- the invalid field's name
- the error message
A custom error containing the message will be created and
added to :attr:`~cerberus.Validator._errors`.
                  There will however be less information contained in the
error (no reference to the violated rule and its
constraint).
*3. Defined error:*
- the invalid field's name
- the error-reference, see :mod:`cerberus.errors`
- arbitrary, supplemental information about the error
A :class:`~cerberus.errors.ValidationError` instance will
be created and added to
:attr:`~cerberus.Validator._errors`.
"""
if len(args) == 1:
self._errors.extend(args[0])
for error in args[0]:
self.document_error_tree.add(error)
self.schema_error_tree.add(error)
self.error_handler.emit(error)
elif len(args) == 2 and isinstance(args[1], str):
self._error(args[0], errors.CUSTOM, args[1])
elif len(args) >= 2:
field = args[0]
code = args[1].code
rule = args[1].rule
info = args[2:]
document_path = self.document_path + (field,)
schema_path = self.schema_path
if code != errors.UNKNOWN_FIELD.code and rule is not None:
schema_path += (field, rule)
if not rule:
constraint = None
else:
field_definitions = self._resolve_rules_set(self.schema[field])
if rule == 'nullable':
constraint = field_definitions.get(rule, False)
elif rule == 'required':
constraint = field_definitions.get(rule, self.require_all)
if rule not in field_definitions:
schema_path = "__require_all__"
else:
constraint = field_definitions[rule]
value = self.document.get(field)
self.recent_error = errors.ValidationError(
document_path, schema_path, code, rule, constraint, value, info
)
self._error([self.recent_error])
def _get_child_validator(
self,
document_crumb: Union[FieldName, Iterable[FieldName], None] = None,
schema_crumb: Union[FieldName, Iterable[FieldName], None] = None,
**kwargs: Any
) -> 'UnconcernedValidator':
""" Creates a new instance of Validator-(sub-)class. All initial parameters of
the parent are passed to the initialization, unless a parameter is given as
an explicit *keyword*-parameter.
:param document_crumb: Extends the :attr:`~cerberus.Validator.document_path`
of the child-validator.
:param schema_crumb: Extends the :attr:`~cerberus.Validator.schema_path`
of the child-validator.
:param kwargs: Overriding keyword-arguments for initialization.
"""
child_config = ChainMap(kwargs, self._config)
if not self.is_child:
child_config = child_config.new_child(
{
'is_child': True,
'error_handler': toy_error_handler,
'root_allow_unknown': self.allow_unknown,
'root_document': self.document,
'root_schema': self.schema,
}
)
child_validator = self.__class__(**child_config)
if document_crumb is None:
child_validator.document_path = self.document_path
else:
if not isinstance(document_crumb, tuple):
document_crumb = (document_crumb,)
child_validator.document_path = self.document_path + document_crumb
if schema_crumb is None:
child_validator.schema_path = self.schema_path
else:
if not isinstance(schema_crumb, tuple):
schema_crumb = (schema_crumb,)
child_validator.schema_path = self.schema_path + schema_crumb
return child_validator
def __get_rule_handler(self, domain, rule):
methodname = '_{0}_{1}'.format(domain, rule.replace(' ', '_'))
result = getattr(self, methodname, None)
if result is None:
raise RuntimeError(
"There's no handler for '{}' in the '{}' "
"domain.".format(rule, domain)
)
return result
def _drop_nodes_from_errorpaths(
self,
_errors: errors.ErrorList,
dp_items: Iterable[int],
sp_items: Iterable[int],
) -> None:
""" Removes nodes by index from an errorpath, relatively to the
basepaths of self.
:param errors: A list of :class:`errors.ValidationError` instances.
:param dp_items: A list of integers, pointing at the nodes to drop from
the :attr:`document_path`.
:param sp_items: Alike ``dp_items``, but for :attr:`schema_path`.
"""
dp_basedepth = len(self.document_path)
sp_basedepth = len(self.schema_path)
for error in _errors:
for i in sorted(dp_items, reverse=True):
error.document_path = drop_item_from_tuple(
error.document_path, dp_basedepth + i
)
for i in sorted(sp_items, reverse=True):
error.schema_path = drop_item_from_tuple(
error.schema_path, sp_basedepth + i
)
if error.child_errors:
self._drop_nodes_from_errorpaths(error.child_errors, dp_items, sp_items)
def _lookup_field(self, path):
""" Searches for a field as defined by path. This method is used by the
``dependency`` evaluation logic.
:param path: Path elements are separated by a ``.``. A leading ``^``
indicates that the path relates to the document root,
otherwise it relates to the currently evaluated document,
which is possibly a subdocument.
The sequence ``^^`` at the start will be interpreted as a
literal ``^``.
:type path: :class:`str`
:returns: Either the found field name and its value or :obj:`None` for
both.
:rtype: A two-value :class:`tuple`.
"""
if path.startswith('^'):
path = path[1:]
context = self.document if path.startswith('^') else self.root_document
else:
context = self.document
parts = path.split('.')
for part in parts:
if part not in context:
return None, None
context = context.get(part, {})
return parts[-1], context
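    # e.g. (illustrative): with self.document == {"a": {"b": 1}},
    # self._lookup_field("a.b") returns ("b", 1); a leading "^" as in "^a.b"
    # resolves the path against the root document instead.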
def _resolve_rules_set(self, rules_set):
if isinstance(rules_set, Mapping):
return rules_set
elif isinstance(rules_set, str):
return self.rules_set_registry.get(rules_set)
return None
def _resolve_schema(self, schema):
if isinstance(schema, Mapping):
return schema
elif isinstance(schema, str):
return self.schema_registry.get(schema)
return None
# Properties
# TODO replace a lot with __getattr__ and __setattr__
@property
def allow_unknown(self) -> AllowUnknown:
""" If ``True`` unknown fields that are not defined in the schema will
be ignored. If a mapping with a validation schema is given, any
undefined field will be validated against its rules.
Also see :ref:`allowing-the-unknown`.
Type: :class:`bool` or any :term:`mapping` """
return self._config.get('allow_unknown', False)
@allow_unknown.setter
def allow_unknown(self, value: AllowUnknown) -> None:
if isinstance(value, Mapping):
self._config['allow_unknown'] = normalize_rulesset(value)
elif isinstance(value, bool):
self._config['allow_unknown'] = value
else:
raise TypeError
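# Brief usage sketch (assumed example): allow_unknown may be a boolean or a
# rules set that unknown fields are then validated against.
#
#     v = Validator({}, allow_unknown=True)
#     v.validate({'anything': 1})           # -> True, unknown fields are ignored
#     v.allow_unknown = {'type': 'string'}
#     v.validate({'anything': 1})           # -> False, 1 is not a string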
@property
def errors(self) -> Any:
""" The errors of the last processing formatted by the handler that is
bound to :attr:`~cerberus.Validator.error_handler`. """
return self.error_handler(self._errors)
@property
def ignore_none_values(self) -> bool:
""" Whether to not process :obj:`None`-values in a document or not.
Type: :class:`bool` """
return self._config.get('ignore_none_values', False)
@ignore_none_values.setter
def ignore_none_values(self, value: bool) -> None:
self._config['ignore_none_values'] = value
@property
def is_child(self) -> bool:
""" ``True`` for child-validators obtained with
:meth:`~cerberus.Validator._get_child_validator`.
Type: :class:`bool` """
return self._config.get('is_child', False)
@property
def _is_normalized(self) -> bool:
""" ``True`` if the document is already normalized. """
return self._config.get('_is_normalized', False)
@_is_normalized.setter
def _is_normalized(self, value: bool) -> None:
self._config['_is_normalized'] = value
@property
def purge_unknown(self) -> bool:
""" If ``True``, unknown fields will be deleted from the document
unless a validation is called with disabled normalization.
Also see :ref:`purging-unknown-fields`. Type: :class:`bool` """
return self._config.get('purge_unknown', False)
@purge_unknown.setter
def purge_unknown(self, value: bool) -> None:
self._config['purge_unknown'] = value
@property
def purge_readonly(self) -> bool:
""" If ``True``, fields declared as readonly will be deleted from the
document unless a validation is called with disabled normalization.
Type: :class:`bool` """
return self._config.get('purge_readonly', False)
@purge_readonly.setter
def purge_readonly(self, value: bool) -> None:
self._config['purge_readonly'] = value
@property
def require_all(self) -> bool:
""" If ``True`` known fields that are defined in the schema will
be required. Type: :class:`bool` """
return self._config["require_all"]
@require_all.setter
def require_all(self, value: bool) -> None:
self._config['require_all'] = value
@property
def root_allow_unknown(self) -> AllowUnknown:
""" The :attr:`~cerberus.Validator.allow_unknown` attribute of the
first level ancestor of a child validator. """
return self._config.get('root_allow_unknown', self.allow_unknown)
@property
def root_require_all(self) -> bool:
""" The :attr:`~cerberus.Validator.require_all` attribute of
the first level ancestor of a child validator. """
return self._config.get('root_require_all', self.require_all)
@property
def root_document(self) -> Document:
""" The :attr:`~cerberus.Validator.document` attribute of the
first level ancestor of a child validator. """
return self._config.get('root_document', self.document)
@property
def rules_set_registry(self) -> RulesSetRegistry:
""" The registry that holds referenced rules sets.
Type: :class:`~cerberus.Registry` """
return self._config["rules_set_registry"]
@rules_set_registry.setter
def rules_set_registry(self, registry: RulesSetRegistry) -> None:
self._config['rules_set_registry'] = registry
@property
def root_schema(self) -> Optional[Schema]:
""" The :attr:`~cerberus.Validator.schema` attribute of the
first level ancestor of a child validator. """
return self._config.get('root_schema', self.schema)
@property # type: ignore
def schema(self):
""" The validation schema of a validator. When a schema is passed to
a validator method (e.g. ``validate``), it replaces this attribute.
Type: any :term:`mapping` or :obj:`None` """
return self._schema
@schema.setter
def schema(self, schema):
if schema is None:
self._schema = None
elif self.is_child:
self._schema = schema
else:
self._schema = normalize_schema(schema)
@property
def schema_registry(self) -> SchemaRegistry:
""" The registry that holds referenced schemas.
Type: :class:`~cerberus.Registry` """
return self._config["schema_registry"]
@schema_registry.setter
def schema_registry(self, registry: SchemaRegistry) -> None:
self._config['schema_registry'] = registry
# FIXME the returned method has the correct docstring, but doesn't appear
# in the API docs
@readonly_classproperty
def types(cls) -> Tuple[str, ...]:
""" The constraints that can be used for the 'type' rule.
Type: A tuple of strings. """
return tuple(cls.types_mapping)
# Document processing
def __init_processing(self, document, schema=None):
self._errors = errors.ErrorList()
self.recent_error = None
self.document_error_tree = errors.DocumentErrorTree()
self.schema_error_tree = errors.SchemaErrorTree()
if not self.is_child:
self._is_normalized = False
if schema is not None:
self.schema = schema
if self.schema is None:
if isinstance(self.allow_unknown, Mapping):
self.schema = {}
else:
raise SchemaError(errors.MISSING_SCHEMA)
if document is None:
raise DocumentError(errors.DOCUMENT_MISSING)
if not isinstance(document, Mapping):
raise DocumentError(errors.DOCUMENT_FORMAT.format(document))
self.document = document
self.error_handler.start(self)
def _drop_remaining_rules(self, *rules):
""" Drops rules from the queue of the rules that still need to be
evaluated for the currently processed field.
If no arguments are given, the whole queue is emptied.
"""
if rules:
for rule in (x for x in rules if x in self._remaining_rules):
self._remaining_rules.remove(rule)
else:
self._remaining_rules.clear()
# # Normalizing
def normalized(
self,
document: Document,
schema: Optional[Schema] = None,
always_return_document: bool = False,
) -> Optional[Document]:
"""
Returns the document normalized according to the specified rules of a schema.
:param document: The document to normalize.
:param schema: The validation schema. Defaults to :obj:`None`. If not
provided here, the schema must have been provided at
class instantiation.
:param always_return_document: Return the document, even if an error
occurred. Defaults to: ``False``.
:return: A normalized copy of the provided mapping or :obj:`None` if an
error occurred during normalization.
"""
self.__init_processing(document, schema)
self.document = self.__normalize_mapping(document, self.schema)
self.error_handler.end(self)
self._errors.sort()
if self._errors and not always_return_document:
return None
else:
return self.document
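# Minimal usage sketch (illustrative, not part of the source): normalization
# applies rules such as 'coerce', 'rename' and 'default' and returns a copy of
# the document.
#
#     v = Validator({'amount': {'coerce': int}, 'kind': {'default': 'generic'}})
#     v.normalized({'amount': '1'})   # -> {'amount': 1, 'kind': 'generic'}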
def __normalize_mapping(self, mapping, schema):
mapping = mapping.copy()
if isinstance(schema, str):
schema = self._resolve_schema(schema)
schema = {k: self._resolve_rules_set(v) for k, v in schema.items()}
self.__normalize_rename_fields(mapping, schema)
if self.purge_unknown and not self.allow_unknown:
self._normalize_purge_unknown(mapping, schema)
if self.purge_readonly:
self.__normalize_purge_readonly(mapping, schema)
# Check `readonly` fields before applying default values because
# a field's schema definition might contain both `readonly` and
# `default`.
self.__validate_readonly_fields(mapping, schema)
self.__normalize_default_fields(mapping, schema)
self._normalize_coerce(mapping, schema)
self.__normalize_containers(mapping, schema)
self._is_normalized = True
return mapping
def _normalize_coerce(self, mapping, schema):
""" {'oneof': [
{'type': 'Callable'},
{'type': 'Iterable',
'itemsrules': {'oneof': [{'type': 'Callable'},
{'type': 'string'}]}},
{'type': 'string'}
]} """
error = errors.COERCION_FAILED
for field in mapping:
if field in schema and 'coerce' in schema[field]:
mapping[field] = self.__normalize_coerce(
schema[field]['coerce'],
field,
mapping[field],
schema[field].get('nullable', False),
error,
)
elif (
isinstance(self.allow_unknown, Mapping)
and 'coerce' in self.allow_unknown
):
mapping[field] = self.__normalize_coerce(
self.allow_unknown['coerce'],
field,
mapping[field],
self.allow_unknown.get('nullable', False),
error,
)
def __normalize_coerce(self, processor, field, value, nullable, error):
if isinstance(processor, str):
processor = self.__get_rule_handler('normalize_coerce', processor)
elif isinstance(processor, Iterable):
result = value
for p in processor:
result = self.__normalize_coerce(p, field, result, nullable, error)
if (
errors.COERCION_FAILED
in self.document_error_tree.fetch_errors_from(
self.document_path + (field,)
)
):
break
return result
try:
return processor(value)
except RuntimeError:
raise
except Exception as e:
if not (nullable and value is None):
self._error(field, error, str(e))
return value
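# Hedged illustration (not in the original source): 'coerce' accepts a single
# callable, the name of a registered coercer, or an iterable of those that is
# applied left to right and aborted once a coercion fails.
#
#     v = Validator({'value': {'coerce': (str.strip, int)}})
#     v.normalized({'value': ' 42 '})   # -> {'value': 42}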
def __normalize_containers(self, mapping, schema):
for field in mapping:
rules = set(schema.get(field, ()))
if isinstance(mapping[field], Mapping):
if 'keysrules' in rules:
self.__normalize_mapping_per_keysrules(
field, mapping, schema[field]['keysrules']
)
if 'valuesrules' in rules:
self.__normalize_mapping_per_valuesrules(
field, mapping, schema[field]['valuesrules']
)
if any(
x in rules for x in ('allow_unknown', 'purge_unknown', 'schema')
) or isinstance(self.allow_unknown, Mapping):
self.__normalize_mapping_per_schema(field, mapping, schema)
elif isinstance(mapping[field], str):
continue
elif isinstance(mapping[field], Sequence):
if 'itemsrules' in rules:
self.__normalize_sequence_per_itemsrules(field, mapping, schema)
elif 'items' in rules:
self.__normalize_sequence_per_items(field, mapping, schema)
def __normalize_mapping_per_keysrules(self, field, mapping, property_rules):
schema = {k: property_rules for k in mapping[field]}
document = {k: k for k in mapping[field]}
validator = self._get_child_validator(
document_crumb=field, schema_crumb=(field, 'keysrules'), schema=schema
)
result = validator.normalized(document, always_return_document=True)
if validator._errors:
self._drop_nodes_from_errorpaths(validator._errors, [], [2, 4])
self._error(validator._errors)
for _in, out in ((k, v) for k, v in result.items() if k != v):
if out in mapping[field]:
warn(
"Normalizing keys of {path}: {key} already exists, "
"its value is replaced.".format(
path='.'.join(str(x) for x in self.document_path + (field,)),
key=_in,
)
)
mapping[field][out] = mapping[field][_in]
else:
mapping[field][out] = mapping[field][_in]
del mapping[field][_in]
def __normalize_mapping_per_valuesrules(self, field, mapping, value_rules):
schema = {k: value_rules for k in mapping[field]}
validator = self._get_child_validator(
document_crumb=field, schema_crumb=(field, 'valuesrules'), schema=schema
)
mapping[field] = validator.normalized(
mapping[field], always_return_document=True
)
if validator._errors:
self._drop_nodes_from_errorpaths(validator._errors, [], [2])
self._error(validator._errors)
def __normalize_mapping_per_schema(self, field, mapping, schema):
rules = schema.get(field, {})
if not rules and isinstance(self.allow_unknown, Mapping):
rules = self.allow_unknown
validator = self._get_child_validator(
document_crumb=field,
schema_crumb=(field, 'schema'),
schema=rules.get('schema', {}),
allow_unknown=rules.get('allow_unknown', self.allow_unknown), # noqa: E501
purge_unknown=rules.get('purge_unknown', self.purge_unknown),
require_all=rules.get('require_all', self.require_all),
) # noqa: E501
value_type = type(mapping[field])
result_value = validator.normalized(mapping[field], always_return_document=True)
mapping[field] = value_type(result_value)
if validator._errors:
self._error(validator._errors)
def __normalize_sequence_per_items(self, field, mapping, schema):
rules, values = schema[field]['items'], mapping[field]
if len(rules) != len(values):
return
schema = {k: v for k, v in enumerate(rules)}
document = {k: v for k, v in enumerate(values)}
validator = self._get_child_validator(
document_crumb=field, schema_crumb=(field, 'items'), schema=schema
)
value_type = type(mapping[field])
result = validator.normalized(document, always_return_document=True)
mapping[field] = value_type(result.values())
if validator._errors:
self._drop_nodes_from_errorpaths(validator._errors, [], [2])
self._error(validator._errors)
def __normalize_sequence_per_itemsrules(self, field, mapping, schema):
constraint = schema[field]['itemsrules']
schema = {k: constraint for k in range(len(mapping[field]))}
document = {k: v for k, v in enumerate(mapping[field])}
validator = self._get_child_validator(
document_crumb=field, schema_crumb=(field, 'itemsrules'), schema=schema
)
value_type = type(mapping[field])
result = validator.normalized(document, always_return_document=True)
mapping[field] = value_type(result.values())
if validator._errors:
self._drop_nodes_from_errorpaths(validator._errors, [], [2])
self._error(validator._errors)
@staticmethod
def __normalize_purge_readonly(mapping, schema):
for field in [x for x in mapping if schema.get(x, {}).get('readonly', False)]:
mapping.pop(field)
return mapping
@staticmethod
def _normalize_purge_unknown(mapping, schema):
""" {'type': 'boolean'} """
for field in [x for x in mapping if x not in schema]:
mapping.pop(field)
return mapping
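# Example sketch (assumed, not from the source): with purge_unknown enabled,
# fields that are absent from the schema are dropped during normalization.
#
#     v = Validator({'name': {'type': 'string'}}, purge_unknown=True)
#     v.normalized({'name': 'x', 'debug': True})   # -> {'name': 'x'}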
def __normalize_rename_fields(self, mapping, schema):
for field in tuple(mapping):
if field in schema:
self._normalize_rename(mapping, schema, field)
self._normalize_rename_handler(mapping, schema, field)
elif (
isinstance(self.allow_unknown, Mapping)
and 'rename_handler' in self.allow_unknown
):
self._normalize_rename_handler(
mapping, {field: self.allow_unknown}, field
)
return mapping
def _normalize_rename(self, mapping, schema, field):
""" {'type': 'Hashable'} """
if 'rename' in schema[field]:
mapping[schema[field]['rename']] = mapping[field]
del mapping[field]
def _normalize_rename_handler(self, mapping, schema, field):
""" {'oneof': [
{'type': 'Callable'},
{'type': 'Iterable',
'itemsrules': {'oneof': [{'type': 'Callable'},
{'type': 'string'}]}},
{'type': 'string'}
]} """
if 'rename_handler' not in schema[field]:
return
new_name = self.__normalize_coerce(
schema[field]['rename_handler'], field, field, False, errors.RENAMING_FAILED
)
if new_name != field:
mapping[new_name] = mapping[field]
del mapping[field]
def __validate_readonly_fields(self, mapping, schema):
for field in (
x
for x in schema
if x in mapping and self._resolve_rules_set(schema[x]).get('readonly')
):
self._validate_readonly(schema[field]['readonly'], field, mapping[field])
def __normalize_default_fields(self, mapping, schema):
empty_fields = [
x
for x in schema
if x not in mapping
or (
mapping[x] is None # noqa: W503
and not schema[x].get('nullable', False)
) # noqa: W503
]
for field in (x for x in empty_fields if 'default' in schema[x]):
self._normalize_default(mapping, schema, field)
known_fields_states = set()
fields_with_default_setter = [
x for x in empty_fields if 'default_setter' in schema[x]
]
while fields_with_default_setter:
field = fields_with_default_setter.pop(0)
try:
self._normalize_default_setter(mapping, schema, field)
except KeyError:
fields_with_default_setter.append(field)
except RuntimeError:
raise
except Exception as e:
self._error(field, errors.SETTING_DEFAULT_FAILED, str(e))
fields_processing_state = hash(tuple(fields_with_default_setter))
if fields_processing_state in known_fields_states:
for field in fields_with_default_setter:
self._error(
field,
errors.SETTING_DEFAULT_FAILED,
'Circular dependencies of default setters.',
)
break
else:
known_fields_states.add(fields_processing_state)
def _normalize_default(self, mapping, schema, field):
""" {'nullable': True} """
mapping[field] = schema[field]['default']
def _normalize_default_setter(self, mapping, schema, field):
""" {'oneof': [
{'type': 'Callable'},
{'type': 'string'}
]} """
if 'default_setter' in schema[field]:
setter = schema[field]['default_setter']
if isinstance(setter, str):
setter = self.__get_rule_handler('normalize_default_setter', setter)
mapping[field] = setter(mapping)
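# Hedged example (added for clarity): 'default_setter' may be a callable that
# receives the partially normalized document, or the name of a registered
# setter method.
#
#     schema = {'a': {'type': 'integer'},
#               'b': {'default_setter': lambda doc: doc['a'] * 2}}
#     Validator(schema).normalized({'a': 3})   # -> {'a': 3, 'b': 6}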
# # Validating
def validate(
self,
document: Document,
schema: Optional[Schema] = None,
update: bool = False,
normalize: bool = True,
) -> bool:
"""
Normalizes and validates a mapping against a validation-schema of defined rules.
:param document: The document to validate.
:param schema: The validation schema. Defaults to :obj:`None`. If not provided
here, the schema must have been provided at class instantiation.
:param update: If ``True``, required fields won't be checked.
:param normalize: If ``True``, normalize the document before validation.
:return: ``True`` if validation succeeds, otherwise ``False``. Check
the :func:`errors` property for a list of processing errors.
"""
self.update = update
self._unrequired_by_excludes = set() # type: Set[FieldName]
self.__init_processing(document, schema)
del document, schema
if normalize:
self.document = self.__normalize_mapping(self.document, self.schema)
for field in self.document: # type: ignore
definitions = self.schema.get(field) # type: ignore
if definitions is not None:
self.__validate_definitions(definitions, field)
else:
self.__validate_unknown_fields(field)
if not self.update:
self.__validate_required_fields(self.document)
self.error_handler.end(self)
self._errors.sort()
return not bool(self._errors)
__call__ = validate
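# Minimal usage sketch (illustrative only; the exact error messages depend on
# the bound error handler):
#
#     v = Validator({'age': {'type': 'integer', 'min': 0}})
#     v.validate({'age': -1})   # -> False, v.errors describes the violation
#     v({'age': 30})            # the instance itself is callable -> True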
def validated(
self,
document: Document,
schema: Optional[Schema] = None,
update: bool = False,
normalize: bool = True,
always_return_document: bool = False,
) -> Optional[Document]:
"""
Wrapper around :meth:`~cerberus.Validator.validate` that returns the normalized
and validated document or :obj:`None` if validation failed.
"""
self.validate(
document=document, schema=schema, update=update, normalize=normalize
)
if self._errors and not always_return_document:
return None
else:
return self.document
def __validate_unknown_fields(self, field):
if self.allow_unknown:
value = self.document[field]
if isinstance(self.allow_unknown, (Mapping, str)):
# validate that unknown fields match the schema
# for unknown fields
schema_crumb = 'allow_unknown' if self.is_child else '__allow_unknown__'
validator = self._get_child_validator(
schema_crumb=schema_crumb, schema={field: self.allow_unknown}
)
if not validator({field: value}, normalize=False):
self._error(validator._errors)
else:
self._error(field, errors.UNKNOWN_FIELD)
def __validate_definitions(self, definitions, field):
""" Validate a field's value against its defined rules. """
definitions = self._resolve_rules_set(definitions)
value = self.document[field]
rules_queue = [
x
for x in self.priority_validations
if x in definitions or x in self.mandatory_validations
]
rules_queue.extend(
x for x in self.mandatory_validations if x not in rules_queue
)
rules_queue.extend(
x
for x in definitions
if x not in rules_queue
and x not in self.normalization_rules
and x not in ('allow_unknown', 'require_all', 'meta', 'required')
)
self._remaining_rules = rules_queue
while self._remaining_rules:
rule = self._remaining_rules.pop(0)
rule_handler = self.__get_rule_handler('validate', rule)
rule_handler(definitions.get(rule, None), field, value)
# Remember to keep the validation methods below this line
# sorted alphabetically
_validate_allow_unknown = dummy_for_rule_validation(
""" {'oneof': [{'type': 'boolean'},
{'type': ['dict', 'string'],
'check_with': 'rulesset'}]} """
)
def _validate_allowed(self, allowed_values, field, value):
""" {'type': 'container_but_not_string'} """
if isinstance(value, Iterable) and not isinstance(value, str):
unallowed = tuple(x for x in value if x not in allowed_values)
if unallowed:
self._error(field, errors.UNALLOWED_VALUES, unallowed)
else:
if value not in allowed_values:
self._error(field, errors.UNALLOWED_VALUE, value)
def _validate_check_with(self, checks, field, value):
""" {'oneof': [
{'type': 'Callable'},
{'type': 'Iterable',
'itemsrules': {'oneof': [{'type': 'Callable'},
{'type': 'string'}]}},
{'type': 'string'}
]}
"""
if isinstance(checks, str):
value_checker = self.__get_rule_handler('check_with', checks)
value_checker(field, value)
elif isinstance(checks, Iterable):
for v in checks:
self._validate_check_with(v, field, value)
else:
checks(field, value, self._error)
def _validate_contains(self, expected_values, field, value):
""" {'empty': False } """
if not isinstance(value, Container):
return
if not isinstance(expected_values, Iterable) or isinstance(
expected_values, str
):
expected_values = set((expected_values,))
else:
expected_values = set(expected_values)
missing_values = expected_values - set(value)
if missing_values:
self._error(field, errors.MISSING_MEMBERS, missing_values)
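# Hedged example (not from the source): 'contains' requires all expected
# members to be present in a container value.
#
#     v = Validator({'tags': {'contains': ['draft', 'review']}})
#     v.validate({'tags': ['draft']})             # -> False, 'review' is missing
#     v.validate({'tags': ['draft', 'review']})   # -> True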
def _validate_dependencies(self, dependencies, field, value):
""" {'type': ('Hashable', 'Iterable', 'Mapping'),
'check_with': 'dependencies'} """
if isinstance(dependencies, str):
dependencies = (dependencies,)
if isinstance(dependencies, Sequence):
self.__validate_dependencies_sequence(dependencies, field)
elif isinstance(dependencies, Mapping):
self.__validate_dependencies_mapping(dependencies, field)
if (
self.document_error_tree.fetch_node_from(
self.schema_path + (field, 'dependencies')
)
is not None
):
return True
def __validate_dependencies_mapping(self, dependencies, field):
validated_dependencies_counter = 0
error_info = {}
for dependency_name, dependency_values in dependencies.items():
if not isinstance(dependency_values, Sequence) or isinstance(
dependency_values, str
):
dependency_values = [dependency_values]
wanted_field, wanted_field_value = self._lookup_field(dependency_name)
if wanted_field_value in dependency_values:
validated_dependencies_counter += 1
else:
error_info.update({dependency_name: wanted_field_value})
if validated_dependencies_counter != len(dependencies):
self._error(field, errors.DEPENDENCIES_FIELD_VALUE, error_info)
def __validate_dependencies_sequence(self, dependencies, field):
for dependency in dependencies:
if self._lookup_field(dependency)[0] is None:
self._error(field, errors.DEPENDENCIES_FIELD, dependency)
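# Illustrative sketch (assumed example): 'dependencies' given as a sequence
# requires the listed fields to be present; given as a mapping it additionally
# constrains their values.
#
#     v = Validator({'field1': {},
#                    'field2': {'dependencies': {'field1': ['one', 'two']}}})
#     v.validate({'field1': 'three', 'field2': 7})   # -> False
#     v.validate({'field1': 'two', 'field2': 7})     # -> True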
def _validate_empty(self, empty, field, value):
""" {'type': 'boolean'} """
if isinstance(value, Sized) and len(value) == 0:
self._drop_remaining_rules(
'allowed',
'forbidden',
'items',
'minlength',
'maxlength',
'regex',
'check_with',
)
if not empty:
self._error(field, errors.EMPTY)
def _validate_excludes(self, excluded_fields, field, value):
""" {'type': ('Hashable', 'Iterable'),
'itemsrules': {'type': 'Hashable'}} """
if isinstance(excluded_fields, str) or not isinstance(
excluded_fields, Container
):
excluded_fields = (excluded_fields,)
# Mark the currently evaluated field as not required for now if it actually is.
# One of the fields marked this way must still be present when required fields are checked.
if self.schema[field].get('required', self.require_all):
self._unrequired_by_excludes.add(field)
for excluded_field in excluded_fields:
if excluded_field in self.schema and self.schema[field].get(
'required', self.require_all
):
self._unrequired_by_excludes.add(excluded_field)
if any(excluded_field in self.document for excluded_field in excluded_fields):
exclusion_str = ', '.join(
"'{0}'".format(field) for field in excluded_fields
)
self._error(field, errors.EXCLUDES_FIELD, exclusion_str)
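# Hedged example (not part of the source): mutually exclusive fields.
#
#     v = Validator({'this_field': {'type': 'dict', 'excludes': 'that_field'},
#                    'that_field': {'type': 'dict', 'excludes': 'this_field'}})
#     v.validate({'this_field': {}, 'that_field': {}})   # -> False
#     v.validate({'this_field': {}})                     # -> True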
def _validate_forbidden(self, forbidden_values, field, value):
""" {'type': 'Container'} """
if isinstance(value, str):
if value in forbidden_values:
self._error(field, errors.FORBIDDEN_VALUE, value)
elif isinstance(value, Iterable):
forbidden = set(value) & set(forbidden_values)
if forbidden:
self._error(field, errors.FORBIDDEN_VALUES, list(forbidden))
else:
if value in forbidden_values:
self._error(field, errors.FORBIDDEN_VALUE, value)
def _validate_items(self, items, field, values):
""" {'type': 'Sequence', 'check_with': 'items'} """
if len(items) != len(values):
self._error(field, errors.ITEMS_LENGTH, len(items), len(values))
else:
schema = {i: definition for i, definition in enumerate(items)}
validator = self._get_child_validator(
document_crumb=field,
schema_crumb=(field, 'items'), # noqa: E501
schema=schema,
)
if not validator(
{i: value for i, value in enumerate(values)},
update=self.update,
normalize=False,
):
self._error(field, errors.ITEMS, validator._errors)
def _validate_itemsrules(self, rulesset, field, value):
""" {'type': ('dict', 'string'),
'check_with': 'rulesset'} """
if not isinstance(value, Sequence):
return
schema = {i: rulesset for i in range(len(value))}
validator = self._get_child_validator(
document_crumb=field,
schema_crumb=(field, 'itemsrules'),
schema=schema,
allow_unknown=self.allow_unknown,
)
validator(
{i: v for i, v in enumerate(value)}, update=self.update, normalize=False
)
if validator._errors:
self._drop_nodes_from_errorpaths(validator._errors, [], [2])
self._error(field, errors.ITEMSRULES, validator._errors)
def __validate_logical(self, operator, definitions, field, value):
""" Validates value against all definitions and logs errors according
to the operator. """
valid_counter = 0
_errors = errors.ErrorList()
for i, definition in enumerate(definitions):
schema = {field: definition.copy()}
for rule in ('allow_unknown', 'type'):
if rule not in definition and rule in self.schema[field]:
schema[field][rule] = self.schema[field][rule]
if 'allow_unknown' not in definition:
schema[field]['allow_unknown'] = self.allow_unknown
validator = self._get_child_validator(
schema_crumb=(field, operator, i), schema=schema, allow_unknown=True
)
if validator(self.document, update=self.update, normalize=False):
valid_counter += 1
else:
self._drop_nodes_from_errorpaths(validator._errors, [], [3])
_errors.extend(validator._errors)
return valid_counter, _errors
def _validate_anyof(self, definitions, field, value):
""" {'type': 'Sequence', 'logical': 'anyof'} """
valids, _errors = self.__validate_logical('anyof', definitions, field, value)
if valids < 1:
self._error(field, errors.ANYOF, _errors, valids, len(definitions))
def _validate_allof(self, definitions, field, value):
""" {'type': 'Sequence', 'logical': 'allof'} """
valids, _errors = self.__validate_logical('allof', definitions, field, value)
if valids < len(definitions):
self._error(field, errors.ALLOF, _errors, valids, len(definitions))
def _validate_noneof(self, definitions, field, value):
""" {'type': 'Sequence', 'logical': 'noneof'} """
valids, _errors = self.__validate_logical('noneof', definitions, field, value)
if valids > 0:
self._error(field, errors.NONEOF, _errors, valids, len(definitions))
def _validate_oneof(self, definitions, field, value):
""" {'type': 'Sequence', 'logical': 'oneof'} """
valids, _errors = self.__validate_logical('oneof', definitions, field, value)
if valids != 1:
self._error(field, errors.ONEOF, _errors, valids, len(definitions))
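# Brief sketch (illustrative): the *of-rules validate the value against a list
# of rule sets; 'anyof' needs at least one of them to pass, 'oneof' exactly one.
#
#     v = Validator({'prop': {'type': 'integer',
#                             'anyof': [{'min': 0, 'max': 10}, {'min': 100}]}})
#     v.validate({'prop': 5})    # -> True
#     v.validate({'prop': 50})   # -> False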
def _validate_max(self, max_value, field, value):
""" {'nullable': False } """
try:
if value > max_value:
self._error(field, errors.MAX_VALUE)
except TypeError:
pass
def _validate_min(self, min_value, field, value):
""" {'nullable': False } """
try:
if value < min_value:
self._error(field, errors.MIN_VALUE)
except TypeError:
pass
def _validate_maxlength(self, max_length, field, value):
""" {'type': 'integer'} """
if isinstance(value, Iterable) and len(value) > max_length:
self._error(field, errors.MAX_LENGTH, len(value))
_validate_meta = dummy_for_rule_validation('')
def _validate_minlength(self, min_length, field, value):
""" {'type': 'integer'} """
if isinstance(value, Iterable) and len(value) < min_length:
self._error(field, errors.MIN_LENGTH, len(value))
def _validate_nullable(self, nullable, field, value):
""" {'type': 'boolean'} """
if value is None:
if not (nullable or self.ignore_none_values):
self._error(field, errors.NULLABLE)
self._drop_remaining_rules(
'allowed',
'empty',
'forbidden',
'items',
'keysrules',
'min',
'max',
'minlength',
'maxlength',
'regex',
'schema',
'type',
'valuesrules',
)
def _validate_keysrules(self, schema, field, value):
""" {'type': ('Mapping', 'string'), 'check_with': 'rulesset',
'forbidden': ('rename', 'rename_handler')} """
if isinstance(value, Mapping):
validator = self._get_child_validator(
document_crumb=field,
schema_crumb=(field, 'keysrules'),
schema={k: schema for k in value.keys()},
)
if not validator({k: k for k in value.keys()}, normalize=False):
self._drop_nodes_from_errorpaths(validator._errors, [], [2, 4])
self._error(field, errors.KEYSRULES, validator._errors)
def _validate_readonly(self, readonly, field, value):
""" {'type': 'boolean'} """
if readonly:
if not self._is_normalized:
self._error(field, errors.READONLY_FIELD)
# If the document was normalized (and has therefore already been
# checked for readonly fields), the remaining rules still have to be
# dropped if an error was filed.
has_error = (
errors.READONLY_FIELD
in self.document_error_tree.fetch_errors_from(
self.document_path + (field,)
)
)
if self._is_normalized and has_error:
self._drop_remaining_rules()
def _validate_regex(self, pattern, field, value):
""" {'type': 'string'} """
if not isinstance(value, str):
return
if not pattern.endswith('$'):
pattern += '$'
re_obj = re.compile(pattern)
if not re_obj.match(value):
self._error(field, errors.REGEX_MISMATCH)
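# Hedged example (added for clarity): since a '$' is appended above and
# re.match anchors at the start, the whole string has to match the pattern.
#
#     v = Validator({'code': {'type': 'string', 'regex': r'[A-Z]{2}-\d+'}})
#     v.validate({'code': 'AB-12'})      # -> True
#     v.validate({'code': 'AB-12xyz'})   # -> False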
_validate_required = dummy_for_rule_validation(""" {'type': 'boolean'} """)
_validate_require_all = dummy_for_rule_validation(""" {'type': 'boolean'} """)
def __validate_required_fields(self, document):
""" Validates that required fields are not missing.
:param document: The document being validated.
"""
required = set(
field
for field, definition in self.schema.items()
if self._resolve_rules_set(definition).get('required', self.require_all)
)
required -= self._unrequired_by_excludes
missing = required - set(
field
for field in document
if document.get(field) is not None or not self.ignore_none_values
)
for field in missing:
self._error(field, errors.REQUIRED_FIELD)
# At least one field from self._unrequired_by_excludes should be present in
# document.
if self._unrequired_by_excludes:
fields = set(field for field in document if document.get(field) is not None)
if self._unrequired_by_excludes.isdisjoint(fields):
for field in self._unrequired_by_excludes - fields:
self._error(field, errors.REQUIRED_FIELD)
def _validate_schema(self, schema, field, value):
""" {'type': ('Mapping', 'string'),
'check_with': 'schema'} """
if not isinstance(value, Mapping):
return
schema = self._resolve_schema(schema)
allow_unknown = self.schema[field].get('allow_unknown', self.allow_unknown)
require_all = self.schema[field].get('require_all', self.require_all)
validator = self._get_child_validator(
document_crumb=field,
schema_crumb=(field, 'schema'),
schema=schema,
allow_unknown=allow_unknown,
require_all=require_all,
)
if not validator(value, update=self.update, normalize=False):
self._error(field, errors.SCHEMA, validator._errors)
def _validate_type(self, data_type, field, value):
""" {'type': 'tuple',
'itemsrules': {
'oneof': (
{'type': 'string', 'check_with': 'type_names'},
{'type': ('type', 'generic_type_alias')}
)}} """
if not data_type:
return
for _type in data_type:
if isinstance(_type, str):
type_definition = self.types_mapping[_type]
if isinstance(value, type_definition.included_types) and not isinstance(
value, type_definition.excluded_types
):
return
else:
if isinstance(value, _type):
return
self._error(field, errors.TYPE)
self._drop_remaining_rules()
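# Illustrative sketch (assumed): 'type' accepts names from types_mapping as
# well as Python types; when the type check fails, the remaining rules for the
# field are skipped.
#
#     v = Validator({'ids': {'type': 'list', 'itemsrules': {'type': 'integer'}}})
#     v.validate({'ids': [1, 2, 3]})   # -> True
#     v.validate({'ids': 'nope'})      # -> False, 'itemsrules' is not evaluated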
def _validate_valuesrules(self, schema, field, value):
""" {'type': ['dict', 'string'], 'check_with': 'rulesset',
'forbidden': ['rename', 'rename_handler']} """
if isinstance(value, Mapping):
schema_crumb = (field, 'valuesrules')
validator = self._get_child_validator(
document_crumb=field,
schema_crumb=schema_crumb,
schema={k: schema for k in value},
)
validator(value, update=self.update, normalize=False)
if validator._errors:
self._drop_nodes_from_errorpaths(validator._errors, [], [2])
self._error(field, errors.VALUESRULES, validator._errors)
| 38.555133 | 88 | 0.590786 |
293f6f9ad7220e3b81db7ff1321d7c7a4a0a3b22 | 3,494 | py | Python | examples/reaktor_lazerbass.py | rawbengal/python-osc | 18c836fc4cab49f1b662e0b47f25831e58b84c3a | ["Unlicense"] | 2 | 2015-12-14T19:33:37.000Z | 2018-08-24T08:03:42.000Z | examples/reaktor_lazerbass.py | rawbengal/python-osc | 18c836fc4cab49f1b662e0b47f25831e58b84c3a | ["Unlicense"] | null | null | null | examples/reaktor_lazerbass.py | rawbengal/python-osc | 18c836fc4cab49f1b662e0b47f25831e58b84c3a | ["Unlicense"] | 4 | 2015-11-25T00:23:49.000Z | 2017-11-06T19:17:53.000Z |
"""Example to drive/show reaktor's lazerbass instrument in pygame."""
import argparse
import pygame
import multiprocessing
import queue
import logging
from pygame.locals import *
from pythonosc import dispatcher
from pythonosc import osc_server
logging.basicConfig(
level=logging.DEBUG,
format='[%(levelname)s] (%(threadName)-10s) %(message)s',
)
_BLACK = pygame.Color(0, 0, 0)
_WHITE = pygame.Color(255, 255, 255)
class ReaktorDisplay(multiprocessing.Process):
def __init__(self, bq):
multiprocessing.Process.__init__(self)
self._bq = bq
def run(self):
pygame.init()
font = pygame.font.SysFont("monospace", 15)
screen = pygame.display.set_mode((640, 480)) # FULLSCREEN
running = True
dirty = True
# OSC controlled parameters.
self._parameters = {
'beating': 0.0,
'blocks': 0.0,
'basic_Model': 0.0,
'Do!': 0.0,
}
while running:
for event in pygame.event.get():
if event.type == QUIT:
running = False
if dirty:
screen.fill(_BLACK)
# Draw a gauge using rectangles.
# Left, top, width, height.
pygame.draw.rect(
screen, _WHITE, [10, 10, 50, 100], 2)
pygame.draw.rect(
screen, _WHITE, [10, 110, 50, -int(self._parameters['beating'] * 100)])
# Draw a button-like square for on/off display.
pygame.draw.rect(
screen, _WHITE, [10, 200, 50, 50], 2)
pygame.draw.rect(
screen, _WHITE, [10, 200, 50, 50 if self._parameters['blocks'] >= 0.5 else 0])
# Show actual values.
for index, [key, val] in enumerate(self._parameters.items()):
label = font.render("{0}: {1}".format(key, val), 1, _WHITE)
screen.blit(label, (200, index * 15))
pygame.display.flip()
dirty = False
try:
what, value = self._bq.get(True)
self._parameters[what] = value
dirty = True
logging.debug('Received new value {0} = {1}'.format(what, value))
except queue.Empty:
running = False
pygame.quit()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--server_ip", default="0.0.0.0",
help="The ip to listen to for reaktor OSC messages")
parser.add_argument(
"--server_port", type=int, default=8000,
help="The port to listen on for reaktor OSC messages")
#parser.add_argument("--client_ip",
# default="127.0.0.1", help="The ip to listen on")
#parser.add_argument("--client_port",
# type=int, default=5005, help="The port to listen on")
args = parser.parse_args()
#client = udp_client.UDPClient(args.client_ip, args.client_port)
bq = multiprocessing.Queue()
reaktor = ReaktorDisplay(bq)
def put_in_queue(args, value):
"""Put a named argument in the queue to be able to use a single queue."""
bq.put([args[0], value])
dispatcher = dispatcher.Dispatcher()
dispatcher.map("/debug", logging.debug)
dispatcher.map("/beating", put_in_queue, "beating")
dispatcher.map("/blocks", put_in_queue, "blocks")
dispatcher.map("/basic_Model", put_in_queue, "basic_Model")
dispatcher.map("/Do!", put_in_queue, "Do!")
server = osc_server.ThreadingOSCUDPServer(
(args.server_ip, args.server_port), dispatcher)
logging.info("Serving on {}".format(server.server_address))
# Exit thread when the main thread terminates.
reaktor.daemon = True
reaktor.start()
server.serve_forever()
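# A quick way to exercise this script without Reaktor (hedged sketch; it uses
# python-osc's documented udp_client API, run from a separate process):
#
#     from pythonosc import udp_client
#     client = udp_client.SimpleUDPClient("127.0.0.1", 8000)
#     client.send_message("/beating", 0.75)
#     client.send_message("/blocks", 1.0)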
| 30.382609 | 90 | 0.638523 |
a507ce8f8848c201fbbb6269ffde042bd8814669 | 2,193 | py | Python | circuitPython/examples/ev-charge-point/ev-charge-point/eve_helper.py | BRTSG-FOSS/pico-bteve | 1697b9a972ad5e9c2cecca6d560aa16cab725a61 | ["MIT"] | 1 | 2022-01-29T03:16:57.000Z | 2022-01-29T03:16:57.000Z | circuitPython/examples/ev-charge-point/ev-charge-point/eve_helper.py | BRTSG-FOSS/pico-brteve | 1697b9a972ad5e9c2cecca6d560aa16cab725a61 | ["MIT"] | 15 | 2021-09-22T08:36:08.000Z | 2022-01-26T08:51:42.000Z | circuitPython/examples/ev-charge-point/ev-charge-point/eve_helper.py | BRTSG-FOSS/pico-bteve | 1697b9a972ad5e9c2cecca6d560aa16cab725a61 | ["MIT"] | null | null | null |
import time
from brteve.brt_eve_bt817_8 import BrtEve
EVE_CMD_FIFO_SIZE = ((4) * 1024)
EVE_CMD_FIFO_MASK = (EVE_CMD_FIFO_SIZE - 1)
class eve_helper():
RAM_G_SIZE = (1024*1024)
def __init__(self, eve: BrtEve):
self.eve = eve
self.VertextFormat=4
self.VertextPrecision=16
def set_precision(self, VertextFormat):
lst={0:1, 1:2, 2:4, 3:8, 4:16}
self.VertextFormat=VertextFormat
self.VertextPrecision=lst[VertextFormat]
def get_precision(self, v):
return (int)(v*self.VertextPrecision)
def cmd_(self):
pass
def cmd_rp(self):
return self.eve.rd32 (self.eve.REG_CMD_READ) & EVE_CMD_FIFO_MASK
def cmd_wp(self):
return self.eve.rd32 (self.eve.REG_CMD_WRITE) & EVE_CMD_FIFO_MASK
def wait_flush(self):
rp = self.cmd_rp()
wp = self.cmd_wp()
while rp != wp:
rp = self.cmd_rp()
wp = self.cmd_wp()
time.sleep(0.01)
def flash_switch_fullmode(self):
self.eve.cmd_flashdetach()
self.wait_flush()
self.eve.cmd_flashattach()
self.wait_flush()
self.eve.cmd_flashfast()
self.wait_flush()
def Display_Start(self):
self.eve.cmd_dlstart()
self.eve.ClearColorRGB(255, 255, 255)
self.eve.Clear(1, 1, 1)
self.eve.ColorRGB(255, 255, 255)
self.eve.VertexFormat(self.VertextFormat)
def Display_End(self):
self.eve.Display()
self.eve.swap()
self.wait_flush()
def draw_image(self, img, addr, x, y):
self.eve.BitmapHandle(1)
self.eve.cmd_setbitmap(addr, img["format"], img["width"], img["height"])
self.eve.Begin(self.eve.BITMAPS)
self.eve.Vertex2f((x), (y))
self.eve.End()
def draw_image_with_tag(self, img, addr, x, y, tag):
self.eve.BitmapHandle(1)
self.eve.cmd_setbitmap(addr, img["format"], img["width"], img["height"])
self.eve.Tag(tag)
self.eve.Begin(self.eve.BITMAPS)
self.eve.Vertex2f((x),(y))
self.eve.End()
self.eve.Tag(0)
def current_milli_time(self):
return round(time.time() * 1000)
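# Hypothetical usage sketch (not part of the source); assumes an initialized
# BrtEve instance `eve` and a bitmap description already loaded at RAM_G
# address 0:
#
#     logo = {"format": ..., "width": 64, "height": 64}  # fill in the real format
#     helper = eve_helper(eve)
#     helper.Display_Start()
#     helper.draw_image(logo, 0, 100, 100)
#     helper.Display_End()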
| 27.074074 | 80 | 0.603739 |
3ef31950a6d0c5adfa29208684aa3a3a56e3c3bd | 23,710 | py | Python | cryspy/procedure_rhochi/rhochi_pd.py | ikibalin/rhochi | 1ca03f18dc72006322a101ed877cdbba33ed61e7 | ["MIT"] | null | null | null | cryspy/procedure_rhochi/rhochi_pd.py | ikibalin/rhochi | 1ca03f18dc72006322a101ed877cdbba33ed61e7 | ["MIT"] | null | null | null | cryspy/procedure_rhochi/rhochi_pd.py | ikibalin/rhochi | 1ca03f18dc72006322a101ed877cdbba33ed61e7 | ["MIT"] | null | null | null |
import numpy
import scipy
import scipy.interpolate
from cryspy.A_functions_base.matrix_operations import calc_m1_m2_m1t, calc_m_v
from cryspy.A_functions_base.unit_cell import \
calc_sthovl_by_unit_cell_parameters, calc_matrix_t
from cryspy.A_functions_base.structure_factor import \
calc_f_nucl_by_dictionary, \
calc_f_charge_by_dictionary, \
calc_sft_ccs_by_dictionary, \
calc_index_hkl_multiplicity_in_range, \
calc_f_m_perp_ordered_by_dictionary
from cryspy.A_functions_base.integrated_intensity_powder_diffraction import \
calc_powder_iint_1d_para, calc_powder_iint_1d_ordered, calc_powder_iint_1d_mix
from cryspy.A_functions_base.preferred_orientation import calc_preferred_orientation_pd
from cryspy.A_functions_base.powder_diffraction_const_wavelength import \
calc_profile_pseudo_voight, calc_lorentz_factor
from .rhochi_diffrn import get_flags
na = numpy.newaxis
def calc_background(ttheta, background_ttheta, background_intensity, flag_background_intensity: bool = False):
x_p = numpy.copy(background_ttheta)
y_p = numpy.copy(background_intensity)
x_min = ttheta.min()
x_max = ttheta.max()
if x_p.min() > x_min:
y_0 = (y_p[1]-y_p[0])*(x_min - x_p[0])/(x_p[1]-x_p[0]) + y_p[0]
x_p = numpy.insert(x_p, 0, x_min)
y_p = numpy.insert(y_p, 0, y_0)
if x_p.max() <= x_max:
x_max = x_max + 1.
y_last = (y_p[-1]-y_p[-2])*(x_max - x_p[-2])/(x_p[-1]-x_p[-2]) + y_p[-2]
x_p = numpy.append(x_p, x_max)
y_p = numpy.append(y_p, y_last)
x_left = x_p[:-1]
x_right = x_p[1:]
flags = numpy.logical_and(ttheta[:, na] >= x_left[na, :], ttheta[:, na] < x_right[na, :])
p0 = numpy.argwhere(flags)[:,1]
p1 = p0 + 1
intensity = (y_p[p1]-y_p[p0]) * (ttheta-x_p[p0])/(x_p[p1]-x_p[p0]) + y_p[p0]
# f = scipy.interpolate.interp1d(
# background_ttheta, background_intensity, kind="linear", fill_value="extrapolate")
# intensity = f(ttheta)
dder = {}
if flag_background_intensity:
ttheta_shift = ttheta[:, na] - background_ttheta[na, :]
diff_b_tth = background_ttheta[1:]-background_ttheta[:-1]
# y_n + (y_np1-y_np)*(x-x_n)/(x_np1-x_n)
# 1 -(x-x_n)/(x_np1-x_n) + (x-x_nm1)/(x_n-x_nm1)
dder_bkgr = numpy.zeros((ttheta.size, background_ttheta.size), dtype=float)
dder["background_intensity"] = dder_bkgr
return intensity, dder
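# Small numerical sketch (added for illustration): the background is a
# piecewise-linear interpolation of the control points, extrapolated so that it
# covers the full 2-theta range of the pattern.
#
#     ttheta = numpy.array([10., 20., 30.])
#     bkg_ttheta = numpy.array([10., 30.])
#     bkg_int = numpy.array([100., 300.])
#     calc_background(ttheta, bkg_ttheta, bkg_int)[0]   # -> array([100., 200., 300.])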
def calc_chi_sq_for_pd_by_dictionary(
dict_pd, dict_crystals, dict_in_out: dict = None, flag_use_precalculated_data: bool=False,
flag_calc_analytical_derivatives: bool = False):
"""Calculate chi_sq for diffrn experiment.
"""
if dict_in_out is None:
flag_dict = False
flag_use_precalculated_data = False
dict_in_out_keys = []
else:
flag_dict = True
dict_in_out_keys = dict_in_out.keys()
dict_pd_keys = dict_pd.keys()
phase_name = [hh["name"].lower() for hh in dict_crystals]
excluded_points = dict_pd["excluded_points"]
ttheta = dict_pd["ttheta"]
offset_ttheta = dict_pd["offset_ttheta"]
ttheta_zs = ttheta - offset_ttheta
flags_offset_ttheta = dict_pd["flags_offset_ttheta"]
if flag_dict:
dict_in_out["ttheta"] = ttheta_zs
dict_in_out["excluded_points"] = excluded_points
wavelength = dict_pd["wavelength"]
flags_wavelength = dict_pd["flags_wavelength"]
radiation = dict_pd["radiation"]
if "beam_polarization" in dict_pd_keys:
beam_polarization = dict_pd["beam_polarization"]
flipper_efficiency = dict_pd["flipper_efficiency"]
magnetic_field = dict_pd["magnetic_field"]
flags_beam_polarization = dict_pd["flags_beam_polarization"]
flags_flipper_efficiency = dict_pd["flags_flipper_efficiency"]
else:
beam_polarization, flipper_efficiency, magnetic_field = 0., 0., 0.
flags_beam_polarization, flags_flipper_efficiency = False, False
sthovl_min = numpy.sin(0.5*ttheta_zs.min() - numpy.pi/90.)/wavelength
if sthovl_min <= 0:
sthovl_min = 0.0001
sthovl_max = numpy.sin(0.5*ttheta_zs.max() + numpy.pi/90.)/wavelength
if sthovl_max <= sthovl_min:
sthovl_max = sthovl_min+0.01
if sthovl_max >= 1.:
sthovl_max = 0.99999/wavelength
background_ttheta = dict_pd["background_ttheta"]
background_intensity = dict_pd["background_intensity"]
flags_background_intensity = dict_pd["flags_background_intensity"]
flag_background_intensity = numpy.any(flags_background_intensity)
if (flag_use_precalculated_data and ("signal_background" in dict_in_out) and not(flag_background_intensity)):
signal_background = dict_in_out["signal_background"]
else:
signal_background, dder_s_bkgr = calc_background(ttheta, background_ttheta, background_intensity,
flag_background_intensity= (flag_background_intensity and flag_calc_analytical_derivatives))
dict_in_out["signal_background"] = signal_background
pd_phase_name = dict_pd["phase_name"]
pd_phase_scale = dict_pd["phase_scale"]
pd_phase_resolution_parameters = dict_pd["phase_resolution_parameters"] # U_phase, V_phase, W_phase, X_phase, Y_phase
pd_phase_ig = dict_pd["phase_ig"] # IG_phase
flags_pd_phase_scale = dict_pd["flags_phase_scale"]
flags_pd_phase_resolution_parameters = dict_pd["flags_phase_resolution_parameters"] # U_phase, V_phase, W_phase, X_phase, Y_phase
flags_pd_phase_ig = dict_pd["flags_phase_ig"] # IG_phase
resolution_parameters = dict_pd["resolution_parameters"] # U, V, W, X, Y
asymmetry_parameters = dict_pd["asymmetry_parameters"] # p1, p2, p3, p4
flags_resolution_parameters = dict_pd["flags_resolution_parameters"]
flags_asymmetry_parameters = dict_pd["flags_asymmetry_parameters"]
flag_asymmetry_parameters = numpy.any(flags_asymmetry_parameters)
if "texture_name" in dict_pd_keys:
flag_texture = True
pd_texture_name = dict_pd["texture_name"]
pd_texture_g1 = dict_pd["texture_g1"]
pd_texture_g2 = dict_pd["texture_g2"]
pd_texture_axis = dict_pd["texture_axis"]
pd_flags_texture_g1 = dict_pd["flags_texture_g1"]
pd_flags_texture_g2 = dict_pd["flags_texture_g2"]
pd_flags_texture_axis = dict_pd["flags_texture_axis"]
else:
flag_texture = False
k = dict_pd["k"]
cthm = dict_pd["cthm"]
lorentz_factor, dder_lf = calc_lorentz_factor(ttheta_zs, k=k, cthm=cthm, flag_ttheta=flags_offset_ttheta)
dict_in_out["lorentz_factor"] = lorentz_factor
total_signal_plus = numpy.zeros_like(ttheta_zs)
total_signal_minus = numpy.zeros_like(ttheta_zs)
for p_name, p_scale, p_resolution, p_ig, flags_p_scale, flags_p_resolution, flags_p_ig in zip(pd_phase_name,
pd_phase_scale, pd_phase_resolution_parameters.transpose(), pd_phase_ig,
flags_pd_phase_scale, flags_pd_phase_resolution_parameters.transpose(), flags_pd_phase_ig):
p_name = p_name.lower()
flag_phase_texture = False
if flag_texture:
ind_texture = numpy.argwhere(pd_texture_name==p_name)
if ind_texture.shape[0] != 0:
texture_g1 = pd_texture_g1[ind_texture[0]]
texture_g2 = pd_texture_g2[ind_texture[0]]
texture_axis = pd_texture_axis[:, ind_texture[0]]
flag_phase_texture = True
flags_texture_g1 = pd_flags_texture_g1[ind_texture[0]]
flags_texture_g2 = pd_flags_texture_g2[ind_texture[0]]
flags_texture_axis = pd_flags_texture_axis[:, ind_texture[0]]
ind_phase = phase_name.index(p_name)
dict_crystal = dict_crystals[ind_phase]
dict_in_out_keys = dict_in_out.keys()
if f"dict_in_out_{p_name:}" in dict_in_out_keys:
dict_in_out_phase = dict_in_out[f"dict_in_out_{p_name:}"]
else:
dict_in_out_phase = {}
dict_in_out[f"dict_in_out_{p_name:}"] = dict_in_out_phase
dict_in_out_phase_keys = dict_in_out_phase.keys()
dict_crystal_keys = dict_crystal.keys()
if "reduced_symm_elems" in dict_crystal_keys:
reduced_symm_elems = dict_crystal["reduced_symm_elems"]
translation_elems = dict_crystal["translation_elems"]
elif "full_symm_elems" in dict_crystal_keys:
full_symm_elems = dict_crystal["full_symm_elems"]
elif "full_mcif_elems" in dict_crystal_keys:
full_mcif_elems = dict_crystal["full_mcif_elems"]
unit_cell_parameters = dict_crystal["unit_cell_parameters"]
flags_unit_cell_parameters = dict_crystal["flags_unit_cell_parameters"]
flag_unit_cell_parameters = numpy.any(flags_unit_cell_parameters)
if flag_unit_cell_parameters:
sc_uc = dict_crystal["sc_uc"]
v_uc = dict_crystal["v_uc"]
unit_cell_parameters = numpy.dot(sc_uc, unit_cell_parameters) + v_uc
if (flag_use_precalculated_data and
("index_hkl" in dict_in_out_phase_keys) and
("multiplicity_hkl" in dict_in_out_phase_keys) and not(flag_unit_cell_parameters or flags_offset_ttheta)):
index_hkl = dict_in_out_phase["index_hkl"]
multiplicity_hkl = dict_in_out_phase["multiplicity_hkl"]
else:
if flag_phase_texture:
reduced_symm_elems_p1 = numpy.array([[0], [0], [0], [1], [1], [0], [0], [0], [1], [0], [0], [0], [1]], dtype=int)
translation_elems_p1 = numpy.array([[0], [0], [0], [1]], dtype=int)
index_hkl, multiplicity_hkl = calc_index_hkl_multiplicity_in_range(
sthovl_min, sthovl_max, unit_cell_parameters, reduced_symm_elems_p1, translation_elems_p1, False)
else:
if "reduced_symm_elems" in dict_crystal_keys:
centrosymmetry = dict_crystal["centrosymmetry"]
index_hkl, multiplicity_hkl = calc_index_hkl_multiplicity_in_range(
sthovl_min, sthovl_max, unit_cell_parameters, reduced_symm_elems, translation_elems, centrosymmetry)
else:
translation_elems_p1 = numpy.array([[0], [0], [0], [1]], dtype=int)
index_hkl, multiplicity_hkl = calc_index_hkl_multiplicity_in_range(
sthovl_min, sthovl_max, unit_cell_parameters, full_mcif_elems[:13], translation_elems_p1, False)
if (("index_hkl" in dict_in_out_phase_keys) and flag_use_precalculated_data):
if index_hkl.shape != dict_in_out_phase["index_hkl"].shape:
flag_use_precalculated_data = False
else:
flag_use_precalculated_data = numpy.all(numpy.logical_and(dict_in_out_phase["index_hkl"], index_hkl))
dict_in_out_phase["index_hkl"] = index_hkl
dict_in_out_phase["multiplicity_hkl"] = multiplicity_hkl
flag_sthovl_hkl = flag_unit_cell_parameters
sthovl_hkl, dder_sthovl_hkl = calc_sthovl_by_unit_cell_parameters(index_hkl,
unit_cell_parameters, flag_unit_cell_parameters=flag_unit_cell_parameters)
flag_ttheta_hkl = flag_sthovl_hkl or flags_wavelength
ttheta_hkl = 2*numpy.arcsin(sthovl_hkl*wavelength)
dict_in_out_phase["ttheta_hkl"] = ttheta_hkl
if radiation[0].startswith("neutrons"):
f_nucl, dder_f_nucl = calc_f_nucl_by_dictionary(
dict_crystal, dict_in_out_phase, flag_use_precalculated_data=flag_use_precalculated_data)
flag_f_nucl = len(dder_f_nucl.keys()) > 0
flag_para = False
if "atom_para_index" in dict_crystal_keys:
sft_ccs, dder_sft_ccs = calc_sft_ccs_by_dictionary(
dict_crystal, dict_in_out_phase, flag_use_precalculated_data=flag_use_precalculated_data)
flag_sft_ccs = len(dder_sft_ccs.keys()) > 0
flag_matrix_t = flag_unit_cell_parameters
matrix_t, dder_matrix_t = calc_matrix_t(
index_hkl, unit_cell_parameters, flag_unit_cell_parameters=flag_unit_cell_parameters)
flag_tensor_sigma = flag_sft_ccs or flag_unit_cell_parameters
tensor_sigma, dder_tensor_sigma = calc_m1_m2_m1t(matrix_t, sft_ccs, flag_m1=flag_sft_ccs, flag_m2=flag_unit_cell_parameters)
flag_para = True
flag_ordered = False
if "atom_ordered_index" in dict_crystal_keys:
f_m_perp_o_ccs, dder_f_m_perp_o_ccs = calc_f_m_perp_ordered_by_dictionary(
dict_crystal, dict_in_out_phase, flag_use_precalculated_data=flag_use_precalculated_data)
flag_f_m_perp_o = len(dder_f_m_perp_o_ccs.keys()) > 0
flag_ordered = True
flag_matrix_t = flag_unit_cell_parameters
matrix_t, dder_matrix_t = calc_matrix_t(
index_hkl, unit_cell_parameters, flag_unit_cell_parameters=flag_unit_cell_parameters)
f_m_perp_o, dder_f_m_perp_o = calc_m_v(matrix_t, f_m_perp_o_ccs, flag_m=flag_unit_cell_parameters, flag_v=flag_f_m_perp_o)
if flag_para and not(flag_ordered):
flag_iint_plus_minus = flag_f_nucl or flag_tensor_sigma or flags_beam_polarization or flags_flipper_efficiency
if (("iint_plus" in dict_in_out_phase_keys) and ("iint_minu" in dict_in_out_phase_keys) and
flag_use_precalculated_data and not(flag_iint_plus_minus)):
iint_plus, iint_minus = dict_in_out_phase["iint_plus"], dict_in_out_phase["iint_minus"]
else:
iint_plus, iint_minus, dder_plus, dder_minus = calc_powder_iint_1d_para(
f_nucl, tensor_sigma, beam_polarization, flipper_efficiency, magnetic_field,
flag_f_nucl=flag_f_nucl, flag_tensor_sigma=flag_tensor_sigma,
flag_polarization=flags_beam_polarization, flag_flipper=flags_flipper_efficiency)
elif not(flag_para) and flag_ordered:
flag_iint_plus_minus = flag_f_nucl or flag_f_m_perp_o or flags_beam_polarization or flags_flipper_efficiency
if (("iint_plus" in dict_in_out_phase_keys) and ("iint_minus" in dict_in_out_phase_keys) and
flag_use_precalculated_data and not(flag_iint_plus_minus)):
iint_plus, iint_minus = dict_in_out_phase["iint_plus"], dict_in_out_phase["iint_minus"]
else:
iint_plus, dder_plus = calc_powder_iint_1d_ordered(
f_nucl, f_m_perp_o,
flag_f_nucl=flag_f_nucl and flag_calc_analytical_derivatives,
flag_f_m_perp=flag_f_m_perp_o and flag_calc_analytical_derivatives)
iint_minus = iint_plus
dder_minus = dder_plus
elif flag_para and flag_ordered:
flag_iint_plus_minus = flag_f_nucl or flag_tensor_sigma or flag_f_m_perp_o or flags_beam_polarization or flags_flipper_efficiency
if (("iint_plus" in dict_in_out_phase_keys) and ("iint_minu" in dict_in_out_phase_keys) and
flag_use_precalculated_data and not(flag_iint_plus_minus)):
iint_plus, iint_minus = dict_in_out_phase["iint_plus"], dict_in_out_phase["iint_minus"]
else:
iint_plus, iint_minus, dder_plus, dder_minus = calc_powder_iint_1d_mix(
f_nucl, f_m_perp_o, tensor_sigma, beam_polarization, flipper_efficiency, magnetic_field,
flag_f_nucl=flag_f_nucl and flag_calc_analytical_derivatives,
flag_f_m_perp_ordered=flag_f_m_perp_o and flag_calc_analytical_derivatives,
flag_tensor_sigma=flag_tensor_sigma and flag_calc_analytical_derivatives,
flag_polarization=flags_beam_polarization and flag_calc_analytical_derivatives,
flag_flipper=flags_flipper_efficiency and flag_calc_analytical_derivatives)
else:
iint_plus = numpy.square(numpy.abs(f_nucl))
iint_minus = numpy.square(numpy.abs(f_nucl))
dict_in_out_phase["iint_plus"] = iint_plus
dict_in_out_phase["iint_minus"] = iint_minus
elif radiation[0].startswith("X-rays"):
f_charge, dder_f_charge = calc_f_charge_by_dictionary(
dict_crystal, wavelength, dict_in_out_phase, flag_use_precalculated_data=flag_use_precalculated_data)
flag_f_charge = len(dder_f_charge.keys()) > 0
iint = numpy.square(numpy.abs(f_charge))
# FIXME: preparation for XMD
iint_plus = iint
iint_minus = iint
dict_in_out_phase["iint_plus"] = iint_plus
dict_in_out_phase["iint_minus"] = iint_minus
if flag_phase_texture:
flag_texture_g1 = numpy.any(flags_texture_g1)
flag_texture_g2 = numpy.any(flags_texture_g2)
flag_texture_axis = numpy.any(flags_texture_axis)
flag_hh = numpy.any([flag_texture_g1, flag_texture_g2, flag_texture_axis])
if (flag_use_precalculated_data and
("preferred_orientation" in dict_in_out_phase_keys) and
not(flag_hh)):
preferred_orientation = dict_in_out_phase["preferred_orientation"]
else:
preferred_orientation, dder_po = calc_preferred_orientation_pd(
index_hkl, texture_g1, texture_g2, texture_axis, unit_cell_parameters,
flag_texture_g1=flag_texture_g1 and flag_calc_analytical_derivatives,
flag_texture_g2=flag_texture_g2 and flag_calc_analytical_derivatives,
flag_texture_axis=flag_texture_axis and flag_calc_analytical_derivatives)
dict_in_out_phase["preferred_orientation"] = preferred_orientation
flag_rp = numpy.any(flags_p_resolution) or numpy.any(flags_resolution_parameters)
hh = resolution_parameters + p_resolution
u, v, w, x, y = hh[0], hh[1], hh[2], hh[3], hh[4]
p_1, p_2, p_3, p_4 = asymmetry_parameters[0], asymmetry_parameters[1], asymmetry_parameters[2], asymmetry_parameters[3]
profile_pv, dder_pv = calc_profile_pseudo_voight(ttheta_zs, ttheta_hkl, u, v, w, p_ig, x, y,
p_1, p_2, p_3, p_4,
flag_ttheta=flags_offset_ttheta,
flag_ttheta_hkl=flag_ttheta_hkl, flag_u=flag_rp,
flag_v=flag_rp, flag_w=flag_rp, flag_i_g=flags_p_ig,
flag_x=flag_rp, flag_y=flag_rp,
flag_p_1=flag_asymmetry_parameters, flag_p_2=flag_asymmetry_parameters,
flag_p_3=flag_asymmetry_parameters, flag_p_4=flag_asymmetry_parameters)
dict_in_out_phase["profile_pv"] = profile_pv
# flags_p_scale
iint_m_plus = iint_plus * multiplicity_hkl
iint_m_minus = iint_minus * multiplicity_hkl
lf = calc_lorentz_factor(ttheta_hkl, k=k, cthm=cthm, flag_ttheta=None)[0]
dict_in_out_phase["iint_plus_with_factors"] = 0.5 * p_scale * lf * iint_m_plus
dict_in_out_phase["iint_minus_with_factors"] = 0.5 * p_scale * lf * iint_m_minus
if flag_texture:
# 0.5 to have the same meaning for the scale factor as in FullProf
signal_plus = 0.5 * p_scale * lorentz_factor * (profile_pv * (iint_m_plus * preferred_orientation)[na, :]).sum(axis=1) # sum over hkl
signal_minus = 0.5 * p_scale * lorentz_factor * (profile_pv * (iint_m_minus * preferred_orientation)[na, :]).sum(axis=1)
dict_in_out_phase["iint_plus_with_factors"] *= preferred_orientation
dict_in_out_phase["iint_minus_with_factors"] *= preferred_orientation
else:
signal_plus = 0.5 * p_scale * lorentz_factor * (profile_pv * iint_m_plus[na, :]).sum(axis=1)
signal_minus = 0.5 * p_scale * lorentz_factor * (profile_pv * iint_m_minus[na, :]).sum(axis=1)
dict_in_out_phase["signal_plus"] = signal_plus
dict_in_out_phase["signal_minus"] = signal_minus
total_signal_plus += signal_plus
total_signal_minus += signal_minus
if flag_dict:
dict_in_out["signal_plus"] = total_signal_plus
dict_in_out["signal_minus"] = total_signal_minus
if ("signal_exp_plus" in dict_pd_keys) and ("signal_exp_minus" in dict_pd_keys):
signal_exp_plus = dict_pd["signal_exp_plus"]
signal_exp_minus = dict_pd["signal_exp_minus"]
if flag_dict:
dict_in_out["signal_exp_plus"] = signal_exp_plus
dict_in_out["signal_exp_minus"] = signal_exp_minus
flag_chi_sq_sum, flag_chi_sq_difference = True, True
if "flag_chi_sq_sum" in dict_pd_keys:
flag_chi_sq_sum = dict_pd["flag_chi_sq_sum"]
if "flag_chi_sq_difference" in dict_pd_keys:
flag_chi_sq_difference = dict_pd["flag_chi_sq_difference"]
if flag_chi_sq_sum:
signal_exp = signal_exp_plus[0, :] + signal_exp_minus[0, :]
signal_sigma = numpy.sqrt(numpy.square(signal_exp_plus[1, :]) + numpy.square(signal_exp_minus[1, :]))
else:
signal_exp = dict_pd["signal_exp"][0,:]
signal_sigma = dict_pd["signal_exp"][1,:]
if flag_dict:
dict_in_out["signal_exp"] = dict_pd["signal_exp"]
flag_chi_sq_sum = True
flag_chi_sq_difference = False
chi_sq = 0.
n_point = 0
if flag_chi_sq_sum:
in_points = numpy.logical_not(excluded_points)
total_signal_sum = total_signal_plus + total_signal_minus + signal_background
chi_sq_sum = ((numpy.square((signal_exp - total_signal_sum)/signal_sigma)*in_points)).sum(axis=0)
chi_sq += chi_sq_sum
n_point += numpy.sum(in_points)
if flag_chi_sq_difference:
signal_exp_diff = signal_exp_plus[0, :] - signal_exp_minus[0, :]
signal_sigma_diff = numpy.sqrt(numpy.square(signal_exp_plus[1, :]) + numpy.square(signal_exp_minus[1, :]))
total_signal_diff = total_signal_plus - total_signal_minus
chi_sq_diff = (numpy.square((signal_exp_diff - total_signal_diff)/signal_sigma_diff)).sum(axis=0)
chi_sq += chi_sq_diff
n_point += signal_exp_diff.shape[0]
if numpy.isnan(chi_sq):
chi_sq = 1e30
flags_pd = get_flags(dict_pd)
l_flags_crystal = [get_flags(dict_crystal) for dict_crystal in dict_crystals]
l_parameter_name = []
for way, flags in flags_pd.items():
pd_type_name = dict_pd["type_name"]
ind_1d = numpy.atleast_1d(numpy.argwhere(flags)) #.flatten()
parameter_name = [(pd_type_name, ) + way + (tuple(ind_1d[ind,:]), ) for ind in range(ind_1d.shape[0])]
l_parameter_name.extend(parameter_name)
for flags_crystal, dict_crystal in zip(l_flags_crystal, dict_crystals):
for way, flags in flags_crystal.items():
crystal_type_name = dict_crystal["type_name"]
ind_1d = numpy.atleast_1d(numpy.argwhere(flags)) #.flatten()
parameter_name = [(crystal_type_name, ) + way + (tuple(ind_1d[ind,:]), ) for ind in range(ind_1d.shape[0])]
l_parameter_name.extend(parameter_name)
if flag_background_intensity:
pass
der_chi_sq = numpy.zeros((len(l_parameter_name), ), dtype=float)
dder_chi_sq = numpy.zeros((len(l_parameter_name), len(l_parameter_name)), dtype=float)
return chi_sq, n_point, der_chi_sq, dder_chi_sq, l_parameter_name
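# Illustrative sketch (added for clarity, not part of the original module): the
# chi-square accumulated above reduces to a standard weighted residual sum with
# excluded points carrying zero weight. The arrays below are hypothetical
# stand-ins for the experimental signal, the calculated total signal and sigma.
def _example_weighted_chi_sq():
    import numpy
    signal_exp = numpy.array([10.0, 12.0, 9.5])
    signal_calc = numpy.array([9.8, 12.4, 9.9])
    signal_sigma = numpy.array([0.5, 0.6, 0.4])
    in_points = numpy.array([True, True, False])  # third point is excluded
    chi_sq = (numpy.square((signal_exp - signal_calc) / signal_sigma) * in_points).sum()
    n_point = numpy.sum(in_points)
    return chi_sq, n_point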
| 51.655773
| 145
| 0.680051
|
8197be9c56c0ed01500155a45f1181e855d5dc2e
| 2,865
|
py
|
Python
|
ml/rl/types.py
|
baek-jinoo/Horizon
|
30c75bed0e328e18bfc324f010101fac930c3abc
|
[
"BSD-3-Clause"
] | null | null | null |
ml/rl/types.py
|
baek-jinoo/Horizon
|
30c75bed0e328e18bfc324f010101fac930c3abc
|
[
"BSD-3-Clause"
] | null | null | null |
ml/rl/types.py
|
baek-jinoo/Horizon
|
30c75bed0e328e18bfc324f010101fac930c3abc
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
"""
EXPERIMENTAL
These generic types define a common interface between modules. Above all, they
facilitate model exporting through ONNX. ONNX doesn't trace dictionaries, so we
use NamedTuple in place of dictionaries. NamedTuple is also more compact
than a dictionary, so this should be good overall.
"""
from typing import Any, NamedTuple, Optional, Union
import numpy as np
import torch
from caffe2.python import core
"""
We use a mix of frameworks in our system. Therefore, we can't pinpoint the exact
type of value
"""
ValueType = Union[torch.Tensor, core.BlobReference, np.ndarray]
class IdListFeature(NamedTuple):
lengths: ValueType
values: ValueType
class FeatureVector(NamedTuple):
float_features: Optional[ValueType] = None
# id_list_features should ideally be Mapping[str, IdListFeature]; however,
# that doesn't work well with ONNX.
# User is expected to dynamically define the type of id_list_features based
# on the actual features used in the model.
id_list_features: Optional[NamedTuple] = None
DiscreteAction = ValueType
ParametricAction = FeatureVector
class ActorOutput(NamedTuple):
action: ValueType
log_prob: Optional[ValueType] = None
Action = Union[
DiscreteAction, ParametricAction
] # One-hot vector for discrete action DQN and feature vector for everyone else
State = FeatureVector
class StateInput(NamedTuple):
"""
This class makes it easier to plug modules into predictor
"""
state: State
class StateAction(NamedTuple):
state: State
action: Action
class MaxQLearningInput(NamedTuple):
state: State
action: Action
next_action: Action
next_state: Optional[State] # Available in case of discrete action
tiled_next_state: Optional[State] # Available in case of parametric action
possible_actions: Optional[Action]
possible_actions_mask: ValueType
possible_next_actions: Optional[Action]
possible_next_actions_mask: ValueType
reward: ValueType
not_terminal: ValueType
step: Optional[ValueType]
time_diff: ValueType
class SARSAInput(NamedTuple):
state: State
action: Action
next_state: State
next_action: Action
reward: ValueType
not_terminal: ValueType
step: Optional[ValueType]
time_diff: ValueType
class ExtraData(NamedTuple):
action_probability: Optional[ValueType] = None
class TrainingBatch(NamedTuple):
training_input: Union[MaxQLearningInput, SARSAInput]
extras: Any
class SingleQValue(NamedTuple):
q_value: ValueType
class AllActionQValues(NamedTuple):
q_values: ValueType
class CappedContinuousAction(NamedTuple):
"""
Continuous action in range [-1, 1], e.g., the output of DDPG actor
"""
action: ValueType
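# Minimal usage sketch (added for illustration, not part of the original module):
# it shows how the NamedTuple containers above are meant to be composed. The
# tensor shapes and values are arbitrary examples.
def _example_state_action():
    state = FeatureVector(float_features=torch.randn(1, 4))
    action = torch.tensor([[0.0, 1.0]])  # one-hot discrete action
    return StateAction(state=state, action=action)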
| 23.483607
| 80
| 0.74555
|
cef3b259a23fd83f5525280d0fdad775727ff12c
| 3,195
|
py
|
Python
|
nbkode/testsuite/test_against_scipy.py
|
Yash-10/numbakit-ode
|
aa5a0f417a2218bd471db754b35cc61996b2461e
|
[
"BSD-3-Clause"
] | 37
|
2020-11-07T08:53:49.000Z
|
2021-12-24T00:01:16.000Z
|
nbkode/testsuite/test_against_scipy.py
|
Yash-10/numbakit-ode
|
aa5a0f417a2218bd471db754b35cc61996b2461e
|
[
"BSD-3-Clause"
] | 24
|
2020-11-04T02:05:28.000Z
|
2022-03-28T21:14:08.000Z
|
nbkode/testsuite/test_against_scipy.py
|
Yash-10/numbakit-ode
|
aa5a0f417a2218bd471db754b35cc61996b2461e
|
[
"BSD-3-Clause"
] | 4
|
2020-12-24T09:19:50.000Z
|
2022-03-04T16:45:49.000Z
|
"""
nbkode.testsuite.test_against_scipy
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Comparisons using SciPy as a gold standard.
:copyright: 2020 by nbkode Authors, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import numpy as np
import pytest
from numpy.testing import assert_allclose
from scipy import integrate
import nbkode
from nbkode.nbcompat import numba
equivalents = [
(nbkode.RungeKutta23, integrate.RK23),
(nbkode.RungeKutta45, integrate.RK45),
(nbkode.DOP853, integrate.DOP853),
]
@numba.njit
def exponential1(t, x):
return -0.01 * x
@numba.njit
def exponential2(t, x):
return np.asarray([-0.01, -0.05]) * x
y0_1 = np.atleast_1d(1.0)
y0_2 = np.atleast_1d([1.0, 2.0])
@pytest.mark.parametrize("nbkode_cls, scipy_cls", equivalents)
def test_exponential1(nbkode_cls, scipy_cls):
nbkode_sol = nbkode_cls(exponential1, 0, y0_1)
scipy_sol = scipy_cls(exponential1, 0, y0_1, t_bound=30)
assert_allclose(nbkode_sol.f, scipy_sol.f)
assert_allclose(nbkode_sol.h, scipy_sol.h_abs)
ndx = 0
while True:
ndx += 1
nbkode_sol.step()
scipy_sol.step()
if scipy_sol.status != "running":
break
# We do not compare the last state because SciPy solvers are bounded by
# t_bound within a step, while nbkode solvers are not.
msg = f"Step: {ndx}, Time: {scipy_sol.t}"
assert_allclose(nbkode_sol.t, scipy_sol.t, err_msg=msg)
assert_allclose(nbkode_sol.y, scipy_sol.y, err_msg=msg)
assert_allclose(nbkode_sol.f, scipy_sol.f, err_msg=msg)
assert_allclose(nbkode_sol.h, scipy_sol.h_abs, err_msg=msg)
assert_allclose(nbkode_sol.K, scipy_sol.K, err_msg=msg)
@pytest.mark.parametrize("nbkode_cls, scipy_cls", equivalents)
def test_exponential2(nbkode_cls, scipy_cls):
nbkode_sol = nbkode_cls(exponential2, 0, y0_2)
scipy_sol = scipy_cls(exponential2, 0, y0_2, t_bound=30)
assert_allclose(nbkode_sol.f, scipy_sol.f)
assert_allclose(nbkode_sol.h, scipy_sol.h_abs)
ndx = 0
while True:
ndx += 1
nbkode_sol.step()
scipy_sol.step()
if scipy_sol.status != "running":
break
# We do not compare the last state because SciPy solvers are bounded by
# t_bound within a step, while nbkode solvers are not.
msg = f"Step: {ndx}, Time: {scipy_sol.t}"
assert_allclose(nbkode_sol.t, scipy_sol.t, err_msg=msg)
assert_allclose(nbkode_sol.y, scipy_sol.y, err_msg=msg)
assert_allclose(nbkode_sol.f, scipy_sol.f, err_msg=msg)
assert_allclose(nbkode_sol.h, scipy_sol.h_abs, err_msg=msg)
assert_allclose(nbkode_sol.K, scipy_sol.K, err_msg=msg)
# TODO: RK23 interpolation is not working correctly, the results do no match SciPy
@pytest.mark.parametrize("nbkode_cls, scipy_cls", equivalents[1:])
def test_interpolate(nbkode_cls, scipy_cls):
t_eval = np.linspace(0, 300, 160)
nb_t, nb_y = nbkode_cls(exponential2, 0, y0_2, t_bound=500).run(t_eval)
scipy_sol = integrate.solve_ivp(
exponential2, [0, 500], y0_2, t_eval=t_eval, method=scipy_cls.__name__
)
assert_allclose(nb_t, scipy_sol.t)
assert_allclose(nb_y, scipy_sol.y.T)
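# Hedged sketch (not part of the original test suite): the two step-by-step
# tests above share the same lock-step loop, which could be factored into a
# helper like this one. It relies only on names already imported in this file.
def _assert_lockstep_equal(nbkode_sol, scipy_sol):
    ndx = 0
    while True:
        ndx += 1
        nbkode_sol.step()
        scipy_sol.step()
        if scipy_sol.status != "running":
            break
        msg = f"Step: {ndx}, Time: {scipy_sol.t}"
        assert_allclose(nbkode_sol.t, scipy_sol.t, err_msg=msg)
        assert_allclose(nbkode_sol.y, scipy_sol.y, err_msg=msg)
        assert_allclose(nbkode_sol.f, scipy_sol.f, err_msg=msg)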
| 32.602041
| 82
| 0.683568
|
a861847cb01ded5597035cc3b66e4eb713b76a67
| 4,979
|
py
|
Python
|
apps/admin/views/user.py
|
panla/kesousou
|
df6751b0a2e8eeec5122418cd29fe1f99e23f39e
|
[
"MIT"
] | 1
|
2021-04-27T01:18:45.000Z
|
2021-04-27T01:18:45.000Z
|
apps/admin/views/user.py
|
panla/kesousou
|
df6751b0a2e8eeec5122418cd29fe1f99e23f39e
|
[
"MIT"
] | 5
|
2020-08-26T14:47:47.000Z
|
2021-03-22T08:56:43.000Z
|
apps/admin/views/user.py
|
panla/kesousou
|
df6751b0a2e8eeec5122418cd29fe1f99e23f39e
|
[
"MIT"
] | null | null | null |
import datetime
from django.db.models import Q
from django.contrib.auth.views import get_user_model
from rest_framework import status
from rest_framework import generics
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework_jwt.settings import api_settings
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
from drf_yasg.utils import swagger_auto_schema
from common.users import authenticate, IsAdminUser
from common.page import page_params, get_results
from admin.parameters.user import create_token_response, create_token_body, token_parameters
from admin.parameters.user import user_create_parameters, user_update_parameters, user_filter_params
from admin.serializers.user import UserSerializer
User = get_user_model()
jwt_response_payload_handler = api_settings.JWT_RESPONSE_PAYLOAD_HANDLER
jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER
jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER
jwt_decode_handler = api_settings.JWT_DECODE_HANDLER
class UserJWTView(APIView):
@swagger_auto_schema(
request_body=create_token_body, operation_id='create_token', responses={201: create_token_response},
tags=['users']
)
def post(self, request, *args, **kwargs):
"""
Log in and obtain a JWT token.
"""
user = authenticate(request.data.get('username'), request.data.get('password'))
if user:
token = jwt_encode_handler(jwt_payload_handler(user))
response = Response({'token': token, 'user_id': user.id}, status=status.HTTP_201_CREATED)
if api_settings.JWT_AUTH_COOKIE:
expiration = (datetime.datetime.utcnow() + api_settings.JWT_EXPIRATION_DELTA)
response.set_cookie(api_settings.JWT_AUTH_COOKIE, token, expires=expiration, httponly=True)
return response
return Response({'detail': 'This user failed verification'}, status=status.HTTP_400_BAD_REQUEST)
class UsersView(APIView):
authentication_classes = [JSONWebTokenAuthentication]
permission_classes = [IsAdminUser]
dic = {
'false': False,
'true': True
}
@swagger_auto_schema(
manual_parameters=page_params + token_parameters + user_filter_params, operation_id='user_list',
responses={200: UserSerializer(many=True)}, tags=['users']
)
def get(self, request, *args, **kwargs):
"""
List users.
Filter parameters: text, is_superuser, is_active; ordering parameter: order.
The text filter matches the name, mobile, and email fields.
"""
text = request.query_params.get('text')
is_superuser = request.query_params.get('is_superuser')
is_active = request.query_params.get('is_active')
order = request.query_params.get('order')
queryset = User.objects.all()
if text:
queryset = queryset.filter(Q(name=text) | Q(mobile=text) | Q(email=text))
if is_superuser:
queryset = queryset.filter(is_superuser=self.dic[is_superuser])
if is_active:
queryset = queryset.filter(is_active=self.dic[is_active])
if order:
queryset = queryset.order_by(order)
data = get_results(request, queryset, self, UserSerializer)
return Response({'count': queryset.count(), 'users': data}, status.HTTP_200_OK)
@swagger_auto_schema(
manual_parameters=token_parameters, request_body=user_create_parameters, operation_id='user_create',
responses={201: UserSerializer()},
tags=['users']
)
def post(self, request, *args, **kwargs):
"""
Create a user.
"""
serializer = UserSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response({'user': serializer.data}, status.HTTP_201_CREATED)
else:
return Response({'error': f'{serializer.errors}'}, status.HTTP_400_BAD_REQUEST)
class UserView(generics.RetrieveUpdateAPIView):
authentication_classes = [JSONWebTokenAuthentication]
permission_classes = [IsAdminUser]
queryset = User.objects.all()
serializer_class = UserSerializer
@swagger_auto_schema(
manual_parameters=token_parameters, operation_id='user_retrieve',
responses={200: UserSerializer()}, tags=['users']
)
def get(self, request, *args, **kwargs):
return self.retrieve(request, *args, **kwargs)
@swagger_auto_schema(
manual_parameters=token_parameters, request_body=user_update_parameters, operation_id='user_update',
responses={201: UserSerializer()}, tags=['users']
)
def put(self, request, *args, **kwargs):
return self.update(request, *args, **kwargs)
@swagger_auto_schema(
manual_parameters=token_parameters, request_body=user_update_parameters, operation_id='user_partial_update',
responses={201: UserSerializer()}, tags=['users']
)
def patch(self, request, *args, **kwargs):
return self.partial_update(request, *args, **kwargs)
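# Hedged client-side sketch (not part of the original views): how a consumer
# might call these endpoints with the `requests` library. The host and URL
# prefix are hypothetical; the actual routes depend on the project's urls.py,
# and the "JWT " header prefix assumes the rest_framework_jwt default.
def _example_client():
    import requests
    base = "http://localhost:8000/api"  # hypothetical base URL
    token = requests.post(f"{base}/token", json={"username": "admin", "password": "secret"}).json()["token"]
    users = requests.get(f"{base}/users", headers={"Authorization": f"JWT {token}"}).json()
    return users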
| 40.153226
| 116
| 0.702952
|
c794c827ec735387c66892378a68c03ebc09fc87
| 1,265
|
py
|
Python
|
brian2/utils/arrays.py
|
CharleeSF/brian2
|
d2be1ed33a8ac51b1891f89a2544123a937c43ff
|
[
"BSD-2-Clause"
] | 2
|
2020-03-20T13:30:19.000Z
|
2020-03-20T13:30:57.000Z
|
brian2/utils/arrays.py
|
CharleeSF/brian2
|
d2be1ed33a8ac51b1891f89a2544123a937c43ff
|
[
"BSD-2-Clause"
] | null | null | null |
brian2/utils/arrays.py
|
CharleeSF/brian2
|
d2be1ed33a8ac51b1891f89a2544123a937c43ff
|
[
"BSD-2-Clause"
] | null | null | null |
'''
Helper module containing functions that operate on numpy arrays.
'''
import numpy as np
def calc_repeats(delay):
'''
Calculates offsets corresponding to an array, where repeated values are
subsequently numbered, i.e. if there are n identical values, the returned array
will have values from 0 to n-1 at their positions.
The code is complex because tricks are needed for vectorisation.
This function is used in the Python `SpikeQueue` to calculate the offset
array for the insertion of spikes with their respective delays into the
queue and in the numpy code for synapse creation to calculate how many
synapses for each source-target pair exist.
Examples
--------
>>> import numpy as np
>>> print(calc_repeats(np.array([7, 5, 7, 3, 7, 5])))
[0 0 1 0 2 1]
'''
# We use merge sort because it preserves the input order of equal
# elements in the sorted output
I = np.argsort(delay, kind='mergesort')
xs = delay[I]
J = (xs[1:] != xs[:-1])
A = np.hstack((0, np.cumsum(J)))
B = np.hstack((0, np.cumsum(np.logical_not(J))))
BJ = np.hstack((0, B[:-1][J]))
ei = B-BJ[A]
ofs = np.zeros_like(delay, dtype=np.int32)
ofs[I] = np.array(ei, dtype=ofs.dtype)
return ofs
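# Plain-Python reference (added for clarity, not part of the original module):
# it computes the same offsets as calc_repeats above, but without the
# vectorisation tricks, so the intent of the algorithm is easier to follow.
def _calc_repeats_reference(delay):
    seen = {}
    ofs = np.zeros_like(delay, dtype=np.int32)
    for i, value in enumerate(delay):
        ofs[i] = seen.get(value, 0)   # how many times this value was seen before
        seen[value] = ofs[i] + 1
    return ofs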
| 34.189189
| 79
| 0.660079
|
47cc44573810267dc4096e66619e724bafdf4900
| 2,207
|
py
|
Python
|
signalr/transports/_ws_transport.py
|
hakimo-ai/signalr-client-py
|
a5035d92bfd9e1eabed7c7bcf08f711e2e7cccb7
|
[
"Apache-2.0"
] | null | null | null |
signalr/transports/_ws_transport.py
|
hakimo-ai/signalr-client-py
|
a5035d92bfd9e1eabed7c7bcf08f711e2e7cccb7
|
[
"Apache-2.0"
] | null | null | null |
signalr/transports/_ws_transport.py
|
hakimo-ai/signalr-client-py
|
a5035d92bfd9e1eabed7c7bcf08f711e2e7cccb7
|
[
"Apache-2.0"
] | null | null | null |
import json
import sys
import gevent
import ssl
if sys.version_info[0] < 3:
from urlparse import urlparse, urlunparse
else:
from urllib.parse import urlparse, urlunparse
from websocket import create_connection
from ._transport import Transport
class WebSocketsTransport(Transport):
def __init__(self, session, connection):
Transport.__init__(self, session, connection)
self.ws = None
self.__requests = {}
def _get_name(self):
return 'webSockets'
@staticmethod
def __get_ws_url_from(url):
parsed = urlparse(url)
scheme = 'wss' if parsed.scheme == 'https' else 'ws'
url_data = (scheme, parsed.netloc, parsed.path, parsed.params, parsed.query, parsed.fragment)
return urlunparse(url_data)
def start(self):
ws_url = self.__get_ws_url_from(self._get_url('connect'))
self.ws = create_connection(ws_url,
header=self.__get_headers(),
cookie=self.__get_cookie_str(),
enable_multithread=True,
sslopt={'cert_reqs': ssl.CERT_NONE})
self._session.get(self._get_url('start'))
def _receive():
for notification in self.ws:
self._handle_notification(notification)
return _receive
def send(self, data):
self.ws.send(json.dumps(data))
gevent.sleep()
def close(self):
self.ws.close()
def accept(self, negotiate_data):
return bool(negotiate_data['TryWebSockets'])
class HeadersLoader(object):
def __init__(self, headers):
self.headers = headers
def __get_headers(self):
headers = self._session.headers
loader = WebSocketsTransport.HeadersLoader(headers)
if self._session.auth:
self._session.auth(loader)
return ['%s: %s' % (name, headers[name]) for name in headers]
def __get_cookie_str(self):
return '; '.join([
'%s=%s' % (name, value)
for name, value in self._session.cookies.items()
])
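# Illustrative sketch (not part of the original transport): the scheme swap
# performed by __get_ws_url_from, shown as a standalone helper. The URL is a
# hypothetical example.
def _example_ws_url(url="https://example.com/signalr/connect?foo=bar"):
    parsed = urlparse(url)
    scheme = 'wss' if parsed.scheme == 'https' else 'ws'
    # -> 'wss://example.com/signalr/connect?foo=bar'
    return urlunparse((scheme, parsed.netloc, parsed.path, parsed.params, parsed.query, parsed.fragment))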
| 28.662338
| 101
| 0.588129
|
3a14e5bb847ed62ced791a4395ae823ce504c5b2
| 138
|
py
|
Python
|
irispy/__init__.py
|
zpodushkin/irispy
|
e801c0c8ece0e5ad7ff1fae3cf9adb6e360cd655
|
[
"MIT"
] | 5
|
2020-03-16T19:26:00.000Z
|
2020-04-04T09:25:50.000Z
|
irispy/__init__.py
|
zpodushkin/irispy
|
e801c0c8ece0e5ad7ff1fae3cf9adb6e360cd655
|
[
"MIT"
] | 1
|
2020-04-05T00:06:05.000Z
|
2020-04-10T22:28:16.000Z
|
irispy/__init__.py
|
zpodushkin/irispy
|
e801c0c8ece0e5ad7ff1fae3cf9adb6e360cd655
|
[
"MIT"
] | null | null | null |
from .dispatcher.dispatcher import Dispatcher
from .types.methods import Method
from .types import objects
from .types.exceptions import *
| 34.5
| 45
| 0.833333
|
8f490888b8cc7090f1365691167a1f3895fc3df1
| 4,922
|
py
|
Python
|
tests/test_solver_crt_tau.py
|
astrojhgu/ares
|
42008c8e4bf79f0b000cc833e02a86510bce7611
|
[
"MIT"
] | 1
|
2019-01-04T15:13:18.000Z
|
2019-01-04T15:13:18.000Z
|
tests/test_solver_crt_tau.py
|
astrojhgu/ares
|
42008c8e4bf79f0b000cc833e02a86510bce7611
|
[
"MIT"
] | null | null | null |
tests/test_solver_crt_tau.py
|
astrojhgu/ares
|
42008c8e4bf79f0b000cc833e02a86510bce7611
|
[
"MIT"
] | null | null | null |
"""
test_solver_crt_tau.py
Author: Jordan Mirocha
Affiliation: UCLA
Created on: Tue May 31 20:42:40 PDT 2016
Description:
"""
import os
import time
import ares
import numpy as np
import matplotlib.pyplot as pl
from ares.physics.Constants import c, ev_per_hz, erg_per_ev
def test(tol=5e-2):
alpha = -2.
beta = -6.
zi = 10.
zf = 6.
# Initialize radiation background
pars = \
{
'include_He': 0,
'approx_He': 0,
'initial_redshift': zi,
'final_redshift': zf,
'pop_sfr_model': 'sfrd-func',
'pop_sfrd': lambda z: 0.1 * (1. + z)**beta, # for analytic solution to work this must be const
'pop_sfrd_units': 'msun/yr/mpc^3',
'pop_sed': 'pl',
'pop_alpha': alpha,
'pop_Emin': 5e2,
'pop_Emax': 1e3,
'pop_EminNorm': 2e2,
'pop_EmaxNorm': 3e4,
'pop_logN': -np.inf,
'pop_solve_rte': True,
'pop_approx_tau': False,
'pop_tau_Nz': 1e2,
}
colors = 'k', 'b'
for i, include_He in enumerate([0, 1]):
pars['include_He'] = include_He
pars['approx_He'] = include_He
pars['pop_approx_tau'] = False
# Create OpticalDepth instance
igm = ares.solvers.OpticalDepth(**pars)
# Impose an ionization history: neutral for all times
igm.ionization_history = lambda z: 0.0
# Tabulate tau
tau = igm.TabulateOpticalDepth()
igm.save(prefix='tau_test', suffix='pkl', clobber=True)
# Run radiation background calculation
pars['tau_table'] = 'tau_test.pkl'
sim_1 = ares.simulations.MetaGalacticBackground(**pars)
sim_1.run()
os.remove('tau_test.pkl')
# Compare to transparent IGM solution
pars['pop_approx_tau'] = True
sim_2 = ares.simulations.MetaGalacticBackground(**pars)
sim_2.run()
# Grab histories
z1, E1, f1 = sim_1.get_history(0, flatten=True)
z2, E2, f2 = sim_2.get_history(0, flatten=True)
# Impose a transparent IGM to check tau I/O
if i == 0:
igm.ionization_history = lambda z: 1.0
# Tabulate tau
tau = igm.TabulateOpticalDepth()
igm.save(prefix='tau_test', suffix='pkl', clobber=True)
pars['tau_table'] = 'tau_test.pkl'
pars['pop_approx_tau'] = False
sim_3 = ares.simulations.MetaGalacticBackground(**pars)
sim_3.run()
z3, E3, f3 = sim_3.get_history(0, flatten=True)
os.remove('tau_test.pkl')
# Check at *lowest* redshift
assert np.allclose(f3[0], f2[0]), "Problem with tau I/O."
# Compare to analytic solution
if i == 0:
# Grab GalaxyPopulation
E = E1
pop = sim_2.pops[0]
# Cosmologically-limited solution to the RTE
# [Equation A1 in Mirocha (2014)]
f_an = np.array([pop.Emissivity(zf, EE) for EE in E])
f_an *= (1. + zf)**(4.5 - (alpha + beta)) / 4. / np.pi \
/ pop.cosm.HubbleParameter(zf) / (alpha + beta - 1.5)
f_an *= ((1. + zi)**(alpha + beta - 1.5) - (1. + zf)**(alpha + beta - 1.5))
f_an *= c * ev_per_hz / E / erg_per_ev
pl.semilogy(E2, f_an, ls=':', color='k', label='analytic')
label = 'neutral'
pl.semilogy(E2, f2[0], ls='--', color=colors[i], label='ionized')
pl.annotate('H only', (0.05, 0.97), xycoords='axes fraction',
ha='left', va='top')
# Check analytic solution at *lowest* redshift
diff = np.abs(f_an - f2[0]) / f_an
# Loose tolerance in this example since tau table is coarse
assert diff[0] < tol, \
"Relative error between analytical and numerical solutions exceeds %.3g." % tol
else:
label = None
pl.annotate('H+He', (0.05, 0.9), xycoords='axes fraction',
ha='left', va='top', color='b')
# Plot solution assuming neutral IGM
pl.semilogy(E1, f1[0], ls='-', color=colors[i], label=label)
# Assert that transparent IGM -> larger fluxes in soft X-rays
assert np.all(f2[0] >= f1[0])
# Make sure X-ray background is harder when helium is included
if i == 0:
f_H = f1[0].copy()
else:
assert np.all(f1[0] <= f_H), \
"XRB should be harder when He is included!"
pl.xlabel(ares.util.labels['E'])
pl.ylabel(ares.util.labels['flux'])
pl.legend(fontsize=14)
pl.savefig('%s.png' % os.path.splitext(__file__)[0])
pl.close()
if __name__ == '__main__':
test()
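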
| 30.571429
| 100
| 0.530882
|
319870ef05ff3369f6c84a7694ade6d751ad580c
| 381
|
py
|
Python
|
sphinx/source/docs/examples/line_server.py
|
timelyportfolio/bokeh
|
a976a85535cf137c6238ce9e90b41ab14ae8ce22
|
[
"BSD-3-Clause"
] | 2
|
2021-09-01T12:36:06.000Z
|
2021-11-17T10:48:36.000Z
|
sphinx/source/docs/examples/line_server.py
|
brian15co/bokeh
|
6cecb7211277b9d838039d0eb15e50a10f9ac3d1
|
[
"BSD-3-Clause"
] | null | null | null |
sphinx/source/docs/examples/line_server.py
|
brian15co/bokeh
|
6cecb7211277b9d838039d0eb15e50a10f9ac3d1
|
[
"BSD-3-Clause"
] | 1
|
2021-08-01T08:38:53.000Z
|
2021-08-01T08:38:53.000Z
|
from bokeh.plotting import figure, output_server, show
#prepare some data
x = [1, 2, 3, 4, 5]
y = [6, 7, 2, 4, 5]
# output to a Bokeh server session
output_server("simple_line")
# create a figure with a title, then add a line renderer with a legend and axis labels
p = figure(title="simple line server example")
p.line(x, y, legend="Temp.", x_axis_label='x', y_axis_label='y')
show(p)
| 25.4
| 84
| 0.698163
|
d0986c5ef189fb92c0285cb42d49de07b40e59da
| 1,687
|
py
|
Python
|
jdcloud_sdk/services/captcha/models/Scene.py
|
Tanc009/jdcloud-sdk-python
|
8b045c99bc5b73ca7348e950b6f01e03a27982f5
|
[
"Apache-2.0"
] | 14
|
2018-04-19T09:53:56.000Z
|
2022-01-27T06:05:48.000Z
|
jdcloud_sdk/services/captcha/models/Scene.py
|
Tanc009/jdcloud-sdk-python
|
8b045c99bc5b73ca7348e950b6f01e03a27982f5
|
[
"Apache-2.0"
] | 15
|
2018-09-11T05:39:54.000Z
|
2021-07-02T12:38:02.000Z
|
jdcloud_sdk/services/captcha/models/Scene.py
|
Tanc009/jdcloud-sdk-python
|
8b045c99bc5b73ca7348e950b6f01e03a27982f5
|
[
"Apache-2.0"
] | 33
|
2018-04-20T05:29:16.000Z
|
2022-02-17T09:10:05.000Z
|
# coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class Scene(object):
def __init__(self, appId, sceneName, sceneType, sceneAvgQps, sceneMaxQps, sceneId=None, sceneSecret=None, appName=None, description=None, createTime=None):
"""
:param sceneId: (Optional) Scene id; required when updating
:param sceneSecret: (Optional) Scene secret key
:param appId: Id of the application the scene belongs to
:param appName: (Optional) Name of the application the scene belongs to
:param sceneName: Scene name
:param sceneType: Scene type: account: account scenes (login, registration, etc.); activity: activity scenes (flash sales, coupon claiming, etc.); content: content scenes (posting/commenting, check-ins/voting, etc.); other: others
:param sceneAvgQps: Average QPS
:param sceneMaxQps: Peak QPS
:param description: (Optional) Scene description
:param createTime: (Optional) Creation time
"""
self.sceneId = sceneId
self.sceneSecret = sceneSecret
self.appId = appId
self.appName = appName
self.sceneName = sceneName
self.sceneType = sceneType
self.sceneAvgQps = sceneAvgQps
self.sceneMaxQps = sceneMaxQps
self.description = description
self.createTime = createTime
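# Hedged usage sketch (not part of the generated SDK): constructing a Scene for
# a create/update request body. The field values are arbitrary examples.
def _example_scene():
    return Scene(
        appId="app-123",
        sceneName="login-captcha",
        sceneType="account",
        sceneAvgQps=50,
        sceneMaxQps=200,
        description="Captcha for the login page",
    )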
| 36.673913
| 159
| 0.691168
|
26251631750c73f8fdf874dd1d9081f1d72f87e0
| 2,424
|
py
|
Python
|
app.py
|
technetbytes/Pythonic-Template
|
78b565359d640b208ed6189ebefc5760751c16d7
|
[
"Apache-2.0"
] | 2
|
2020-04-06T07:59:02.000Z
|
2020-04-06T07:59:04.000Z
|
app.py
|
technetbytes/Pythonic-Template
|
78b565359d640b208ed6189ebefc5760751c16d7
|
[
"Apache-2.0"
] | 2
|
2020-04-06T12:13:34.000Z
|
2020-04-06T13:49:29.000Z
|
app.py
|
technetbytes/Pythonic-Template
|
78b565359d640b208ed6189ebefc5760751c16d7
|
[
"Apache-2.0"
] | null | null | null |
from config import configuration
from config import database
from db import mssql
import pandas as pd
from assets import file_storage
from cache import data_cache
from bridge.databse_bridge import DbBridge
'''Get configuration data from the .resource/config.ini file'''
# use configuration class get DATABASE Section from ini file
#config = configuration.Configuration(None)
# db_config = config.get_section("DATABASE")
# print(db_config['HOST'])
# use DatabaseConfiguration class user directly get DATABASE host using property
#ds = database.DatabaseConfiguration(None)
# print(ds.host)
'''Insert & Get record in the database'''
#obj_sql = mssql.MsSqlDb(None)
# conn = obj_sql.get_connection()
# data = pd.read_sql_query("select top 1 * from [knowHow].[dbo].[tModels]", conn)
# print(data)
# data2 = obj_sql.get_data("select top 1 * from [knowHow].[dbo].[tModels]")
# print(data2)
#obj_sql.insert_data("INSERT INTO KnowHow.dbo.tModels (Name, ModelName, Description, CreatedBy, CreatedOn, ModifiedBy, ModifiedOn, IsActive, CategoryId) VALUES('test1', 'test-1', 'test1 desc', 10, getdate(), NULL, '', 0, 1);")
'''Set & Read data in the cloud storage'''
#file_manager = file_storage.FileManager(None)
#file_manager.load_config()
#file_manager.upload_file("/home/saqib/Pictures/ijmal.png","ijmalbhai")
#s = file_manager.get_file_url("ijmalbhai")
#print(s)
#img = file_manager.get_image("https://res.cloudinary.com/dnbcbz9eu/image/upload/v1586115769/ijmalbhai.png")
#print(img)
'''Set & Read data in the cache'''
#redis_cache = data_cache.DataDeposit(None)
#redis_cache.load_config()
#redis_cache.set_item("Afa","Hello world")
#print(redis_cache.get_item("Afa"))
def main():
direct_db()
orm_model_call()
def direct_db():
print("Direct database call ...")
bridge = DbBridge()
print(bridge.get_application())
bridge.load_db()
print(bridge.get_data("select top 1 * from [knowHow].[dbo].[tModels]"))
def orm_model_call():
print("ORM mapper call ...")
bridge = DbBridge()
bridge.load_db()
from models import projects
projects = bridge.get_data_forModel(projects.Project)
print('\n### All Projects:')
for prj in projects:
print(f'project id {prj.id} with keys {prj.webKey} and {prj.mobileKey}')
print("done")
if __name__ == "__main__":
print("Start Pythonic-Template Application")
main()
print("End Pythonic-Template Application")
| 31.076923
| 226
| 0.723185
|
f729e714918038b9f396ec89a8394119a0377539
| 460
|
py
|
Python
|
tests/dsfs/test_neural_networks.py
|
dbradf/dsfs
|
efcd08ca56b4e14b926cc824f15474b04a9d94cb
|
[
"Apache-2.0"
] | null | null | null |
tests/dsfs/test_neural_networks.py
|
dbradf/dsfs
|
efcd08ca56b4e14b926cc824f15474b04a9d94cb
|
[
"Apache-2.0"
] | null | null | null |
tests/dsfs/test_neural_networks.py
|
dbradf/dsfs
|
efcd08ca56b4e14b926cc824f15474b04a9d94cb
|
[
"Apache-2.0"
] | null | null | null |
import dsfs.neural_networks as under_test
def test_xor_network():
xor_network = [[[20.0, 20, -30], [20.0, 20, -10]], [[-60.0, 60, -30]]]
assert 0.000 < under_test.feed_forward(xor_network, [0, 0])[-1][0] < 0.001
assert 0.999 < under_test.feed_forward(xor_network, [1, 0])[-1][0] < 1.000
assert 0.999 < under_test.feed_forward(xor_network, [0, 1])[-1][0] < 1.000
assert 0.000 < under_test.feed_forward(xor_network, [1, 1])[-1][0] < 0.001
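# Hedged reference sketch (an assumption, not the module under test): the XOR
# weights above are consistent with a sigmoid feed-forward network in which
# each neuron's last weight acts as a bias applied to a constant 1 appended to
# its input. This standalone version mirrors that behaviour for illustration.
def _feed_forward_reference(network, input_vector):
    import math
    outputs = []
    for layer in network:
        input_with_bias = list(input_vector) + [1.0]  # append bias input
        output = [
            1.0 / (1.0 + math.exp(-sum(w * x for w, x in zip(neuron, input_with_bias))))
            for neuron in layer
        ]
        outputs.append(output)
        input_vector = output  # feed this layer's output into the next layer
    return outputs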
| 41.818182
| 78
| 0.634783
|
d467b821208e58ba66b028cf6e5b408fcb714c59
| 20,617
|
py
|
Python
|
sdk/lusid_asyncio/models/a2_b_movement_record.py
|
finbourne/lusid-sdk-python-asyncio-preview
|
290f93590ab5485661216c8622d3de9f7af0ed60
|
[
"MIT"
] | null | null | null |
sdk/lusid_asyncio/models/a2_b_movement_record.py
|
finbourne/lusid-sdk-python-asyncio-preview
|
290f93590ab5485661216c8622d3de9f7af0ed60
|
[
"MIT"
] | null | null | null |
sdk/lusid_asyncio/models/a2_b_movement_record.py
|
finbourne/lusid-sdk-python-asyncio-preview
|
290f93590ab5485661216c8622d3de9f7af0ed60
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
LUSID API
FINBOURNE Technology # noqa: E501
The version of the OpenAPI document: 0.11.3923
Contact: info@finbourne.com
Generated by: https://openapi-generator.tech
"""
try:
from inspect import getfullargspec
except ImportError:
from inspect import getargspec as getfullargspec
import pprint
import re # noqa: F401
import six
from lusid_asyncio.configuration import Configuration
class A2BMovementRecord(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
required_map (dict): The key is attribute name
and the value is whether it is 'required' or 'optional'.
"""
openapi_types = {
'portfolio_id': 'ResourceId',
'holding_type': 'str',
'instrument_scope': 'str',
'instrument_uid': 'str',
'sub_holding_keys': 'dict(str, PerpetualProperty)',
'currency': 'str',
'transaction_id': 'str',
'movement_name': 'str',
'effective_date': 'datetime',
'units': 'float',
'start': 'A2BCategory',
'flows': 'A2BCategory',
'gains': 'A2BCategory',
'carry': 'A2BCategory',
'end': 'A2BCategory',
'properties': 'dict(str, ModelProperty)',
'group_id': 'str'
}
attribute_map = {
'portfolio_id': 'portfolioId',
'holding_type': 'holdingType',
'instrument_scope': 'instrumentScope',
'instrument_uid': 'instrumentUid',
'sub_holding_keys': 'subHoldingKeys',
'currency': 'currency',
'transaction_id': 'transactionId',
'movement_name': 'movementName',
'effective_date': 'effectiveDate',
'units': 'units',
'start': 'start',
'flows': 'flows',
'gains': 'gains',
'carry': 'carry',
'end': 'end',
'properties': 'properties',
'group_id': 'groupId'
}
required_map = {
'portfolio_id': 'optional',
'holding_type': 'optional',
'instrument_scope': 'optional',
'instrument_uid': 'optional',
'sub_holding_keys': 'optional',
'currency': 'optional',
'transaction_id': 'optional',
'movement_name': 'optional',
'effective_date': 'optional',
'units': 'optional',
'start': 'optional',
'flows': 'optional',
'gains': 'optional',
'carry': 'optional',
'end': 'optional',
'properties': 'optional',
'group_id': 'optional'
}
def __init__(self, portfolio_id=None, holding_type=None, instrument_scope=None, instrument_uid=None, sub_holding_keys=None, currency=None, transaction_id=None, movement_name=None, effective_date=None, units=None, start=None, flows=None, gains=None, carry=None, end=None, properties=None, group_id=None, local_vars_configuration=None): # noqa: E501
"""A2BMovementRecord - a model defined in OpenAPI"
:param portfolio_id:
:type portfolio_id: lusid_asyncio.ResourceId
:param holding_type: The type of the holding e.g. Position, Balance, CashCommitment, Receivable, ForwardFX etc.
:type holding_type: str
:param instrument_scope: The unique Lusid Instrument Id (LUID) of the instrument that the holding is in.
:type instrument_scope: str
:param instrument_uid: The unique Lusid Instrument Id (LUID) of the instrument that the holding is in.
:type instrument_uid: str
:param sub_holding_keys: The sub-holding properties which identify the holding. Each property will be from the 'Transaction' domain. These are configured when a transaction portfolio is created.
:type sub_holding_keys: dict[str, lusid_asyncio.PerpetualProperty]
:param currency: The holding currency.
:type currency: str
:param transaction_id: The unique identifier for the transaction.
:type transaction_id: str
:param movement_name: The name of the movement.
:type movement_name: str
:param effective_date: The date of the movement.
:type effective_date: datetime
:param units: The number of units of the instrument that are affected by the movement.
:type units: float
:param start:
:type start: lusid_asyncio.A2BCategory
:param flows:
:type flows: lusid_asyncio.A2BCategory
:param gains:
:type gains: lusid_asyncio.A2BCategory
:param carry:
:type carry: lusid_asyncio.A2BCategory
:param end:
:type end: lusid_asyncio.A2BCategory
:param properties: The properties which have been requested to be decorated onto the holding. These will be from the 'Instrument' domain.
:type properties: dict[str, lusid_asyncio.ModelProperty]
:param group_id: Arbitrary string that can be used to cross reference an entry in the A2B report with activity in the A2B-Movements. This should be used purely as a token. The content should not be relied upon.
:type group_id: str
""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration.get_default_copy()
self.local_vars_configuration = local_vars_configuration
self._portfolio_id = None
self._holding_type = None
self._instrument_scope = None
self._instrument_uid = None
self._sub_holding_keys = None
self._currency = None
self._transaction_id = None
self._movement_name = None
self._effective_date = None
self._units = None
self._start = None
self._flows = None
self._gains = None
self._carry = None
self._end = None
self._properties = None
self._group_id = None
self.discriminator = None
if portfolio_id is not None:
self.portfolio_id = portfolio_id
self.holding_type = holding_type
self.instrument_scope = instrument_scope
self.instrument_uid = instrument_uid
self.sub_holding_keys = sub_holding_keys
self.currency = currency
self.transaction_id = transaction_id
self.movement_name = movement_name
if effective_date is not None:
self.effective_date = effective_date
if units is not None:
self.units = units
if start is not None:
self.start = start
if flows is not None:
self.flows = flows
if gains is not None:
self.gains = gains
if carry is not None:
self.carry = carry
if end is not None:
self.end = end
self.properties = properties
self.group_id = group_id
@property
def portfolio_id(self):
"""Gets the portfolio_id of this A2BMovementRecord. # noqa: E501
:return: The portfolio_id of this A2BMovementRecord. # noqa: E501
:rtype: lusid_asyncio.ResourceId
"""
return self._portfolio_id
@portfolio_id.setter
def portfolio_id(self, portfolio_id):
"""Sets the portfolio_id of this A2BMovementRecord.
:param portfolio_id: The portfolio_id of this A2BMovementRecord. # noqa: E501
:type portfolio_id: lusid_asyncio.ResourceId
"""
self._portfolio_id = portfolio_id
@property
def holding_type(self):
"""Gets the holding_type of this A2BMovementRecord. # noqa: E501
The type of the holding e.g. Position, Balance, CashCommitment, Receivable, ForwardFX etc. # noqa: E501
:return: The holding_type of this A2BMovementRecord. # noqa: E501
:rtype: str
"""
return self._holding_type
@holding_type.setter
def holding_type(self, holding_type):
"""Sets the holding_type of this A2BMovementRecord.
The type of the holding e.g. Position, Balance, CashCommitment, Receivable, ForwardFX etc. # noqa: E501
:param holding_type: The holding_type of this A2BMovementRecord. # noqa: E501
:type holding_type: str
"""
self._holding_type = holding_type
@property
def instrument_scope(self):
"""Gets the instrument_scope of this A2BMovementRecord. # noqa: E501
The unique Lusid Instrument Id (LUID) of the instrument that the holding is in. # noqa: E501
:return: The instrument_scope of this A2BMovementRecord. # noqa: E501
:rtype: str
"""
return self._instrument_scope
@instrument_scope.setter
def instrument_scope(self, instrument_scope):
"""Sets the instrument_scope of this A2BMovementRecord.
The unique Lusid Instrument Id (LUID) of the instrument that the holding is in. # noqa: E501
:param instrument_scope: The instrument_scope of this A2BMovementRecord. # noqa: E501
:type instrument_scope: str
"""
self._instrument_scope = instrument_scope
@property
def instrument_uid(self):
"""Gets the instrument_uid of this A2BMovementRecord. # noqa: E501
The unique Lusid Instrument Id (LUID) of the instrument that the holding is in. # noqa: E501
:return: The instrument_uid of this A2BMovementRecord. # noqa: E501
:rtype: str
"""
return self._instrument_uid
@instrument_uid.setter
def instrument_uid(self, instrument_uid):
"""Sets the instrument_uid of this A2BMovementRecord.
The unique Lusid Instrument Id (LUID) of the instrument that the holding is in. # noqa: E501
:param instrument_uid: The instrument_uid of this A2BMovementRecord. # noqa: E501
:type instrument_uid: str
"""
self._instrument_uid = instrument_uid
@property
def sub_holding_keys(self):
"""Gets the sub_holding_keys of this A2BMovementRecord. # noqa: E501
The sub-holding properties which identify the holding. Each property will be from the 'Transaction' domain. These are configured when a transaction portfolio is created. # noqa: E501
:return: The sub_holding_keys of this A2BMovementRecord. # noqa: E501
:rtype: dict[str, lusid_asyncio.PerpetualProperty]
"""
return self._sub_holding_keys
@sub_holding_keys.setter
def sub_holding_keys(self, sub_holding_keys):
"""Sets the sub_holding_keys of this A2BMovementRecord.
The sub-holding properties which identify the holding. Each property will be from the 'Transaction' domain. These are configured when a transaction portfolio is created. # noqa: E501
:param sub_holding_keys: The sub_holding_keys of this A2BMovementRecord. # noqa: E501
:type sub_holding_keys: dict[str, lusid_asyncio.PerpetualProperty]
"""
self._sub_holding_keys = sub_holding_keys
@property
def currency(self):
"""Gets the currency of this A2BMovementRecord. # noqa: E501
The holding currency. # noqa: E501
:return: The currency of this A2BMovementRecord. # noqa: E501
:rtype: str
"""
return self._currency
@currency.setter
def currency(self, currency):
"""Sets the currency of this A2BMovementRecord.
The holding currency. # noqa: E501
:param currency: The currency of this A2BMovementRecord. # noqa: E501
:type currency: str
"""
self._currency = currency
@property
def transaction_id(self):
"""Gets the transaction_id of this A2BMovementRecord. # noqa: E501
The unique identifier for the transaction. # noqa: E501
:return: The transaction_id of this A2BMovementRecord. # noqa: E501
:rtype: str
"""
return self._transaction_id
@transaction_id.setter
def transaction_id(self, transaction_id):
"""Sets the transaction_id of this A2BMovementRecord.
The unique identifier for the transaction. # noqa: E501
:param transaction_id: The transaction_id of this A2BMovementRecord. # noqa: E501
:type transaction_id: str
"""
self._transaction_id = transaction_id
@property
def movement_name(self):
"""Gets the movement_name of this A2BMovementRecord. # noqa: E501
The name of the movement. # noqa: E501
:return: The movement_name of this A2BMovementRecord. # noqa: E501
:rtype: str
"""
return self._movement_name
@movement_name.setter
def movement_name(self, movement_name):
"""Sets the movement_name of this A2BMovementRecord.
The name of the movement. # noqa: E501
:param movement_name: The movement_name of this A2BMovementRecord. # noqa: E501
:type movement_name: str
"""
self._movement_name = movement_name
@property
def effective_date(self):
"""Gets the effective_date of this A2BMovementRecord. # noqa: E501
The date of the movement. # noqa: E501
:return: The effective_date of this A2BMovementRecord. # noqa: E501
:rtype: datetime
"""
return self._effective_date
@effective_date.setter
def effective_date(self, effective_date):
"""Sets the effective_date of this A2BMovementRecord.
The date of the movement. # noqa: E501
:param effective_date: The effective_date of this A2BMovementRecord. # noqa: E501
:type effective_date: datetime
"""
self._effective_date = effective_date
@property
def units(self):
"""Gets the units of this A2BMovementRecord. # noqa: E501
The number of units of the instrument that are affected by the movement. # noqa: E501
:return: The units of this A2BMovementRecord. # noqa: E501
:rtype: float
"""
return self._units
@units.setter
def units(self, units):
"""Sets the units of this A2BMovementRecord.
The number of units of the instrument that are affected by the movement. # noqa: E501
:param units: The units of this A2BMovementRecord. # noqa: E501
:type units: float
"""
self._units = units
@property
def start(self):
"""Gets the start of this A2BMovementRecord. # noqa: E501
:return: The start of this A2BMovementRecord. # noqa: E501
:rtype: lusid_asyncio.A2BCategory
"""
return self._start
@start.setter
def start(self, start):
"""Sets the start of this A2BMovementRecord.
:param start: The start of this A2BMovementRecord. # noqa: E501
:type start: lusid_asyncio.A2BCategory
"""
self._start = start
@property
def flows(self):
"""Gets the flows of this A2BMovementRecord. # noqa: E501
:return: The flows of this A2BMovementRecord. # noqa: E501
:rtype: lusid_asyncio.A2BCategory
"""
return self._flows
@flows.setter
def flows(self, flows):
"""Sets the flows of this A2BMovementRecord.
:param flows: The flows of this A2BMovementRecord. # noqa: E501
:type flows: lusid_asyncio.A2BCategory
"""
self._flows = flows
@property
def gains(self):
"""Gets the gains of this A2BMovementRecord. # noqa: E501
:return: The gains of this A2BMovementRecord. # noqa: E501
:rtype: lusid_asyncio.A2BCategory
"""
return self._gains
@gains.setter
def gains(self, gains):
"""Sets the gains of this A2BMovementRecord.
:param gains: The gains of this A2BMovementRecord. # noqa: E501
:type gains: lusid_asyncio.A2BCategory
"""
self._gains = gains
@property
def carry(self):
"""Gets the carry of this A2BMovementRecord. # noqa: E501
:return: The carry of this A2BMovementRecord. # noqa: E501
:rtype: lusid_asyncio.A2BCategory
"""
return self._carry
@carry.setter
def carry(self, carry):
"""Sets the carry of this A2BMovementRecord.
:param carry: The carry of this A2BMovementRecord. # noqa: E501
:type carry: lusid_asyncio.A2BCategory
"""
self._carry = carry
@property
def end(self):
"""Gets the end of this A2BMovementRecord. # noqa: E501
:return: The end of this A2BMovementRecord. # noqa: E501
:rtype: lusid_asyncio.A2BCategory
"""
return self._end
@end.setter
def end(self, end):
"""Sets the end of this A2BMovementRecord.
:param end: The end of this A2BMovementRecord. # noqa: E501
:type end: lusid_asyncio.A2BCategory
"""
self._end = end
@property
def properties(self):
"""Gets the properties of this A2BMovementRecord. # noqa: E501
The properties which have been requested to be decorated onto the holding. These will be from the 'Instrument' domain. # noqa: E501
:return: The properties of this A2BMovementRecord. # noqa: E501
:rtype: dict[str, lusid_asyncio.ModelProperty]
"""
return self._properties
@properties.setter
def properties(self, properties):
"""Sets the properties of this A2BMovementRecord.
The properties which have been requested to be decorated onto the holding. These will be from the 'Instrument' domain. # noqa: E501
:param properties: The properties of this A2BMovementRecord. # noqa: E501
:type properties: dict[str, lusid_asyncio.ModelProperty]
"""
self._properties = properties
@property
def group_id(self):
"""Gets the group_id of this A2BMovementRecord. # noqa: E501
Arbitrary string that can be used to cross reference an entry in the A2B report with activity in the A2B-Movements. This should be used purely as a token. The content should not be relied upon. # noqa: E501
:return: The group_id of this A2BMovementRecord. # noqa: E501
:rtype: str
"""
return self._group_id
@group_id.setter
def group_id(self, group_id):
"""Sets the group_id of this A2BMovementRecord.
Arbitrary string that can be used to cross reference an entry in the A2B report with activity in the A2B-Movements. This should be used purely as a token. The content should not be relied upon. # noqa: E501
:param group_id: The group_id of this A2BMovementRecord. # noqa: E501
:type group_id: str
"""
self._group_id = group_id
def to_dict(self, serialize=False):
"""Returns the model properties as a dict"""
result = {}
def convert(x):
if hasattr(x, "to_dict"):
args = getfullargspec(x.to_dict).args
if len(args) == 1:
return x.to_dict()
else:
return x.to_dict(serialize)
else:
return x
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
attr = self.attribute_map.get(attr, attr) if serialize else attr
if isinstance(value, list):
result[attr] = list(map(
lambda x: convert(x),
value
))
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], convert(item[1])),
value.items()
))
else:
result[attr] = convert(value)
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, A2BMovementRecord):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, A2BMovementRecord):
return True
return self.to_dict() != other.to_dict()
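# Hedged usage sketch (not part of the generated model): constructing a record
# and serialising it with the camelCase attribute map. The values are arbitrary
# examples.
def _example_record():
    record = A2BMovementRecord(
        holding_type="Position",
        currency="GBP",
        transaction_id="txn-001",
        movement_name="Purchase",
        units=100.0,
    )
    return record.to_dict(serialize=True)  # keys become e.g. 'holdingType'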
| 33.199678
| 352
| 0.63113
|
2d0e3dd28e79053c76397d829edb8c4830f51983
| 4,249
|
py
|
Python
|
fabric_quick_setup/mod.py
|
max-niederman/fabric-quick-setup
|
40c959c6cd2295c679576680fab3cda2b15222f5
|
[
"MIT"
] | 2
|
2020-06-03T04:38:22.000Z
|
2020-07-07T03:37:38.000Z
|
fabric_quick_setup/mod.py
|
max-niederman/fabric-quick-setup
|
40c959c6cd2295c679576680fab3cda2b15222f5
|
[
"MIT"
] | null | null | null |
fabric_quick_setup/mod.py
|
max-niederman/fabric-quick-setup
|
40c959c6cd2295c679576680fab3cda2b15222f5
|
[
"MIT"
] | null | null | null |
# Mod.py
import requests
import jenkinsapi
from jenkinsapi.jenkins import Jenkins
import json
import re
import lxml
from bs4 import BeautifulSoup
from pprint import pprint
def download(url: str, out='', name=None):
r = requests.get(url)
filename = out + name if name else out + url.split('/')[-1]
with open(filename, 'wb') as outfile:
outfile.write(r.content)
# Errors
class InvalidModResourceError(Exception):
pass
class ModVersionNotFoundError(InvalidModResourceError):
pass
# Mod Class
class Mod:
def __init__(self, resource: dict):
self.resource = resource
def install(self, mc_dir: str, mc_version: str):
if self.resource['type'] == 'github':
# Get Latest Asset for Minecraft Version
if 'release' in self.resource['filters']:
release = requests.get(f'https://api.github.com/repos/{self.resource["repo"]}/releases/{self.resource["filters"]["release"]}').json()
assets = release['assets']
else:
# Get all release assets for repo
releases = requests.get(f'https://api.github.com/repos/{self.resource["repo"]}/releases').json()
assets = list()
releases.reverse()
for release in releases:
assets += release['assets']
# Filter assets and get latest asset
if 'mc-version' in self.resource['filters']:
assets = [x for x in assets if mc_version in x['name']]
if assets:
asset = assets[0]
else:
raise ModVersionNotFoundError('No assets were found for this version of Minecraft')
download(asset['browser_download_url'], out=f'{mc_dir}\mods\\')
elif self.resource['type'] == 'fabric':
# Snapshot to eventual release version
fabric_versions = requests.get('https://meta.fabricmc.net/v2/versions/game').json()
mc_versions = [v['version'] for v in fabric_versions]
f = re.compile(r'\d\.\d\d$')
seen_version = False
if f.match(mc_version):
release_version = mc_version
else:
for version in mc_versions:
if version == mc_version:
seen_version = True
if f.match(version) and seen_version:
release_version = version[:-1] + str(int(version[-1]) + 1)
break
# Get Jenkins Artifact and Download
jenkins = Jenkins('https://jenkins.modmuss50.me')
job = jenkins.get_job(f'Fabric/API {release_version}')
build = job.get_last_good_build()
artifacts = build.get_artifact_dict()
artifact = list(artifacts.values())[-1]
artifact.save_to_dir(f'{mc_dir}\mods\\')
elif self.resource['type'] == 'optifine':
# Get optifine.net downloads page
with requests.get('https://www.optifine.net/downloads') as r:
downloads_page = BeautifulSoup(r.text, 'lxml')
# Get list of mirrors and get mirror of latest release for Minecraft version
mirrors = downloads_page.select('.downloadLineMirror')
mirrors = [x for x in mirrors if mc_version in x.a['href']]
if mirrors:
mirror = mirrors[0]
else:
raise ModVersionNotFoundError('No releases were found for this version of Minecraft')
# Get mirror page
with requests.get(mirror.a['href']) as r:
mirror_page = BeautifulSoup(r.text, 'lxml')
# Get download link
download_elem = mirror_page.find('span', {'id': 'Download'})
download(f'https://optifine.net/{download_elem.a["href"]}', out=f'{mc_dir}\mods\\', name=download_elem.get_text()[11:-1])
elif self.resource['url']:
download(self.resource['url'], out=f'{mc_dir}\mods\\')
else:
raise InvalidModResourceError('No valid mod resource data was found')
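# Hedged usage sketch (not part of the original module): the resource dict keys
# ('type', 'repo', 'filters', 'url') mirror the branches handled in install().
# The repository name and the Minecraft directory are hypothetical examples;
# calling this helper would trigger real GitHub API requests.
def _example_install():
    resource = {
        'type': 'github',
        'repo': 'example-user/example-fabric-mod',  # hypothetical repository
        'filters': {'mc-version': True},
    }
    mod = Mod(resource)
    mod.install(mc_dir='C:\\Users\\example\\.minecraft', mc_version='1.16.5')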
| 40.466667
| 149
| 0.562721
|
bc7f5fd8611c570b4aac667431aa747cee3dbc52
| 46,205
|
py
|
Python
|
ibm_cloud_networking_services/zone_firewall_access_rules_v1.py
|
IBM/networking-services-python-sdk
|
a19e47db6a5971562a502982d69a5868997245f3
|
[
"Apache-2.0"
] | 1
|
2022-03-15T02:13:25.000Z
|
2022-03-15T02:13:25.000Z
|
ibm_cloud_networking_services/zone_firewall_access_rules_v1.py
|
IBM/networking-services-python-sdk
|
a19e47db6a5971562a502982d69a5868997245f3
|
[
"Apache-2.0"
] | 57
|
2020-06-24T06:58:01.000Z
|
2022-03-28T14:52:33.000Z
|
ibm_cloud_networking_services/zone_firewall_access_rules_v1.py
|
IBM/networking-services-python-sdk
|
a19e47db6a5971562a502982d69a5868997245f3
|
[
"Apache-2.0"
] | 10
|
2020-06-23T04:09:28.000Z
|
2022-03-26T18:20:35.000Z
|
# coding: utf-8
# (C) Copyright IBM Corp. 2020.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Zone Firewall Access Rules
"""
from enum import Enum
from typing import Dict, List
import json
from ibm_cloud_sdk_core import BaseService, DetailedResponse
from ibm_cloud_sdk_core.authenticators.authenticator import Authenticator
from ibm_cloud_sdk_core.get_authenticator import get_authenticator_from_environment
from ibm_cloud_sdk_core.utils import convert_model
from .common import get_sdk_headers
##############################################################################
# Service
##############################################################################
class ZoneFirewallAccessRulesV1(BaseService):
"""The Zone Firewall Access Rules V1 service."""
DEFAULT_SERVICE_URL = 'https://api.cis.cloud.ibm.com'
DEFAULT_SERVICE_NAME = 'zone_firewall_access_rules'
@classmethod
def new_instance(cls,
crn: str,
zone_identifier: str,
service_name: str = DEFAULT_SERVICE_NAME,
) -> 'ZoneFirewallAccessRulesV1':
"""
Return a new client for the Zone Firewall Access Rules service using the
specified parameters and external configuration.
:param str crn: Full crn of the service instance.
:param str zone_identifier: Zone identifier (zone id).
"""
if crn is None:
raise ValueError('crn must be provided')
if zone_identifier is None:
raise ValueError('zone_identifier must be provided')
authenticator = get_authenticator_from_environment(service_name)
service = cls(
crn,
zone_identifier,
authenticator
)
service.configure_service(service_name)
return service
def __init__(self,
crn: str,
zone_identifier: str,
authenticator: Authenticator = None,
) -> None:
"""
Construct a new client for the Zone Firewall Access Rules service.
:param str crn: Full crn of the service instance.
:param str zone_identifier: Zone identifier (zone id).
:param Authenticator authenticator: The authenticator specifies the authentication mechanism.
Get up to date information from https://github.com/IBM/python-sdk-core/blob/master/README.md
about initializing the authenticator of your choice.
"""
if crn is None:
raise ValueError('crn must be provided')
if zone_identifier is None:
raise ValueError('zone_identifier must be provided')
BaseService.__init__(self,
service_url=self.DEFAULT_SERVICE_URL,
authenticator=authenticator)
self.crn = crn
self.zone_identifier = zone_identifier
#########################
# Zone Firewall Access Rules
#########################
def list_all_zone_access_rules(self,
*,
notes: str = None,
mode: str = None,
configuration_target: str = None,
configuration_value: str = None,
page: int = None,
per_page: int = None,
order: str = None,
direction: str = None,
match: str = None,
**kwargs
) -> DetailedResponse:
"""
List all firewall access rules.
List all firewall access rules for a zone.
:param str notes: (optional) Search access rules by note (not case
sensitive).
:param str mode: (optional) Search access rules by mode.
:param str configuration_target: (optional) Search access rules by
configuration target.
:param str configuration_value: (optional) Search access rules by
configuration value which can be IP, IPrange, or country code.
:param int page: (optional) Page number of paginated results.
:param int per_page: (optional) Maximum number of access rules per page.
:param str order: (optional) Field by which to order list of access rules.
:param str direction: (optional) Direction in which to order results
[ascending/descending order].
:param str match: (optional) Whether to match all (all) or at least one
search parameter (any).
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse with `dict` result representing a `ListZoneAccessRulesResp` object
"""
headers = {}
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='list_all_zone_access_rules')
headers.update(sdk_headers)
params = {
'notes': notes,
'mode': mode,
'configuration.target': configuration_target,
'configuration.value': configuration_value,
'page': page,
'per_page': per_page,
'order': order,
'direction': direction,
'match': match
}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
url = '/v1/{0}/zones/{1}/firewall/access_rules/rules'.format(
*self.encode_path_vars(self.crn, self.zone_identifier))
request = self.prepare_request(method='GET',
url=url,
headers=headers,
params=params)
response = self.send(request)
return response
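# Hedged usage sketch (comments only, not part of the generated client). The
# CRN and zone id are placeholders, and new_instance assumes IBM Cloud
# credentials are available in the environment:
#
#   service = ZoneFirewallAccessRulesV1.new_instance(
#       crn="crn:v1:bluemix:public:internet-svcs:...",  # hypothetical CRN
#       zone_identifier="<zone_id>")
#   response = service.list_all_zone_access_rules(mode="block", page=1)
#   rules = response.get_result()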
def create_zone_access_rule(self,
*,
mode: str = None,
notes: str = None,
configuration: 'ZoneAccessRuleInputConfiguration' = None,
**kwargs
) -> DetailedResponse:
"""
Create firewall access rule.
Create a new firewall access rule for a given zone under a service instance.
:param str mode: (optional) The action to apply to a matched request.
:param str notes: (optional) A personal note about the rule. Typically used
as a reminder or explanation for the rule.
:param ZoneAccessRuleInputConfiguration configuration: (optional)
Configuration object specifying access rule.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse with `dict` result representing a `ZoneAccessRuleResp` object
"""
if configuration is not None:
configuration = convert_model(configuration)
headers = {}
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='create_zone_access_rule')
headers.update(sdk_headers)
data = {
'mode': mode,
'notes': notes,
'configuration': configuration
}
data = {k: v for (k, v) in data.items() if v is not None}
data = json.dumps(data)
headers['content-type'] = 'application/json'
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
url = '/v1/{0}/zones/{1}/firewall/access_rules/rules'.format(
*self.encode_path_vars(self.crn, self.zone_identifier))
request = self.prepare_request(method='POST',
url=url,
headers=headers,
data=data)
response = self.send(request)
return response
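    # Hedged usage sketch (assumption, not from the source): creating a rule
    # typically pairs `mode` with a ZoneAccessRuleInputConfiguration; the
    # `service` handle and the values below are hypothetical.
    #
    #   config = ZoneAccessRuleInputConfiguration(target='ip', value='198.51.100.4')
    #   response = service.create_zone_access_rule(
    #       mode='block',
    #       notes='Block scanner traffic',
    #       configuration=config)
    #   rule_id = response.get_result()['result']['id']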
def delete_zone_access_rule(self,
accessrule_identifier: str,
**kwargs
) -> DetailedResponse:
"""
Delete firewall access rule.
Delete an access rule given its id.
:param str accessrule_identifier: Identifier of the access rule to be
deleted.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse with `dict` result representing a `DeleteZoneAccessRuleResp` object
"""
if accessrule_identifier is None:
raise ValueError('accessrule_identifier must be provided')
headers = {}
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='delete_zone_access_rule')
headers.update(sdk_headers)
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
url = '/v1/{0}/zones/{1}/firewall/access_rules/rules/{2}'.format(
*self.encode_path_vars(self.crn, self.zone_identifier, accessrule_identifier))
request = self.prepare_request(method='DELETE',
url=url,
headers=headers)
response = self.send(request)
return response
def get_zone_access_rule(self,
accessrule_identifier: str,
**kwargs
) -> DetailedResponse:
"""
Get firewall access rule.
Get the details of a firewall access rule for a given zone under a given service
instance.
:param str accessrule_identifier: Identifier of firewall access rule for
the given zone.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse with `dict` result representing a `ZoneAccessRuleResp` object
"""
if accessrule_identifier is None:
raise ValueError('accessrule_identifier must be provided')
headers = {}
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='get_zone_access_rule')
headers.update(sdk_headers)
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
url = '/v1/{0}/zones/{1}/firewall/access_rules/rules/{2}'.format(
*self.encode_path_vars(self.crn, self.zone_identifier, accessrule_identifier))
request = self.prepare_request(method='GET',
url=url,
headers=headers)
response = self.send(request)
return response
def update_zone_access_rule(self,
accessrule_identifier: str,
*,
mode: str = None,
notes: str = None,
**kwargs
) -> DetailedResponse:
"""
Update firewall access rule.
Update an existing firewall access rule for a given zone under a given service
instance.
:param str accessrule_identifier: Identifier of firewall access rule.
:param str mode: (optional) The action to apply to a matched request.
:param str notes: (optional) A personal note about the rule. Typically used
as a reminder or explanation for the rule.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse with `dict` result representing a `ZoneAccessRuleResp` object
"""
if accessrule_identifier is None:
raise ValueError('accessrule_identifier must be provided')
headers = {}
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='update_zone_access_rule')
headers.update(sdk_headers)
data = {
'mode': mode,
'notes': notes
}
data = {k: v for (k, v) in data.items() if v is not None}
data = json.dumps(data)
headers['content-type'] = 'application/json'
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
url = '/v1/{0}/zones/{1}/firewall/access_rules/rules/{2}'.format(
*self.encode_path_vars(self.crn, self.zone_identifier, accessrule_identifier))
request = self.prepare_request(method='PATCH',
url=url,
headers=headers,
data=data)
response = self.send(request)
return response
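    # Hedged usage sketch (assumption): a typical lifecycle fetches a rule by
    # id, patches its mode or notes, and finally deletes it; the `service`
    # handle and the identifier below are hypothetical placeholders.
    #
    #   rule = service.get_zone_access_rule('<accessrule_id>').get_result()
    #   service.update_zone_access_rule('<accessrule_id>', mode='challenge')
    #   service.delete_zone_access_rule('<accessrule_id>')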
class ListAllZoneAccessRulesEnums:
"""
Enums for list_all_zone_access_rules parameters.
"""
class Mode(str, Enum):
"""
Search access rules by mode.
"""
BLOCK = 'block'
CHALLENGE = 'challenge'
WHITELIST = 'whitelist'
JS_CHALLENGE = 'js_challenge'
class ConfigurationTarget(str, Enum):
"""
Search access rules by configuration target.
"""
IP = 'ip'
IP_RANGE = 'ip_range'
ASN = 'asn'
COUNTRY = 'country'
class Order(str, Enum):
"""
Field by which to order list of access rules.
"""
CONFIGURATION_TARGET = 'configuration.target'
CONFIGURATION_VALUE = 'configuration.value'
MODE = 'mode'
class Direction(str, Enum):
"""
Direction in which to order results [ascending/descending order].
"""
ASC = 'asc'
DESC = 'desc'
class Match(str, Enum):
"""
        Whether to match all (all) or at least one search parameter (any).
"""
ANY = 'any'
ALL = 'all'
##############################################################################
# Models
##############################################################################
class DeleteZoneAccessRuleRespResult():
"""
Container for response information.
:attr str id: ID.
"""
def __init__(self,
id: str) -> None:
"""
Initialize a DeleteZoneAccessRuleRespResult object.
:param str id: ID.
"""
self.id = id
@classmethod
def from_dict(cls, _dict: Dict) -> 'DeleteZoneAccessRuleRespResult':
"""Initialize a DeleteZoneAccessRuleRespResult object from a json dictionary."""
args = {}
if 'id' in _dict:
args['id'] = _dict.get('id')
else:
raise ValueError('Required property \'id\' not present in DeleteZoneAccessRuleRespResult JSON')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a DeleteZoneAccessRuleRespResult object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'id') and self.id is not None:
_dict['id'] = self.id
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this DeleteZoneAccessRuleRespResult object."""
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'DeleteZoneAccessRuleRespResult') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'DeleteZoneAccessRuleRespResult') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class ListZoneAccessRulesRespResultInfo():
"""
Statistics of results.
:attr int page: Page number.
:attr int per_page: Number of results per page.
:attr int count: Number of results.
:attr int total_count: Total number of results.
"""
def __init__(self,
page: int,
per_page: int,
count: int,
total_count: int) -> None:
"""
Initialize a ListZoneAccessRulesRespResultInfo object.
:param int page: Page number.
:param int per_page: Number of results per page.
:param int count: Number of results.
:param int total_count: Total number of results.
"""
self.page = page
self.per_page = per_page
self.count = count
self.total_count = total_count
@classmethod
def from_dict(cls, _dict: Dict) -> 'ListZoneAccessRulesRespResultInfo':
"""Initialize a ListZoneAccessRulesRespResultInfo object from a json dictionary."""
args = {}
if 'page' in _dict:
args['page'] = _dict.get('page')
else:
raise ValueError('Required property \'page\' not present in ListZoneAccessRulesRespResultInfo JSON')
if 'per_page' in _dict:
args['per_page'] = _dict.get('per_page')
else:
raise ValueError('Required property \'per_page\' not present in ListZoneAccessRulesRespResultInfo JSON')
if 'count' in _dict:
args['count'] = _dict.get('count')
else:
raise ValueError('Required property \'count\' not present in ListZoneAccessRulesRespResultInfo JSON')
if 'total_count' in _dict:
args['total_count'] = _dict.get('total_count')
else:
raise ValueError('Required property \'total_count\' not present in ListZoneAccessRulesRespResultInfo JSON')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a ListZoneAccessRulesRespResultInfo object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'page') and self.page is not None:
_dict['page'] = self.page
if hasattr(self, 'per_page') and self.per_page is not None:
_dict['per_page'] = self.per_page
if hasattr(self, 'count') and self.count is not None:
_dict['count'] = self.count
if hasattr(self, 'total_count') and self.total_count is not None:
_dict['total_count'] = self.total_count
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this ListZoneAccessRulesRespResultInfo object."""
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'ListZoneAccessRulesRespResultInfo') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'ListZoneAccessRulesRespResultInfo') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class ZoneAccessRuleInputConfiguration():
"""
Configuration object specifying access rule.
:attr str target: The request property to target.
    :attr str value: The value for the selected target. For ip the value is a
      valid IP address. For ip_range the value specifies an IP range limited to
      /16 and /24. For asn the value is an AS number. For country the value is
      a country code for the country.
"""
def __init__(self,
target: str,
value: str) -> None:
"""
Initialize a ZoneAccessRuleInputConfiguration object.
:param str target: The request property to target.
        :param str value: The value for the selected target. For ip the value
          is a valid IP address. For ip_range the value specifies an IP range
          limited to /16 and /24. For asn the value is an AS number. For
          country the value is a country code for the country.
"""
self.target = target
self.value = value
@classmethod
def from_dict(cls, _dict: Dict) -> 'ZoneAccessRuleInputConfiguration':
"""Initialize a ZoneAccessRuleInputConfiguration object from a json dictionary."""
args = {}
if 'target' in _dict:
args['target'] = _dict.get('target')
else:
raise ValueError('Required property \'target\' not present in ZoneAccessRuleInputConfiguration JSON')
if 'value' in _dict:
args['value'] = _dict.get('value')
else:
raise ValueError('Required property \'value\' not present in ZoneAccessRuleInputConfiguration JSON')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a ZoneAccessRuleInputConfiguration object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'target') and self.target is not None:
_dict['target'] = self.target
if hasattr(self, 'value') and self.value is not None:
_dict['value'] = self.value
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this ZoneAccessRuleInputConfiguration object."""
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'ZoneAccessRuleInputConfiguration') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'ZoneAccessRuleInputConfiguration') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class TargetEnum(str, Enum):
"""
The request property to target.
"""
IP = 'ip'
IP_RANGE = 'ip_range'
ASN = 'asn'
COUNTRY = 'country'
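# Hedged round-trip sketch (assumption, not from the source): the generated
# models serialize to and from plain dicts, so a configuration can be rebuilt
# from its own to_dict() output; the values are illustrative only.
#
#   cfg = ZoneAccessRuleInputConfiguration(target='country', value='US')
#   assert ZoneAccessRuleInputConfiguration.from_dict(cfg.to_dict()).to_dict() == cfg.to_dict()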
class ZoneAccessRuleObjectConfiguration():
"""
configuration.
:attr str target: target.
    :attr str value: Value for the given target. For ip the value is a valid IP
      address. For ip_range the value specifies an IP range limited to /16 and
      /24. For asn the value is an AS number. For country the value is a
      country code for the country.
"""
def __init__(self,
target: str,
value: str) -> None:
"""
Initialize a ZoneAccessRuleObjectConfiguration object.
:param str target: target.
        :param str value: Value for the given target. For ip the value is a
          valid IP address. For ip_range the value specifies an IP range
          limited to /16 and /24. For asn the value is an AS number. For
          country the value is a country code for the country.
"""
self.target = target
self.value = value
@classmethod
def from_dict(cls, _dict: Dict) -> 'ZoneAccessRuleObjectConfiguration':
"""Initialize a ZoneAccessRuleObjectConfiguration object from a json dictionary."""
args = {}
if 'target' in _dict:
args['target'] = _dict.get('target')
else:
raise ValueError('Required property \'target\' not present in ZoneAccessRuleObjectConfiguration JSON')
if 'value' in _dict:
args['value'] = _dict.get('value')
else:
raise ValueError('Required property \'value\' not present in ZoneAccessRuleObjectConfiguration JSON')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a ZoneAccessRuleObjectConfiguration object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'target') and self.target is not None:
_dict['target'] = self.target
if hasattr(self, 'value') and self.value is not None:
_dict['value'] = self.value
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this ZoneAccessRuleObjectConfiguration object."""
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'ZoneAccessRuleObjectConfiguration') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'ZoneAccessRuleObjectConfiguration') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class TargetEnum(str, Enum):
"""
target.
"""
IP = 'ip'
IP_RANGE = 'ip_range'
ASN = 'asn'
COUNTRY = 'country'
class ZoneAccessRuleObjectScope():
"""
The scope definition of the access rule.
    :attr str type: The scope of the access rule, indicating whether it is
      applicable at the zone level ("zone") or inherited from the instance
      level ("account").
"""
def __init__(self,
type: str) -> None:
"""
Initialize a ZoneAccessRuleObjectScope object.
        :param str type: The scope of the access rule, indicating whether it is
          applicable at the zone level ("zone") or inherited from the instance
          level ("account").
"""
self.type = type
@classmethod
def from_dict(cls, _dict: Dict) -> 'ZoneAccessRuleObjectScope':
"""Initialize a ZoneAccessRuleObjectScope object from a json dictionary."""
args = {}
if 'type' in _dict:
args['type'] = _dict.get('type')
else:
raise ValueError('Required property \'type\' not present in ZoneAccessRuleObjectScope JSON')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a ZoneAccessRuleObjectScope object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'type') and self.type is not None:
_dict['type'] = self.type
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this ZoneAccessRuleObjectScope object."""
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'ZoneAccessRuleObjectScope') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'ZoneAccessRuleObjectScope') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class TypeEnum(str, Enum):
"""
        The scope of the access rule, indicating whether it is applicable at the
        zone level ("zone") or inherited from the instance level ("account").
"""
ACCOUNT = 'account'
ZONE = 'zone'
class DeleteZoneAccessRuleResp():
"""
delete access rule response.
:attr bool success: Operation success flag.
:attr List[List[str]] errors: Array of errors encountered.
:attr List[List[str]] messages: Array of messages returned.
:attr DeleteZoneAccessRuleRespResult result: Container for response information.
"""
def __init__(self,
success: bool,
errors: List[List[str]],
messages: List[List[str]],
result: 'DeleteZoneAccessRuleRespResult') -> None:
"""
Initialize a DeleteZoneAccessRuleResp object.
:param bool success: Operation success flag.
:param List[List[str]] errors: Array of errors encountered.
:param List[List[str]] messages: Array of messages returned.
:param DeleteZoneAccessRuleRespResult result: Container for response
information.
"""
self.success = success
self.errors = errors
self.messages = messages
self.result = result
@classmethod
def from_dict(cls, _dict: Dict) -> 'DeleteZoneAccessRuleResp':
"""Initialize a DeleteZoneAccessRuleResp object from a json dictionary."""
args = {}
if 'success' in _dict:
args['success'] = _dict.get('success')
else:
raise ValueError('Required property \'success\' not present in DeleteZoneAccessRuleResp JSON')
if 'errors' in _dict:
args['errors'] = _dict.get('errors')
else:
raise ValueError('Required property \'errors\' not present in DeleteZoneAccessRuleResp JSON')
if 'messages' in _dict:
args['messages'] = _dict.get('messages')
else:
raise ValueError('Required property \'messages\' not present in DeleteZoneAccessRuleResp JSON')
if 'result' in _dict:
args['result'] = DeleteZoneAccessRuleRespResult.from_dict(_dict.get('result'))
else:
raise ValueError('Required property \'result\' not present in DeleteZoneAccessRuleResp JSON')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a DeleteZoneAccessRuleResp object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'success') and self.success is not None:
_dict['success'] = self.success
if hasattr(self, 'errors') and self.errors is not None:
_dict['errors'] = self.errors
if hasattr(self, 'messages') and self.messages is not None:
_dict['messages'] = self.messages
if hasattr(self, 'result') and self.result is not None:
_dict['result'] = self.result.to_dict()
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this DeleteZoneAccessRuleResp object."""
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'DeleteZoneAccessRuleResp') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'DeleteZoneAccessRuleResp') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class ListZoneAccessRulesResp():
"""
list access rules response.
:attr bool success: Operation success flag.
:attr List[List[str]] errors: Array of errors encountered.
:attr List[List[str]] messages: Array of messages returned.
:attr List[ZoneAccessRuleObject] result: Container for response information.
:attr ListZoneAccessRulesRespResultInfo result_info: Statistics of results.
"""
def __init__(self,
success: bool,
errors: List[List[str]],
messages: List[List[str]],
result: List['ZoneAccessRuleObject'],
result_info: 'ListZoneAccessRulesRespResultInfo') -> None:
"""
Initialize a ListZoneAccessRulesResp object.
:param bool success: Operation success flag.
:param List[List[str]] errors: Array of errors encountered.
:param List[List[str]] messages: Array of messages returned.
:param List[ZoneAccessRuleObject] result: Container for response
information.
:param ListZoneAccessRulesRespResultInfo result_info: Statistics of
results.
"""
self.success = success
self.errors = errors
self.messages = messages
self.result = result
self.result_info = result_info
@classmethod
def from_dict(cls, _dict: Dict) -> 'ListZoneAccessRulesResp':
"""Initialize a ListZoneAccessRulesResp object from a json dictionary."""
args = {}
if 'success' in _dict:
args['success'] = _dict.get('success')
else:
raise ValueError('Required property \'success\' not present in ListZoneAccessRulesResp JSON')
if 'errors' in _dict:
args['errors'] = _dict.get('errors')
else:
raise ValueError('Required property \'errors\' not present in ListZoneAccessRulesResp JSON')
if 'messages' in _dict:
args['messages'] = _dict.get('messages')
else:
raise ValueError('Required property \'messages\' not present in ListZoneAccessRulesResp JSON')
if 'result' in _dict:
args['result'] = [ZoneAccessRuleObject.from_dict(x) for x in _dict.get('result')]
else:
raise ValueError('Required property \'result\' not present in ListZoneAccessRulesResp JSON')
if 'result_info' in _dict:
args['result_info'] = ListZoneAccessRulesRespResultInfo.from_dict(_dict.get('result_info'))
else:
raise ValueError('Required property \'result_info\' not present in ListZoneAccessRulesResp JSON')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a ListZoneAccessRulesResp object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'success') and self.success is not None:
_dict['success'] = self.success
if hasattr(self, 'errors') and self.errors is not None:
_dict['errors'] = self.errors
if hasattr(self, 'messages') and self.messages is not None:
_dict['messages'] = self.messages
if hasattr(self, 'result') and self.result is not None:
_dict['result'] = [x.to_dict() for x in self.result]
if hasattr(self, 'result_info') and self.result_info is not None:
_dict['result_info'] = self.result_info.to_dict()
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this ListZoneAccessRulesResp object."""
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'ListZoneAccessRulesResp') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'ListZoneAccessRulesResp') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class ZoneAccessRuleObject():
"""
access rule object.
:attr str id: Identifier of the firewall access rule.
:attr str notes: A personal note about the rule. Typically used as a reminder or
explanation for the rule.
:attr List[str] allowed_modes: List of modes that are allowed.
:attr str mode: The action to be applied to a request matching the access rule.
:attr ZoneAccessRuleObjectScope scope: (optional) The scope definition of the
access rule.
:attr str created_on: The creation date-time of the firewall access rule.
:attr str modified_on: The modification date-time of the firewall access rule.
:attr ZoneAccessRuleObjectConfiguration configuration: configuration.
"""
def __init__(self,
id: str,
notes: str,
allowed_modes: List[str],
mode: str,
created_on: str,
modified_on: str,
configuration: 'ZoneAccessRuleObjectConfiguration',
*,
scope: 'ZoneAccessRuleObjectScope' = None) -> None:
"""
Initialize a ZoneAccessRuleObject object.
:param str id: Identifier of the firewall access rule.
:param str notes: A personal note about the rule. Typically used as a
reminder or explanation for the rule.
:param List[str] allowed_modes: List of modes that are allowed.
:param str mode: The action to be applied to a request matching the access
rule.
:param str created_on: The creation date-time of the firewall access rule.
:param str modified_on: The modification date-time of the firewall access
rule.
:param ZoneAccessRuleObjectConfiguration configuration: configuration.
:param ZoneAccessRuleObjectScope scope: (optional) The scope definition of
the access rule.
"""
self.id = id
self.notes = notes
self.allowed_modes = allowed_modes
self.mode = mode
self.scope = scope
self.created_on = created_on
self.modified_on = modified_on
self.configuration = configuration
@classmethod
def from_dict(cls, _dict: Dict) -> 'ZoneAccessRuleObject':
"""Initialize a ZoneAccessRuleObject object from a json dictionary."""
args = {}
if 'id' in _dict:
args['id'] = _dict.get('id')
else:
raise ValueError('Required property \'id\' not present in ZoneAccessRuleObject JSON')
if 'notes' in _dict:
args['notes'] = _dict.get('notes')
else:
raise ValueError('Required property \'notes\' not present in ZoneAccessRuleObject JSON')
if 'allowed_modes' in _dict:
args['allowed_modes'] = _dict.get('allowed_modes')
else:
raise ValueError('Required property \'allowed_modes\' not present in ZoneAccessRuleObject JSON')
if 'mode' in _dict:
args['mode'] = _dict.get('mode')
else:
raise ValueError('Required property \'mode\' not present in ZoneAccessRuleObject JSON')
if 'scope' in _dict:
args['scope'] = ZoneAccessRuleObjectScope.from_dict(_dict.get('scope'))
if 'created_on' in _dict:
args['created_on'] = _dict.get('created_on')
else:
raise ValueError('Required property \'created_on\' not present in ZoneAccessRuleObject JSON')
if 'modified_on' in _dict:
args['modified_on'] = _dict.get('modified_on')
else:
raise ValueError('Required property \'modified_on\' not present in ZoneAccessRuleObject JSON')
if 'configuration' in _dict:
args['configuration'] = ZoneAccessRuleObjectConfiguration.from_dict(_dict.get('configuration'))
else:
raise ValueError('Required property \'configuration\' not present in ZoneAccessRuleObject JSON')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a ZoneAccessRuleObject object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'id') and self.id is not None:
_dict['id'] = self.id
if hasattr(self, 'notes') and self.notes is not None:
_dict['notes'] = self.notes
if hasattr(self, 'allowed_modes') and self.allowed_modes is not None:
_dict['allowed_modes'] = self.allowed_modes
if hasattr(self, 'mode') and self.mode is not None:
_dict['mode'] = self.mode
if hasattr(self, 'scope') and self.scope is not None:
_dict['scope'] = self.scope.to_dict()
if hasattr(self, 'created_on') and self.created_on is not None:
_dict['created_on'] = self.created_on
if hasattr(self, 'modified_on') and self.modified_on is not None:
_dict['modified_on'] = self.modified_on
if hasattr(self, 'configuration') and self.configuration is not None:
_dict['configuration'] = self.configuration.to_dict()
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this ZoneAccessRuleObject object."""
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'ZoneAccessRuleObject') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'ZoneAccessRuleObject') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class AllowedModesEnum(str, Enum):
"""
allowed_modes.
"""
BLOCK = 'block'
CHALLENGE = 'challenge'
WHITELIST = 'whitelist'
JS_CHALLENGE = 'js_challenge'
class ModeEnum(str, Enum):
"""
The action to be applied to a request matching the access rule.
"""
BLOCK = 'block'
CHALLENGE = 'challenge'
WHITELIST = 'whitelist'
JS_CHALLENGE = 'js_challenge'
class ZoneAccessRuleResp():
"""
access rule response.
:attr bool success: Operation success flag.
:attr List[List[str]] errors: Array of errors encountered.
:attr List[List[str]] messages: Array of messages returned.
:attr ZoneAccessRuleObject result: access rule object.
"""
def __init__(self,
success: bool,
errors: List[List[str]],
messages: List[List[str]],
result: 'ZoneAccessRuleObject') -> None:
"""
Initialize a ZoneAccessRuleResp object.
:param bool success: Operation success flag.
:param List[List[str]] errors: Array of errors encountered.
:param List[List[str]] messages: Array of messages returned.
:param ZoneAccessRuleObject result: access rule object.
"""
self.success = success
self.errors = errors
self.messages = messages
self.result = result
@classmethod
def from_dict(cls, _dict: Dict) -> 'ZoneAccessRuleResp':
"""Initialize a ZoneAccessRuleResp object from a json dictionary."""
args = {}
if 'success' in _dict:
args['success'] = _dict.get('success')
else:
raise ValueError('Required property \'success\' not present in ZoneAccessRuleResp JSON')
if 'errors' in _dict:
args['errors'] = _dict.get('errors')
else:
raise ValueError('Required property \'errors\' not present in ZoneAccessRuleResp JSON')
if 'messages' in _dict:
args['messages'] = _dict.get('messages')
else:
raise ValueError('Required property \'messages\' not present in ZoneAccessRuleResp JSON')
if 'result' in _dict:
args['result'] = ZoneAccessRuleObject.from_dict(_dict.get('result'))
else:
raise ValueError('Required property \'result\' not present in ZoneAccessRuleResp JSON')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a ZoneAccessRuleResp object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'success') and self.success is not None:
_dict['success'] = self.success
if hasattr(self, 'errors') and self.errors is not None:
_dict['errors'] = self.errors
if hasattr(self, 'messages') and self.messages is not None:
_dict['messages'] = self.messages
if hasattr(self, 'result') and self.result is not None:
_dict['result'] = self.result.to_dict()
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this ZoneAccessRuleResp object."""
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'ZoneAccessRuleResp') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'ZoneAccessRuleResp') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
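# Hedged end-to-end sketch (assumption, not part of the generated module): a raw
# JSON payload from the API can be materialized into the typed models defined
# above; the payload below is illustrative only and does not come from the source.
#
#   payload = {
#       'success': True,
#       'errors': [],
#       'messages': [],
#       'result': {
#           'id': '92f17202ed8bd63d69a66b86a49a8f6b',
#           'notes': 'This rule is set because of an event',
#           'allowed_modes': ['block', 'challenge', 'whitelist', 'js_challenge'],
#           'mode': 'block',
#           'created_on': '2014-01-01T05:20:00.12345Z',
#           'modified_on': '2014-01-01T05:20:00.12345Z',
#           'configuration': {'target': 'ip', 'value': '198.51.100.4'}
#       }
#   }
#   resp = ZoneAccessRuleResp.from_dict(payload)
#   assert resp.result.configuration.target == 'ip'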
avg_line_length: 38.730092
max_line_length: 119
alphanum_fraction: 0.608635
hexsha: 208dba7149d0bd3aeab96a0ae4db722a837a3917
size: 127
ext: py
lang: Python
max_stars_repo_path: 01-python/source code/01/07.py
max_stars_repo_name: lizhangjie316/ComputerVision
max_stars_repo_head_hexsha: 86d82358bd160074d154773df0284e1154a6d077
max_stars_repo_licenses: ["Apache-2.0"]
max_stars_count: 1
max_stars_repo_stars_event_min_datetime: 2019-11-05T08:38:03.000Z
max_stars_repo_stars_event_max_datetime: 2019-11-05T08:38:03.000Z
max_issues_repo_path: 01-python/source code/01/07.py
max_issues_repo_name: lizhangjie316/ComputerVision
max_issues_repo_head_hexsha: 86d82358bd160074d154773df0284e1154a6d077
max_issues_repo_licenses: ["Apache-2.0"]
max_issues_count: 6
max_issues_repo_issues_event_min_datetime: 2020-11-18T22:13:33.000Z
max_issues_repo_issues_event_max_datetime: 2022-03-12T00:04:02.000Z
max_forks_repo_path: 01-python/source code/01/07.py
max_forks_repo_name: lizhangjie316/ComputerVision
max_forks_repo_head_hexsha: 86d82358bd160074d154773df0284e1154a6d077
max_forks_repo_licenses: ["Apache-2.0"]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
a = [1,2,3,4,5]
multi_dia_a = [[1,2,3],
[2,3,4],
[3,4,5]]
print(a[2])
print(multi_dia_a[2][2])
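# A brief hedged note (not in the original snippet): a[2] prints 3 because
# Python lists are zero-indexed, and multi_dia_a[2][2] first selects the third
# row and then the third element of that row, printing 5.
#
#   print(a[-1])          # negative indices count from the end, so this prints 5
#   print(multi_dia_a[1]) # a whole row: [2, 3, 4]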
avg_line_length: 14.111111
max_line_length: 24
alphanum_fraction: 0.401575
hexsha: 03031a457ff622eaf8a03d92492a20933fee85da
size: 10705
ext: py
lang: Python
max_stars_repo_path: tests/contrib/utils/base_gcp_system_test_case.py
max_stars_repo_name: Nipica/airflow
max_stars_repo_head_hexsha: 211a71f8a6b9d808bd03af84bd77bf8ff0ef247f
max_stars_repo_licenses: ["Apache-2.0"]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: tests/contrib/utils/base_gcp_system_test_case.py
max_issues_repo_name: Nipica/airflow
max_issues_repo_head_hexsha: 211a71f8a6b9d808bd03af84bd77bf8ff0ef247f
max_issues_repo_licenses: ["Apache-2.0"]
max_issues_count: 1
max_issues_repo_issues_event_min_datetime: 2019-01-14T17:12:47.000Z
max_issues_repo_issues_event_max_datetime: 2019-01-14T17:12:47.000Z
max_forks_repo_path: tests/contrib/utils/base_gcp_system_test_case.py
max_forks_repo_name: shubhamod/airflow
max_forks_repo_head_hexsha: 04f4622656656d4c55b69d460bbd2ed1379810c4
max_forks_repo_licenses: ["Apache-2.0"]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import subprocess
import unittest
from glob import glob
from shutil import move
from tempfile import mkdtemp
from airflow.utils import db as db_utils
from airflow import models, settings, AirflowException, LoggingMixin
from airflow.utils.timezone import datetime
from tests.contrib.utils.gcp_authenticator import GcpAuthenticator
from tests.contrib.utils.run_once_decorator import run_once
AIRFLOW_MAIN_FOLDER = os.path.realpath(os.path.join(
os.path.dirname(os.path.realpath(__file__)),
os.pardir, os.pardir, os.pardir))
AIRFLOW_PARENT_FOLDER = os.path.realpath(os.path.join(AIRFLOW_MAIN_FOLDER,
os.pardir, os.pardir, os.pardir))
ENV_FILE_RETRIEVER = os.path.join(AIRFLOW_PARENT_FOLDER,
"get_system_test_environment_variables.py")
# Retrieve environment variables from the parent directory retriever - it should be
# in the path ${AIRFLOW_SOURCE_DIR}/../../get_system_test_environment_variables.py
# and it should print all the variables in the form key=value to stdout
class RetrieveVariables:
@staticmethod
@run_once
def retrieve_variables():
if os.path.isfile(ENV_FILE_RETRIEVER):
if os.environ.get('AIRFLOW__CORE__UNIT_TEST_MODE'):
raise Exception("Please unset the AIRFLOW__CORE__UNIT_TEST_MODE")
variables = subprocess.check_output([ENV_FILE_RETRIEVER]).decode("utf-8")
print("Applying variables retrieved")
for line in variables.split("\n"):
try:
variable, key = line.split("=")
except ValueError:
continue
print("{}={}".format(variable, key))
os.environ[variable] = key
RetrieveVariables.retrieve_variables()
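# Hedged sketch (assumption): the retriever script referenced above is expected
# to print plain key=value lines, one variable per line, for example:
#
#   GCP_PROJECT_ID=my-test-project
#   GCP_CONFIG_DIR=/files/airflow-breeze-config
#
# The names shown are illustrative only; the key=value format is what the
# parsing loop in RetrieveVariables.retrieve_variables() relies on.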
DEFAULT_DATE = datetime(2015, 1, 1)
CONTRIB_OPERATORS_EXAMPLES_DAG_FOLDER = os.path.join(
AIRFLOW_MAIN_FOLDER, "airflow", "contrib", "example_dags")
OPERATORS_EXAMPLES_DAG_FOLDER = os.path.join(
AIRFLOW_MAIN_FOLDER, "airflow", "example_dags")
AIRFLOW_HOME = os.environ.get('AIRFLOW_HOME',
os.path.join(os.path.expanduser('~'), 'airflow'))
DAG_FOLDER = os.path.join(AIRFLOW_HOME, "dags")
SKIP_TEST_WARNING = """
This test ({}) is only run in a GCP-system-tests-enabled environment.
You can enable it in one of two ways:
* Set the GCP_CONFIG_DIR environment variable to point to the GCP configuration
  directory which keeps the variables.env file with environment variables to set
  and a keys directory which keeps service account keys in .json format
* Run this test within an automated environment where the config directory
  is checked out next to the airflow one.
""".format(__file__)
class BaseGcpSystemTestCase(unittest.TestCase, LoggingMixin):
def __init__(self,
method_name,
gcp_key,
project_extra=None):
super(BaseGcpSystemTestCase, self).__init__(methodName=method_name)
self.gcp_authenticator = GcpAuthenticator(gcp_key=gcp_key,
project_extra=project_extra)
self.setup_called = False
@staticmethod
def skip_check(key_name):
return GcpAuthenticator(key_name).full_key_path is None
def setUp(self):
self.gcp_authenticator.gcp_store_authentication()
self.gcp_authenticator.gcp_authenticate()
        # We checked that authentication works. Now we revoke it to make
        # sure we are not relying on the default authentication
self.gcp_authenticator.gcp_revoke_authentication()
self.setup_called = True
# noinspection PyPep8Naming
def tearDown(self):
self.gcp_authenticator.gcp_restore_authentication()
class DagGcpSystemTestCase(BaseGcpSystemTestCase):
def __init__(self,
method_name,
dag_id,
gcp_key,
dag_name=None,
require_local_executor=False,
example_dags_folder=CONTRIB_OPERATORS_EXAMPLES_DAG_FOLDER,
project_extra=None):
super(DagGcpSystemTestCase, self).__init__(method_name=method_name,
gcp_key=gcp_key,
project_extra=project_extra)
self.dag_id = dag_id
self.dag_name = self.dag_id + '.py' if not dag_name else dag_name
self.example_dags_folder = example_dags_folder
self.require_local_executor = require_local_executor
self.temp_dir = None
@staticmethod
def _get_dag_folder():
return DAG_FOLDER
@staticmethod
def _get_files_to_link(path):
"""
Returns all file names (note - file names not paths)
that have the same base name as the .py dag file (for example dag_name.sql etc.)
:param path: path to the dag file.
:return: list of files matching the base name
"""
prefix, ext = os.path.splitext(path)
        assert ext == '.py', "Dag file should be a .py file but has the {} extension".format(ext)
files_to_link = []
for file in glob(prefix + ".*"):
files_to_link.append(os.path.basename(file))
return files_to_link
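    # Hedged example (assumption, not from the source): for a DAG file such as
    # /dags/example_gcp.py, the glob above also picks up sibling files that
    # share the base name, e.g. example_gcp.sql or example_gcp.json, and the
    # method returns only their base names, e.g. ['example_gcp.py', 'example_gcp.sql'].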
def _symlink_dag_and_associated_files(self, remove=False):
target_folder = self._get_dag_folder()
source_path = os.path.join(self.example_dags_folder, self.dag_name)
for file_name in self._get_files_to_link(source_path):
source_path = os.path.join(self.example_dags_folder, file_name)
target_path = os.path.join(target_folder, file_name)
if remove:
try:
self.log.info("Remove symlink: {} -> {} ".format(
target_path, source_path))
os.remove(target_path)
except OSError:
pass
else:
if not os.path.exists(target_path):
self.log.info("Symlink: {} -> {} ".format(target_path, source_path))
os.symlink(source_path, target_path)
else:
self.log.info("Symlink {} already exists. Not symlinking it.".
format(target_path))
def _store_dags_to_temporary_directory(self):
dag_folder = self._get_dag_folder()
self.temp_dir = mkdtemp()
self.log.info("Storing DAGS from {} to temporary directory {}".
format(dag_folder, self.temp_dir))
try:
os.mkdir(dag_folder)
except OSError:
pass
for file in os.listdir(dag_folder):
move(os.path.join(dag_folder, file), os.path.join(self.temp_dir, file))
def _restore_dags_from_temporary_directory(self):
dag_folder = self._get_dag_folder()
self.log.info("Restoring DAGS to {} from temporary directory {}"
.format(dag_folder, self.temp_dir))
for file in os.listdir(self.temp_dir):
move(os.path.join(self.temp_dir, file), os.path.join(dag_folder, file))
def _run_dag(self):
self.log.info("Attempting to run DAG: {}".format(self.dag_id))
if not self.setup_called:
raise AirflowException("Please make sure to call super.setUp() in your "
"test class!")
dag_folder = self._get_dag_folder()
dag_bag = models.DagBag(dag_folder=dag_folder, include_examples=False)
self.args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}
dag = dag_bag.get_dag(self.dag_id)
if dag is None:
raise AirflowException(
"The Dag {} could not be found. It's either an import problem or "
"the dag {} was not symlinked to the DAGs folder. "
"The content of the {} folder is {}".
format(self.dag_id,
self.dag_id + ".py",
dag_folder,
os.listdir(dag_folder)))
dag.clear(reset_dag_runs=True)
dag.run(ignore_first_depends_on_past=True, verbose=True)
@staticmethod
def _check_local_executor_setup():
postgres_path = os.path.realpath(os.path.join(
AIRFLOW_MAIN_FOLDER,
"tests", "contrib", "operators", "postgres_local_executor.cfg"))
if postgres_path != os.environ.get('AIRFLOW_CONFIG'):
raise AirflowException(
"""
Please set AIRFLOW_CONFIG variable to '{}'
and make sure you have a Postgres server running locally and
airflow/airflow.db database created.
You can create the database via these commands:
'createuser root'
                'createdb airflow/airflow.db'
""".format(postgres_path))
# noinspection PyPep8Naming
def setUp(self):
if self.require_local_executor:
self._check_local_executor_setup()
try:
            # We want to avoid random errors while the database gets reset - those
            # are apparently triggered by the parser trying to parse DAGs while
            # the tables are dropped. We move the DAGs temporarily out of the dags
            # folder and move them back after the reset.
self._store_dags_to_temporary_directory()
try:
db_utils.resetdb(settings.RBAC)
finally:
self._restore_dags_from_temporary_directory()
self._symlink_dag_and_associated_files()
super(DagGcpSystemTestCase, self).setUp()
except Exception as e:
# In case of any error during setup - restore the authentication
self.gcp_authenticator.gcp_restore_authentication()
raise e
def tearDown(self):
self._symlink_dag_and_associated_files(remove=True)
super(DagGcpSystemTestCase, self).tearDown()
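# Hedged usage sketch (assumption, not part of this module): a concrete system
# test would subclass DagGcpSystemTestCase, point it at an example DAG, and run
# it; the DAG id and key file name below are hypothetical.
#
#   class GcpExampleDagSystemTest(DagGcpSystemTestCase):
#       def __init__(self, method_name='test_run_example_dag'):
#           super(GcpExampleDagSystemTest, self).__init__(
#               method_name=method_name,
#               dag_id='example_gcp_operator',
#               gcp_key='gcp_example_key.json')
#
#       def test_run_example_dag(self):
#           self._run_dag()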
avg_line_length: 41.173077
max_line_length: 89
alphanum_fraction: 0.645026